This commit is contained in:
yunwei37
2023-08-15 19:20:05 +00:00
parent ae50b3d00f
commit 5cbe03808d
8 changed files with 147 additions and 127 deletions

View File

@@ -24,13 +24,13 @@ INCLUDES := -I$(OUTPUT) -I../../libbpf/include/uapi -I$(dir $(VMLINUX))
CFLAGS := -g -Wall
ALL_LDFLAGS := $(LDFLAGS) $(EXTRA_LDFLAGS)
APPS = memleak # minimal minimal_legacy uprobe kprobe fentry usdt sockfilter tc ksyscall
APPS = # minimal minimal_legacy bootstrap uprobe kprobe fentry usdt sockfilter tc ksyscall
CARGO ?= $(shell which cargo)
ifeq ($(strip $(CARGO)),)
BZS_APPS :=
else
BZS_APPS := # profile
BZS_APPS := memleak
APPS += $(BZS_APPS)
# Required by libblazesym
ALL_LDFLAGS += -lrt -ldl -lpthread -lm

View File

@@ -302,26 +302,101 @@ int BPF_KRETPROBE(pvalloc_exit)
return gen_alloc_exit(ctx);
}
SEC("tracepoint/kmem/kmalloc")
int memleak__kmalloc(struct trace_event_raw_kmem_alloc *ctx)
/**
 * commit 11e9734bcb6a("mm/slab_common: unify NUMA and UMA version of
 * tracepoints") drops kmem_alloc event class, rename kmem_alloc_node to
 * kmem_alloc, so `trace_event_raw_kmem_alloc_node` no longer exists.
 * see:
 * https://github.com/torvalds/linux/commit/11e9734bcb6a
 */
/* Minimal CO-RE "shadow" of the old tracepoint context: only the two fields
 * this program reads. The ___x suffix keeps the name distinct from the real
 * vmlinux.h type, and preserve_access_index lets libbpf relocate the field
 * offsets against whatever layout the running kernel actually has. */
struct trace_event_raw_kmem_alloc_node___x {
const void *ptr;
size_t bytes_alloc;
} __attribute__((preserve_access_index));
/* True when the running kernel still defines the *_alloc_node tracepoint
 * event class, i.e. kernels predating commit 11e9734bcb6a. Resolved at
 * load time via CO-RE, so the branch is constant for a given kernel. */
static __always_inline bool has_kmem_alloc_node(void)
{
if (bpf_core_type_exists(struct trace_event_raw_kmem_alloc_node___x))
return true;
return false;
}
/**
 * commit 2c1d697fb8ba("mm/slab_common: drop kmem_alloc & avoid dereferencing
 * fields when not using") drops kmem_alloc event class. As a result,
 * `trace_event_raw_kmem_alloc` is removed, `trace_event_raw_kmalloc` and
 * `trace_event_raw_kmem_cache_alloc` are added.
 * see:
 * https://github.com/torvalds/linux/commit/2c1d697fb8ba
 */
/* CO-RE shadow types for both the pre- and post-2c1d697fb8ba tracepoint
 * contexts; each declares only the fields this program reads so that
 * bpf_core_type_exists()/BPF_CORE_READ() can probe and relocate them. */
struct trace_event_raw_kmem_alloc___x {
const void *ptr;
size_t bytes_alloc;
} __attribute__((preserve_access_index));
struct trace_event_raw_kmalloc___x {
const void *ptr;
size_t bytes_alloc;
} __attribute__((preserve_access_index));
struct trace_event_raw_kmem_cache_alloc___x {
const void *ptr;
size_t bytes_alloc;
} __attribute__((preserve_access_index));
/* True when the running kernel still has the old unified kmem_alloc event
 * class (i.e. before commit 2c1d697fb8ba split it per-tracepoint). */
static __always_inline bool has_kmem_alloc(void)
{
if (bpf_core_type_exists(struct trace_event_raw_kmem_alloc___x))
return true;
return false;
}
SEC("tracepoint/kmem/kmalloc")
int memleak__kmalloc(void *ctx)
{
const void *ptr;
size_t bytes_alloc;
if (has_kmem_alloc()) {
struct trace_event_raw_kmem_alloc___x *args = ctx;
ptr = BPF_CORE_READ(args, ptr);
bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
} else {
struct trace_event_raw_kmalloc___x *args = ctx;
ptr = BPF_CORE_READ(args, ptr);
bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
}
if (wa_missing_free)
gen_free_enter(ctx->ptr);
gen_free_enter(ptr);
gen_alloc_enter(ctx->bytes_alloc);
gen_alloc_enter(bytes_alloc);
return gen_alloc_exit2(ctx, (u64)(ctx->ptr));
return gen_alloc_exit2(ctx, (u64)ptr);
}
SEC("tracepoint/kmem/kmalloc_node")
int memleak__kmalloc_node(struct trace_event_raw_kmem_alloc_node *ctx)
int memleak__kmalloc_node(void *ctx)
{
if (wa_missing_free)
gen_free_enter(ctx->ptr);
const void *ptr;
size_t bytes_alloc;
gen_alloc_enter(ctx->bytes_alloc);
if (has_kmem_alloc_node()) {
struct trace_event_raw_kmem_alloc_node___x *args = ctx;
ptr = BPF_CORE_READ(args, ptr);
bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
return gen_alloc_exit2(ctx, (u64)(ctx->ptr));
if (wa_missing_free)
gen_free_enter(ptr);
gen_alloc_enter( bytes_alloc);
return gen_alloc_exit2(ctx, (u64)ptr);
} else {
/* tracepoint is disabled if not exist, avoid compile warning */
return 0;
}
}
SEC("tracepoint/kmem/kfree")
@@ -341,25 +416,50 @@ int memleak__kfree(void *ctx)
}
SEC("tracepoint/kmem/kmem_cache_alloc")
int memleak__kmem_cache_alloc(struct trace_event_raw_kmem_alloc *ctx)
int memleak__kmem_cache_alloc(void *ctx)
{
const void *ptr;
size_t bytes_alloc;
if (has_kmem_alloc()) {
struct trace_event_raw_kmem_alloc___x *args = ctx;
ptr = BPF_CORE_READ(args, ptr);
bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
} else {
struct trace_event_raw_kmem_cache_alloc___x *args = ctx;
ptr = BPF_CORE_READ(args, ptr);
bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
}
if (wa_missing_free)
gen_free_enter(ctx->ptr);
gen_free_enter(ptr);
gen_alloc_enter(ctx->bytes_alloc);
gen_alloc_enter(bytes_alloc);
return gen_alloc_exit2(ctx, (u64)(ctx->ptr));
return gen_alloc_exit2(ctx, (u64)ptr);
}
SEC("tracepoint/kmem/kmem_cache_alloc_node")
int memleak__kmem_cache_alloc_node(struct trace_event_raw_kmem_alloc_node *ctx)
int memleak__kmem_cache_alloc_node(void *ctx)
{
if (wa_missing_free)
gen_free_enter(ctx->ptr);
const void *ptr;
size_t bytes_alloc;
gen_alloc_enter(ctx->bytes_alloc);
if (has_kmem_alloc_node()) {
struct trace_event_raw_kmem_alloc_node___x *args = ctx;
ptr = BPF_CORE_READ(args, ptr);
bytes_alloc = BPF_CORE_READ(args, bytes_alloc);
return gen_alloc_exit2(ctx, (u64)(ctx->ptr));
if (wa_missing_free)
gen_free_enter(ptr);
gen_alloc_enter(bytes_alloc);
return gen_alloc_exit2(ctx, (u64)ptr);
} else {
/* tracepoint is disabled if not exist, avoid compile warning */
return 0;
}
}
SEC("tracepoint/kmem/kmem_cache_free")
@@ -406,4 +506,4 @@ int memleak__percpu_free_percpu(struct trace_event_raw_percpu_free_percpu *ctx)
return gen_free_enter(ctx->ptr);
}
char LICENSE[] SEC("license") = "GPL";
char LICENSE[] SEC("license") = "GPL";

View File

@@ -22,11 +22,8 @@
#include "memleak.h"
#include "memleak.skel.h"
#include "trace_helpers.h"
#ifdef USE_BLAZESYM
#include "blazesym.h"
#endif
static struct env {
int interval;
@@ -86,6 +83,10 @@ struct allocation {
struct allocation_node* allocations;
};
#ifndef NSEC_PER_SEC
#define NSEC_PER_SEC 1000000000L
#endif
#define __ATTACH_UPROBE(skel, sym_name, prog_name, is_retprobe) \
do { \
LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts, \
@@ -132,13 +133,8 @@ static int event_notify(int fd, uint64_t event);
static pid_t fork_sync_exec(const char *command, int fd);
#ifdef USE_BLAZESYM
static void print_stack_frame_by_blazesym(size_t frame, uint64_t addr, const blazesym_csym *sym);
static void print_stack_frames_by_blazesym();
#else
static void print_stack_frames_by_ksyms();
static void print_stack_frames_by_syms_cache();
#endif
static int print_stack_frames(struct allocation *allocs, size_t nr_allocs, int stack_traces_fd);
static int alloc_size_compare(const void *a, const void *b);
@@ -146,7 +142,6 @@ static int alloc_size_compare(const void *a, const void *b);
static int print_outstanding_allocs(int allocs_fd, int stack_traces_fd);
static int print_outstanding_combined_allocs(int combined_allocs_fd, int stack_traces_fd);
static bool has_kernel_node_tracepoints();
static void disable_kernel_node_tracepoints(struct memleak_bpf *skel);
static void disable_kernel_percpu_tracepoints(struct memleak_bpf *skel);
static void disable_kernel_tracepoints(struct memleak_bpf *skel);
@@ -210,13 +205,8 @@ static struct sigaction sig_action = {
static int child_exec_event_fd = -1;
#ifdef USE_BLAZESYM
static blazesym *symbolizer;
static sym_src_cfg src_cfg;
#else
struct syms_cache *syms_cache;
struct ksyms *ksyms;
#endif
static void (*print_stack_frames_func)();
static uint64_t *stack;
@@ -225,6 +215,14 @@ static struct allocation *allocs;
static const char default_object[] = "libc.so.6";
/**
 * get_ktime_ns - current CLOCK_MONOTONIC time in nanoseconds.
 *
 * Returns 0 ns on a (highly unlikely) clock_gettime() failure instead of
 * reading an uninitialized struct timespec.
 *
 * tv_sec is widened to unsigned long long BEFORE the multiply: on ILP32
 * targets `time_t * NSEC_PER_SEC` would otherwise overflow a 32-bit long.
 */
unsigned long long get_ktime_ns(void)
{
	struct timespec ts = {0};

	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
		return 0;
	return (unsigned long long)ts.tv_sec * NSEC_PER_SEC + (unsigned long long)ts.tv_nsec;
}
int main(int argc, char *argv[])
{
int ret = 0;
@@ -304,7 +302,6 @@ int main(int argc, char *argv[])
goto cleanup;
}
#ifdef USE_BLAZESYM
if (env.pid < 0) {
src_cfg.src_type = SRC_T_KERNEL;
src_cfg.params.kernel.kallsyms = NULL;
@@ -313,7 +310,6 @@ int main(int argc, char *argv[])
src_cfg.src_type = SRC_T_PROCESS;
src_cfg.params.process.pid = env.pid;
}
#endif
// allocate space for storing "allocation" structs
if (env.combined_only)
@@ -352,8 +348,7 @@ int main(int argc, char *argv[])
// disable kernel tracepoints based on settings or availability
if (env.kernel_trace) {
if (!has_kernel_node_tracepoints())
disable_kernel_node_tracepoints(skel);
disable_kernel_node_tracepoints(skel);
if (!env.percpu)
disable_kernel_percpu_tracepoints(skel);
@@ -400,7 +395,6 @@ int main(int argc, char *argv[])
}
}
#ifdef USE_BLAZESYM
symbolizer = blazesym_new();
if (!symbolizer) {
fprintf(stderr, "Failed to load blazesym\n");
@@ -409,28 +403,6 @@ int main(int argc, char *argv[])
goto cleanup;
}
print_stack_frames_func = print_stack_frames_by_blazesym;
#else
if (env.kernel_trace) {
ksyms = ksyms__load();
if (!ksyms) {
fprintf(stderr, "Failed to load ksyms\n");
ret = -ENOMEM;
goto cleanup;
}
print_stack_frames_func = print_stack_frames_by_ksyms;
} else {
syms_cache = syms_cache__new(0);
if (!syms_cache) {
fprintf(stderr, "Failed to create syms_cache\n");
ret = -ENOMEM;
goto cleanup;
}
print_stack_frames_func = print_stack_frames_by_syms_cache;
}
#endif
printf("Tracing outstanding memory allocs... Hit Ctrl-C to end\n");
// main loop
@@ -467,14 +439,7 @@ int main(int argc, char *argv[])
}
cleanup:
#ifdef USE_BLAZESYM
blazesym_free(symbolizer);
#else
if (syms_cache)
syms_cache__free(syms_cache);
if (ksyms)
ksyms__free(ksyms);
#endif
memleak_bpf__destroy(skel);
free(allocs);
@@ -671,7 +636,6 @@ pid_t fork_sync_exec(const char *command, int fd)
return pid;
}
#if USE_BLAZESYM
void print_stack_frame_by_blazesym(size_t frame, uint64_t addr, const blazesym_csym *sym)
{
if (!sym)
@@ -721,51 +685,6 @@ void print_stack_frames_by_blazesym()
blazesym_result_free(result);
}
#else
/* Print the kernel stack trace currently held in the file-scope `stack`
 * buffer, one frame per line, resolving each address through the preloaded
 * `ksyms` table. A zero address terminates the trace early; unresolved
 * addresses are printed as "<null sym>". Walks at most
 * env.perf_max_stack_depth entries. */
void print_stack_frames_by_ksyms()
{
for (size_t i = 0; i < env.perf_max_stack_depth; ++i) {
const uint64_t addr = stack[i];
if (addr == 0)
break;
const struct ksym *ksym = ksyms__map_addr(ksyms, addr);
if (ksym)
printf("\t%zu [<%016lx>] %s+0x%lx\n", i, addr, ksym->name, addr - ksym->addr);
else
printf("\t%zu [<%016lx>] <%s>\n", i, addr, "null sym");
}
}
/* Print the user-space stack trace currently held in the file-scope `stack`
 * buffer for the traced process (env.pid), resolving addresses through the
 * shared `syms_cache`. A zero address terminates the trace; resolved frames
 * also show the containing DSO name when available. Bails out (with a
 * message to stderr) if the symbol table for the pid cannot be obtained. */
void print_stack_frames_by_syms_cache()
{
const struct syms *syms = syms_cache__get_syms(syms_cache, env.pid);
if (!syms) {
fprintf(stderr, "Failed to get syms\n");
return;
}
for (size_t i = 0; i < env.perf_max_stack_depth; ++i) {
const uint64_t addr = stack[i];
if (addr == 0)
break;
char *dso_name;
uint64_t dso_offset;
const struct sym *sym = syms__map_addr_dso(syms, addr, &dso_name, &dso_offset);
if (sym) {
printf("\t%zu [<%016lx>] %s+0x%lx", i, addr, sym->name, sym->offset);
if (dso_name)
printf(" [%s]", dso_name);
printf("\n");
} else {
printf("\t%zu [<%016lx>] <%s>\n", i, addr, "null sym");
}
}
}
#endif
int print_stack_frames(struct allocation *allocs, size_t nr_allocs, int stack_traces_fd)
{
@@ -994,12 +913,6 @@ int print_outstanding_combined_allocs(int combined_allocs_fd, int stack_traces_f
return 0;
}
/* True when the running kernel exposes BOTH *_node kmem tracepoints.
 * They were dropped together upstream (the *_node event class was unified
 * into the non-node one), so requiring both is the safe gate before
 * attaching the node-variant BPF programs. */
bool has_kernel_node_tracepoints()
{
return tracepoint_exists("kmem", "kmalloc_node") &&
tracepoint_exists("kmem", "kmem_cache_alloc_node");
}
void disable_kernel_node_tracepoints(struct memleak_bpf *skel)
{
bpf_program__set_autoload(skel->progs.memleak__kmalloc_node, false);

View File

@@ -20,7 +20,6 @@
#include <bpf/libbpf.h>
#include <limits.h>
#include "trace_helpers.h"
#include "uprobe_helpers.h"
#define min(x, y) ({ \
typeof(x) _min1 = (x); \

View File

@@ -167,7 +167,11 @@
<div id="content" class="content">
<main>
<h1 id="更多的参考资料"><a class="header" href="#更多的参考资料">更多的参考资料</a></h1>
<p>TODO</p>
<p>可以在这里找到更多关于 eBPF 的信息:</p>
<ul>
<li><a href="https://github.com/zoidbergwill/awesome-ebpf">https://github.com/zoidbergwill/awesome-ebpf</a></li>
<li><a href="https://ebpf.io/">https://ebpf.io/</a></li>
</ul>
</main>

View File

@@ -4109,7 +4109,11 @@ char LICENSE[] SEC(&quot;license&quot;) = &quot;GPL&quot;;
<li>bcc 工具:<a href="https://github.com/iovisor/bcc/blob/master/libbpf-tools/biopattern.c">https://github.com/iovisor/bcc/blob/master/libbpf-tools/biopattern.c</a></li>
</ul>
<div style="break-before: page; page-break-before: always;"></div><h1 id="更多的参考资料"><a class="header" href="#更多的参考资料">更多的参考资料</a></h1>
<p>TODO</p>
<p>可以在这里找到更多关于 eBPF 的信息:</p>
<ul>
<li><a href="https://github.com/zoidbergwill/awesome-ebpf">https://github.com/zoidbergwill/awesome-ebpf</a></li>
<li><a href="https://ebpf.io/">https://ebpf.io/</a></li>
</ul>
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf-入门实践教程使用-lsm-进行安全检测防御"><a class="header" href="#ebpf-入门实践教程使用-lsm-进行安全检测防御">eBPF 入门实践教程:使用 LSM 进行安全检测防御</a></h1>
<p>eBPF (扩展的伯克利数据包过滤器) 是一项强大的网络和性能分析工具,被广泛应用在 Linux 内核上。eBPF 使得开发者能够动态地加载、更新和运行用户定义的代码,而无需重启内核或更改内核源代码。这个特性使得 eBPF 能够提供极高的灵活性和性能,使其在网络和系统性能分析方面具有广泛的应用。安全方面的 eBPF 应用也是如此,本文将介绍如何使用 eBPF LSM(Linux Security Modules)机制实现一个简单的安全检查程序。</p>
<h2 id="背景-1"><a class="header" href="#背景-1">背景</a></h2>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long