Mirror of https://github.com/eunomia-bpf/bpf-developer-tutorial.git
(synced 2026-02-09 21:25:24 +08:00)
141 lines, 3.3 KiB, C
// SPDX-License-Identifier: GPL-2.0
|
|
// Copyright (c) 2019 Facebook
|
|
// Copyright (c) 2020 Netflix
|
|
#include <vmlinux.h>
|
|
#include <bpf/bpf_helpers.h>
|
|
#include "opensnoop.h"
|
|
|
|
/* Syscall-entry arguments captured at sys_enter_open/openat and held in
 * the `start` map until the matching sys_exit_* tracepoint fires. */
struct args_t {
	const char *fname;	/* userspace pointer to the pathname argument */
	int flags;		/* open(2)/openat(2) flags argument */
};
/* Filter knobs. `const volatile` places them in .rodata so the userspace
 * loader can patch them before load. The `///` comments are annotations —
 * presumably parsed by eunomia-bpf to generate CLI options; their exact
 * text/format is kept as-is.
 *
 * NOTE(review): in trace_allowed(), pid_target is compared against the
 * kernel pid (thread id) and tgid_target against the tgid (process id) —
 * the two descriptions below look swapped relative to kernel terminology;
 * confirm intended semantics.
 * NOTE(review): uid_target defaults to 0, which passes valid_uid(), so by
 * default only uid-0 events pass the uid filter; upstream opensnoop uses
 * INVALID_UID as the "no filter" default — verify this is intentional. */
/// Process ID to trace
const volatile int pid_target = 0;
/// Thread ID to trace
const volatile int tgid_target = 0;
/// @description User ID to trace
const volatile int uid_target = 0;
/// @cmdarg {"default": false, "short": "f", "long": "failed"}
/// @description trace only failed events
const volatile bool targ_failed = false;
/* In-flight open() calls keyed by thread id (the lower 32 bits of
 * bpf_get_current_pid_tgid()); entries are consumed and deleted by the
 * exit-side handler. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10240);	/* cap on concurrently tracked opens */
	__type(key, u32);
	__type(value, struct args_t);
} start SEC(".maps");
/* Perf event array used to stream completed open events to userspace
 * via bpf_perf_event_output(). */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");
|
/* A uid filter value counts as "set" unless it equals INVALID_UID. */
static __always_inline bool valid_uid(uid_t uid)
{
	if (uid == INVALID_UID)
		return false;
	return true;
}
|
static __always_inline
|
|
bool trace_allowed(u32 tgid, u32 pid)
|
|
{
|
|
u32 uid;
|
|
|
|
/* filters */
|
|
if (tgid_target && tgid_target != tgid)
|
|
return false;
|
|
if (pid_target && pid_target != pid)
|
|
return false;
|
|
if (valid_uid(uid_target)) {
|
|
uid = (u32)bpf_get_current_uid_gid();
|
|
if (uid_target != uid) {
|
|
return false;
|
|
}
|
|
}
|
|
return true;
|
|
}
|
|
|
|
/* open(2) entry: stash the pathname pointer and flags, keyed by thread
 * id, for the matching sys_exit_open handler. */
SEC("tracepoint/syscalls/sys_enter_open")
int tracepoint__syscalls__sys_enter_open(struct trace_event_raw_sys_enter* ctx)
{
	u64 pid_tgid = bpf_get_current_pid_tgid();
	/* kernel terminology: tgid = userspace PID, pid = thread id */
	u32 tgid = pid_tgid >> 32;
	u32 pid = (u32)pid_tgid;

	if (!trace_allowed(tgid, pid))
		return 0;

	/* store arg info for later lookup at syscall exit */
	struct args_t args = {
		.fname = (const char *)ctx->args[0],
		.flags = (int)ctx->args[1],
	};
	bpf_map_update_elem(&start, &pid, &args, 0);
	return 0;
}
|
/* openat(2) entry: same as sys_enter_open, but the pathname and flags
 * are the second and third syscall arguments (arg0 is dirfd). */
SEC("tracepoint/syscalls/sys_enter_openat")
int tracepoint__syscalls__sys_enter_openat(struct trace_event_raw_sys_enter* ctx)
{
	u64 pid_tgid = bpf_get_current_pid_tgid();
	/* kernel terminology: tgid = userspace PID, pid = thread id */
	u32 tgid = pid_tgid >> 32;
	u32 pid = (u32)pid_tgid;

	if (!trace_allowed(tgid, pid))
		return 0;

	/* store arg info for later lookup at syscall exit */
	struct args_t args = {
		.fname = (const char *)ctx->args[1],
		.flags = (int)ctx->args[2],
	};
	bpf_map_update_elem(&start, &pid, &args, 0);
	return 0;
}
|
/* Shared sys_exit_open/openat handler: join the stashed entry arguments
 * with the syscall return value and emit one event to the perf buffer.
 * The start-map entry is always deleted on the way out. */
static __always_inline
int trace_exit(struct trace_event_raw_sys_exit* ctx)
{
	struct event event = {};
	struct args_t *ap;
	int ret;
	/* lower 32 bits = thread id; same key used by the enter handlers */
	u32 pid = bpf_get_current_pid_tgid();

	ap = bpf_map_lookup_elem(&start, &pid);
	if (!ap)
		return 0;	/* missed entry */
	ret = ctx->ret;
	if (targ_failed && ret >= 0)
		goto cleanup;	/* want failed only */

	/* event data */
	event.pid = bpf_get_current_pid_tgid() >> 32;	/* tgid = userspace PID */
	event.uid = bpf_get_current_uid_gid();	/* truncates to lower 32 bits (uid) */
	bpf_get_current_comm(&event.comm, sizeof(event.comm));
	bpf_probe_read_user_str(&event.fname, sizeof(event.fname), ap->fname);
	event.flags = ap->flags;
	event.ret = ret;

	/* emit event */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
			      &event, sizeof(event));

cleanup:
	/* drop the in-flight record whether or not an event was emitted */
	bpf_map_delete_elem(&start, &pid);
	return 0;
}
|
/* open(2) exit: delegate to the shared exit handler. */
SEC("tracepoint/syscalls/sys_exit_open")
int tracepoint__syscalls__sys_exit_open(struct trace_event_raw_sys_exit* ctx)
{
	return trace_exit(ctx);
}
|
/* openat(2) exit: delegate to the shared exit handler. */
SEC("tracepoint/syscalls/sys_exit_openat")
int tracepoint__syscalls__sys_exit_openat(struct trace_event_raw_sys_exit* ctx)
{
	return trace_exit(ctx);
}
|
/// Trace open family syscalls.
/* GPL-compatible license string required by the kernel for the bpf
 * helpers used above; the /// line is kept verbatim — presumably the
 * eunomia-bpf program description. */
char LICENSE[] SEC("license") = "GPL";