mirror of
https://github.com/eunomia-bpf/bpf-developer-tutorial.git
synced 2026-02-03 10:14:44 +08:00
fix linter issues for 4-9
This commit is contained in:
@@ -174,3 +174,5 @@ eunomia-bpf 由一个编译工具链和一个运行时库组成, 对比传统的
|
||||
- eBPF 介绍:<https://ebpf.io/>
|
||||
- BPF Compiler Collection (BCC):<https://github.com/iovisor/bcc>
|
||||
- eunomia-bpf:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -148,3 +148,5 @@ eBPF 程序的开发和使用流程可以概括为如下几个步骤:
|
||||
- 在实际开发中,还可能需要进行其他的步骤,例如配置编译和加载参数,管理 eBPF 内核模块和内核映射,以及使用其他高级功能等。
|
||||
|
||||
需要注意的是,BPF 程序的执行是在内核空间进行的,因此需要使用特殊的工具和技术来编写、编译和调试 BPF 程序。eunomia-bpf 是一个开源的 BPF 编译器和工具包,它可以帮助开发者快速和简单地编写和运行 BPF 程序。
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -99,4 +99,6 @@ $ sudo cat /sys/kernel/debug/tracing/trace_pipe
|
||||
|
||||
通过本文的示例,我们学习了如何使用 eBPF 的 kprobe 和 kretprobe 捕获 unlink 系统调用。更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
本文是 eBPF 入门开发实践指南的第二篇。下一篇文章将介绍如何在 eBPF 中使用 fentry 监测捕获 unlink 系统调用
|
||||
本文是 eBPF 入门开发实践指南的第二篇。下一篇文章将介绍如何在 eBPF 中使用 fentry 监测捕获 unlink 系统调用。
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -74,3 +74,5 @@ $ sudo cat /sys/kernel/debug/tracing/trace_pipe
|
||||
这段程序是一个 eBPF 程序,通过使用 fentry 和 fexit 捕获 do_unlinkat 和 do_unlinkat_exit 函数,并通过使用 bpf_get_current_pid_tgid 和 bpf_printk 函数获取调用 do_unlinkat 的进程 ID、文件名和返回值,并在内核日志中打印出来。
|
||||
|
||||
编译这个程序可以使用 ecc 工具,运行时可以使用 ecli 命令,并通过查看 /sys/kernel/debug/tracing/trace_pipe 文件查看 eBPF 程序的输出。更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -97,3 +97,5 @@ $ sudo cat /sys/kernel/debug/tracing/trace_pipe
|
||||
本文介绍了如何使用 eBPF 程序来捕获进程打开文件的系统调用。在 eBPF 程序中,我们可以通过定义 tracepoint__syscalls__sys_enter_open 和 tracepoint__syscalls__sys_enter_openat 函数并使用 SEC 宏把它们附加到 sys_enter_open 和 sys_enter_openat 两个 tracepoint 来捕获进程打开文件的系统调用。我们可以使用 bpf_get_current_pid_tgid 函数获取调用 open 或 openat 系统调用的进程 ID,并使用 bpf_printk 函数在内核日志中打印出来。在 eBPF 程序中,我们还可以通过定义一个全局变量 pid_target 来指定要捕获的进程的 pid,从而过滤输出,只输出指定的进程的信息。
|
||||
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -39,21 +39,21 @@ uprobe 是一种用于捕获用户空间函数调用的 eBPF 的探针,我们
|
||||
SEC("uretprobe//bin/bash:readline")
|
||||
int BPF_KRETPROBE(printret, const void *ret)
|
||||
{
|
||||
char str[MAX_LINE_SIZE];
|
||||
char comm[TASK_COMM_LEN];
|
||||
u32 pid;
|
||||
char str[MAX_LINE_SIZE];
|
||||
char comm[TASK_COMM_LEN];
|
||||
u32 pid;
|
||||
|
||||
if (!ret)
|
||||
return 0;
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
bpf_get_current_comm(&comm, sizeof(comm));
|
||||
bpf_get_current_comm(&comm, sizeof(comm));
|
||||
|
||||
pid = bpf_get_current_pid_tgid() >> 32;
|
||||
bpf_probe_read_user_str(str, sizeof(str), ret);
|
||||
pid = bpf_get_current_pid_tgid() >> 32;
|
||||
bpf_probe_read_user_str(str, sizeof(str), ret);
|
||||
|
||||
bpf_printk("PID %d (%s) read: %s ", pid, comm, str);
|
||||
bpf_printk("PID %d (%s) read: %s ", pid, comm, str);
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
};
|
||||
|
||||
char LICENSE[] SEC("license") = "GPL";
|
||||
@@ -105,4 +105,6 @@ $ sudo cat /sys/kernel/debug/tracing/trace_pipe
|
||||
|
||||
在上述代码中,我们使用了 SEC 宏来定义了一个 uprobe 探针,它指定了要捕获的用户空间程序 (bin/bash) 和要捕获的函数 (readline)。此外,我们还使用了 BPF_KRETPROBE 宏来定义了一个用于处理 readline 函数返回值的回调函数 (printret)。该函数可以获取到 readline 函数的返回值,并将其打印到内核日志中。通过这样的方式,我们就可以使用 eBPF 来捕获 bash 的 readline 函数调用,并获取用户在 bash 中输入的命令行。
|
||||
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:https://github.com/eunomia-bpf/eunomia-bpf
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -13,73 +13,73 @@ eBPF (Extended Berkeley Packet Filter) 是 Linux 内核上的一个强大的网
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
|
||||
#define MAX_ENTRIES 10240
|
||||
#define TASK_COMM_LEN 16
|
||||
#define MAX_ENTRIES 10240
|
||||
#define TASK_COMM_LEN 16
|
||||
|
||||
struct event {
|
||||
unsigned int pid;
|
||||
unsigned int tpid;
|
||||
int sig;
|
||||
int ret;
|
||||
char comm[TASK_COMM_LEN];
|
||||
unsigned int pid;
|
||||
unsigned int tpid;
|
||||
int sig;
|
||||
int ret;
|
||||
char comm[TASK_COMM_LEN];
|
||||
};
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, __u32);
|
||||
__type(value, struct event);
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, __u32);
|
||||
__type(value, struct event);
|
||||
} values SEC(".maps");
|
||||
|
||||
|
||||
static int probe_entry(pid_t tpid, int sig)
|
||||
{
|
||||
struct event event = {};
|
||||
__u64 pid_tgid;
|
||||
__u32 tid;
|
||||
struct event event = {};
|
||||
__u64 pid_tgid;
|
||||
__u32 tid;
|
||||
|
||||
pid_tgid = bpf_get_current_pid_tgid();
|
||||
tid = (__u32)pid_tgid;
|
||||
event.pid = pid_tgid >> 32;
|
||||
event.tpid = tpid;
|
||||
event.sig = sig;
|
||||
bpf_get_current_comm(event.comm, sizeof(event.comm));
|
||||
bpf_map_update_elem(&values, &tid, &event, BPF_ANY);
|
||||
return 0;
|
||||
pid_tgid = bpf_get_current_pid_tgid();
|
||||
tid = (__u32)pid_tgid;
|
||||
event.pid = pid_tgid >> 32;
|
||||
event.tpid = tpid;
|
||||
event.sig = sig;
|
||||
bpf_get_current_comm(event.comm, sizeof(event.comm));
|
||||
bpf_map_update_elem(&values, &tid, &event, BPF_ANY);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int probe_exit(void *ctx, int ret)
|
||||
{
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u32 tid = (__u32)pid_tgid;
|
||||
struct event *eventp;
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u32 tid = (__u32)pid_tgid;
|
||||
struct event *eventp;
|
||||
|
||||
eventp = bpf_map_lookup_elem(&values, &tid);
|
||||
if (!eventp)
|
||||
return 0;
|
||||
eventp = bpf_map_lookup_elem(&values, &tid);
|
||||
if (!eventp)
|
||||
return 0;
|
||||
|
||||
eventp->ret = ret;
|
||||
bpf_printk("PID %d (%s) sent signal %d to PID %d, ret = %d",
|
||||
eventp->pid, eventp->comm, eventp->sig, eventp->tpid, ret);
|
||||
eventp->ret = ret;
|
||||
bpf_printk("PID %d (%s) sent signal %d to PID %d, ret = %d",
|
||||
eventp->pid, eventp->comm, eventp->sig, eventp->tpid, ret);
|
||||
|
||||
cleanup:
|
||||
bpf_map_delete_elem(&values, &tid);
|
||||
return 0;
|
||||
bpf_map_delete_elem(&values, &tid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("tracepoint/syscalls/sys_enter_kill")
|
||||
int kill_entry(struct trace_event_raw_sys_enter *ctx)
|
||||
{
|
||||
pid_t tpid = (pid_t)ctx->args[0];
|
||||
int sig = (int)ctx->args[1];
|
||||
pid_t tpid = (pid_t)ctx->args[0];
|
||||
int sig = (int)ctx->args[1];
|
||||
|
||||
return probe_entry(tpid, sig);
|
||||
return probe_entry(tpid, sig);
|
||||
}
|
||||
|
||||
SEC("tracepoint/syscalls/sys_exit_kill")
|
||||
int kill_exit(struct trace_event_raw_sys_exit *ctx)
|
||||
{
|
||||
return probe_exit(ctx, ctx->ret);
|
||||
return probe_exit(ctx, ctx->ret);
|
||||
}
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
@@ -121,4 +121,6 @@ $ sudo cat /sys/kernel/debug/tracing/trace_pipe
|
||||
|
||||
## 总结
|
||||
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:https://github.com/eunomia-bpf/eunomia-bpf
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -17,12 +17,12 @@ eBPF (Extended Berkeley Packet Filter) 是 Linux 内核上的一个强大的网
|
||||
#define TASK_COMM_LEN 16
|
||||
|
||||
struct event {
|
||||
int pid;
|
||||
int ppid;
|
||||
int uid;
|
||||
int retval;
|
||||
bool is_exit;
|
||||
char comm[TASK_COMM_LEN];
|
||||
int pid;
|
||||
int ppid;
|
||||
int uid;
|
||||
int retval;
|
||||
bool is_exit;
|
||||
char comm[TASK_COMM_LEN];
|
||||
};
|
||||
|
||||
#endif /* __EXECSNOOP_H */
|
||||
@@ -38,31 +38,31 @@ struct event {
|
||||
#include "execsnoop.h"
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
|
||||
__uint(key_size, sizeof(u32));
|
||||
__uint(value_size, sizeof(u32));
|
||||
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
|
||||
__uint(key_size, sizeof(u32));
|
||||
__uint(value_size, sizeof(u32));
|
||||
} events SEC(".maps");
|
||||
|
||||
SEC("tracepoint/syscalls/sys_enter_execve")
|
||||
int tracepoint__syscalls__sys_enter_execve(struct trace_event_raw_sys_enter* ctx)
|
||||
{
|
||||
u64 id;
|
||||
pid_t pid, tgid;
|
||||
struct event event;
|
||||
struct task_struct *task;
|
||||
u64 id;
|
||||
pid_t pid, tgid;
|
||||
struct event event;
|
||||
struct task_struct *task;
|
||||
|
||||
uid_t uid = (u32)bpf_get_current_uid_gid();
|
||||
id = bpf_get_current_pid_tgid();
|
||||
pid = (pid_t)id;
|
||||
tgid = id >> 32;
|
||||
uid_t uid = (u32)bpf_get_current_uid_gid();
|
||||
id = bpf_get_current_pid_tgid();
|
||||
pid = (pid_t)id;
|
||||
tgid = id >> 32;
|
||||
|
||||
event.pid = tgid;
|
||||
event.uid = uid;
|
||||
task = (struct task_struct*)bpf_get_current_task();
|
||||
event.ppid = BPF_CORE_READ(task, real_parent, tgid);
|
||||
bpf_get_current_comm(&event.comm, sizeof(event.comm));
|
||||
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
|
||||
return 0;
|
||||
event.pid = tgid;
|
||||
event.uid = uid;
|
||||
task = (struct task_struct*)bpf_get_current_task();
|
||||
event.ppid = BPF_CORE_READ(task, real_parent, tgid);
|
||||
bpf_get_current_comm(&event.comm, sizeof(event.comm));
|
||||
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
|
||||
return 0;
|
||||
}
|
||||
|
||||
char LICENSE[] SEC("license") = "GPL";
|
||||
@@ -99,3 +99,9 @@ TIME PID PPID UID COMM
|
||||
21:28:30 40752 40751 1000 sh
|
||||
21:28:30 40753 40752 1000 cpuUsage.sh
|
||||
```
|
||||
|
||||
## 总结
|
||||
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -1,89 +1,74 @@
|
||||
## eBPF 入门开发实践指南八:在 eBPF 中使用 exitsnoop 监控 进程退出事件:
|
||||
# eBPF 入门开发实践指南八:在 eBPF 中使用 exitsnoop 监控 进程退出事件
|
||||
|
||||
## exitsnoop
|
||||
|
||||
eBPF (Extended Berkeley Packet Filter) 是 Linux 内核上的一个强大的网络和性能分析工具。它允许开发者在内核运行时动态加载、更新和运行用户定义的代码。
|
||||
|
||||
本文是 eBPF 入门开发实践指南的第八篇,在 eBPF 中使用 exitsnoop 监控进程退出事件。
|
||||
|
||||
```c
|
||||
|
||||
#include "vmlinux.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
#include "exitsnoop.h"
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_RINGBUF);
|
||||
__uint(max_entries, 256 * 1024);
|
||||
__uint(type, BPF_MAP_TYPE_RINGBUF);
|
||||
__uint(max_entries, 256 * 1024);
|
||||
} rb SEC(".maps");
|
||||
|
||||
SEC("tp/sched/sched_process_exit")
|
||||
int handle_exit(struct trace_event_raw_sched_process_template* ctx)
|
||||
{
|
||||
struct task_struct *task;
|
||||
struct event *e;
|
||||
pid_t pid, tid;
|
||||
u64 id, ts, *start_ts, duration_ns = 0;
|
||||
|
||||
/* get PID and TID of exiting thread/process */
|
||||
id = bpf_get_current_pid_tgid();
|
||||
pid = id >> 32;
|
||||
tid = (u32)id;
|
||||
struct task_struct *task;
|
||||
struct event *e;
|
||||
pid_t pid, tid;
|
||||
u64 id, ts, *start_ts, duration_ns = 0;
|
||||
|
||||
/* get PID and TID of exiting thread/process */
|
||||
id = bpf_get_current_pid_tgid();
|
||||
pid = id >> 32;
|
||||
tid = (u32)id;
|
||||
|
||||
/* ignore thread exits */
|
||||
if (pid != tid)
|
||||
return 0;
|
||||
/* ignore thread exits */
|
||||
if (pid != tid)
|
||||
return 0;
|
||||
|
||||
/* reserve sample from BPF ringbuf */
|
||||
e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
|
||||
if (!e)
|
||||
return 0;
|
||||
/* reserve sample from BPF ringbuf */
|
||||
e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
|
||||
if (!e)
|
||||
return 0;
|
||||
|
||||
/* fill out the sample with data */
|
||||
task = (struct task_struct *)bpf_get_current_task();
|
||||
/* fill out the sample with data */
|
||||
task = (struct task_struct *)bpf_get_current_task();
|
||||
|
||||
e->duration_ns = duration_ns;
|
||||
e->pid = pid;
|
||||
e->ppid = BPF_CORE_READ(task, real_parent, tgid);
|
||||
e->exit_code = (BPF_CORE_READ(task, exit_code) >> 8) & 0xff;
|
||||
bpf_get_current_comm(&e->comm, sizeof(e->comm));
|
||||
e->duration_ns = duration_ns;
|
||||
e->pid = pid;
|
||||
e->ppid = BPF_CORE_READ(task, real_parent, tgid);
|
||||
e->exit_code = (BPF_CORE_READ(task, exit_code) >> 8) & 0xff;
|
||||
bpf_get_current_comm(&e->comm, sizeof(e->comm));
|
||||
|
||||
/* send data to user-space for post-processing */
|
||||
bpf_ringbuf_submit(e, 0);
|
||||
return 0;
|
||||
/* send data to user-space for post-processing */
|
||||
bpf_ringbuf_submit(e, 0);
|
||||
return 0;
|
||||
}
|
||||
```
|
||||
|
||||
这段代码是一个 BPF 程序,用于监控 Linux 系统中的进程退出事件。BPF(Berkeley Packet Filter)是一种内核态程序设计技术,允许开发人员编写在内核中安全运行的程序,以捕获和处理内核事件。
|
||||
|
||||
该程序通过注册一个 tracepoint,来监控进程退出事件。Tracepoint 是一种内核特性,允许内核模块获取特定事件的通知。在本程序中,注册的 tracepoint 是“tp/sched/sched_process_exit”,表示该程序监控的是进程退出事件。
|
||||
|
||||
当系统中发生进程退出事件时,BPF 程序会捕获该事件,并调用“handle_exit”函数来处理它。该函数首先检查当前退出事件是否是进程退出事件(而不是线程退出事件),然后在 BPF 环形缓冲区(“rb”)中保留一个事件结构体,并填充该结构体中的其他信息,例如进程 ID、进程名称、退出代码和退出信号等信息。最后,该函数会调用“bpf_ringbuf_submit”函数,将捕获的事件通过环形缓冲区发送给用户空间程序。
|
||||
总而言之,这段代码是一个 BPF 程序,用于监控 Linux 系统中的进程退出事件。
|
||||
|
||||
|
||||
## origin
|
||||
|
||||
origin from:
|
||||
|
||||
<https://github.com/iovisor/bcc/blob/master/libbpf-tools/runqslower.bpf.c>
|
||||
|
||||
result:
|
||||
|
||||
这段代码定义了一个 eBPF 程序,该程序用于跟踪进程在运行队列中的等待时间。它通过使用 tracepoint 和 perf event 输出来实现。
|
||||
程序首先定义了两个 BPF 内核映射:start 映射用于存储每个进程在被调度运行之前的时间戳,events 映射用于存储 perf 事件。然后,
|
||||
程序定义了一些帮助函数,用于跟踪每个进程的调度状态。 trace_enqueue 函数用于在进程被调度运行之前记录时间戳, handle_switch
|
||||
函数用于处理进程切换,并计算进程在队列中等待的时间。接下来,程序定义了五个 tracepoint 程序,用于捕获不同的调度器事件。 sched_wakeup
|
||||
和 sched_wakeup_new 程序用于捕获新进程被唤醒的事件, sched_switch 程序用于捕获进程切换事件, handle_sched_wakeup 和handle_sched_wakeup_new 程序用于捕获 raw tracepoint 事件。这些 tracepoint 程序调用了前面定义的帮助函数来跟踪进程的调度状态。
|
||||
最后,程序将计算得到的等待时间输出到 perf 事件中,供用户空间工具进行捕获和分析。
|
||||
|
||||
|
||||
|
||||
总而言之,这段代码是一个 BPF 程序,用于跟踪进程在运行队列中的等待时间。
|
||||
|
||||
## Compile and Run
|
||||
|
||||
Compile:
|
||||
|
||||
```
|
||||
```shell
|
||||
docker run -it -v `pwd`/:/src/ yunwei37/ebpm:latest
|
||||
```
|
||||
|
||||
@@ -112,35 +97,8 @@ TIME PID PPID EXIT_CODE DURATION_NS COMM
|
||||
21:40:09 42059 42054 0 0 cat
|
||||
```
|
||||
|
||||
## 总结
|
||||
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
## origin
|
||||
|
||||
origin from:
|
||||
|
||||
<https://github.com/iovisor/bcc/blob/master/libbpf-tools/runqslower.bpf.c>
|
||||
|
||||
result:
|
||||
|
||||
```
|
||||
$ sudo ecli/build/bin/Release/ecli run examples/bpftools/runqslower/package.json
|
||||
|
||||
running and waiting for the ebpf events from perf event...
|
||||
time task prev_task delta_us pid prev_pid
|
||||
20:11:59 gnome-shell swapper/0 32 2202 0
|
||||
20:11:59 ecli swapper/3 23 3437 0
|
||||
20:11:59 rcu_sched swapper/1 1 14 0
|
||||
20:11:59 gnome-terminal- swapper/1 13 2714 0
|
||||
20:11:59 ecli swapper/3 2 3437 0
|
||||
20:11:59 kworker/3:3 swapper/3 3 215 0
|
||||
20:11:59 containerd swapper/1 8 1088 0
|
||||
20:11:59 ecli swapper/2 5 3437 0
|
||||
20:11:59 HangDetector swapper/3 6 854 0
|
||||
20:11:59 ecli swapper/2 60 3437 0
|
||||
20:11:59 rcu_sched swapper/1 26 14 0
|
||||
20:11:59 kworker/0:1 swapper/0 26 3414 0
|
||||
20:11:59 ecli swapper/2 6 3437 0
|
||||
```
|
||||
|
||||
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
@@ -1,17 +1,20 @@
|
||||
## eBPF 入门开发实践指南九:一个 Linux 内核 BPF 程序,通过柱状图来总结调度程序运行队列延迟,显示任务等待运行在 CPU 上的时间长度
|
||||
# eBPF 入门开发实践指南九:一个 Linux 内核 BPF 程序,通过柱状图来总结调度程序运行队列延迟,显示任务等待运行在 CPU 上的时间长度
|
||||
|
||||
eBPF (Extended Berkeley Packet Filter) 是 Linux 内核上的一个强大的网络和性能分析工具。它允许开发者在内核运行时动态加载、更新和运行用户定义的代码。
|
||||
|
||||
## runqlat是什么?
|
||||
bcc-tools 是一组用于在 Linux 系统上使用 BPF 程序的工具。runqlat 是 bcc-tools 中的一个工具,用于分析 Linux 系统的调度性能。
|
||||
具体来说,runqlat 用于测量一个任务在被调度到 CPU 上运行之前在运行队列中等待的时间。这些信息对于识别性能瓶颈和提高 Linux 内核
|
||||
调度算法的整体效率非常有用
|
||||
|
||||
## runqlat原理:
|
||||
使用内核跟踪点和函数探针的结合来测量进程在运行队列中的时间。当进程被排队时,trace_enqueue 函数会在一个映射中记录时间戳。
|
||||
当进程被调度到 CPU 上运行时,handle_switch 函数会检索时间戳,并计算当前时间与排队时间之间的时间差。这个差值(或 delta)
|
||||
然后用于更新进程的直方图,该直方图记录运行队列延迟的分布。该直方图可用于分析 Linux 内核的调度性能。
|
||||
## runqlat代码实现
|
||||
```
|
||||
bcc-tools 是一组用于在 Linux 系统上使用 BPF 程序的工具。runqlat 是 bcc-tools 中的一个工具,用于分析 Linux 系统的调度性能。具体来说,runqlat 用于测量一个任务在被调度到 CPU 上运行之前在运行队列中等待的时间。这些信息对于识别性能瓶颈和提高 Linux 内核调度算法的整体效率非常有用。
|
||||
|
||||
## runqlat 原理
|
||||
|
||||
runqlat 使用内核跟踪点和函数探针的结合来测量进程在运行队列中的时间。当进程被排队时,trace_enqueue 函数会在一个映射中记录时间戳。当进程被调度到 CPU 上运行时,handle_switch 函数会检索时间戳,并计算当前时间与排队时间之间的时间差。这个差值(或 delta)然后用于更新进程的直方图,该直方图记录运行队列延迟的分布。该直方图可用于分析 Linux 内核的调度性能。
|
||||
|
||||
## runqlat 代码实现
|
||||
|
||||
首先我们需要编写一个源代码文件 runqlat.bpf.c:
|
||||
|
||||
```c
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2020 Wenbo Zhang
|
||||
#include <vmlinux.h>
|
||||
@@ -23,8 +26,8 @@ eBPF (Extended Berkeley Packet Filter) 是 Linux 内核上的一个强大的网
|
||||
#include "maps.bpf.h"
|
||||
#include "core_fixes.bpf.h"
|
||||
|
||||
#define MAX_ENTRIES 10240
|
||||
#define TASK_RUNNING 0
|
||||
#define MAX_ENTRIES 10240
|
||||
#define TASK_RUNNING 0
|
||||
|
||||
const volatile bool filter_cg = false;
|
||||
const volatile bool targ_per_process = false;
|
||||
@@ -34,140 +37,157 @@ const volatile bool targ_ms = false;
|
||||
const volatile pid_t targ_tgid = 0;
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
|
||||
__type(key, u32);
|
||||
__type(value, u32);
|
||||
__uint(max_entries, 1);
|
||||
__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
|
||||
__type(key, u32);
|
||||
__type(value, u32);
|
||||
__uint(max_entries, 1);
|
||||
} cgroup_map SEC(".maps");
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, u32);
|
||||
__type(value, u64);
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, u32);
|
||||
__type(value, u64);
|
||||
} start SEC(".maps");
|
||||
|
||||
static struct hist zero;
|
||||
|
||||
/// @sample {"interval": 1000, "type" : "log2_hist"}
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, u32);
|
||||
__type(value, struct hist);
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, u32);
|
||||
__type(value, struct hist);
|
||||
} hists SEC(".maps");
|
||||
|
||||
static int trace_enqueue(u32 tgid, u32 pid)
|
||||
{
|
||||
u64 ts;
|
||||
u64 ts;
|
||||
|
||||
if (!pid)
|
||||
return 0;
|
||||
if (targ_tgid && targ_tgid != tgid)
|
||||
return 0;
|
||||
if (!pid)
|
||||
return 0;
|
||||
if (targ_tgid && targ_tgid != tgid)
|
||||
return 0;
|
||||
|
||||
ts = bpf_ktime_get_ns();
|
||||
bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
|
||||
return 0;
|
||||
ts = bpf_ktime_get_ns();
|
||||
bpf_map_update_elem(&start, &pid, &ts, BPF_ANY);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int pid_namespace(struct task_struct *task)
|
||||
{
|
||||
struct pid *pid;
|
||||
unsigned int level;
|
||||
struct upid upid;
|
||||
unsigned int inum;
|
||||
struct pid *pid;
|
||||
unsigned int level;
|
||||
struct upid upid;
|
||||
unsigned int inum;
|
||||
|
||||
/* get the pid namespace by following task_active_pid_ns(),
|
||||
* pid->numbers[pid->level].ns
|
||||
*/
|
||||
pid = BPF_CORE_READ(task, thread_pid);
|
||||
level = BPF_CORE_READ(pid, level);
|
||||
bpf_core_read(&upid, sizeof(upid), &pid->numbers[level]);
|
||||
inum = BPF_CORE_READ(upid.ns, ns.inum);
|
||||
/* get the pid namespace by following task_active_pid_ns(),
|
||||
* pid->numbers[pid->level].ns
|
||||
*/
|
||||
pid = BPF_CORE_READ(task, thread_pid);
|
||||
level = BPF_CORE_READ(pid, level);
|
||||
bpf_core_read(&upid, sizeof(upid), &pid->numbers[level]);
|
||||
inum = BPF_CORE_READ(upid.ns, ns.inum);
|
||||
|
||||
return inum;
|
||||
return inum;
|
||||
}
|
||||
|
||||
static int handle_switch(bool preempt, struct task_struct *prev, struct task_struct *next)
|
||||
{
|
||||
struct hist *histp;
|
||||
u64 *tsp, slot;
|
||||
u32 pid, hkey;
|
||||
s64 delta;
|
||||
struct hist *histp;
|
||||
u64 *tsp, slot;
|
||||
u32 pid, hkey;
|
||||
s64 delta;
|
||||
|
||||
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
|
||||
return 0;
|
||||
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
|
||||
return 0;
|
||||
|
||||
if (get_task_state(prev) == TASK_RUNNING)
|
||||
trace_enqueue(BPF_CORE_READ(prev, tgid), BPF_CORE_READ(prev, pid));
|
||||
if (get_task_state(prev) == TASK_RUNNING)
|
||||
trace_enqueue(BPF_CORE_READ(prev, tgid), BPF_CORE_READ(prev, pid));
|
||||
|
||||
pid = BPF_CORE_READ(next, pid);
|
||||
pid = BPF_CORE_READ(next, pid);
|
||||
|
||||
tsp = bpf_map_lookup_elem(&start, &pid);
|
||||
if (!tsp)
|
||||
return 0;
|
||||
delta = bpf_ktime_get_ns() - *tsp;
|
||||
if (delta < 0)
|
||||
goto cleanup;
|
||||
tsp = bpf_map_lookup_elem(&start, &pid);
|
||||
if (!tsp)
|
||||
return 0;
|
||||
delta = bpf_ktime_get_ns() - *tsp;
|
||||
if (delta < 0)
|
||||
goto cleanup;
|
||||
|
||||
if (targ_per_process)
|
||||
hkey = BPF_CORE_READ(next, tgid);
|
||||
else if (targ_per_thread)
|
||||
hkey = pid;
|
||||
else if (targ_per_pidns)
|
||||
hkey = pid_namespace(next);
|
||||
else
|
||||
hkey = -1;
|
||||
histp = bpf_map_lookup_or_try_init(&hists, &hkey, &zero);
|
||||
if (!histp)
|
||||
goto cleanup;
|
||||
if (!histp->comm[0])
|
||||
bpf_probe_read_kernel_str(&histp->comm, sizeof(histp->comm),
|
||||
next->comm);
|
||||
if (targ_ms)
|
||||
delta /= 1000000U;
|
||||
else
|
||||
delta /= 1000U;
|
||||
slot = log2l(delta);
|
||||
if (slot >= MAX_SLOTS)
|
||||
slot = MAX_SLOTS - 1;
|
||||
__sync_fetch_and_add(&histp->slots[slot], 1);
|
||||
if (targ_per_process)
|
||||
hkey = BPF_CORE_READ(next, tgid);
|
||||
else if (targ_per_thread)
|
||||
hkey = pid;
|
||||
else if (targ_per_pidns)
|
||||
hkey = pid_namespace(next);
|
||||
else
|
||||
hkey = -1;
|
||||
histp = bpf_map_lookup_or_try_init(&hists, &hkey, &zero);
|
||||
if (!histp)
|
||||
goto cleanup;
|
||||
if (!histp->comm[0])
|
||||
bpf_probe_read_kernel_str(&histp->comm, sizeof(histp->comm),
|
||||
next->comm);
|
||||
if (targ_ms)
|
||||
delta /= 1000000U;
|
||||
else
|
||||
delta /= 1000U;
|
||||
slot = log2l(delta);
|
||||
if (slot >= MAX_SLOTS)
|
||||
slot = MAX_SLOTS - 1;
|
||||
__sync_fetch_and_add(&histp->slots[slot], 1);
|
||||
|
||||
cleanup:
|
||||
bpf_map_delete_elem(&start, &pid);
|
||||
return 0;
|
||||
bpf_map_delete_elem(&start, &pid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
SEC("raw_tp/sched_wakeup")
|
||||
int BPF_PROG(handle_sched_wakeup, struct task_struct *p)
|
||||
{
|
||||
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
|
||||
return 0;
|
||||
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
|
||||
return 0;
|
||||
|
||||
return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
|
||||
return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
|
||||
}
|
||||
|
||||
SEC("raw_tp/sched_wakeup_new")
|
||||
int BPF_PROG(handle_sched_wakeup_new, struct task_struct *p)
|
||||
{
|
||||
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
|
||||
return 0;
|
||||
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
|
||||
return 0;
|
||||
|
||||
return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
|
||||
return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid));
|
||||
}
|
||||
|
||||
SEC("raw_tp/sched_switch")
|
||||
int BPF_PROG(handle_sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
|
||||
{
|
||||
return handle_switch(preempt, prev, next);
|
||||
return handle_switch(preempt, prev, next);
|
||||
}
|
||||
|
||||
char LICENSE[] SEC("license") = "GPL";
|
||||
```
|
||||
这是一个 Linux 内核 BPF 程序,旨在收集和报告运行队列的延迟。BPF 是 Linux 内核中一项技术,它允许将程序附加到内核中的特定点并进行安全高效的执行。这些程序可用于收集有关内核行为的信息,并实现自定义行为。这个 BPF 程序使用 BPF maps和来自 bpf_helpers.h 和 bpf_tracing.h 头文件的帮助程序的组合来收集有关任务何时从内核的运行队列中排队和取消排队的信息,并记录任务在被安排执行之前在运行队列上等待的时间。然后,它使用这些信息生成直方图,显示不同组任务的运行队列延迟分布。这些直方图可用于识别和诊断内核调度行为中的性能问题。
|
||||
|
||||
然后我们需要定义一个头文件`runqlat.h`,用来给用户态处理从内核态上报的事件:
|
||||
|
||||
```c
|
||||
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
||||
#ifndef __RUNQLAT_H
|
||||
#define __RUNQLAT_H
|
||||
|
||||
#define TASK_COMM_LEN 16
|
||||
#define MAX_SLOTS 26
|
||||
|
||||
struct hist {
|
||||
__u32 slots[MAX_SLOTS];
|
||||
char comm[TASK_COMM_LEN];
|
||||
};
|
||||
|
||||
#endif /* __RUNQLAT_H */
|
||||
```
|
||||
|
||||
这是一个 Linux 内核 BPF 程序,旨在收集和报告运行队列的延迟。BPF 是 Linux 内核中一项技术,它允许将程序附加到内核中的特定点并进行安全高效的执行。这些程序可用于收集有关内核行为的信息,并实现自定义行为。这个 BPF 程序使用 BPF maps 来收集有关任务何时从内核的运行队列中排队和取消排队的信息,并记录任务在被安排执行之前在运行队列上等待的时间。然后,它使用这些信息生成直方图,显示不同组任务的运行队列延迟分布。这些直方图可用于识别和诊断内核调度行为中的性能问题。
|
||||
|
||||
## Compile and Run
|
||||
|
||||
@@ -176,7 +196,9 @@ Compile:
|
||||
```shell
|
||||
docker run -it -v `pwd`/:/src/ yunwei37/ebpm:latest
|
||||
```
|
||||
|
||||
或者
|
||||
|
||||
```console
|
||||
$ ecc runqlat.bpf.c runqlat.h
|
||||
Compiling bpf object...
|
||||
@@ -242,597 +264,12 @@ comm = cpptools
|
||||
128 -> 255 : 3 |********** |
|
||||
```
|
||||
|
||||
## details in bcc
|
||||
|
||||
```text
|
||||
Demonstrations of runqlat, the Linux eBPF/bcc version.
|
||||
|
||||
|
||||
This program summarizes scheduler run queue latency as a histogram, showing
|
||||
how long tasks spent waiting their turn to run on-CPU.
|
||||
|
||||
Here is a heavily loaded system:
|
||||
|
||||
# ./runqlat
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
^C
|
||||
usecs : count distribution
|
||||
0 -> 1 : 233 |*********** |
|
||||
2 -> 3 : 742 |************************************ |
|
||||
4 -> 7 : 203 |********** |
|
||||
8 -> 15 : 173 |******** |
|
||||
16 -> 31 : 24 |* |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 30 |* |
|
||||
128 -> 255 : 6 | |
|
||||
256 -> 511 : 3 | |
|
||||
512 -> 1023 : 5 | |
|
||||
1024 -> 2047 : 27 |* |
|
||||
2048 -> 4095 : 30 |* |
|
||||
4096 -> 8191 : 20 | |
|
||||
8192 -> 16383 : 29 |* |
|
||||
16384 -> 32767 : 809 |****************************************|
|
||||
32768 -> 65535 : 64 |*** |
|
||||
|
||||
The distribution is bimodal, with one mode between 0 and 15 microseconds,
|
||||
and another between 16 and 65 milliseconds. These modes are visible as the
|
||||
spikes in the ASCII distribution (which is merely a visual representation
|
||||
of the "count" column). As an example of reading one line: 809 events fell
|
||||
into the 16384 to 32767 microsecond range (16 to 32 ms) while tracing.
|
||||
|
||||
I would expect the two modes to be due the workload: 16 hot CPU-bound threads,
|
||||
and many other mostly idle threads doing occasional work. I suspect the mostly
|
||||
idle threads will run with a higher priority when they wake up, and are
|
||||
the reason for the low latency mode. The high latency mode will be the
|
||||
CPU-bound threads. More analysis with this and other tools can confirm.
|
||||
|
||||
|
||||
A -m option can be used to show milliseconds instead, as well as an interval
|
||||
and a count. For example, showing three x five second summary in milliseconds:
|
||||
|
||||
# ./runqlat -m 5 3
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 3818 |****************************************|
|
||||
2 -> 3 : 39 | |
|
||||
4 -> 7 : 39 | |
|
||||
8 -> 15 : 62 | |
|
||||
16 -> 31 : 2214 |*********************** |
|
||||
32 -> 63 : 226 |** |
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 3775 |****************************************|
|
||||
2 -> 3 : 52 | |
|
||||
4 -> 7 : 37 | |
|
||||
8 -> 15 : 65 | |
|
||||
16 -> 31 : 2230 |*********************** |
|
||||
32 -> 63 : 212 |** |
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 3816 |****************************************|
|
||||
2 -> 3 : 49 | |
|
||||
4 -> 7 : 40 | |
|
||||
8 -> 15 : 53 | |
|
||||
16 -> 31 : 2228 |*********************** |
|
||||
32 -> 63 : 221 |** |
|
||||
|
||||
This shows a similar distribution across the three summaries.
|
||||
|
||||
|
||||
A -p option can be used to show one PID only, which is filtered in kernel for
|
||||
efficiency. For example, PID 4505, and one second summaries:
|
||||
|
||||
# ./runqlat -mp 4505 1
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 1 |* |
|
||||
2 -> 3 : 2 |*** |
|
||||
4 -> 7 : 1 |* |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 25 |****************************************|
|
||||
32 -> 63 : 3 |**** |
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 2 |** |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 1 |* |
|
||||
16 -> 31 : 30 |****************************************|
|
||||
32 -> 63 : 1 |* |
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 28 |****************************************|
|
||||
32 -> 63 : 2 |** |
|
||||
|
||||
msecs : count distribution
|
||||
0 -> 1 : 1 |* |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 27 |****************************************|
|
||||
32 -> 63 : 4 |***** |
|
||||
[...]
|
||||
|
||||
For comparison, here is pidstat(1) for that process:
|
||||
|
||||
# pidstat -p 4505 1
|
||||
Linux 4.4.0-virtual (bgregg-xxxxxxxx) 02/08/2016 _x86_64_ (8 CPU)
|
||||
|
||||
08:56:11 AM UID PID %usr %system %guest %CPU CPU Command
|
||||
08:56:12 AM 0 4505 9.00 3.00 0.00 12.00 0 bash
|
||||
08:56:13 AM 0 4505 7.00 5.00 0.00 12.00 0 bash
|
||||
08:56:14 AM 0 4505 10.00 2.00 0.00 12.00 0 bash
|
||||
08:56:15 AM 0 4505 11.00 2.00 0.00 13.00 0 bash
|
||||
08:56:16 AM 0 4505 9.00 3.00 0.00 12.00 0 bash
|
||||
[...]
|
||||
|
||||
This is a synthetic workload that is CPU bound. It's only spending 12% on-CPU
|
||||
each second because of high CPU demand on this server: the remaining time
|
||||
is spent waiting on a run queue, as visualized by runqlat.
|
||||
|
||||
|
||||
Here is the same system, but when it is CPU idle:
|
||||
|
||||
# ./runqlat 5 1
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
|
||||
usecs : count distribution
|
||||
0 -> 1 : 2250 |******************************** |
|
||||
2 -> 3 : 2340 |********************************** |
|
||||
4 -> 7 : 2746 |****************************************|
|
||||
8 -> 15 : 418 |****** |
|
||||
16 -> 31 : 93 |* |
|
||||
32 -> 63 : 28 | |
|
||||
64 -> 127 : 119 |* |
|
||||
128 -> 255 : 9 | |
|
||||
256 -> 511 : 4 | |
|
||||
512 -> 1023 : 20 | |
|
||||
1024 -> 2047 : 22 | |
|
||||
2048 -> 4095 : 5 | |
|
||||
4096 -> 8191 : 2 | |
|
||||
|
||||
Back to a microsecond scale, this time there is little run queue latency past 1
|
||||
millisecond, as would be expected.
|
||||
|
||||
|
||||
Now 16 threads are performing heavy disk I/O:
|
||||
|
||||
# ./runqlat 5 1
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
|
||||
usecs : count distribution
|
||||
0 -> 1 : 204 | |
|
||||
2 -> 3 : 944 |* |
|
||||
4 -> 7 : 16315 |********************* |
|
||||
8 -> 15 : 29897 |****************************************|
|
||||
16 -> 31 : 1044 |* |
|
||||
32 -> 63 : 23 | |
|
||||
64 -> 127 : 128 | |
|
||||
128 -> 255 : 24 | |
|
||||
256 -> 511 : 5 | |
|
||||
512 -> 1023 : 13 | |
|
||||
1024 -> 2047 : 15 | |
|
||||
2048 -> 4095 : 13 | |
|
||||
4096 -> 8191 : 10 | |
|
||||
|
||||
The distribution hasn't changed too much. While the disks are 100% busy, there
|
||||
is still plenty of CPU headroom, and threads still don't spend much time
|
||||
waiting their turn.
|
||||
|
||||
|
||||
A -P option will print a distribution for each PID:
|
||||
|
||||
# ./runqlat -P
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
^C
|
||||
|
||||
pid = 0
|
||||
usecs : count distribution
|
||||
0 -> 1 : 351 |******************************** |
|
||||
2 -> 3 : 96 |******** |
|
||||
4 -> 7 : 437 |****************************************|
|
||||
8 -> 15 : 12 |* |
|
||||
16 -> 31 : 10 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 16 |* |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 0 | |
|
||||
1024 -> 2047 : 0 | |
|
||||
2048 -> 4095 : 0 | |
|
||||
4096 -> 8191 : 0 | |
|
||||
8192 -> 16383 : 1 | |
|
||||
|
||||
pid = 12929
|
||||
usecs : count distribution
|
||||
0 -> 1 : 1 |****************************************|
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 1 |****************************************|
|
||||
|
||||
pid = 12930
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 1 |****************************************|
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 1 |****************************************|
|
||||
|
||||
pid = 12931
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 1 |******************** |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 2 |****************************************|
|
||||
|
||||
pid = 12932
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 1 |****************************************|
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 1 |****************************************|
|
||||
|
||||
pid = 7
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 426 |************************************* |
|
||||
4 -> 7 : 457 |****************************************|
|
||||
8 -> 15 : 16 |* |
|
||||
|
||||
pid = 9
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 425 |****************************************|
|
||||
8 -> 15 : 16 |* |
|
||||
|
||||
pid = 11
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 10 |****************************************|
|
||||
|
||||
pid = 14
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 8 |****************************************|
|
||||
4 -> 7 : 2 |********** |
|
||||
|
||||
pid = 18
|
||||
usecs : count distribution
|
||||
0 -> 1 : 414 |****************************************|
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 20 |* |
|
||||
8 -> 15 : 8 | |
|
||||
|
||||
pid = 12928
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 1 |****************************************|
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 1 |****************************************|
|
||||
|
||||
pid = 1867
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 15 |****************************************|
|
||||
16 -> 31 : 1 |** |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 4 |********** |
|
||||
|
||||
pid = 1871
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 2 |****************************************|
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 1 |******************** |
|
||||
|
||||
pid = 1876
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 1 |****************************************|
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 1 |****************************************|
|
||||
|
||||
pid = 1878
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 3 |****************************************|
|
||||
|
||||
pid = 1880
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 3 |****************************************|
|
||||
|
||||
pid = 9307
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 1 |****************************************|
|
||||
|
||||
pid = 1886
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 1 |******************** |
|
||||
8 -> 15 : 2 |****************************************|
|
||||
|
||||
pid = 1888
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 3 |****************************************|
|
||||
|
||||
pid = 3297
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 1 |****************************************|
|
||||
|
||||
pid = 1892
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 1 |******************** |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 2 |****************************************|
|
||||
|
||||
pid = 7024
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 4 |****************************************|
|
||||
|
||||
pid = 16468
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 3 |****************************************|
|
||||
|
||||
pid = 12922
|
||||
usecs : count distribution
|
||||
0 -> 1 : 1 |****************************************|
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 1 |****************************************|
|
||||
16 -> 31 : 1 |****************************************|
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 1 |****************************************|
|
||||
|
||||
pid = 12923
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 1 |******************** |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 2 |****************************************|
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 1 |******************** |
|
||||
1024 -> 2047 : 1 |******************** |
|
||||
|
||||
pid = 12924
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 2 |******************** |
|
||||
8 -> 15 : 4 |****************************************|
|
||||
16 -> 31 : 1 |********** |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 0 | |
|
||||
1024 -> 2047 : 1 |********** |
|
||||
|
||||
pid = 12925
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 1 |****************************************|
|
||||
|
||||
pid = 12926
|
||||
usecs : count distribution
|
||||
0 -> 1 : 0 | |
|
||||
2 -> 3 : 1 |****************************************|
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 1 |****************************************|
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 0 | |
|
||||
512 -> 1023 : 1 |****************************************|
|
||||
|
||||
pid = 12927
|
||||
usecs : count distribution
|
||||
0 -> 1 : 1 |****************************************|
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 1 |****************************************|
|
||||
|
||||
|
||||
A -L option will print a distribution for each TID:
|
||||
|
||||
# ./runqlat -L
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
^C
|
||||
|
||||
tid = 0
|
||||
usecs : count distribution
|
||||
0 -> 1 : 593 |**************************** |
|
||||
2 -> 3 : 829 |****************************************|
|
||||
4 -> 7 : 300 |************** |
|
||||
8 -> 15 : 321 |*************** |
|
||||
16 -> 31 : 132 |****** |
|
||||
32 -> 63 : 58 |** |
|
||||
64 -> 127 : 0 | |
|
||||
128 -> 255 : 0 | |
|
||||
256 -> 511 : 13 | |
|
||||
|
||||
tid = 7
|
||||
usecs : count distribution
|
||||
0 -> 1 : 8 |******** |
|
||||
2 -> 3 : 19 |******************** |
|
||||
4 -> 7 : 37 |****************************************|
|
||||
[...]
|
||||
|
||||
|
||||
And a --pidnss option (short for PID namespaces) will print for each PID
|
||||
namespace, for analyzing container performance:
|
||||
|
||||
# ./runqlat --pidnss -m
|
||||
Tracing run queue latency... Hit Ctrl-C to end.
|
||||
^C
|
||||
|
||||
pidns = 4026532870
|
||||
msecs : count distribution
|
||||
0 -> 1 : 40 |****************************************|
|
||||
2 -> 3 : 1 |* |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 2 |** |
|
||||
64 -> 127 : 5 |***** |
|
||||
|
||||
pidns = 4026532809
|
||||
msecs : count distribution
|
||||
0 -> 1 : 67 |****************************************|
|
||||
|
||||
pidns = 4026532748
|
||||
msecs : count distribution
|
||||
0 -> 1 : 63 |****************************************|
|
||||
|
||||
pidns = 4026532687
|
||||
msecs : count distribution
|
||||
0 -> 1 : 7 |****************************************|
|
||||
|
||||
pidns = 4026532626
|
||||
msecs : count distribution
|
||||
0 -> 1 : 45 |****************************************|
|
||||
2 -> 3 : 0 | |
|
||||
4 -> 7 : 0 | |
|
||||
8 -> 15 : 0 | |
|
||||
16 -> 31 : 0 | |
|
||||
32 -> 63 : 0 | |
|
||||
64 -> 127 : 3 |** |
|
||||
|
||||
pidns = 4026531836
|
||||
msecs : count distribution
|
||||
0 -> 1 : 314 |****************************************|
|
||||
2 -> 3 : 1 | |
|
||||
4 -> 7 : 11 |* |
|
||||
8 -> 15 : 28 |*** |
|
||||
16 -> 31 : 137 |***************** |
|
||||
32 -> 63 : 86 |********** |
|
||||
64 -> 127 : 1 | |
|
||||
|
||||
pidns = 4026532382
|
||||
msecs : count distribution
|
||||
0 -> 1 : 285 |****************************************|
|
||||
2 -> 3 : 5 | |
|
||||
4 -> 7 : 16 |** |
|
||||
8 -> 15 : 9 |* |
|
||||
16 -> 31 : 69 |********* |
|
||||
32 -> 63 : 25 |*** |
|
||||
|
||||
Many of these distributions have two modes: the second, in this case, is
|
||||
caused by capping CPU usage via CPU shares.
|
||||
|
||||
|
||||
USAGE message:
|
||||
|
||||
# ./runqlat -h
|
||||
usage: runqlat.py [-h] [-T] [-m] [-P] [--pidnss] [-L] [-p PID]
|
||||
[interval] [count]
|
||||
|
||||
Summarize run queue (scheduler) latency as a histogram
|
||||
|
||||
positional arguments:
|
||||
interval output interval, in seconds
|
||||
count number of outputs
|
||||
|
||||
optional arguments:
|
||||
-h, --help show this help message and exit
|
||||
-T, --timestamp include timestamp on output
|
||||
-m, --milliseconds millisecond histogram
|
||||
-P, --pids print a histogram per process ID
|
||||
--pidnss print a histogram per PID namespace
|
||||
-L, --tids print a histogram per thread ID
|
||||
-p PID, --pid PID trace this PID only
|
||||
|
||||
examples:
|
||||
./runqlat # summarize run queue latency as a histogram
|
||||
./runqlat 1 10 # print 1 second summaries, 10 times
|
||||
./runqlat -mT 1 # 1s summaries, milliseconds, and timestamps
|
||||
./runqlat -P # show each PID separately
|
||||
./runqlat -p 185 # trace PID 185 only
|
||||
|
||||
```
|
||||
|
||||
## 总结
|
||||
runqlat 是一个 Linux 内核 BPF 程序,通过柱状图来总结调度程序运行队列延迟,显示任务等待运行在 CPU 上的时间长度。
|
||||
编译这个程序可以使用 ecc 工具,运行时可以使用 ecli 命令。runqlat 是一种用于监控 Linux 内核中进程调度延迟的工具,它可以帮助您了解进程在内核中等待执行的时间,并根据这些信息优化进程调度,提高系统的性能。要使用 runqlat,需要在终端中输入 runqlat 命令,然后按照提示操作即可。更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
## origin
|
||||
|
||||
origin from:
|
||||
runqlat 是一个 Linux 内核 BPF 程序,通过柱状图来总结调度程序运行队列延迟,显示任务等待运行在 CPU 上的时间长度。编译这个程序可以使用 ecc 工具,运行时可以使用 ecli 命令。
|
||||
|
||||
<https://github.com/iovisor/bcc/blob/master/libbpf-tools/runqlat.bpf.c>
|
||||
runqlat 是一种用于监控 Linux 内核中进程调度延迟的工具。它可以帮助您了解进程在内核中等待执行的时间,并根据这些信息优化进程调度,提高系统的性能。可以在 libbpf-tools 中找到最初的源代码:<https://github.com/iovisor/bcc/blob/master/libbpf-tools/runqlat.bpf.c>
|
||||
|
||||
This program summarizes scheduler run queue latency as a histogram, showing
|
||||
how long tasks spent waiting their turn to run on-CPU.
|
||||
更多的例子和详细的开发指南,请参考 eunomia-bpf 的官方文档:<https://github.com/eunomia-bpf/eunomia-bpf>
|
||||
|
||||
完整的教程和源代码已经全部开源,可以在 <https://github.com/eunomia-bpf/bpf-developer-tutorial> 中查看。
|
||||
|
||||
Reference in New Issue
Block a user