Mirror of https://github.com/eunomia-bpf/bpf-developer-tutorial.git (synced 2026-02-03 10:14:44 +08:00)

Commit: Deploying to gh-pages from eunomia-bpf/bpf-developer-tutorial@9af603b21a 🚀

13-tcpconnlat/.gitignore (vendored, 2 changes)
@@ -1,2 +1,4 @@
.vscode
package.json
tcpconnlat
.output
13-tcpconnlat/Makefile (new file, 141 lines)

@@ -0,0 +1,141 @@
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
OUTPUT := .output
CLANG ?= clang
LIBBPF_SRC := $(abspath ../../libbpf/src)
BPFTOOL_SRC := $(abspath ../../bpftool/src)
LIBBPF_OBJ := $(abspath $(OUTPUT)/libbpf.a)
BPFTOOL_OUTPUT ?= $(abspath $(OUTPUT)/bpftool)
BPFTOOL ?= $(BPFTOOL_OUTPUT)/bootstrap/bpftool
LIBBLAZESYM_SRC := $(abspath ../../blazesym/)
LIBBLAZESYM_OBJ := $(abspath $(OUTPUT)/libblazesym.a)
LIBBLAZESYM_HEADER := $(abspath $(OUTPUT)/blazesym.h)
ARCH ?= $(shell uname -m | sed 's/x86_64/x86/' \
			 | sed 's/arm.*/arm/' \
			 | sed 's/aarch64/arm64/' \
			 | sed 's/ppc64le/powerpc/' \
			 | sed 's/mips.*/mips/' \
			 | sed 's/riscv64/riscv/' \
			 | sed 's/loongarch64/loongarch/')
VMLINUX := ../../vmlinux/$(ARCH)/vmlinux.h
# Use our own libbpf API headers and Linux UAPI headers distributed with
# libbpf to avoid dependency on system-wide headers, which could be missing or
# outdated
INCLUDES := -I$(OUTPUT) -I../../libbpf/include/uapi -I$(dir $(VMLINUX))
CFLAGS := -g -Wall
ALL_LDFLAGS := $(LDFLAGS) $(EXTRA_LDFLAGS)

APPS = tcpconnlat # minimal minimal_legacy uprobe kprobe fentry usdt sockfilter tc ksyscall

CARGO ?= $(shell which cargo)
ifeq ($(strip $(CARGO)),)
BZS_APPS :=
else
BZS_APPS := # profile
APPS += $(BZS_APPS)
# Required by libblazesym
ALL_LDFLAGS += -lrt -ldl -lpthread -lm
endif

# Get Clang's default includes on this system. We'll explicitly add these dirs
# to the includes list when compiling with `-target bpf` because otherwise some
# architecture-specific dirs will be "missing" on some architectures/distros -
# headers such as asm/types.h, asm/byteorder.h, asm/socket.h, asm/sockios.h,
# sys/cdefs.h etc. might be missing.
#
# Use '-idirafter': Don't interfere with include mechanics except where the
# build would have failed anyways.
CLANG_BPF_SYS_INCLUDES ?= $(shell $(CLANG) -v -E - </dev/null 2>&1 \
	| sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')

ifeq ($(V),1)
	Q =
	msg =
else
	Q = @
	msg = @printf '  %-8s %s%s\n' \
		      "$(1)" \
		      "$(patsubst $(abspath $(OUTPUT))/%,%,$(2))" \
		      "$(if $(3), $(3))";
	MAKEFLAGS += --no-print-directory
endif

define allow-override
  $(if $(or $(findstring environment,$(origin $(1))),\
            $(findstring command line,$(origin $(1)))),,\
    $(eval $(1) = $(2)))
endef

$(call allow-override,CC,$(CROSS_COMPILE)cc)
$(call allow-override,LD,$(CROSS_COMPILE)ld)

.PHONY: all
all: $(APPS)

.PHONY: clean
clean:
	$(call msg,CLEAN)
	$(Q)rm -rf $(OUTPUT) $(APPS)

$(OUTPUT) $(OUTPUT)/libbpf $(BPFTOOL_OUTPUT):
	$(call msg,MKDIR,$@)
	$(Q)mkdir -p $@

# Build libbpf
$(LIBBPF_OBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf
	$(call msg,LIB,$@)
	$(Q)$(MAKE) -C $(LIBBPF_SRC) BUILD_STATIC_ONLY=1 \
		    OBJDIR=$(dir $@)/libbpf DESTDIR=$(dir $@) \
		    INCLUDEDIR= LIBDIR= UAPIDIR= \
		    install

# Build bpftool
$(BPFTOOL): | $(BPFTOOL_OUTPUT)
	$(call msg,BPFTOOL,$@)
	$(Q)$(MAKE) ARCH= CROSS_COMPILE= OUTPUT=$(BPFTOOL_OUTPUT)/ -C $(BPFTOOL_SRC) bootstrap


$(LIBBLAZESYM_SRC)/target/release/libblazesym.a::
	$(Q)cd $(LIBBLAZESYM_SRC) && $(CARGO) build --features=cheader,dont-generate-test-files --release

$(LIBBLAZESYM_OBJ): $(LIBBLAZESYM_SRC)/target/release/libblazesym.a | $(OUTPUT)
	$(call msg,LIB, $@)
	$(Q)cp $(LIBBLAZESYM_SRC)/target/release/libblazesym.a $@

$(LIBBLAZESYM_HEADER): $(LIBBLAZESYM_SRC)/target/release/libblazesym.a | $(OUTPUT)
	$(call msg,LIB,$@)
	$(Q)cp $(LIBBLAZESYM_SRC)/target/release/blazesym.h $@

# Build BPF code
$(OUTPUT)/%.bpf.o: %.bpf.c $(LIBBPF_OBJ) $(wildcard %.h) $(VMLINUX) | $(OUTPUT) $(BPFTOOL)
	$(call msg,BPF,$@)
	$(Q)$(CLANG) -g -O2 -target bpf -D__TARGET_ARCH_$(ARCH) \
		     $(INCLUDES) $(CLANG_BPF_SYS_INCLUDES) \
		     -c $(filter %.c,$^) -o $(patsubst %.bpf.o,%.tmp.bpf.o,$@)
	$(Q)$(BPFTOOL) gen object $@ $(patsubst %.bpf.o,%.tmp.bpf.o,$@)

# Generate BPF skeletons
$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(OUTPUT) $(BPFTOOL)
	$(call msg,GEN-SKEL,$@)
	$(Q)$(BPFTOOL) gen skeleton $< > $@

# Build user-space code
$(patsubst %,$(OUTPUT)/%.o,$(APPS)): %.o: %.skel.h

$(OUTPUT)/%.o: %.c $(wildcard %.h) | $(OUTPUT)
	$(call msg,CC,$@)
	$(Q)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@

$(patsubst %,$(OUTPUT)/%.o,$(BZS_APPS)): $(LIBBLAZESYM_HEADER)

$(BZS_APPS): $(LIBBLAZESYM_OBJ)

# Build application binary
$(APPS): %: $(OUTPUT)/%.o $(LIBBPF_OBJ) | $(OUTPUT)
	$(call msg,BINARY,$@)
	$(Q)$(CC) $(CFLAGS) $^ $(ALL_LDFLAGS) -lelf -lz -o $@

# delete failed targets
.DELETE_ON_ERROR:

# keep intermediate (.skel.h, .bpf.o, etc) targets
.SECONDARY:
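As an illustrative aside (not part of the commit), the CLANG_BPF_SYS_INCLUDES pipeline above turns clang's reported system include directories into -idirafter flags; on a hypothetical x86_64 Ubuntu host its output might look like:

    $ clang -v -E - </dev/null 2>&1 \
        | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }'
    -idirafter /usr/lib/llvm-14/lib/clang/14.0.0/include
    -idirafter /usr/local/include
    -idirafter /usr/include/x86_64-linux-gnu
    -idirafter /usr/include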
@@ -149,7 +149,7 @@
<p>In everyday backend development, whether you use C, Java, PHP, or Golang, you inevitably call components such as MySQL and Redis to fetch data, make RPC calls, or invoke other RESTful APIs. Underneath, almost all of these calls travel over TCP: among transport-layer protocols TCP offers reliable connections, retransmission on error, and congestion control, so it is currently used more widely than UDP. TCP does have drawbacks, though, such as the relatively long connection-setup latency, which is why alternatives such as QUIC (Quick UDP Internet Connections) have emerged.</p>
<p>Analyzing TCP connection latency therefore helps a great deal with network performance analysis, tuning, and troubleshooting.</p>
<h2 id="tcpconnlat-的实现原理"><a class="header" href="#tcpconnlat-的实现原理">How tcpconnlat works</a></h2>
<p>tcpconnlat traces the kernel functions that perform active TCP connections (for example via the connect() system call) and reports the locally measured connection latency, i.e. the time from the SYN being sent to the response packet arriving.</p>
<h3 id="tcp-连接原理"><a class="header" href="#tcp-连接原理">How a TCP connection is established</a></h3>
<p>The whole TCP connection process is shown in the figure:</p>
<p><img src="tcpconnlat1.png" alt="tcpconnlat" /></p>
@@ -264,15 +264,13 @@
}
</code></pre>
<h2 id="编译运行"><a class="header" href="#编译运行">Compile and run</a></h2>
<ul>
<li><code>git clone https://github.com/libbpf/libbpf-bootstrap libbpf-bootstrap-cloned</code></li>
<li>Copy the files under the <a href="libbpf-bootstrap">libbpf-bootstrap</a> directory into <code>libbpf-bootstrap-cloned/examples/c</code></li>
<li>Edit <code>libbpf-bootstrap-cloned/examples/c/Makefile</code> and append <code>tcpconnlat</code> to its <code>APPS</code> variable</li>
<li>Run <code>make tcpconnlat</code> in <code>libbpf-bootstrap-cloned/examples/c</code></li>
<li><code>sudo ./tcpconnlat</code></li>
</ul>
<h2 id="效果"><a class="header" href="#效果">Result</a></h2>
<pre><code class="language-plain">root@yutong-VirtualBox:~/libbpf-bootstrap/examples/c# ./tcpconnlat
<pre><code class="language-console">$ make
...
  BPF      .output/tcpconnlat.bpf.o
  GEN-SKEL .output/tcpconnlat.skel.h
  CC       .output/tcpconnlat.o
  BINARY   tcpconnlat
$ sudo ./tcpconnlat
PID     COMM      IP  SADDR            DADDR            DPORT  LAT(ms)
222564  wget      4   192.168.88.15    110.242.68.3     80     25.29
222684  wget      4   192.168.88.15    167.179.101.42   443    246.76

@@ -1,131 +0,0 @@ (deleted file: the old tcpconnlat BPF kernel-side source)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "tcpconnlat.h"

#define AF_INET    2
#define AF_INET6   10

const volatile __u64 targ_min_us = 0;
const volatile pid_t targ_tgid = 0;

struct piddata {
    char comm[TASK_COMM_LEN];
    u64 ts;
    u32 tgid;
};

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 4096);
    __type(key, struct sock *);
    __type(value, struct piddata);
} start SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
    __uint(key_size, sizeof(u32));
    __uint(value_size, sizeof(u32));
} events SEC(".maps");

static int trace_connect(struct sock *sk)
{
    u32 tgid = bpf_get_current_pid_tgid() >> 32;
    struct piddata piddata = {};

    if (targ_tgid && targ_tgid != tgid)
        return 0;

    bpf_get_current_comm(&piddata.comm, sizeof(piddata.comm));
    piddata.ts = bpf_ktime_get_ns();
    piddata.tgid = tgid;
    bpf_map_update_elem(&start, &sk, &piddata, 0);
    return 0;
}

static int handle_tcp_rcv_state_process(void *ctx, struct sock *sk)
{
    struct piddata *piddatap;
    struct event event = {};
    s64 delta;
    u64 ts;

    if (BPF_CORE_READ(sk, __sk_common.skc_state) != TCP_SYN_SENT)
        return 0;

    piddatap = bpf_map_lookup_elem(&start, &sk);
    if (!piddatap)
        return 0;

    ts = bpf_ktime_get_ns();
    delta = (s64)(ts - piddatap->ts);
    if (delta < 0)
        goto cleanup;

    event.delta_us = delta / 1000U;
    if (targ_min_us && event.delta_us < targ_min_us)
        goto cleanup;
    __builtin_memcpy(&event.comm, piddatap->comm,
                     sizeof(event.comm));
    event.ts_us = ts / 1000;
    event.tgid = piddatap->tgid;
    event.lport = BPF_CORE_READ(sk, __sk_common.skc_num);
    event.dport = BPF_CORE_READ(sk, __sk_common.skc_dport);
    event.af = BPF_CORE_READ(sk, __sk_common.skc_family);
    if (event.af == AF_INET) {
        event.saddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_rcv_saddr);
        event.daddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_daddr);
    } else {
        BPF_CORE_READ_INTO(&event.saddr_v6, sk,
                           __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
        BPF_CORE_READ_INTO(&event.daddr_v6, sk,
                           __sk_common.skc_v6_daddr.in6_u.u6_addr32);
    }
    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                          &event, sizeof(event));

cleanup:
    bpf_map_delete_elem(&start, &sk);
    return 0;
}

SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
    return trace_connect(sk);
}

SEC("kprobe/tcp_v6_connect")
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
{
    return trace_connect(sk);
}

SEC("kprobe/tcp_rcv_state_process")
int BPF_KPROBE(tcp_rcv_state_process, struct sock *sk)
{
    return handle_tcp_rcv_state_process(ctx, sk);
}

SEC("fentry/tcp_v4_connect")
int BPF_PROG(fentry_tcp_v4_connect, struct sock *sk)
{
    return trace_connect(sk);
}

SEC("fentry/tcp_v6_connect")
int BPF_PROG(fentry_tcp_v6_connect, struct sock *sk)
{
    return trace_connect(sk);
}

SEC("fentry/tcp_rcv_state_process")
int BPF_PROG(fentry_tcp_rcv_state_process, struct sock *sk)
{
    return handle_tcp_rcv_state_process(ctx, sk);
}

char LICENSE[] SEC("license") = "GPL";
@@ -4,7 +4,7 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_core_read.h>
 #include <bpf/bpf_tracing.h>
-#include "tcpconnlat.bpf.h"
+#include "tcpconnlat.h"

 #define AF_INET    2
 #define AF_INET6   10
@@ -110,4 +110,22 @@ int BPF_KPROBE(tcp_rcv_state_process, struct sock *sk)
     return handle_tcp_rcv_state_process(ctx, sk);
 }

-char LICENSE[] SEC("license") = "GPL";
+SEC("fentry/tcp_v4_connect")
+int BPF_PROG(fentry_tcp_v4_connect, struct sock *sk)
+{
+    return trace_connect(sk);
+}
+
+SEC("fentry/tcp_v6_connect")
+int BPF_PROG(fentry_tcp_v6_connect, struct sock *sk)
+{
+    return trace_connect(sk);
+}
+
+SEC("fentry/tcp_rcv_state_process")
+int BPF_PROG(fentry_tcp_rcv_state_process, struct sock *sk)
+{
+    return handle_tcp_rcv_state_process(ctx, sk);
+}
+
+char LICENSE[] SEC("license") = "GPL";
@@ -1,26 +0,0 @@ (deleted file: the old tcpconnlat.h event definition)
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __TCPCONNLAT_H
#define __TCPCONNLAT_H

#define TASK_COMM_LEN 16

struct event {
    // union {
    unsigned int saddr_v4;
    unsigned char saddr_v6[16];
    // };
    // union {
    unsigned int daddr_v4;
    unsigned char daddr_v6[16];
    // };
    char comm[TASK_COMM_LEN];
    unsigned long long delta_us;
    unsigned long long ts_us;
    unsigned int tgid;
    int af;
    unsigned short lport;
    unsigned short dport;
};


#endif /* __TCPCONNLAT_H_ */
@@ -145,138 +145,6 @@
<div id="content" class="content">
<main>
<h1 id="ebpf-入门实践教程编写-ebpf-程序-tcpconnlat-测量-tcp-连接延时"><a class="header" href="#ebpf-入门实践教程编写-ebpf-程序-tcpconnlat-测量-tcp-连接延时">eBPF beginner tutorial: writing the eBPF program tcpconnlat to measure TCP connection latency</a></h1>
<h2 id="代码解释"><a class="header" href="#代码解释">Code walkthrough</a></h2>
<h3 id="背景"><a class="header" href="#背景">Background</a></h3>
<p>In everyday backend development, whether you use C, Java, PHP, or Golang, you inevitably call components such as MySQL and Redis to fetch data, make RPC calls, or invoke other RESTful APIs. Underneath, almost all of these calls travel over TCP: among transport-layer protocols TCP offers reliable connections, retransmission on error, and congestion control, so it is currently used more widely than UDP. TCP does have drawbacks, though, such as the relatively long connection-setup latency, which is why alternatives such as QUIC (Quick UDP Internet Connections) have emerged.</p>
<p>Analyzing TCP connection latency therefore helps a great deal with network performance analysis, tuning, and troubleshooting.</p>
<h3 id="tcpconnlat-的实现原理"><a class="header" href="#tcpconnlat-的实现原理">How tcpconnlat works</a></h3>
<p>tcpconnlat traces the kernel functions that perform active TCP connections (for example via the connect() system call) and reports the locally measured connection latency, i.e. the time from the SYN being sent to the response packet arriving.</p>
<h3 id="tcp-连接原理"><a class="header" href="#tcp-连接原理">How a TCP connection is established</a></h3>
<p>The whole TCP connection process is shown in the figure:</p>
<p><img src="tcpconnlat1.png" alt="tcpconnlat" /></p>
<p>Let us briefly look at where the time goes in each step of this handshake:</p>
<ol>
<li>The client sends the SYN packet: the client usually issues the SYN via the connect system call, which costs local CPU time in the system call and soft-IRQ handling.</li>
<li>The SYN travels to the server: the SYN leaves the client NIC and crosses the network, a long-haul transmission.</li>
<li>The server processes the SYN: the kernel receives the packet in a soft IRQ, places the connection in the half-open queue, and sends back a SYN/ACK. This is mainly CPU overhead.</li>
<li>The SYN/ACK travels back to the client: another long network hop.</li>
<li>The client handles the SYN/ACK: the client kernel receives and processes it, spends a few microseconds of CPU time, and sends the ACK. Again soft-IRQ overhead.</li>
<li>The ACK travels to the server: another long network hop.</li>
<li>The server receives the ACK: the server kernel processes it, removes the connection from the half-open queue, and places it in the full-connection (accept) queue. One soft IRQ's worth of CPU.</li>
<li>The server's user process wakes up: the process blocked in the accept system call is woken and takes the established connection out of the accept queue. One context switch of CPU overhead.</li>
</ol>
<p>From the client's point of view, under normal conditions the total cost of a TCP connection is roughly one network RTT. In some situations, however, the network transfer time can grow, the CPU processing cost can rise, or the connection can fail outright. Once an abnormally long latency has been noticed, it can be analyzed together with other information.</p>
<h3 id="ebpf-实现原理"><a class="header" href="#ebpf-实现原理">How the eBPF side works</a></h3>
<p>During the TCP three-way handshake the Linux kernel maintains two queues:</p>
<ul>
<li>the half-open queue, also called the SYN queue;</li>
<li>the full-connection queue, also called the accept queue.</li>
</ul>
<p>When the server receives a SYN from the client, the kernel stores the connection in the half-open queue and replies with SYN+ACK; the client then returns an ACK. When the server receives this third-handshake ACK, the kernel removes the connection from the half-open queue, creates the fully established connection, appends it to the accept queue, and leaves it there until the process fetches it with accept().</p>
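<p>(Illustrative aside, not part of the original tutorial.) The state of the accept queue can be inspected on a live system with <code>ss</code>: for listening sockets, <code>Recv-Q</code> is the number of connections currently waiting in the accept queue and <code>Send-Q</code> is its configured limit. The values below are hypothetical sample output.</p>
<pre><code class="language-console">$ ss -lnt
State   Recv-Q  Send-Q  Local Address:Port   Peer Address:Port
LISTEN  0       128     0.0.0.0:22           0.0.0.0:*
LISTEN  0       511     127.0.0.1:6379       0.0.0.0:*
</code></pre>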
<p>Our eBPF implementation lives in <a href="https://github.com/yunwei37/Eunomia/blob/master/bpftools/tcpconnlat/tcpconnlat.bpf.c">https://github.com/yunwei37/Eunomia/blob/master/bpftools/tcpconnlat/tcpconnlat.bpf.c</a>:</p>
<p>It mainly attaches to hooks such as kprobe/tcp_v4_connect and kprobe/tcp_rcv_state_process:</p>
<pre><code class="language-c">
SEC("kprobe/tcp_v4_connect")
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
{
    return trace_connect(sk);
}

SEC("kprobe/tcp_v6_connect")
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
{
    return trace_connect(sk);
}

SEC("kprobe/tcp_rcv_state_process")
int BPF_KPROBE(tcp_rcv_state_process, struct sock *sk)
{
    return handle_tcp_rcv_state_process(ctx, sk);
}
</code></pre>
<p>In trace_connect we record each new outgoing TCP connection, take a timestamp, and store it in a map keyed by the socket:</p>
<pre><code class="language-c">struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 4096);
    __type(key, struct sock *);
    __type(value, struct piddata);
} start SEC(".maps");

static int trace_connect(struct sock *sk)
{
    u32 tgid = bpf_get_current_pid_tgid() >> 32;
    struct piddata piddata = {};

    if (targ_tgid && targ_tgid != tgid)
        return 0;

    bpf_get_current_comm(&piddata.comm, sizeof(piddata.comm));
    piddata.ts = bpf_ktime_get_ns();
    piddata.tgid = tgid;
    bpf_map_update_elem(&start, &sk, &piddata, 0);
    return 0;
}
</code></pre>
<p>In handle_tcp_rcv_state_process we trace the received TCP packets, fetch the corresponding connect event from the map, and compute the latency:</p>
<pre><code class="language-c">static int handle_tcp_rcv_state_process(void *ctx, struct sock *sk)
{
    struct piddata *piddatap;
    struct event event = {};
    s64 delta;
    u64 ts;

    if (BPF_CORE_READ(sk, __sk_common.skc_state) != TCP_SYN_SENT)
        return 0;

    piddatap = bpf_map_lookup_elem(&start, &sk);
    if (!piddatap)
        return 0;

    ts = bpf_ktime_get_ns();
    delta = (s64)(ts - piddatap->ts);
    if (delta < 0)
        goto cleanup;

    event.delta_us = delta / 1000U;
    if (targ_min_us && event.delta_us < targ_min_us)
        goto cleanup;
    __builtin_memcpy(&event.comm, piddatap->comm,
                     sizeof(event.comm));
    event.ts_us = ts / 1000;
    event.tgid = piddatap->tgid;
    event.lport = BPF_CORE_READ(sk, __sk_common.skc_num);
    event.dport = BPF_CORE_READ(sk, __sk_common.skc_dport);
    event.af = BPF_CORE_READ(sk, __sk_common.skc_family);
    if (event.af == AF_INET) {
        event.saddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_rcv_saddr);
        event.daddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_daddr);
    } else {
        BPF_CORE_READ_INTO(&event.saddr_v6, sk,
                           __sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
        BPF_CORE_READ_INTO(&event.daddr_v6, sk,
                           __sk_common.skc_v6_daddr.in6_u.u6_addr32);
    }
    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                          &event, sizeof(event));

cleanup:
    bpf_map_delete_elem(&start, &sk);
    return 0;
}
</code></pre>
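<p>The user-space half of the tool is not shown on this page. As a minimal sketch (assuming recent libbpf, a skeleton generated by <code>bpftool gen skeleton</code> named <code>tcpconnlat.skel.h</code>, and the <code>struct event</code> layout from tcpconnlat.h above), the events could be consumed through a perf buffer roughly like this:</p>
<pre><code class="language-c">// Hypothetical user-space loader sketch; not part of the original tutorial.
#include <stdio.h>
#include <arpa/inet.h>
#include <bpf/libbpf.h>
#include "tcpconnlat.h"
#include "tcpconnlat.skel.h"

static void handle_event(void *ctx, int cpu, void *data, __u32 size)
{
    const struct event *e = data;
    char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

    if (e->af == AF_INET) {
        inet_ntop(AF_INET, &e->saddr_v4, saddr, sizeof(saddr));
        inet_ntop(AF_INET, &e->daddr_v4, daddr, sizeof(daddr));
    } else {
        inet_ntop(AF_INET6, e->saddr_v6, saddr, sizeof(saddr));
        inet_ntop(AF_INET6, e->daddr_v6, daddr, sizeof(daddr));
    }
    printf("%-7u %-16s %-15s %-15s %-5u %.2f\n", e->tgid, e->comm,
           saddr, daddr, (unsigned)ntohs(e->dport), e->delta_us / 1000.0);
}

int main(void)
{
    struct tcpconnlat_bpf *skel;
    struct perf_buffer *pb = NULL;
    int err;

    skel = tcpconnlat_bpf__open_and_load();
    if (!skel)
        return 1;
    err = tcpconnlat_bpf__attach(skel);
    if (err)
        goto cleanup;

    /* 8 pages of perf buffer per CPU; print one line per connection event */
    pb = perf_buffer__new(bpf_map__fd(skel->maps.events), 8,
                          handle_event, NULL, NULL, NULL);
    if (!pb) {
        err = -1;
        goto cleanup;
    }
    while ((err = perf_buffer__poll(pb, 100)) >= 0)
        ; /* loop until interrupted or an error occurs */

cleanup:
    perf_buffer__free(pb);
    tcpconnlat_bpf__destroy(skel);
    return err != 0;
}
</code></pre>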
<h3 id="编译运行"><a class="header" href="#编译运行">Compile and run</a></h3>
<p>TODO</p>
<h3 id="总结"><a class="header" href="#总结">Summary</a></h3>
<p>As the experiment above shows, tcpconnlat works by tracing TCP connections inside the kernel and can report the latency of each connection. Beyond command-line use, the same data can be combined with container and Kubernetes metadata and fed into tools such as <code>prometheus</code> and <code>grafana</code> for network performance analysis.</p>
<blockquote>
<p><code>Eunomia</code> is a lightweight, high-performance cloud-native monitoring tool built on eBPF and written in C/C++. It aims to help users understand container behaviour and detect suspicious container security events, providing a lightweight open-source monitoring solution covering the whole container lifecycle. It uses <code>Linux</code> <code>eBPF</code> technology to trace your system and applications at runtime and analyses the collected events for suspicious behaviour patterns. It currently offers performance analysis, container-cluster network visualization, container security alerting, one-click deployment, and persistent-storage monitoring, with a wide range of eBPF tracepoints. Its core exporter/CLI is a binary of only about 4 MB that can start on any supported Linux kernel.</p>
</blockquote>
<p>Project home: <a href="https://github.com/yunwei37/Eunomia">https://github.com/yunwei37/Eunomia</a></p>
<h3 id="参考资料"><a class="header" href="#参考资料">References</a></h3>
<ol>
<li><a href="http://kerneltravel.net/blog/2020/tcpconnlat/">http://kerneltravel.net/blog/2020/tcpconnlat/</a></li>
<li><a href="https://network.51cto.com/article/640631.html">https://network.51cto.com/article/640631.html</a></li>
</ol>

</main>

tcpconnlat1.png: binary image file not shown (40 KiB).
14-tcpstates/.gitignore (vendored, 3 changes)

@@ -2,4 +2,5 @@
package.json
eunomia-exporter
ecli

tcpstates
.output
14-tcpstates/Makefile (new file, 141 lines)

@@ -0,0 +1,141 @@
(This Makefile is identical to 13-tcpconnlat/Makefile above, except that it builds tcpstates: APPS = tcpstates # minimal minimal_legacy uprobe kprobe fentry usdt sockfilter tc ksyscall)
@@ -145,6 +145,110 @@
<div id="content" class="content">
<main>
<h1 id="ebpf入门实践教程使用-libbpf-bootstrap-开发程序统计-tcp-连接延时"><a class="header" href="#ebpf入门实践教程使用-libbpf-bootstrap-开发程序统计-tcp-连接延时">eBPF beginner tutorial: developing a program with libbpf-bootstrap to measure TCP connection latency</a></h1>
<h2 id="内核态代码"><a class="header" href="#内核态代码">Kernel-side code</a></h2>
<pre><code class="language-c">// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Hengqi Chen */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "tcpstates.h"

#define MAX_ENTRIES 10240
#define AF_INET     2
#define AF_INET6    10

const volatile bool filter_by_sport = false;
const volatile bool filter_by_dport = false;
const volatile short target_family = 0;

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, MAX_ENTRIES);
    __type(key, __u16);
    __type(value, __u16);
} sports SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, MAX_ENTRIES);
    __type(key, __u16);
    __type(value, __u16);
} dports SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, MAX_ENTRIES);
    __type(key, struct sock *);
    __type(value, __u64);
} timestamps SEC(".maps");

struct {
    __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
    __uint(key_size, sizeof(__u32));
    __uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("tracepoint/sock/inet_sock_set_state")
int handle_set_state(struct trace_event_raw_inet_sock_set_state *ctx)
{
    struct sock *sk = (struct sock *)ctx->skaddr;
    __u16 family = ctx->family;
    __u16 sport = ctx->sport;
    __u16 dport = ctx->dport;
    __u64 *tsp, delta_us, ts;
    struct event event = {};

    if (ctx->protocol != IPPROTO_TCP)
        return 0;

    if (target_family && target_family != family)
        return 0;

    if (filter_by_sport && !bpf_map_lookup_elem(&sports, &sport))
        return 0;

    if (filter_by_dport && !bpf_map_lookup_elem(&dports, &dport))
        return 0;

    tsp = bpf_map_lookup_elem(&timestamps, &sk);
    ts = bpf_ktime_get_ns();
    if (!tsp)
        delta_us = 0;
    else
        delta_us = (ts - *tsp) / 1000;

    event.skaddr = (__u64)sk;
    event.ts_us = ts / 1000;
    event.delta_us = delta_us;
    event.pid = bpf_get_current_pid_tgid() >> 32;
    event.oldstate = ctx->oldstate;
    event.newstate = ctx->newstate;
    event.family = family;
    event.sport = sport;
    event.dport = dport;
    bpf_get_current_comm(&event.task, sizeof(event.task));

    if (family == AF_INET) {
        bpf_probe_read_kernel(&event.saddr, sizeof(event.saddr), &sk->__sk_common.skc_rcv_saddr);
        bpf_probe_read_kernel(&event.daddr, sizeof(event.daddr), &sk->__sk_common.skc_daddr);
    } else { /* family == AF_INET6 */
        bpf_probe_read_kernel(&event.saddr, sizeof(event.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
        bpf_probe_read_kernel(&event.daddr, sizeof(event.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
    }

    bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));

    if (ctx->newstate == TCP_CLOSE)
        bpf_map_delete_elem(&timestamps, &sk);
    else
        bpf_map_update_elem(&timestamps, &sk, &ts, BPF_ANY);

    return 0;
}

char LICENSE[] SEC("license") = "Dual BSD/GPL";
</code></pre>
<p><code>tcpstates</code> traces the TCP state of the TCP sockets on the current system, mainly by hooking the kernel tracepoint <code>inet_sock_set_state</code>. The collected data is shipped to user space through a <code>perf_event</code> buffer.</p>
<pre><code class="language-c">SEC("tracepoint/sock/inet_sock_set_state")
int handle_set_state(struct trace_event_raw_inet_sock_set_state *ctx)
@@ -253,15 +357,13 @@ static void handle_lost_events(void* ctx, int cpu, __u64 lost_cnt) {
}
</code></pre>
<p>When an event is received, the corresponding handler is called and the event is printed, as sketched below.</p>
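<p>(Illustrative sketch, not the tutorial's own code.) Such a handler could format each state transition roughly as follows, assuming the <code>struct event</code> from tcpstates.h and a small helper table mapping kernel TCP state numbers to names:</p>
<pre><code class="language-c">/* Hypothetical user-space event handler for the perf buffer. */
static const char *tcp_states[] = {
    [1] = "ESTABLISHED", [2] = "SYN_SENT",   [3] = "SYN_RECV",
    [4] = "FIN_WAIT1",   [5] = "FIN_WAIT2",  [6] = "TIME_WAIT",
    [7] = "CLOSE",       [8] = "CLOSE_WAIT", [9] = "LAST_ACK",
    [10] = "LISTEN",     [11] = "CLOSING",   [12] = "NEW_SYN_RECV",
};

static void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
    const struct event *e = data;
    char saddr[INET6_ADDRSTRLEN], daddr[INET6_ADDRSTRLEN];

    /* the BPF program copied the raw kernel address bytes into saddr/daddr */
    inet_ntop(e->family, &e->saddr, saddr, sizeof(saddr));
    inet_ntop(e->family, &e->daddr, daddr, sizeof(daddr));
    printf("%-16llx %-7u %-16s %-15s %-5u %-15s %-5u %-11s -> %-11s %.3f\n",
           e->skaddr, e->pid, e->task, saddr, e->sport, daddr, e->dport,
           tcp_states[e->oldstate], tcp_states[e->newstate],
           (double)e->delta_us / 1000.0);
}
</code></pre>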
<ul>
|
||||
<li><code>git clone https://github.com/libbpf/libbpf-bootstrap libbpf-bootstrap-cloned</code></li>
|
||||
<li>将 <a href="libbpf-bootstrap">libbpf-bootstrap</a>目录下的文件复制到 <code>libbpf-bootstrap-cloned/examples/c</code>下</li>
|
||||
<li>修改 <code>libbpf-bootstrap-cloned/examples/c/Makefile</code> ,在其 <code>APPS</code> 项后添加 <code>tcpstates</code></li>
|
||||
<li>在 <code>libbpf-bootstrap-cloned/examples/c</code> 下运行 <code>make tcpstates</code></li>
|
||||
<li><code>sudo ./tcpstates</code></li>
|
||||
</ul>
|
||||
<h2 id="效果"><a class="header" href="#效果">效果</a></h2>
|
||||
<pre><code class="language-plain">root@yutong-VirtualBox:~/libbpf-bootstrap/examples/c# ./tcpstates
|
||||
<pre><code class="language-console">$ make
|
||||
...
|
||||
BPF .output/tcpstates.bpf.o
|
||||
GEN-SKEL .output/tcpstates.skel.h
|
||||
CC .output/tcpstates.o
|
||||
BINARY tcpstates
|
||||
$ sudo ./tcpstates
|
||||
SKADDR PID COMM LADDR LPORT RADDR RPORT OLDSTATE -> NEWSTATE MS
|
||||
ffff9bf61bb62bc0 164978 node 192.168.88.15 0 52.178.17.2 443 CLOSE -> SYN_SENT 0.000
|
||||
ffff9bf61bb62bc0 0 swapper/0 192.168.88.15 41596 52.178.17.2 443 SYN_SENT -> ESTABLISHED 225.794
|
||||
@@ -273,7 +375,6 @@ ffff9bf6d8ee88c0 229832 redis-serv 0.0.0.0 6379 0.0.0.0 0
|
||||
ffff9bf6d8ee88c0 229832 redis-serv 0.0.0.0 6379 0.0.0.0 0 LISTEN -> CLOSE 1.763
|
||||
ffff9bf7109d6900 88750 node 127.0.0.1 39755 127.0.0.1 50966 ESTABLISHED -> FIN_WAIT1 0.000
|
||||
</code></pre>
|
||||
<p>对于输出的详细解释,详见 <a href="README.html">README.md</a></p>
|
||||
<h2 id="总结"><a class="header" href="#总结">总结</a></h2>
|
||||
<p>这里的代码修改自 <a href="https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpstates.bpf.c">https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpstates.bpf.c</a></p>
|
||||
|
||||
|
@@ -1,102 +0,0 @@
(Deleted file: the old 102-line tcpstates kernel-side source. It is the same program as the listing shown in full above; in this copy the identifier &timestamps was mangled to "×tamps" by an HTML-entity encoding artifact.)
@@ -4,42 +4,38 @@
@@ -85,13 +81,10 @@ int handle_set_state(struct trace_event_raw_inet_sock_set_state *ctx)
(Both hunks clean up 14-tcpstates/tcpstates.bpf.c: the include is renamed from "tcpstates.bpf.h" to "tcpstates.h", and the remaining changes are whitespace and brace style only. Map definitions written as

    struct
    {
        __uint(type, BPF_MAP_TYPE_HASH);
        ...

become

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        ...

and the AF_INET/AF_INET6 branch in handle_set_state likewise moves from Allman braces to "if (family == AF_INET) { ... } else { /* family == AF_INET6 */ ... }". The resulting file content is the listing already shown above.)
@@ -1,24 +0,0 @@ (deleted file: the old tcpstates.h event definition)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Hengqi Chen */
#ifndef __TCPSTATES_H
#define __TCPSTATES_H

#define TASK_COMM_LEN 16

struct event
{
    unsigned __int128 saddr;
    unsigned __int128 daddr;
    __u64 skaddr;
    __u64 ts_us;
    __u64 delta_us;
    __u32 pid;
    int oldstate;
    int newstate;
    __u16 family;
    __u16 sport;
    __u16 dport;
    char task[TASK_COMM_LEN];
};

#endif /* __TCPSTATES_H */
@@ -179,10 +179,10 @@ const volatile bool targ_ms = false;

/// @sample {"interval": 1000, "type" : "log2_hist"}
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, MAX_ENTRIES);
    __type(key, u64);
    __type(value, struct hist);
} hists SEC(".maps");

static struct hist zero;

@@ -190,69 +190,55 @@ static struct hist zero;
SEC("fentry/tcp_rcv_established")
int BPF_PROG(tcp_rcv, struct sock *sk)
{
    const struct inet_sock *inet = (struct inet_sock *)(sk);
    struct tcp_sock *ts;
    struct hist *histp;
    u64 key, slot;
    u32 srtt;

    if (targ_sport && targ_sport != inet->inet_sport)
        return 0;
    if (targ_dport && targ_dport != sk->__sk_common.skc_dport)
        return 0;
    if (targ_saddr && targ_saddr != inet->inet_saddr)
        return 0;
    if (targ_daddr && targ_daddr != sk->__sk_common.skc_daddr)
        return 0;

    if (targ_laddr_hist)
        key = inet->inet_saddr;
    else if (targ_raddr_hist)
        key = inet->sk.__sk_common.skc_daddr;
    else
        key = 0;
    histp = bpf_map_lookup_or_try_init(&hists, &key, &zero);
    if (!histp)
        return 0;
    ts = (struct tcp_sock *)(sk);
    srtt = BPF_CORE_READ(ts, srtt_us) >> 3;
    if (targ_ms)
        srtt /= 1000U;
    slot = log2l(srtt);
    if (slot >= MAX_SLOTS)
        slot = MAX_SLOTS - 1;
    __sync_fetch_and_add(&histp->slots[slot], 1);
    if (targ_show_ext) {
        __sync_fetch_and_add(&histp->latency, srtt);
        __sync_fetch_and_add(&histp->cnt, 1);
    }
    return 0;
}

</code></pre>
<p>(These two hunks only re-indent the code above; the old and new copies shown in the diff are otherwise identical, so the code is listed once.)</p>
<p>This is an eBPF-based network-latency analysis tool: it hooks the tcp_rcv_established function in the kernel TCP stack and records the distribution of each TCP connection's RTT. It works roughly as follows:</p>
<ol>
<li>An eBPF hash map named "hists" is defined to hold the RTT histogram data.</li>
<li>When tcp_rcv_established is called, the program first pulls the relevant TCP information out of the socket structure passed in, such as local/remote IP addresses, local/remote ports, and TCP state.</li>
<li>It then checks whether the user-specified filters match the current connection; if not, it returns immediately.</li>
<li>If they match, it looks up the histogram keyed by the local/remote IP address in the "hists" map; if no histogram exists for that address yet, a new one is created and inserted.</li>
<li>Next it reads the connection's smoothed RTT (srtt) from the socket and adjusts it according to the user's options; with the "-ms" option the value is divided by 1000.</li>
<li>The srtt value is then converted to a histogram slot, and that slot's counter is incremented.</li>
<li>With the "-show-ext" option the histogram's total latency (latency) and sample count (cnt) are also accumulated.</li>
</ol>
<h2 id="编译运行"><a class="header" href="#编译运行">Compile and run</a></h2>
<p>eunomia-bpf is an open-source eBPF dynamic-loading runtime and development toolchain combined with Wasm; its goal is to simplify developing, building, distributing, and running eBPF programs. See <a href="https://github.com/eunomia-bpf/eunomia-bpf">https://github.com/eunomia-bpf/eunomia-bpf</a> for downloading and installing the ecc compiler toolchain and the ecli runtime. We use eunomia-bpf to compile and run this example.</p>
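<p>(Illustrative sketch, not part of the tutorial.) On the user-space side, a histogram collected in the <code>hists</code> map could be rendered as the usual power-of-two distribution roughly like this, assuming the <code>struct hist</code> layout used by the bcc libbpf-tools version (a slots array plus latency/cnt counters):</p>
<pre><code class="language-c">/* Hypothetical printer for one struct hist pulled out of the hists map. */
static void print_log2_hist(const struct hist *h, const char *unit)
{
    for (int i = 0; i < MAX_SLOTS; i++) {
        if (!h->slots[i])
            continue;
        /* slot i was filled with slot = log2l(srtt), i.e. srtt roughly
         * in the range [2^i, 2^(i+1)) in the chosen unit */
        printf("%12llu -> %-12llu %-4s : %u\n",
               1ULL << i, (1ULL << (i + 1)) - 1, unit, h->slots[i]);
    }
    if (h->cnt) /* extended fields are only filled when targ_show_ext is set */
        printf("average srtt = %llu %s over %llu samples\n",
               h->latency / h->cnt, unit, h->cnt);
}
</code></pre>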
407
print.html
407
print.html
@@ -2235,7 +2235,7 @@ TIME EVENT COMM PID PPID FILENAME/EXIT CODE
|
||||
<p>在互联网后端日常开发接口的时候中,不管你使用的是C、Java、PHP还是Golang,都避免不了需要调用mysql、redis等组件来获取数据,可能还需要执行一些rpc远程调用,或者再调用一些其它restful api。 在这些调用的底层,基本都是在使用TCP协议进行传输。这是因为在传输层协议中,TCP协议具备可靠的连接,错误重传,拥塞控制等优点,所以目前应用比UDP更广泛一些。但相对而言,tcp 连接也有一些缺点,例如建立连接的延时较长等。因此也会出现像 QUIC ,即 快速UDP网络连接 ( Quick UDP Internet Connections )这样的替代方案。</p>
|
||||
<p>tcp 连接延时分析对于网络性能分析优化或者故障排查都能起到不少作用。</p>
|
||||
<h2 id="tcpconnlat-的实现原理"><a class="header" href="#tcpconnlat-的实现原理">tcpconnlat 的实现原理</a></h2>
|
||||
<p>tcpconnlat 这个工具跟踪执行活动TCP连接的内核函数 (例如,通过connect()系统调用),并显示本地测量的连接的延迟(时间),即从发送 SYN 到响应包的时间。</p>
|
||||
<p>tcpconnlat 这个工具跟踪执行活动TCP连接的内核函数(例如,通过connect()系统调用),并显示本地测量的连接的延迟(时间),即从发送 SYN 到响应包的时间。</p>
|
||||
<h3 id="tcp-连接原理"><a class="header" href="#tcp-连接原理">tcp 连接原理</a></h3>
|
||||
<p>tcp 连接的整个过程如图所示:</p>
|
||||
<p><img src="13-tcpconnlat/tcpconnlat1.png" alt="tcpconnlate" /></p>
|
||||
@@ -2350,15 +2350,13 @@ cleanup:
|
||||
}
|
||||
</code></pre>
|
||||
<h2 id="编译运行-2"><a class="header" href="#编译运行-2">编译运行</a></h2>
|
||||
<ul>
|
||||
<li><code>git clone https://github.com/libbpf/libbpf-bootstrap libbpf-bootstrap-cloned</code></li>
|
||||
<li>将 <a href="13-tcpconnlat/libbpf-bootstrap">libbpf-bootstrap</a>目录下的文件复制到 <code>libbpf-bootstrap-cloned/examples/c</code>下</li>
|
||||
<li>修改 <code>libbpf-bootstrap-cloned/examples/c/Makefile</code> ,在其 <code>APPS</code> 项后添加 <code>tcpconnlat</code></li>
|
||||
<li>在 <code>libbpf-bootstrap-cloned/examples/c</code> 下运行 <code>make tcpconnlat</code></li>
|
||||
<li><code>sudo ./tcpconnlat</code></li>
|
||||
</ul>
|
||||
<h2 id="效果"><a class="header" href="#效果">效果</a></h2>
|
||||
<pre><code class="language-plain">root@yutong-VirtualBox:~/libbpf-bootstrap/examples/c# ./tcpconnlat
|
||||
<pre><code class="language-console">$ make
|
||||
...
|
||||
BPF .output/tcpconnlat.bpf.o
|
||||
GEN-SKEL .output/tcpconnlat.skel.h
|
||||
CC .output/tcpconnlat.o
|
||||
BINARY tcpconnlat
|
||||
$ sudo ./tcpconnlat
|
||||
PID COMM IP SADDR DADDR DPORT LAT(ms)
|
||||
222564 wget 4 192.168.88.15 110.242.68.3 80 25.29
|
||||
222684 wget 4 192.168.88.15 167.179.101.42 443 246.76
|
||||
@@ -2369,139 +2367,111 @@ PID COMM IP SADDR DADDR DPORT LAT(ms)
|
||||
<p>通过上面的实验,我们可以看到,tcpconnlat 工具的实现原理是基于内核的TCP连接的跟踪,并且可以跟踪到 tcp 连接的延迟时间;除了命令行使用方式之外,还可以将其和容器、k8s 等元信息综合起来,通过 <code>prometheus</code> 和 <code>grafana</code> 等工具进行网络性能分析。</p>
|
||||
<p>来源:<a href="https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpconnlat.bpf.c">https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpconnlat.bpf.c</a></p>
|
||||
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf-入门实践教程编写-ebpf-程序-tcpconnlat-测量-tcp-连接延时"><a class="header" href="#ebpf-入门实践教程编写-ebpf-程序-tcpconnlat-测量-tcp-连接延时">eBPF 入门实践教程:编写 eBPF 程序 tcpconnlat 测量 tcp 连接延时</a></h1>
|
||||
<h2 id="代码解释"><a class="header" href="#代码解释">代码解释</a></h2>
|
||||
<h3 id="背景-1"><a class="header" href="#背景-1">背景</a></h3>
|
||||
<p>在互联网后端日常开发接口的时候中,不管你使用的是C、Java、PHP还是Golang,都避免不了需要调用mysql、redis等组件来获取数据,可能还需要执行一些rpc远程调用,或者再调用一些其它restful api。 在这些调用的底层,基本都是在使用TCP协议进行传输。这是因为在传输层协议中,TCP协议具备可靠的连接,错误重传,拥塞控制等优点,所以目前应用比UDP更广泛一些。但相对而言,tcp 连接也有一些缺点,例如建立连接的延时较长等。因此也会出现像 QUIC ,即 快速UDP网络连接 ( Quick UDP Internet Connections )这样的替代方案。</p>
|
||||
<p>tcp 连接延时分析对于网络性能分析优化或者故障排查都能起到不少作用。</p>
|
||||
<h3 id="tcpconnlat-的实现原理-1"><a class="header" href="#tcpconnlat-的实现原理-1">tcpconnlat 的实现原理</a></h3>
|
||||
<p>tcpconnlat 这个工具跟踪执行活动TCP连接的内核函数 (例如,通过connect()系统调用),并显示本地测量的连接的延迟(时间),即从发送 SYN 到响应包的时间。</p>
|
||||
<h3 id="tcp-连接原理-1"><a class="header" href="#tcp-连接原理-1">tcp 连接原理</a></h3>
|
||||
<p>tcp 连接的整个过程如图所示:</p>
|
||||
<p><img src="13-tcpconnlat/tcpconnlat1.png" alt="tcpconnlate" /></p>
|
||||
<p>在这个连接过程中,我们来简单分析一下每一步的耗时:</p>
|
||||
<ol>
|
||||
<li>客户端发出SYNC包:客户端一般是通过connect系统调用来发出 SYN 的,这里牵涉到本机的系统调用和软中断的 CPU 耗时开销</li>
|
||||
<li>SYN传到服务器:SYN从客户端网卡被发出,这是一次长途远距离的网络传输</li>
|
||||
<li>服务器处理SYN包:内核通过软中断来收包,然后放到半连接队列中,然后再发出SYN/ACK响应。主要是 CPU 耗时开销</li>
|
||||
<li>SYC/ACK传到客户端:长途网络跋涉</li>
|
||||
<li>客户端处理 SYN/ACK:客户端内核收包并处理SYN后,经过几us的CPU处理,接着发出 ACK。同样是软中断处理开销</li>
|
||||
<li>ACK传到服务器:长途网络跋涉</li>
|
||||
<li>服务端收到ACK:服务器端内核收到并处理ACK,然后把对应的连接从半连接队列中取出来,然后放到全连接队列中。一次软中断CPU开销</li>
|
||||
<li>服务器端用户进程唤醒:正在被accpet系统调用阻塞的用户进程被唤醒,然后从全连接队列中取出来已经建立好的连接。一次上下文切换的CPU开销</li>
|
||||
</ol>
|
||||
<p>在客户端视角,在正常情况下一次TCP连接总的耗时也就就大约是一次网络RTT的耗时。但在某些情况下,可能会导致连接时的网络传输耗时上涨、CPU处理开销增加、甚至是连接失败。这种时候在发现延时过长之后,就可以结合其他信息进行分析。</p>
|
||||
<h3 id="ebpf-实现原理-1"><a class="header" href="#ebpf-实现原理-1">ebpf 实现原理</a></h3>
|
||||
<p>在 TCP 三次握手的时候,Linux 内核会维护两个队列,分别是:</p>
|
||||
<ul>
|
||||
<li>半连接队列,也称 SYN 队列;</li>
|
||||
<li>全连接队列,也称 accepet 队列;</li>
|
||||
</ul>
|
||||
<p>服务端收到客户端发起的 SYN 请求后,内核会把该连接存储到半连接队列,并向客户端响应 SYN+ACK,接着客户端会返回 ACK,服务端收到第三次握手的 ACK 后,内核会把连接从半连接队列移除,然后创建新的完全的连接,并将其添加到 accept 队列,等待进程调用 accept 函数时把连接取出来。</p>
|
||||
<p>我们的 ebpf 代码实现在 <a href="https://github.com/yunwei37/Eunomia/blob/master/bpftools/tcpconnlat/tcpconnlat.bpf.c">https://github.com/yunwei37/Eunomia/blob/master/bpftools/tcpconnlat/tcpconnlat.bpf.c</a> 中:</p>
|
||||
<p>它主要使用了 trace_tcp_rcv_state_process 和 kprobe/tcp_v4_connect 这样的跟踪点:</p>
|
||||
<pre><code class="language-c">
|
||||
SEC("kprobe/tcp_v4_connect")
|
||||
int BPF_KPROBE(tcp_v4_connect, struct sock *sk)
|
||||
{
|
||||
return trace_connect(sk);
|
||||
}
|
||||
|
||||
SEC("kprobe/tcp_v6_connect")
|
||||
int BPF_KPROBE(tcp_v6_connect, struct sock *sk)
|
||||
{
|
||||
return trace_connect(sk);
|
||||
}
|
||||
|
||||
SEC("kprobe/tcp_rcv_state_process")
|
||||
int BPF_KPROBE(tcp_rcv_state_process, struct sock *sk)
|
||||
{
|
||||
return handle_tcp_rcv_state_process(ctx, sk);
|
||||
}
|
||||
</code></pre>
|
||||
<p>在 trace_connect 中,我们跟踪新的 tcp 连接,记录到达时间,并且把它加入 map 中:</p>
|
||||
<pre><code class="language-c">struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, 4096);
|
||||
__type(key, struct sock *);
|
||||
__type(value, struct piddata);
|
||||
} start SEC(".maps");
|
||||
|
||||
static int trace_connect(struct sock *sk)
|
||||
{
|
||||
u32 tgid = bpf_get_current_pid_tgid() >> 32;
|
||||
struct piddata piddata = {};
|
||||
|
||||
if (targ_tgid && targ_tgid != tgid)
|
||||
return 0;
|
||||
|
||||
bpf_get_current_comm(&piddata.comm, sizeof(piddata.comm));
|
||||
piddata.ts = bpf_ktime_get_ns();
|
||||
piddata.tgid = tgid;
|
||||
bpf_map_update_elem(&start, &sk, &piddata, 0);
|
||||
return 0;
|
||||
}
|
||||
</code></pre>
|
||||
<p>在 handle_tcp_rcv_state_process 中,我们跟踪接收到的 tcp 数据包,从 map 从提取出对应的 connect 事件,并且计算延迟:</p>
|
||||
<pre><code class="language-c">static int handle_tcp_rcv_state_process(void *ctx, struct sock *sk)
|
||||
{
|
||||
struct piddata *piddatap;
|
||||
struct event event = {};
|
||||
s64 delta;
|
||||
u64 ts;
|
||||
|
||||
if (BPF_CORE_READ(sk, __sk_common.skc_state) != TCP_SYN_SENT)
|
||||
return 0;
|
||||
|
||||
piddatap = bpf_map_lookup_elem(&start, &sk);
|
||||
if (!piddatap)
|
||||
return 0;
|
||||
|
||||
ts = bpf_ktime_get_ns();
|
||||
delta = (s64)(ts - piddatap->ts);
|
||||
if (delta < 0)
|
||||
goto cleanup;
|
||||
|
||||
event.delta_us = delta / 1000U;
|
||||
if (targ_min_us && event.delta_us < targ_min_us)
|
||||
goto cleanup;
|
||||
__builtin_memcpy(&event.comm, piddatap->comm,
|
||||
sizeof(event.comm));
|
||||
event.ts_us = ts / 1000;
|
||||
event.tgid = piddatap->tgid;
|
||||
event.lport = BPF_CORE_READ(sk, __sk_common.skc_num);
|
||||
event.dport = BPF_CORE_READ(sk, __sk_common.skc_dport);
|
||||
event.af = BPF_CORE_READ(sk, __sk_common.skc_family);
|
||||
if (event.af == AF_INET) {
|
||||
event.saddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_rcv_saddr);
|
||||
event.daddr_v4 = BPF_CORE_READ(sk, __sk_common.skc_daddr);
|
||||
} else {
|
||||
BPF_CORE_READ_INTO(&event.saddr_v6, sk,
|
||||
__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
|
||||
BPF_CORE_READ_INTO(&event.daddr_v6, sk,
|
||||
__sk_common.skc_v6_daddr.in6_u.u6_addr32);
|
||||
}
|
||||
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
|
||||
&event, sizeof(event));
|
||||
|
||||
cleanup:
|
||||
bpf_map_delete_elem(&start, &sk);
|
||||
return 0;
|
||||
}
|
||||
</code></pre>
|
||||
<h3 id="编译运行-3"><a class="header" href="#编译运行-3">编译运行</a></h3>
|
||||
<p>TODO</p>
|
||||
<h3 id="总结-12"><a class="header" href="#总结-12">总结</a></h3>
|
||||
<p>通过上面的实验,我们可以看到,tcpconnlat 工具的实现原理是基于内核的TCP连接的跟踪,并且可以跟踪到 tcp 连接的延迟时间;除了命令行使用方式之外,还可以将其和容器、k8s 等元信息综合起来,通过 <code>prometheus</code> 和 <code>grafana</code> 等工具进行网络性能分析。</p>
|
||||
<blockquote>
|
||||
<p><code>Eunomia</code> 是一个使用 C/C++ 开发的基于 eBPF的轻量级,高性能云原生监控工具,旨在帮助用户了解容器的各项行为、监控可疑的容器安全事件,力求提供覆盖容器全生命周期的轻量级开源监控解决方案。它使用 <code>Linux</code> <code>eBPF</code> 技术在运行时跟踪您的系统和应用程序,并分析收集的事件以检测可疑的行为模式。目前,它包含性能分析、容器集群网络可视化分析*、容器安全感知告警、一键部署、持久化存储监控等功能,提供了多样化的 ebpf 追踪点。其核心导出器/命令行工具最小仅需要约 4MB 大小的二进制程序,即可在支持的 Linux 内核上启动。</p>
|
||||
</blockquote>
|
||||
<p>项目地址:<a href="https://github.com/yunwei37/Eunomia">https://github.com/yunwei37/Eunomia</a></p>
|
||||
<h3 id="参考资料-1"><a class="header" href="#参考资料-1">参考资料</a></h3>
|
||||
<ol>
|
||||
<li><a href="http://kerneltravel.net/blog/2020/tcpconnlat/">http://kerneltravel.net/blog/2020/tcpconnlat/</a></li>
|
||||
<li><a href="https://network.51cto.com/article/640631.html">https://network.51cto.com/article/640631.html</a></li>
|
||||
</ol>
|
||||
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf入门实践教程使用-libbpf-bootstrap-开发程序统计-tcp-连接延时-1"><a class="header" href="#ebpf入门实践教程使用-libbpf-bootstrap-开发程序统计-tcp-连接延时-1">eBPF入门实践教程:使用 libbpf-bootstrap 开发程序统计 TCP 连接延时</a></h1>
|
||||
<h2 id="内核态代码"><a class="header" href="#内核态代码">内核态代码</a></h2>
|
||||
<pre><code class="language-c">// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
||||
/* Copyright (c) 2021 Hengqi Chen */
|
||||
#include <vmlinux.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
#include "tcpstates.h"
|
||||
|
||||
#define MAX_ENTRIES 10240
|
||||
#define AF_INET 2
|
||||
#define AF_INET6 10
|
||||
|
||||
const volatile bool filter_by_sport = false;
|
||||
const volatile bool filter_by_dport = false;
|
||||
const volatile short target_family = 0;
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, __u16);
|
||||
__type(value, __u16);
|
||||
} sports SEC(".maps");
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, __u16);
|
||||
__type(value, __u16);
|
||||
} dports SEC(".maps");
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__uint(max_entries, MAX_ENTRIES);
|
||||
__type(key, struct sock *);
|
||||
__type(value, __u64);
|
||||
} timestamps SEC(".maps");
|
||||
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
|
||||
__uint(key_size, sizeof(__u32));
|
||||
__uint(value_size, sizeof(__u32));
|
||||
} events SEC(".maps");
|
||||
|
||||
SEC("tracepoint/sock/inet_sock_set_state")
|
||||
int handle_set_state(struct trace_event_raw_inet_sock_set_state *ctx)
|
||||
{
|
||||
struct sock *sk = (struct sock *)ctx->skaddr;
|
||||
__u16 family = ctx->family;
|
||||
__u16 sport = ctx->sport;
|
||||
__u16 dport = ctx->dport;
|
||||
__u64 *tsp, delta_us, ts;
|
||||
struct event event = {};
|
||||
|
||||
if (ctx->protocol != IPPROTO_TCP)
|
||||
return 0;
|
||||
|
||||
if (target_family && target_family != family)
|
||||
return 0;
|
||||
|
||||
if (filter_by_sport && !bpf_map_lookup_elem(&sports, &sport))
|
||||
return 0;
|
||||
|
||||
if (filter_by_dport && !bpf_map_lookup_elem(&dports, &dport))
|
||||
return 0;
|
||||
|
||||
tsp = bpf_map_lookup_elem(&timestamps, &sk);
|
||||
ts = bpf_ktime_get_ns();
|
||||
if (!tsp)
|
||||
delta_us = 0;
|
||||
else
|
||||
delta_us = (ts - *tsp) / 1000;
|
||||
|
||||
event.skaddr = (__u64)sk;
|
||||
event.ts_us = ts / 1000;
|
||||
event.delta_us = delta_us;
|
||||
event.pid = bpf_get_current_pid_tgid() >> 32;
|
||||
event.oldstate = ctx->oldstate;
|
||||
event.newstate = ctx->newstate;
|
||||
event.family = family;
|
||||
event.sport = sport;
|
||||
event.dport = dport;
|
||||
bpf_get_current_comm(&event.task, sizeof(event.task));
|
||||
|
||||
if (family == AF_INET) {
|
||||
bpf_probe_read_kernel(&event.saddr, sizeof(event.saddr), &sk->__sk_common.skc_rcv_saddr);
|
||||
bpf_probe_read_kernel(&event.daddr, sizeof(event.daddr), &sk->__sk_common.skc_daddr);
|
||||
} else { /* family == AF_INET6 */
|
||||
bpf_probe_read_kernel(&event.saddr, sizeof(event.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
|
||||
bpf_probe_read_kernel(&event.daddr, sizeof(event.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
|
||||
}
|
||||
|
||||
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event, sizeof(event));
|
||||
|
||||
if (ctx->newstate == TCP_CLOSE)
|
||||
bpf_map_delete_elem(&timestamps, &sk);
|
||||
else
|
||||
bpf_map_update_elem(&timestamps, &sk, &ts, BPF_ANY);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
char LICENSE[] SEC("license") = "Dual BSD/GPL";
|
||||
</code></pre>
|
||||
<p><code>tcpstates</code> 是一个追踪当前系统上的TCP套接字的TCP状态的程序,主要通过跟踪内核跟踪点 <code>inet_sock_set_state</code> 来实现。统计数据通过 <code>perf_event</code>向用户态传输。</p>
|
||||
<pre><code class="language-c">SEC("tracepoint/sock/inet_sock_set_state")
|
||||
int handle_set_state(struct trace_event_raw_inet_sock_set_state *ctx)
|
||||
@@ -2609,16 +2579,14 @@ static void handle_lost_events(void* ctx, int cpu, __u64 lost_cnt) {
|
||||
}
|
||||
</code></pre>
|
||||
<p>收到事件后所调用对应的处理函数并进行输出打印。</p>
<h2 id="编译运行-3"><a class="header" href="#编译运行-3">Compile and Run</a></h2>
<p>One way to build the example is inside libbpf-bootstrap:</p>
<ul>
<li><code>git clone https://github.com/libbpf/libbpf-bootstrap libbpf-bootstrap-cloned</code></li>
<li>copy the files under <a href="14-tcpstates/libbpf-bootstrap">libbpf-bootstrap</a> into <code>libbpf-bootstrap-cloned/examples/c</code>;</li>
<li>edit <code>libbpf-bootstrap-cloned/examples/c/Makefile</code> and append <code>tcpstates</code> to its <code>APPS</code> variable;</li>
<li>run <code>make tcpstates</code> in <code>libbpf-bootstrap-cloned/examples/c</code>;</li>
<li><code>sudo ./tcpstates</code>.</li>
</ul>
<p>Building and running it looks like this:</p>
<pre><code class="language-console">$ make
...
  BPF      .output/tcpstates.bpf.o
  GEN-SKEL .output/tcpstates.skel.h
  CC       .output/tcpstates.o
  BINARY   tcpstates
$ sudo ./tcpstates
SKADDR           PID     COMM             LADDR           LPORT RADDR           RPORT OLDSTATE    -> NEWSTATE    MS
ffff9bf61bb62bc0 164978  node             192.168.88.15   0     52.178.17.2     443   CLOSE       -> SYN_SENT    0.000
ffff9bf61bb62bc0 0       swapper/0        192.168.88.15   41596 52.178.17.2     443   SYN_SENT    -> ESTABLISHED 225.794
...
ffff9bf6d8ee88c0 229832  redis-serv       0.0.0.0         6379  0.0.0.0         0     LISTEN      -> CLOSE       1.763
ffff9bf7109d6900 88750   node             127.0.0.1       39755 127.0.0.1       50966 ESTABLISHED -> FIN_WAIT1   0.000
</code></pre>
<p>For a detailed explanation of the output, see <a href="14-tcpstates/README.html">README.md</a>.</p>
<h2 id="总结-12"><a class="header" href="#总结-12">Summary</a></h2>
<p>The code here is adapted from <a href="https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpstates.bpf.c">https://github.com/iovisor/bcc/blob/master/libbpf-tools/tcpstates.bpf.c</a>.</p>
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf-入门实践教程编写-ebpf-程序-tcprtt-测量-tcp-连接的往返时间"><a class="header" href="#ebpf-入门实践教程编写-ebpf-程序-tcprtt-测量-tcp-连接的往返时间">eBPF Tutorial by Example: Writing a tcprtt eBPF Program to Measure the Round-Trip Time of TCP Connections</a></h1>
<h2 id="背景-1"><a class="header" href="#背景-1">Background</a></h2>
<p>Network quality is an important factor in today's Internet. Poor network quality can have many causes: it may come from hardware, or from poorly written programs. To make it easier to locate network problems, the <code>tcprtt</code> tool was created. It monitors the round-trip time (RTT) of TCP connections, which helps analyze link quality and pinpoint where a problem originates.</p>
<pre><code class="language-c">...
const volatile bool targ_ms = false;

/// @sample {"interval": 1000, "type" : "log2_hist"}
struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, MAX_ENTRIES);
    __type(key, u64);
    __type(value, struct hist);
} hists SEC(".maps");

static struct hist zero;

SEC("fentry/tcp_rcv_established")
|
||||
int BPF_PROG(tcp_rcv, struct sock *sk)
|
||||
{
|
||||
const struct inet_sock *inet = (struct inet_sock *)(sk);
|
||||
struct tcp_sock *ts;
|
||||
struct hist *histp;
|
||||
u64 key, slot;
|
||||
u32 srtt;
|
||||
const struct inet_sock *inet = (struct inet_sock *)(sk);
|
||||
struct tcp_sock *ts;
|
||||
struct hist *histp;
|
||||
u64 key, slot;
|
||||
u32 srtt;
|
||||
|
||||
if (targ_sport && targ_sport != inet->inet_sport)
|
||||
return 0;
|
||||
if (targ_dport && targ_dport != sk->__sk_common.skc_dport)
|
||||
return 0;
|
||||
if (targ_saddr && targ_saddr != inet->inet_saddr)
|
||||
return 0;
|
||||
if (targ_daddr && targ_daddr != sk->__sk_common.skc_daddr)
|
||||
return 0;
|
||||
if (targ_sport && targ_sport != inet->inet_sport)
|
||||
return 0;
|
||||
if (targ_dport && targ_dport != sk->__sk_common.skc_dport)
|
||||
return 0;
|
||||
if (targ_saddr && targ_saddr != inet->inet_saddr)
|
||||
return 0;
|
||||
if (targ_daddr && targ_daddr != sk->__sk_common.skc_daddr)
|
||||
return 0;
|
||||
|
||||
if (targ_laddr_hist)
|
||||
key = inet->inet_saddr;
|
||||
else if (targ_raddr_hist)
|
||||
key = inet->sk.__sk_common.skc_daddr;
|
||||
else
|
||||
key = 0;
|
||||
histp = bpf_map_lookup_or_try_init(&hists, &key, &zero);
|
||||
if (!histp)
|
||||
return 0;
|
||||
ts = (struct tcp_sock *)(sk);
|
||||
srtt = BPF_CORE_READ(ts, srtt_us) >> 3;
|
||||
if (targ_ms)
|
||||
srtt /= 1000U;
|
||||
slot = log2l(srtt);
|
||||
if (slot >= MAX_SLOTS)
|
||||
slot = MAX_SLOTS - 1;
|
||||
__sync_fetch_and_add(&histp->slots[slot], 1);
|
||||
if (targ_show_ext) {
|
||||
__sync_fetch_and_add(&histp->latency, srtt);
|
||||
__sync_fetch_and_add(&histp->cnt, 1);
|
||||
}
|
||||
return 0;
|
||||
if (targ_laddr_hist)
|
||||
key = inet->inet_saddr;
|
||||
else if (targ_raddr_hist)
|
||||
key = inet->sk.__sk_common.skc_daddr;
|
||||
else
|
||||
key = 0;
|
||||
histp = bpf_map_lookup_or_try_init(&hists, &key, &zero);
|
||||
if (!histp)
|
||||
return 0;
|
||||
ts = (struct tcp_sock *)(sk);
|
||||
srtt = BPF_CORE_READ(ts, srtt_us) >> 3;
|
||||
if (targ_ms)
|
||||
srtt /= 1000U;
|
||||
slot = log2l(srtt);
|
||||
if (slot >= MAX_SLOTS)
|
||||
slot = MAX_SLOTS - 1;
|
||||
__sync_fetch_and_add(&histp->slots[slot], 1);
|
||||
if (targ_show_ext) {
|
||||
__sync_fetch_and_add(&histp->latency, srtt);
|
||||
__sync_fetch_and_add(&histp->cnt, 1);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
</code></pre>
<p>This code is an eBPF-based network latency analysis tool: it hooks the <code>tcp_rcv_established</code> function in the kernel TCP stack to collect the RTT distribution of TCP connections. Its main workflow is as follows (a sketch of the helper functions it relies on is shown after the list):</p>
<ol>
<li>An eBPF hash map named "hists" is defined to store the RTT histogram data.</li>
<li>When tcp_rcv_established is called, the program extracts TCP-related information from the socket structure passed in, including the local/remote IP addresses, the local/remote ports, and TCP state information.</li>
<li>Next, it checks whether the user-specified filter conditions match the current TCP connection; if not, it returns immediately.</li>
<li>If they match, it looks up the histogram keyed by the local/remote IP address in the "hists" map. If no histogram exists for that address yet, a new one is created and inserted into the map.</li>
<li>The program then reads the connection's smoothed RTT (srtt) from the socket structure and adjusts it according to the user's options: if the "-ms" option is set, the srtt value is divided by 1000.</li>
<li>The srtt value is converted into a histogram slot, and that slot's counter is incremented.</li>
<li>If the "-show-ext" option is set, the histogram's total latency (latency) and sample count (cnt) are also accumulated.</li>
</ol>
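<p>Two helpers used above, <code>bpf_map_lookup_or_try_init</code> and <code>log2l</code>, are not kernel BPF helpers; they come from the small headers shipped with libbpf-tools (<code>maps.bpf.h</code> and <code>bits.bpf.h</code>). The sketch below paraphrases what they do rather than copying them verbatim:</p>
<pre><code class="language-c">/* from maps.bpf.h (needs <asm-generic/errno.h> for EEXIST):
 * look up a key, inserting a zero-initialized value first if it is missing.
 * BPF_NOEXIST makes the update lose gracefully if another CPU won the race. */
static __always_inline void *
bpf_map_lookup_or_try_init(void *map, const void *key, const void *init)
{
    void *val;
    long err;

    val = bpf_map_lookup_elem(map, key);
    if (val)
        return val;

    err = bpf_map_update_elem(map, key, init, BPF_NOEXIST);
    if (err && err != -EEXIST)
        return 0;

    return bpf_map_lookup_elem(map, key);
}

/* from bits.bpf.h: integer log2, used to pick the histogram slot */
static __always_inline u64 log2(u32 v)
{
    u32 shift, r;

    r = (v > 0xFFFF) << 4; v >>= r;
    shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
    shift = (v > 0xF) << 2; v >>= shift; r |= shift;
    shift = (v > 0x3) << 1; v >>= shift; r |= shift;
    r |= (v >> 1);
    return r;
}

static __always_inline u64 log2l(u64 v)
{
    u32 hi = v >> 32;

    return hi ? log2(hi) + 32 : log2(v);
}
</code></pre>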
<h2 id="编译运行-4"><a class="header" href="#编译运行-4">Compile and Run</a></h2>
<p>eunomia-bpf is an open-source eBPF dynamic loading runtime and development toolchain combined with Wasm. Its goal is to simplify developing, building, distributing, and running eBPF programs. See <a href="https://github.com/eunomia-bpf/eunomia-bpf">https://github.com/eunomia-bpf/eunomia-bpf</a> for how to download and install the ecc compiler toolchain and the ecli runtime. We use eunomia-bpf to compile and run this example.</p>
<p>Compile:</p>
<pre><code class="language-shell">docker run -it -v `pwd`/:/src/ yunwei37/ebpm:latest
</code></pre>
<p>Sample output after running the program:</p>
<pre><code class="language-plain">...
cnt = 0
      4096 -> 8191       : 16      |****************************************|
      8192 -> 16383      : 4       |**********                              |
</code></pre>
<h2 id="总结-13"><a class="header" href="#总结-13">Summary</a></h2>
<p>tcprtt is an eBPF-based TCP latency analysis tool. By hooking the tcp_rcv_established function in the TCP stack, it collects the RTT distribution of TCP connections, optionally limited to specified connections, and stores the results in an eBPF hash map. The tool also supports several filter conditions and extended RTT statistics, so that users can analyze and tune network performance more effectively.</p>
<p>For more examples and a detailed development guide, see the official documentation of eunomia-bpf: <a href="https://github.com/eunomia-bpf/eunomia-bpf">https://github.com/eunomia-bpf/eunomia-bpf</a></p>
<p>The complete tutorial and source code are open source and available at <a href="https://github.com/eunomia-bpf/bpf-developer-tutorial">https://github.com/eunomia-bpf/bpf-developer-tutorial</a>.</p>
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf-入门实践教程编写-ebpf-程序-memleak-监控内存泄漏"><a class="header" href="#ebpf-入门实践教程编写-ebpf-程序-memleak-监控内存泄漏">eBPF Tutorial by Example: Writing a memleak eBPF Program to Monitor Memory Leaks</a></h1>
<h2 id="背景-2"><a class="header" href="#背景-2">Background</a></h2>
<p>A memory leak is a serious problem for any program. If a leaking program is left running, the system's memory is gradually exhausted and performance degrades noticeably. To avoid this, the <code>memleak</code> tool was created: it tracks and matches memory allocation and free requests, and prints the call stacks of allocations that have been made but not yet freed.</p>
<p>The gen_free_enter function receives an address. It first looks the address up in the allocs map to find the corresponding allocation record. If nothing is found, the address was not tracked as an allocation and the function returns 0. If a record is found, bpf_map_delete_elem removes it from the allocs map.</p>
<p>Next, update_statistics_del is called to update the allocation statistics; it takes the stack ID and the size of the memory block as parameters. It looks up the allocation statistics for that stack ID in the combined_allocs map. If the entry is not found, a log message is emitted and the function returns. If it is found, atomic operations subtract the block size and decrement the block count by one, because that stack now has one fewer outstanding block and correspondingly less outstanding memory.</p>
<p>Finally, a BPF program BPF_KPROBE(free_enter, void *address) is defined, which runs whenever the process calls free. It receives the address of the memory block being freed and calls gen_free_enter to handle the release.</p>
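<p>A simplified sketch of that release path is shown below. The <code>allocs</code> and <code>combined_allocs</code> maps and <code>struct alloc_info</code> are defined earlier in memleak.bpf.c and are only referenced here; the exact names and types in the upstream source may differ slightly:</p>
<pre><code class="language-c">/* per-stack totals kept in the combined_allocs map (simplified layout) */
struct combined_alloc_info {
    __u64 total_size;
    __u64 number_of_allocs;
};

static void update_statistics_del(u64 stack_id, u64 sz)
{
    struct combined_alloc_info *cinfo;

    cinfo = bpf_map_lookup_elem(&combined_allocs, &stack_id);
    if (!cinfo) {
        bpf_printk("failed to lookup combined allocs\n");
        return;
    }

    /* one block of size sz is no longer outstanding for this stack */
    __sync_fetch_and_sub(&cinfo->total_size, sz);
    __sync_fetch_and_sub(&cinfo->number_of_allocs, 1);
}

static int gen_free_enter(const void *address)
{
    const u64 addr = (u64)address;
    const struct alloc_info *info = bpf_map_lookup_elem(&allocs, &addr);

    if (!info)
        return 0;               /* address was never tracked as an allocation */

    bpf_map_delete_elem(&allocs, &addr);
    update_statistics_del(info->stack_id, info->size);
    return 0;
}

/* attached as a uprobe on the allocator's free() entry */
SEC("uprobe")
int BPF_KPROBE(free_enter, void *address)
{
    return gen_free_enter(address);
}
</code></pre>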
<h2 id="编译运行-5"><a class="header" href="#编译运行-5">Compile and Run</a></h2>
<pre><code class="language-console">$ git clone https://github.com/iovisor/bcc.git --recurse-submodules
$ cd bcc/libbpf-tools/
$ make memleak
...
Tracing outstanding memory allocs...  Hit Ctrl-C to end
...
        6 [<ffffffff82000b62>] <null sym>
...
</code></pre>
<h2 id="总结-14"><a class="header" href="#总结-14">Summary</a></h2>
<p>memleak is a memory-leak monitoring tool that records the call stacks associated with memory allocation and free events. Over time, it can show allocations that have remained unreleased for a long period.</p>
<p>This code comes from <a href="https://github.com/iovisor/bcc/blob/master/libbpf-tools/memleak.bpf.c">https://github.com/iovisor/bcc/blob/master/libbpf-tools/memleak.bpf.c</a>.</p>
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf-入门实践教程编写-ebpf-程序-biopattern-统计随机顺序磁盘-io"><a class="header" href="#ebpf-入门实践教程编写-ebpf-程序-biopattern-统计随机顺序磁盘-io">eBPF Tutorial by Example: Writing a Biopattern eBPF Program to Count Random/Sequential Disk I/O</a></h1>
<h2 id="背景-3"><a class="header" href="#背景-3">Background</a></h2>
<p>Biopattern counts the ratio of random to sequential disk I/O operations.</p>
<p>TODO</p>
<h2 id="实现原理-2"><a class="header" href="#实现原理-2">How It Works</a></h2>
<p>For each completed disk operation, the handler obtains the operation's information, uses the device's previous operation recorded in a hash map to decide whether the current operation is random or sequential I/O, and updates the corresponding counter.</p>
<h2 id="编写-ebpf-程序-3"><a class="header" href="#编写-ebpf-程序-3">Writing the eBPF Program</a></h2>
<p>TODO</p>
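<p>The idea described under "How It Works" can be sketched roughly as follows. The tracepoint name, struct layout, and field names are assumptions for illustration (they vary across kernel versions) and are not the actual biopattern source:</p>
<pre><code class="language-c">struct counter {
    __u64 last_sector;   /* end sector of the previous request on this device */
    __u64 sequential;
    __u64 random;
};

struct {
    __uint(type, BPF_MAP_TYPE_HASH);
    __uint(max_entries, 64);
    __type(key, __u32);            /* device id */
    __type(value, struct counter);
} counters SEC(".maps");

SEC("tracepoint/block/block_rq_complete")
int handle_block_rq_complete(struct trace_event_raw_block_rq_complete *ctx)
{
    struct counter *c, zero = {};
    __u32 dev = ctx->dev;

    c = bpf_map_lookup_elem(&counters, &dev);
    if (!c) {
        bpf_map_update_elem(&counters, &dev, &zero, BPF_NOEXIST);
        c = bpf_map_lookup_elem(&counters, &dev);
        if (!c)
            return 0;
    }

    /* sequential I/O starts exactly where the previous request ended */
    if (c->last_sector) {
        if (c->last_sector == ctx->sector)
            __sync_fetch_and_add(&c->sequential, 1);
        else
            __sync_fetch_and_add(&c->random, 1);
    }
    c->last_sector = ctx->sector + ctx->nr_sector;
    return 0;
}
</code></pre>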
<h3 id="总结-15"><a class="header" href="#总结-15">Summary</a></h3>
<p>Biopattern shows the ratio of random to sequential disk I/O operations, which is quite helpful for developers who want an overall picture of a system's I/O behavior.</p>
<p>TODO</p>
<div style="break-before: page; page-break-before: always;"></div><h1 id="更多的参考资料"><a class="header" href="#更多的参考资料">Further References</a></h1>
<p>TODO</p>
<div style="break-before: page; page-break-before: always;"></div><h1 id="ebpf-入门实践教程使用-lsm-进行安全检测防御"><a class="header" href="#ebpf-入门实践教程使用-lsm-进行安全检测防御">eBPF Tutorial by Example: Security Detection and Defense with LSM</a></h1>
<h2 id="背景-4"><a class="header" href="#背景-4">Background</a></h2>
<p>LSM has been an official security framework in the Linux kernel since 2.6; security implementations built on it include SELinux and AppArmor. Since BPF LSM was introduced in Linux 5.7, system developers have been free to implement security checks at the granularity of individual functions. This chapter provides exactly such a case: a BPF LSM program that restricts access to a specific IPv4 address through the socket connect function (which shows how fine-grained this control can be).</p>
<h2 id="lsm-概述"><a class="header" href="#lsm-概述">LSM Overview</a></h2>
<p>LSM (Linux Security Modules) is a framework in the Linux kernel for supporting various computer security models. LSM places a set of hook points on security-relevant critical paths in the kernel, decoupling the kernel from security modules: different security modules can be loaded and unloaded freely, and security checks can be added without modifying the original kernel code.</p>
<ul>
<li>if the requested address is 1.1.1.1, the connection is rejected; otherwise it is allowed.</li>
</ul>
<p>While the program is running, every connect operation made through a socket is logged to <code>/sys/kernel/debug/tracing/trace_pipe</code>.</p>
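<p>The check itself looks roughly like the sketch below. The <code>blockme</code> constant name and the exact includes are illustrative; 16843009 is 0x01010101, i.e. 1.1.1.1, which is why that number appears in the trace output later:</p>
<pre><code class="language-c">#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define EPERM 1
#define AF_INET 2

/* 0x01010101 == 1.1.1.1 */
const __u32 blockme = 16843009;

SEC("lsm/socket_connect")
int BPF_PROG(restrict_connect, struct socket *sock, struct sockaddr *address, int addrlen, int ret)
{
    /* "cannot override a denial": if an earlier module already rejected, keep that result */
    if (ret != 0)
        return ret;

    /* only IPv4 connect() requests are checked */
    if (address->sa_family != AF_INET)
        return 0;

    struct sockaddr_in *addr = (struct sockaddr_in *)address;
    __u32 dest = addr->sin_addr.s_addr;

    bpf_printk("lsm: found connect to %d", dest);
    if (dest == blockme) {
        bpf_printk("lsm: blocking %d", dest);
        return -EPERM;
    }
    return 0;
}

char LICENSE[] SEC("license") = "GPL";
</code></pre>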
<h2 id="编译运行-6"><a class="header" href="#编译运行-6">Compile and Run</a></h2>
<p>Compile with the container:</p>
<pre><code class="language-console">docker run -it -v `pwd`/:/src/ yunwei37/ebpm:latest
</code></pre>
<pre><code class="language-console">...
Retrying.
wget-7061 [000] d...1 6318.800698: bpf_trace_printk: lsm: found connect to 16843009
wget-7061 [000] d...1 6318.800700: bpf_trace_printk: lsm: blocking 16843009
</code></pre>
<h2 id="总结-16"><a class="header" href="#总结-16">Summary</a></h2>
<p>This chapter showed how to use BPF LSM to restrict socket access to a specific IPv4 address. The BPF hook point for LSM can be enabled by editing the GRUB configuration. In the eBPF program, the function is defined with the <code>BPF_PROG</code> macro and the hook point is chosen with the <code>SEC</code> macro; the implementation follows the LSM principle that a security module "cannot override a denial", and restricts connection requests based on their destination address.</p>
<p>For more examples and a detailed development guide, see the official documentation of eunomia-bpf: <a href="https://github.com/eunomia-bpf/eunomia-bpf">https://github.com/eunomia-bpf/eunomia-bpf</a></p>
<p>The complete tutorial and source code are open source and available at <a href="https://github.com/eunomia-bpf/bpf-developer-tutorial">https://github.com/eunomia-bpf/bpf-developer-tutorial</a>.</p>
<pre><code class="language-c">...
char __license[] SEC("license") = "GPL";
</code></pre>
<p>These annotations tell TC to attach the eBPF program to the ingress attach point of a network interface, and specify the values of the handle and priority options.</p>
<p>In short, this code implements a simple eBPF program that captures packets and prints information about them.</p>
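<p>Since the full listing is collapsed above, here is a sketch of a tc ingress program consistent with that description and with the sample trace output later in this section; the eunomia-bpf annotation syntax and the exact field handling are assumptions rather than a verbatim copy:</p>
<pre><code class="language-c">#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define TC_ACT_OK 0
#define ETH_P_IP  0x0800 /* IPv4 ethertype */

/// @tchook {"ifindex":1, "attach_point":"BPF_TC_INGRESS"}
/// @tcopts {"handle":1, "priority":1}
SEC("tc")
int tc_ingress(struct __sk_buff *ctx)
{
    void *data_end = (void *)(__u64)ctx->data_end;
    void *data = (void *)(__u64)ctx->data;
    struct ethhdr *l2;
    struct iphdr *l3;

    if (ctx->protocol != bpf_htons(ETH_P_IP))
        return TC_ACT_OK;

    /* bounds checks keep the verifier happy before touching header fields */
    l2 = data;
    if ((void *)(l2 + 1) > data_end)
        return TC_ACT_OK;

    l3 = (struct iphdr *)(l2 + 1);
    if ((void *)(l3 + 1) > data_end)
        return TC_ACT_OK;

    bpf_printk("Got IP packet: tot_len: %d, ttl: %d",
               bpf_ntohs(l3->tot_len), l3->ttl);
    return TC_ACT_OK;
}

char __license[] SEC("license") = "GPL";
</code></pre>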
<h2 id="编译运行-7"><a class="header" href="#编译运行-7">Compile and Run</a></h2>
<pre><code class="language-console">docker run -it -v `pwd`/:/src/ yunwei37/ebpm:latest
</code></pre>
<p>or compile with <code>ecc</code>:</p>
<p>The output should look something like this:</p>
<pre><code class="language-console">...
sshd-1254728 [006] ..s1 8737831.674349: 0: Got IP packet: tot_len: 72, ttl: 64
node-1254811 [007] ..s1 8737831.674550: 0: Got IP packet: tot_len: 71, ttl: 64
</code></pre>
<h2 id="总结-17"><a class="header" href="#总结-17">Summary</a></h2>
<p>TODO</p>
<div style="break-before: page; page-break-before: always;"></div><h1 id="bpf-features-by-linux-kernel-version"><a class="header" href="#bpf-features-by-linux-kernel-version">BPF Features by Linux Kernel Version</a></h1>
<h2 id="ebpf-support"><a class="header" href="#ebpf-support">eBPF support</a></h2>