libbpf-tools: Use raw_tp rather than tp_btf for biolatency. #4271
Conversation
tp_btf was added in kernel 5.5; by using raw_tp, we enable this tool to be used on older kernels [1].

Signed-off-by: Francis Laniel <flaniel@linux.microsoft.com>

[1] torvalds/linux@ac4414b5ca47
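For illustration only (a minimal sketch, not part of the patch discussed below; program names are made up): the same block tracepoint handled as tp_btf and as raw_tp. With tp_btf the verifier knows the argument types from kernel BTF, so struct fields can be dereferenced directly; with raw_tp the arguments are untyped, so fields are read with BPF_CORE_READ.

/* Minimal sketch, not taken from this PR: the same tracepoint handled as
 * tp_btf (BTF-typed arguments) and as raw_tp (untyped arguments).
 * Program names are illustrative.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

SEC("tp_btf/block_rq_complete")
int BPF_PROG(rq_complete_btf, struct request *rq, int error, unsigned int nr_bytes)
{
	/* BTF-typed context: direct dereference is verified against kernel BTF. */
	bpf_printk("cmd_flags=%u", rq->cmd_flags);
	return 0;
}

SEC("raw_tp/block_rq_complete")
int BPF_PROG(rq_complete_raw, struct request *rq, int error, unsigned int nr_bytes)
{
	/* Raw tracepoint: pointers are not typed, so use a CO-RE read instead. */
	bpf_printk("cmd_flags=%u", BPF_CORE_READ(rq, cmd_flags));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

This is also why the patch below replaces direct accesses such as rq->cmd_flags with BPF_CORE_READ(rq, cmd_flags) in the shared handlers.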
Sorry, I forgot your comments in #4231; I've already worked on biolatency. Would you mind taking biosnoop/biostacks/bitesize instead? Thanks.
From 4e5c7c3ef039576e05fec6641f248f115a87c545 Mon Sep 17 00:00:00 2001
From: Hengqi Chen <chenhengqi@outlook.com>
Date: Mon, 10 Oct 2022 18:04:25 +0800
Subject: [PATCH] libbpf-tools: Enable biolatency on kernels without BPF
trampoline
On kernels without BPF trampoline, let's switch to raw tracepoint instead.
Signed-off-by: Hengqi Chen <chenhengqi@outlook.com>
---
libbpf-tools/biolatency.bpf.c | 74 +++++++++++++++++++++++++----------
libbpf-tools/biolatency.c | 38 +++++++++---------
2 files changed, 71 insertions(+), 41 deletions(-)
diff --git a/libbpf-tools/biolatency.bpf.c b/libbpf-tools/biolatency.bpf.c
index bab62b1d..429412db 100644
--- a/libbpf-tools/biolatency.bpf.c
+++ b/libbpf-tools/biolatency.bpf.c
@@ -4,6 +4,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
+
#include "biolatency.h"
#include "bits.bpf.h"
#include "core_fixes.bpf.h"
@@ -43,13 +44,17 @@ struct {
__type(value, struct hist);
} hists SEC(".maps");
-static __always_inline
-int trace_rq_start(struct request *rq, int issue)
+static int __always_inline trace_rq_start(struct request *rq, int issue)
{
- if (issue && targ_queued && BPF_CORE_READ(rq->q, elevator))
+ u64 ts;
+
+ if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
return 0;
- u64 ts = bpf_ktime_get_ns();
+ if (issue && targ_queued && BPF_CORE_READ(rq, q, elevator))
+ return 0;
+
+ ts = bpf_ktime_get_ns();
if (filter_dev) {
struct gendisk *disk = get_disk(rq);
@@ -64,12 +69,8 @@ int trace_rq_start(struct request *rq, int issue)
return 0;
}
-SEC("tp_btf/block_rq_insert")
-int block_rq_insert(u64 *ctx)
+static int handle_block_rq_insert(__u64 *ctx)
{
- if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
- return 0;
-
/**
* commit a54895fa (v5.11-rc1) changed tracepoint argument list
* from TP_PROTO(struct request_queue *q, struct request *rq)
@@ -81,12 +82,8 @@ int block_rq_insert(u64 *ctx)
return trace_rq_start((void *)ctx[0], false);
}
-SEC("tp_btf/block_rq_issue")
-int block_rq_issue(u64 *ctx)
+static int handle_block_rq_issue(__u64 *ctx)
{
- if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
- return 0;
-
/**
* commit a54895fa (v5.11-rc1) changed tracepoint argument list
* from TP_PROTO(struct request_queue *q, struct request *rq)
@@ -98,21 +95,20 @@ int block_rq_issue(u64 *ctx)
return trace_rq_start((void *)ctx[0], true);
}
-SEC("tp_btf/block_rq_complete")
-int BPF_PROG(block_rq_complete, struct request *rq, int error,
- unsigned int nr_bytes)
+static int handle_block_rq_complete(struct request *rq, int error, unsigned int nr_bytes)
{
- if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
- return 0;
-
u64 slot, *tsp, ts = bpf_ktime_get_ns();
struct hist_key hkey = {};
struct hist *histp;
s64 delta;
+ if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
+ return 0;
+
tsp = bpf_map_lookup_elem(&start, &rq);
if (!tsp)
return 0;
+
delta = (s64)(ts - *tsp);
if (delta < 0)
goto cleanup;
@@ -124,7 +120,7 @@ int BPF_PROG(block_rq_complete, struct request *rq, int error,
BPF_CORE_READ(disk, first_minor)) : 0;
}
if (targ_per_flag)
- hkey.cmd_flags = rq->cmd_flags;
+ hkey.cmd_flags = BPF_CORE_READ(rq, cmd_flags);
histp = bpf_map_lookup_elem(&hists, &hkey);
if (!histp) {
@@ -148,4 +144,40 @@ cleanup:
return 0;
}
+SEC("tp_btf/block_rq_insert")
+int block_rq_insert_btf(u64 *ctx)
+{
+ return handle_block_rq_insert(ctx);
+}
+
+SEC("tp_btf/block_rq_issue")
+int block_rq_issue_btf(u64 *ctx)
+{
+ return handle_block_rq_issue(ctx);
+}
+
+SEC("tp_btf/block_rq_complete")
+int BPF_PROG(block_rq_complete_btf, struct request *rq, int error, unsigned int nr_bytes)
+{
+ return handle_block_rq_complete(rq, error, nr_bytes);
+}
+
+SEC("raw_tp/block_rq_insert")
+int BPF_PROG(block_rq_insert)
+{
+ return handle_block_rq_insert(ctx);
+}
+
+SEC("raw_tp/block_rq_issue")
+int BPF_PROG(block_rq_issue)
+{
+ return handle_block_rq_issue(ctx);
+}
+
+SEC("raw_tp/block_rq_complete")
+int BPF_PROG(block_rq_complete, struct request *rq, int error, unsigned int nr_bytes)
+{
+ return handle_block_rq_complete(rq, error, nr_bytes);
+}
+
char LICENSE[] SEC("license") = "GPL";
diff --git a/libbpf-tools/biolatency.c b/libbpf-tools/biolatency.c
index 51afa509..4345851a 100644
--- a/libbpf-tools/biolatency.c
+++ b/libbpf-tools/biolatency.c
@@ -193,8 +193,7 @@ static void print_cmd_flags(int cmd_flags)
printf("Unknown");
}
-static
-int print_log2_hists(struct bpf_map *hists, struct partitions *partitions)
+static int print_log2_hists(struct bpf_map *hists, struct partitions *partitions)
{
struct hist_key lookup_key = { .cmd_flags = -1 }, next_key;
const char *units = env.milliseconds ? "msecs" : "usecs";
@@ -286,6 +285,20 @@ int main(int argc, char **argv)
obj->rodata->targ_queued = env.queued;
obj->rodata->filter_cg = env.cg;
+ if (probe_tp_btf("block_rq_insert")) {
+ bpf_program__set_autoload(obj->progs.block_rq_insert, false);
+ bpf_program__set_autoload(obj->progs.block_rq_issue, false);
+ bpf_program__set_autoload(obj->progs.block_rq_complete, false);
+ if (!env.queued)
+ bpf_program__set_autoload(obj->progs.block_rq_insert_btf, false);
+ } else {
+ bpf_program__set_autoload(obj->progs.block_rq_insert_btf, false);
+ bpf_program__set_autoload(obj->progs.block_rq_issue_btf, false);
+ bpf_program__set_autoload(obj->progs.block_rq_complete_btf, false);
+ if (!env.queued)
+ bpf_program__set_autoload(obj->progs.block_rq_insert, false);
+ }
+
err = biolatency_bpf__load(obj);
if (err) {
fprintf(stderr, "failed to load BPF object: %d\n", err);
@@ -307,24 +320,9 @@ int main(int argc, char **argv)
}
}
- if (env.queued) {
- obj->links.block_rq_insert = bpf_program__attach(obj->progs.block_rq_insert);
- if (!obj->links.block_rq_insert) {
- err = -errno;
- fprintf(stderr, "failed to attach: %s\n", strerror(-err));
- goto cleanup;
- }
- }
- obj->links.block_rq_issue = bpf_program__attach(obj->progs.block_rq_issue);
- if (!obj->links.block_rq_issue) {
- err = -errno;
- fprintf(stderr, "failed to attach: %s\n", strerror(-err));
- goto cleanup;
- }
- obj->links.block_rq_complete = bpf_program__attach(obj->progs.block_rq_complete);
- if (!obj->links.block_rq_complete) {
- err = -errno;
- fprintf(stderr, "failed to attach: %s\n", strerror(-err));
+ err = biolatency_bpf__attach(obj);
+ if (err) {
+ fprintf(stderr, "failed to attach BPF object: %d\n", err);
goto cleanup;
}
--
2.27.0
No problem, I guess this kind of thing happens!
Closing in favor of #4275.
Hi.
In this PR, I converted biolatency to use raw_tp rather than tp_btf. As a result, the tool can now run on kernels older than 5.5.
This PR is one step toward closing #4231.
Best regards, and thank you in advance for any feedback.
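As a side note on how the loader picks between the two sets of programs at runtime: the userspace patch above gates autoload on probe_tp_btf(). A rough sketch of how such a probe can be implemented (an assumption, not the actual libbpf-tools helper; it assumes libbpf >= 0.7 APIs and uses a hypothetical function name) is to try loading a trivial BPF_PROG_TYPE_TRACING program attached by BTF id to the tracepoint:

/* Hypothetical sketch of a tp_btf availability probe; the real
 * probe_tp_btf() helper in libbpf-tools may differ.
 */
#include <stdbool.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static bool can_use_tp_btf(const char *tp_name)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		    .expected_attach_type = BPF_TRACE_RAW_TP);
	/* Trivial program body: r0 = 0; exit. */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int id, fd;

	/* Resolve the btf_trace_<name> typedef in vmlinux BTF; failure means
	 * the kernel lacks the BTF needed for tp_btf attachment. */
	id = libbpf_find_vmlinux_btf_id(tp_name, BPF_TRACE_RAW_TP);
	if (id < 0)
		return false;

	opts.attach_btf_id = id;
	fd = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
			   insns, sizeof(insns) / sizeof(insns[0]), &opts);
	if (fd < 0)
		return false;
	close(fd);
	return true;
}

With such a probe, bpf_program__set_autoload() can disable whichever set of programs (the *_btf entry points or the raw_tp ones) will not be attached, which is what the patch does before calling biolatency_bpf__load().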