/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, u32);
        __type(value, u64);
        __uint(max_entries, 2);
        /* TODO: have entries for all possible errno values */
} redirect_err_cnt SEC(".maps");

#define XDP_UNKNOWN (XDP_REDIRECT + 1)
struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, u32);
        __type(value, u64);
        __uint(max_entries, XDP_UNKNOWN + 1);
} exception_cnt SEC(".maps");

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int prog_id;            // offset:8;  size:4; signed:1;
        u32 act;                // offset:12; size:4; signed:0;
        int ifindex;            // offset:16; size:4; signed:1;
        int err;                // offset:20; size:4; signed:1;
        int to_ifindex;         // offset:24; size:4; signed:1;
        u32 map_id;             // offset:28; size:4; signed:0;
        int map_index;          // offset:32; size:4; signed:1;
};                              // offset:36

enum {
        XDP_REDIRECT_SUCCESS = 0,
        XDP_REDIRECT_ERROR = 1
};

static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
        u32 key = XDP_REDIRECT_ERROR;
        int err = ctx->err;
        u64 *cnt;

        if (!err)
                key = XDP_REDIRECT_SUCCESS;

        cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
        if (!cnt)
                return 1;
        *cnt += 1;

        return 0; /* Indicate event was filtered (no further processing) */
        /*
         * Returning 1 here would allow e.g. a perf-record tracepoint
         * to see and record these events, but it doesn't work well
         * in practice, as stopping perf-record also unloads this
         * bpf_prog. Plus, there is the additional overhead of doing so.
         */
}
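/* For reference, a minimal sketch of how the userspace side could fold a
 * per-CPU u64 counter like redirect_err_cnt into a single sum. This code
 * belongs in the _user.c counterpart, not in this BPF object, and the
 * helper name sum_percpu_u64() is illustrative, not part of this sample:
 *
 *      #include <bpf/bpf.h>
 *      #include <bpf/libbpf.h>
 *
 *      static __u64 sum_percpu_u64(int map_fd, __u32 key)
 *      {
 *              unsigned int nr_cpus = bpf_num_possible_cpus();
 *              __u64 values[nr_cpus], sum = 0;
 *              unsigned int i;
 *
 *              // A per-CPU map lookup fills one value slot per possible CPU
 *              if (bpf_map_lookup_elem(map_fd, &key, values))
 *                      return 0;
 *              for (i = 0; i < nr_cpus; i++)
 *                      sum += values[i];
 *              return sum;
 *      }
 */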
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

/* Success-case tracepoint: likely not attached when the monitor starts,
 * as tracing every successful redirect adds overhead
 */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

/* Success-case tracepoint: likely not attached when the monitor starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
        return xdp_redirect_collect_stat(ctx);
}

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int prog_id;            // offset:8;  size:4; signed:1;
        u32 act;                // offset:12; size:4; signed:0;
        int ifindex;            // offset:16; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_exception")
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
{
        u64 *cnt;
        u32 key;

        key = ctx->act;
        if (key > XDP_REDIRECT)
                key = XDP_UNKNOWN;

        cnt = bpf_map_lookup_elem(&exception_cnt, &key);
        if (!cnt)
                return 1;
        *cnt += 1;

        return 0;
}

/* Common stats data record shared with _user.c */
struct datarec {
        u64 processed;
        u64 dropped;
        u64 info;
        u64 err;
};
#define MAX_CPUS 64

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, u32);
        __type(value, struct datarec);
        __uint(max_entries, MAX_CPUS);
} cpumap_enqueue_cnt SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, u32);
        __type(value, struct datarec);
        __uint(max_entries, 1);
} cpumap_kthread_cnt SEC(".maps");

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int map_id;             // offset:8;  size:4; signed:1;
        u32 act;                // offset:12; size:4; signed:0;
        int cpu;                // offset:16; size:4; signed:1;
        unsigned int drops;     // offset:20; size:4; signed:0;
        unsigned int processed; // offset:24; size:4; signed:0;
        int to_cpu;             // offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
{
        u32 to_cpu = ctx->to_cpu;
        struct datarec *rec;

        if (to_cpu >= MAX_CPUS)
                return 1;

        rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
        if (!rec)
                return 0;
        rec->processed += ctx->processed;
        rec->dropped += ctx->drops;

        /* Record bulk events, then userspace can calc average bulk size */
        if (ctx->processed > 0)
                rec->info += 1;

        return 0;
}
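/* The info member incremented above counts bulk-enqueue events, so the
 * userspace reader can derive the average bulk size per event. A hedged
 * sketch of that arithmetic, where rec is a struct datarec already summed
 * across CPUs (an illustrative variable, not defined in this sample):
 *
 *      double avg_bulk = rec.info ? (double)rec.processed / rec.info : 0.0;
 */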
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int map_id;             // offset:8;  size:4; signed:1;
        u32 act;                // offset:12; size:4; signed:0;
        int cpu;                // offset:16; size:4; signed:1;
        unsigned int drops;     // offset:20; size:4; signed:0;
        unsigned int processed; // offset:24; size:4; signed:0;
        int sched;              // offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
{
        struct datarec *rec;
        u32 key = 0;

        rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
        if (!rec)
                return 0;
        rec->processed += ctx->processed;
        rec->dropped += ctx->drops;

        /* Count times the kthread yielded the CPU via a schedule call */
        if (ctx->sched)
                rec->info++;

        return 0;
}

struct {
        __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
        __type(key, u32);
        __type(value, struct datarec);
        __uint(max_entries, 1);
} devmap_xmit_cnt SEC(".maps");

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct devmap_xmit_ctx {
        u64 __pad;              // First 8 bytes are not accessible by bpf code
        int from_ifindex;       // offset:8;  size:4; signed:1;
        u32 act;                // offset:12; size:4; signed:0;
        int to_ifindex;         // offset:16; size:4; signed:1;
        int drops;              // offset:20; size:4; signed:1;
        int sent;               // offset:24; size:4; signed:1;
        int err;                // offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_devmap_xmit")
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
{
        struct datarec *rec;
        u32 key = 0;

        rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
        if (!rec)
                return 0;
        rec->processed += ctx->sent;
        rec->dropped += ctx->drops;

        /* Record bulk events, then userspace can calc average bulk size */
        rec->info += 1;

        /* Record error cases, where no frames were sent */
        if (ctx->err)
                rec->err++;

        /* Catch the API error case where the driver's ndo_xdp_xmit reported
         * sending more frames than requested, making drops go negative
         */
        if (ctx->drops < 0)
                rec->err++;

        return 0; /* Filter event, matching the other handlers above */
}

char _license[] SEC("license") = "GPL";
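/* A minimal, hedged sketch of how a userspace loader could open this
 * object and attach every tracepoint program in it via libbpf; the
 * object filename here is an assumption:
 *
 *      struct bpf_object *obj;
 *      struct bpf_program *prog;
 *
 *      obj = bpf_object__open_file("xdp_monitor_kern.o", NULL);
 *      if (libbpf_get_error(obj))
 *              return 1;
 *      if (bpf_object__load(obj))
 *              return 1;
 *      // bpf_program__attach() auto-attaches based on the SEC() name
 *      bpf_object__for_each_program(prog, obj) {
 *              if (libbpf_get_error(bpf_program__attach(prog)))
 *                      return 1;
 *      }
 */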