/kernel/linux/linux-6.6/include/trace/events/
  xdp.h
     96: u32 map_id, u32 index),
     98: TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
    106: __field(u32, map_id)
    119: } else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
    129: __entry->map_id = map_id;
    134: " map_id=%d map_index=%d",
    138: __entry->err, __entry->map_id, __entry->map_index)
    146: u32 map_id, u32 index),
    147: TP_ARGS(dev, xdp, tgt, err, map_type, map_id, inde [all...]
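The lines above come from the xdp_redirect tracepoint template, which records map_type, map_id and map_index for every XDP redirect. As a rough illustration (not part of the listed files), a BTF-typed tracepoint program could count events per map_id as in the sketch below; the argument order follows the TP_ARGS shown above, while the program and map names (on_xdp_redirect, redirect_cnt) are made up for the example.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: count xdp_redirect events per map_id.
 * The argument list mirrors the TP_ARGS of the xdp_redirect template above;
 * "redirect_cnt" and "on_xdp_redirect" are illustrative names.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, u32);	/* map_id of the redirect target map */
	__type(value, u64);	/* number of redirect events seen */
} redirect_cnt SEC(".maps");

SEC("tp_btf/xdp_redirect")
int BPF_PROG(on_xdp_redirect, const struct net_device *dev,
	     const struct bpf_prog *xdp, const void *tgt, int err,
	     enum bpf_map_type map_type, u32 map_id, u32 index)
{
	u64 one = 1, *cnt;

	cnt = bpf_map_lookup_elem(&redirect_cnt, &map_id);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	else
		bpf_map_update_elem(&redirect_cnt, &map_id, &one, BPF_NOEXIST);
	return 0;
}

char _license[] SEC("license") = "GPL";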
/kernel/linux/linux-5.10/include/trace/events/
  xdp.h
    109: __field(u32, map_id)
    120: __entry->map_id = map ? map->id : 0;
    125: " map_id=%d map_index=%d",
    129: __entry->err, __entry->map_id, __entry->map_index)
    179: TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
    182: TP_ARGS(map_id, processed, drops, sched, xdp_stats),
    185: __field(int, map_id)
    197: __entry->map_id = map_id;
    209: " cpu=%d map_id [all...]
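The TP_PROTO at line 179 belongs to the cpumap enqueue/kthread events, whose first argument is the cpumap's map_id. A minimal sketch of a tp_btf handler for xdp_cpumap_enqueue, filtering on one map id the way xdp_sample.bpf.c does with cpumap_map_id, might look like this; watched_map_id, enqueued_total and on_cpumap_enqueue are illustrative names, only the argument list follows the tracepoint prototype.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: ignore cpumap enqueue events from other maps and sum
 * the packets enqueued toward the map we care about.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

const volatile int watched_map_id = 0;	/* set by the loader before load */
__u64 enqueued_total;			/* packets enqueued for that map */

SEC("tp_btf/xdp_cpumap_enqueue")
int BPF_PROG(on_cpumap_enqueue, int map_id, unsigned int processed,
	     unsigned int drops, int to_cpu)
{
	if (watched_map_id && watched_map_id != map_id)
		return 0;
	__sync_fetch_and_add(&enqueued_total, processed);
	return 0;
}

char _license[] SEC("license") = "GPL";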
/kernel/linux/linux-6.6/samples/bpf/
  syscall_tp_user.c
     25: static void verify_map(int map_id)  [in verify_map(), argument]
     30: if (bpf_map_lookup_elem(map_id, &key, &val) != 0) {  [in verify_map()]
     35: fprintf(stderr, "failed: map #%d returns value 0\n", map_id);  [in verify_map()]
     39: printf("verify map:%d val: %d\n", map_id, val);  [in verify_map()]
     42: if (bpf_map_update_elem(map_id, &key, &val, BPF_ANY) != 0) {  [in verify_map()]

  xdp_sample.bpf.c
    128: int BPF_PROG(tp_xdp_cpumap_enqueue, int map_id, unsigned int processed,  [in BPF_PROG(), argument]
    135: if (cpumap_map_id && cpumap_map_id != map_id)  [in BPF_PROG()]
    156: int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,  [in BPF_PROG(), argument]
    162: if (cpumap_map_id && cpumap_map_id != map_id)  [in BPF_PROG()]
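Note that in syscall_tp_user.c the parameter named map_id actually carries a map file descriptor: libbpf's bpf_map_lookup_elem() and bpf_map_update_elem() operate on fds. A small standalone sketch under that assumption, with an optional id-to-fd step via bpf_map_get_fd_by_id(), could be (check_counter is an illustrative name, not part of the sample):

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: read and reset one counter element of a map, given
 * either a map fd or a global map id (e.g. printed by "bpftool map list").
 */
#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>

static int check_counter(int map_fd)
{
	__u32 key = 0;
	__u64 val = 0;

	if (bpf_map_lookup_elem(map_fd, &key, &val) != 0) {
		perror("bpf_map_lookup_elem");
		return -1;
	}
	printf("key %u -> %llu\n", key, (unsigned long long)val);

	val = 0;	/* reset the counter */
	if (bpf_map_update_elem(map_fd, &key, &val, BPF_ANY) != 0) {
		perror("bpf_map_update_elem");
		return -1;
	}
	return 0;
}

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <map-id>\n", argv[0]);
		return 1;
	}
	/* Turn a global map id into an fd; needs the usual bpf() privileges. */
	fd = bpf_map_get_fd_by_id((__u32)strtoul(argv[1], NULL, 0));
	if (fd < 0) {
		perror("bpf_map_get_fd_by_id");
		return 1;
	}
	return check_counter(fd) ? 1 : 0;
}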
/kernel/linux/linux-5.10/kernel/bpf/
  map_iter.c
     10: u32 map_id;  [member]
     18: map = bpf_map_get_curr_or_next(&info->map_id);  [in bpf_map_seq_start()]
     32: ++info->map_id;  [in bpf_map_seq_next()]
     34: return bpf_map_get_curr_or_next(&info->map_id);  [in bpf_map_seq_next()]
    155: seq_printf(seq, "map_id:\t%u\n", aux->map->id);  [in bpf_iter_map_show_fdinfo()]
    161: info->iter.map.map_id = aux->map->id;  [in bpf_iter_map_fill_link_info()]

  cpumap.c
     54: int map_id; /* Back reference to map */  [member]
    379: trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);  [in cpu_map_kthread_run()]
    415: __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)  [in __cpu_map_entry_alloc(), argument]
    450: rcpu->map_id = map_id;  [in __cpu_map_entry_alloc()]
    458: "cpumap/%d/map:%d", cpu, map_id);  [in __cpu_map_entry_alloc()]
    701: trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);  [in bq_flush_to_queue()]
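map_iter.c backs the bpf_map iterator, which walks maps by increasing map_id and exposes the current id through fdinfo and link_info. As a hedged illustration, an "iter/bpf_map" program that prints each map's id could look like the sketch below; dump_bpf_map is an illustrative name and the context types come from vmlinux BTF.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch of an "iter/bpf_map" program: for every loaded map,
 * print its global id and max_entries to the seq_file backing the iterator.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct bpf_map *map = ctx->map;
	char fmt[] = "map_id: %u  max_entries: %u\n";
	__u64 args[2];

	if (!map)	/* NULL on the final call that ends the iteration */
		return 0;

	args[0] = map->id;
	args[1] = map->max_entries;
	bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));
	return 0;
}

char _license[] SEC("license") = "GPL";

Once attached and pinned, reading the resulting iterator file (e.g. via cat) produces one line per loaded map, similar to what "bpftool map list" reports.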
/kernel/linux/linux-5.10/samples/bpf/
  syscall_tp_user.c
     26: static void verify_map(int map_id)  [in verify_map(), argument]
     31: if (bpf_map_lookup_elem(map_id, &key, &val) != 0) {  [in verify_map()]
     36: fprintf(stderr, "failed: map #%d returns value 0\n", map_id);  [in verify_map()]
     40: if (bpf_map_update_elem(map_id, &key, &val, BPF_ANY) != 0) {  [in verify_map()]

  xdp_monitor_kern.c
     35: u32 map_id; // offset:28 size:4; signed:0;  [member]
    151: int map_id; // offset:8; size:4; signed:1;  [member]
    186: int map_id; // offset:8; size:4; signed:1;  [member]
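The offset comments in xdp_monitor_kern.c mirror the tracepoint record layouts exported under /sys/kernel/debug/tracing/events/xdp/.../format, so a classic tracepoint program can read map_id straight from its context. The sketch below follows the same pattern for xdp:xdp_redirect; the struct fields and offsets are copied from that format description (map_id at offset 28) and should be re-checked on the target kernel, and trace_xdp_redirect is just an example name.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch of a classic tracepoint handler. The field layout is
 * taken from the xdp_redirect format file, the same source the sample uses;
 * verify the offsets on the kernel the program will run on.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct xdp_redirect_ctx {
	__u64 __pad;		/* first 8 bytes are not accessible */
	int prog_id;		/* offset:8;  size:4; signed:1; */
	__u32 act;		/* offset:12; size:4; signed:0; */
	int ifindex;		/* offset:16; size:4; signed:1; */
	int err;		/* offset:20; size:4; signed:1; */
	int to_ifindex;		/* offset:24; size:4; signed:1; */
	__u32 map_id;		/* offset:28; size:4; signed:0; */
	int map_index;		/* offset:32; size:4; signed:1; */
};

SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	if (ctx->err)
		bpf_printk("redirect failed: map_id=%u err=%d\n",
			   ctx->map_id, ctx->err);
	return 0;
}

char _license[] SEC("license") = "GPL";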
/kernel/linux/linux-6.6/tools/perf/arch/arm64/util/
  header.c
    105: u64 map_id = strtoull(mapcpuid, NULL, 16);  [in strcmp_cpuid_str(), local]
    106: char map_id_variant = FIELD_GET(MIDR_VARIANT_MASK, map_id);  [in strcmp_cpuid_str()]
    107: char map_id_revision = FIELD_GET(MIDR_REVISION_MASK, map_id);  [in strcmp_cpuid_str()]
    114: if ((map_id & id_fields) != (id & id_fields))  [in strcmp_cpuid_str()]
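In this file map_id is an arm64 MIDR value parsed from a cpuid string, and FIELD_GET() extracts the variant and revision fields so they can be ignored when comparing CPU models. A standalone sketch of the same masking idea, using the architectural MIDR_EL1 bit positions directly instead of the kernel's FIELD_GET() macro, might be (midr_same_part is an illustrative name):

/* Hypothetical sketch: compare two MIDR-style cpuid values while ignoring
 * the variant (bits 23:20) and revision (bits 3:0) fields. Masks follow the
 * Arm MIDR_EL1 layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MIDR_REVISION_MASK	0x0000000fULL	/* bits [3:0]   */
#define MIDR_VARIANT_MASK	0x00f00000ULL	/* bits [23:20] */

static int midr_same_part(const char *map_cpuid, const char *cpuid)
{
	uint64_t map_id = strtoull(map_cpuid, NULL, 16);
	uint64_t id = strtoull(cpuid, NULL, 16);
	/* Everything except variant and revision must match. */
	uint64_t id_fields = ~(MIDR_REVISION_MASK | MIDR_VARIANT_MASK);

	return (map_id & id_fields) == (id & id_fields);
}

int main(void)
{
	/* Two MIDRs differing only in variant/revision compare as equal. */
	printf("%d\n", midr_same_part("0x410fd083", "0x411fd080"));
	return 0;
}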
/kernel/linux/linux-6.6/kernel/bpf/
  map_iter.c
     10: u32 map_id;  [member]
     18: map = bpf_map_get_curr_or_next(&info->map_id);  [in bpf_map_seq_start()]
     32: ++info->map_id;  [in bpf_map_seq_next()]
     34: return bpf_map_get_curr_or_next(&info->map_id);  [in bpf_map_seq_next()]
    154: seq_printf(seq, "map_id:\t%u\n", aux->map->id);  [in bpf_iter_map_show_fdinfo()]
    160: info->iter.map.map_id = aux->map->id;  [in bpf_iter_map_fill_link_info()]

  cpumap.c
     59: int map_id; /* Back reference to map */  [member]
    357: trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,  [in cpu_map_kthread_run()]
    426: rcpu->map_id = map->id;  [in __cpu_map_entry_alloc()]
    699: trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);  [in bq_flush_to_queue()]
    753: trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);  [in cpu_map_generic_redirect()]
/kernel/linux/linux-5.10/kernel/trace/
  trace_entries.h
    264: __field_desc( int, rw, map_id )
    271: __entry->map_id, __entry->opcode, __entry->width)
    283: __field_desc( int, map, map_id )
    289: __entry->map_id, __entry->opcode)

  trace_mmiotrace.c
    184: rw->width, secs, usec_rem, rw->map_id,  [in mmio_print_rw()]
    191: rw->width, secs, usec_rem, rw->map_id,  [in mmio_print_rw()]
    199: secs, usec_rem, rw->map_id,  [in mmio_print_rw()]
    229: secs, usec_rem, m->map_id,  [in mmio_print_map()]
    236: secs, usec_rem, m->map_id, 0UL, 0);  [in mmio_print_map()]
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/prog_tests/
  pinning.c
     36: __u32 map_id, map_id2, duration = 0;  [in test_pinning(), local]
     82: map_id = get_map_id(obj, "pinmap");  [in test_pinning()]
     83: if (!map_id)  [in test_pinning()]
    100: if (CHECK(map_id != map_id2, "check reuse",
    101: "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2))  [in test_pinning()]
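The pinning test compares map ids to verify that re-opening a pinned path yields the same map; its get_map_id() helper reads the id with the BPF_OBJ_GET_INFO_BY_FD command. A minimal sketch of that lookup via libbpf, with an illustrative helper name, could be:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch: return the global id of a BPF map given its fd,
 * the same information the pinning selftest compares for its reuse check.
 */
#include <string.h>
#include <bpf/bpf.h>

static __u32 map_id_from_fd(int map_fd)
{
	struct bpf_map_info info;
	__u32 info_len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_obj_get_info_by_fd(map_fd, &info, &info_len))
		return 0;	/* treat id 0 as failure, as the test does */
	return info.id;
}

Comparing the ids of two fds is a reliable reuse check because the id is assigned once per map object, whereas each open of a pinned path returns a distinct fd.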
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/prog_tests/
  pinning.c
     36: __u32 map_id, map_id2, duration = 0;  [in test_pinning(), local]
     82: map_id = get_map_id(obj, "pinmap");  [in test_pinning()]
     83: if (!map_id)  [in test_pinning()]
    100: if (CHECK(map_id != map_id2, "check reuse",
    101: "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2))  [in test_pinning()]
/kernel/linux/linux-6.6/kernel/trace/
  trace_mmiotrace.c
    182: rw->width, secs, usec_rem, rw->map_id,  [in mmio_print_rw()]
    189: rw->width, secs, usec_rem, rw->map_id,  [in mmio_print_rw()]
    197: secs, usec_rem, rw->map_id,  [in mmio_print_rw()]
    227: secs, usec_rem, m->map_id,  [in mmio_print_map()]
    234: secs, usec_rem, m->map_id, 0UL, 0);  [in mmio_print_map()]

  trace_entries.h
    290: __field_desc( int, rw, map_id )
    297: __entry->map_id, __entry->opcode, __entry->width)
    309: __field_desc( int, map, map_id )
    315: __entry->map_id, __entry->opcode)
/kernel/linux/linux-5.10/include/linux/
  mmiotrace.h
     92: int map_id;  [member]
    101: int map_id;  [member]
/kernel/linux/linux-6.6/include/linux/
  mmiotrace.h
     92: int map_id;  [member]
    101: int map_id;  [member]
/kernel/linux/linux-6.6/tools/testing/selftests/bpf/map_tests/
  map_in_map_batch_ops.c
     35: __u32 map_key = 0, map_id;  [in create_inner_maps(), local]
     57: map_id = get_map_id_from_fd(map_fd);  [in create_inner_maps()]
     58: ret = bpf_map_update_elem(map_fd, &map_key, &map_id, 0);  [in create_inner_maps()]
/kernel/linux/linux-5.10/drivers/net/ethernet/ibm/
  ibmvnic.h
    310: u8 map_id;  [member]
    605: u8 map_id;  [member]
    615: u8 map_id;  [member]
    624: u8 map_id;  [member]
    632: u8 map_id;  [member]
    880: u8 map_id;  [member]
   1066: u8 map_id;  [member]
/kernel/linux/linux-5.10/arch/x86/mm/
  mmio-mod.c
    150: my_trace->map_id = trace->id;  [in pre()]
    250: map.map_id = trace->id;  [in ioremap_trace_core()]
    307: map.map_id = (found_trace) ? found_trace->id : -1;  [in iounmap_trace_core()]
/kernel/linux/linux-6.6/arch/x86/mm/
  mmio-mod.c
    148: my_trace->map_id = trace->id;  [in pre()]
    248: map.map_id = trace->id;  [in ioremap_trace_core()]
    305: map.map_id = (found_trace) ? found_trace->id : -1;  [in iounmap_trace_core()]
/kernel/linux/linux-6.6/drivers/net/ethernet/ibm/
  ibmvnic.h
    349: u8 map_id;  [member]
    564: u8 map_id;  [member]
    574: u8 map_id;  [member]
    583: u8 map_id;  [member]
    591: u8 map_id;  [member]
    835: u8 map_id;  [member]
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/bpf/
  offload.c
     58: record->map_id = map->id;  [in nfp_map_ptr_record()]
    450: u32 pkt_size, data_size, map_id;  [in nfp_bpf_event_output(), local]
    459: map_id = map_id_full;  [in nfp_bpf_event_output()]
    467: record = rhashtable_lookup(&bpf->maps_neutral, &map_id,  [in nfp_bpf_event_output()]