Lines Matching defs:to
104 * used as an inner map. It is a runtime check to ensure
105 * an inner map can be inserted into an outer map.
109 * map_meta_equal has to ensure the map being inserted has the same
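For context, a minimal sketch of the runtime check described above, using only the public struct bpf_map fields; the in-tree implementation lives in kernel/bpf/map_in_map.c, and the helper name here is illustrative:

/* Sketch: an inner map is accepted only if it matches the template map
 * the outer map was created with.
 */
static bool inner_map_compatible(const struct bpf_map *meta_map,
				 const struct bpf_map *inner_map)
{
	return meta_map->map_type == inner_map->map_type &&
	       meta_map->key_size == inner_map->key_size &&
	       meta_map->value_size == inner_map->value_size &&
	       meta_map->map_flags == inner_map->map_flags;
}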
118 /* bpf_iter info used to open a seq_file */
154 /* The 3rd and 4th cachelines hold misc members to avoid false sharing
233 /* bpf_type_flag contains a set of flags that are applicable to the values of
270 /* the following constraints are used to prototype
273 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
274 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
275 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
276 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
278 /* the following constraints are used to prototype bpf_memcmp() and other
281 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
282 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory that does not need to be initialized,
290 ARG_PTR_TO_CTX, /* pointer to context */
292 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
293 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
294 ARG_PTR_TO_INT, /* pointer to int */
295 ARG_PTR_TO_LONG, /* pointer to long */
296 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
297 ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
298 ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
300 ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
301 ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
311 /* This must be the last entry. Its purpose is to ensure the enum is
312 * wide enough to hold the higher bits reserved for bpf_type_flag.
322 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
323 RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
324 RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
325 RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
326 RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */
327 RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to valid memory or a btf_id */
328 RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
339 /* This must be the last entry. Its purpose is to ensure the enum is
340 * wide enough to hold the higher bits reserved for bpf_type_flag.
346 /* eBPF function prototype used by the verifier to allow BPF_CALLs from eBPF programs
347 * to in-kernel helper functions and for adjusting the imm32 field in BPF_CALL
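To make the ARG_*/RET_* constraints above concrete, here is a helper prototype sketch modeled on the in-tree bpf_map_lookup_elem proto; the exact ret_type spelling (plain vs. PTR_MAYBE_NULL-flagged) varies across kernel versions:

/* The verifier uses ret_type/argN_type to type-check every BPF_CALL
 * site that targets this helper.
 */
const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,	/* arg1 must be a map pointer */
	.arg2_type	= ARG_PTR_TO_MAP_KEY,	/* arg2 must point at key_size bytes */
};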
379 /* bpf_context is an intentionally undefined structure. A pointer to bpf_context is
380 * the first argument to eBPF programs.
393 * if (range > 0) then [ptr, ptr + range - off) is safe to access
400 PTR_TO_CTX, /* reg points to bpf_context */
401 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
402 PTR_TO_MAP_VALUE, /* reg points to map element value */
405 PTR_TO_PACKET, /* reg points to skb->data */
407 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
408 PTR_TO_SOCKET, /* reg points to struct bpf_sock */
409 PTR_TO_SOCK_COMMON, /* reg points to sock_common */
410 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
411 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
412 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
413 /* PTR_TO_BTF_ID points to a kernel struct that does not need
414 * to be null checked by the BPF program. This does not imply the
424 /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
425 * been checked for null. Used primarily to inform the verifier
428 PTR_TO_MEM, /* reg points to valid memory region */
429 PTR_TO_BUF, /* reg points to a read/write buffer */
430 PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
440 /* This must be the last entry. Its purpose is to ensure the enum is
441 * wide enough to hold the higher bits reserved for bpf_type_flag.
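To make the range comment at 393 above concrete, a minimal BPF-side sketch (program and section names are illustrative): the comparison against data_end is what gives the PTR_TO_PACKET register a non-zero range.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_range_sketch(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;	/* PTR_TO_PACKET */
	void *data_end = (void *)(long)ctx->data_end;	/* PTR_TO_PACKET_END */
	struct ethhdr *eth = data;

	/* After this check the verifier records range >= sizeof(*eth),
	 * so the load below is accepted.
	 */
	if ((void *)(eth + 1) > data_end)
		return XDP_PASS;

	return eth->h_proto == bpf_htons(ETH_P_IP) ? XDP_PASS : XDP_DROP;
}

char LICENSE[] SEC("license") = "GPL";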
448 * back to the verifier.
531 /* Restore arguments before returning from the trampoline to let the original function
540 /* Skip the current frame and return to the parent. Makes sense for fentry/fexit
548 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
560 * fentry = a set of programs to run before returning from the trampoline
565 * fentry = a set of programs to run before calling the original function
566 * fexit = a set of programs to run after the original function
571 * fentry = a set of programs to run before returning from the trampoline
574 * fentry = a set of programs to run before calling the original function
575 * fexit = a set of programs to run after the original function
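The fentry/fexit program sets described above are written like the following libbpf-style sketch (the traced function do_unlinkat and the program names are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Runs from the trampoline before the traced function body. */
SEC("fentry/do_unlinkat")
int BPF_PROG(sketch_enter, int dfd, struct filename *name)
{
	bpf_printk("do_unlinkat entered");
	return 0;
}

/* Runs from the trampoline after the traced function returns; this use
 * case needs BPF_TRAMP_F_CALL_ORIG so the original function is called.
 */
SEC("fexit/do_unlinkat")
int BPF_PROG(sketch_exit, int dfd, struct filename *name, long ret)
{
	bpf_printk("do_unlinkat returned %ld", ret);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";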
621 /* serializes access to fields of this trampoline */
705 void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to);
734 static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to)
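For reference, the XDP fast path uses this API roughly as in the sketch below, modeled on net/core/filter.c; treat it as illustrative rather than a verbatim quote:

/* One dispatcher instance; attach/detach paths swap programs in and
 * out of its direct-call slot via bpf_dispatcher_change_prog().
 */
DEFINE_BPF_DISPATCHER(xdp)

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
{
	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}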
787 u32 attach_btf_id; /* in-kernel BTF type id to attach to */
799 bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
836 * one-to-one mapping to linfo:
845 /* subprog can use linfo_idx to access its first linfo and
861 * is going to use this map or by the first program whose FD is
862 * stored in the map to make sure that all callers and callees have
1028 /* an array of programs to be executed under rcu_read_lock().
1035 * The user has to keep a refcnt on the program and make sure the program
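A sketch of the reader side that the two comments above describe, simplified from the in-tree BPF_PROG_RUN_ARRAY machinery:

static u32 run_prog_array(struct bpf_prog_array __rcu *array_rcu, void *ctx)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog_array *array;
	u32 ret = 1;

	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	/* The items[] list is NULL-terminated; each prog is kept alive by
	 * the refcount taken by whoever installed it in the array.
	 */
	for (item = &array->items[0]; item->prog; item++)
		ret &= bpf_prog_run(item->prog, ctx);
	rcu_read_unlock();
	return ret;
}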
1101 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
1105 * used by TCP to call tcp_enter_cwr()
1113 * This macro then converts it to one of the NET_XMIT codes or an error
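The conversion the comment describes reduces the program verdict to an allow/deny decision plus a congestion-notification bit; a simplified sketch (the in-tree macro derives the cn flag from a BPF_RET_SET_CN bit):

static int egress_verdict_sketch(bool allowed, bool cn)
{
	if (allowed)
		return cn ? NET_XMIT_CN : NET_XMIT_SUCCESS;
	/* denied: drop, either as a congestion drop or as a plain error */
	return cn ? NET_XMIT_DROP : -EPERM;
}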
1161 * Block execution of BPF programs attached to instrumentation (perf,
1162 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
1168 * might lead to inconsistent state. Use the raw variants for non-RT
1169 * kernels as migrate_disable() maps to preempt_disable() so the slightly
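In spirit this guard is a per-CPU recursion counter; a sketch of the pattern, close to the in-tree bpf_disable_instrumentation():

DECLARE_PER_CPU(int, bpf_prog_active);

static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);	/* raw variant, see above */
}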
1374 * forced to use 'long' reads/writes to try to atomically copy long counters.
1377 * size 8 or 16 bytes, so ask the compiler to inline it.
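The copy routine the comment describes boils down to a word-at-a-time loop; a sketch along the lines of the in-tree bpf_long_memcpy():

static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	/* size is known to be 8 or 16 bytes, so this loop unrolls; long
	 * accesses keep the copy tear-free on 32-bit architectures.
	 */
	size /= sizeof(long);
	while (size--)
		data_race(*ldst++ = *lsrc++);
}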