/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/percpu-refcount.h>

#define BPF_TWO 2

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map, u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp, struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, void *owner, u32 size);
	struct bpf_local_storage __rcu **(*map_owner_storage_ptr)(void *owner);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification. When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the inserted map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0, const struct bpf_map *meta1);

	/* BTF name and id of struct allocated by map_alloc */
	const char *const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
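/* Illustrative sketch of how a map implementation typically wires up these
 * ops. All "sketch_map_*" names are hypothetical and not part of this header;
 * real implementations live in kernel/bpf/ (e.g. arraymap.c, hashtab.c):
 *
 *	static struct bpf_map *sketch_map_alloc(union bpf_attr *attr)
 *	{
 *		struct bpf_map *map = bpf_map_area_alloc(sizeof(*map), NUMA_NO_NODE);
 *
 *		if (!map)
 *			return ERR_PTR(-ENOMEM);
 *		bpf_map_init_from_attr(map, attr);
 *		return map;
 *	}
 *
 *	static int sketch_map_btf_id;
 *	const struct bpf_map_ops sketch_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc		= sketch_map_alloc,
 *		.map_free		= sketch_map_free,
 *		.map_get_next_key	= sketch_map_get_next_key,
 *		.map_lookup_elem	= sketch_map_lookup_elem,
 *		.map_update_elem	= sketch_map_update_elem,
 *		.map_delete_elem	= sketch_map_delete_elem,
 *		.map_check_btf		= map_check_no_btf,
 *		.map_btf_name		= "bpf_map",
 *		.map_btf_id		= &sketch_map_btf_id,
 *	};
 */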
struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
156 */ 157 atomic64_t refcnt ____cacheline_aligned; 158 atomic64_t usercnt; 159 struct work_struct work; 160 struct mutex freeze_mutex; 161 u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */ 162}; 163 164static inline bool map_value_has_spin_lock(const struct bpf_map *map) 165{ 166 return map->spin_lock_off >= 0; 167} 168 169static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) 170{ 171 if (likely(!map_value_has_spin_lock(map))) { 172 return; 173 } 174 *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = (struct bpf_spin_lock) {}; 175} 176 177/* copy everything but bpf_spin_lock */ 178static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) 179{ 180 if (unlikely(map_value_has_spin_lock(map))) { 181 u32 off = map->spin_lock_off; 182 183 memcpy(dst, src, off); 184 memcpy(dst + off + sizeof(struct bpf_spin_lock), src + off + sizeof(struct bpf_spin_lock), 185 map->value_size - off - sizeof(struct bpf_spin_lock)); 186 } else { 187 memcpy(dst, src, map->value_size); 188 } 189} 190void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); 191int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); 192 193struct bpf_offload_dev; 194struct bpf_offloaded_map; 195 196struct bpf_map_dev_ops { 197 int (*map_get_next_key)(struct bpf_offloaded_map *map, void *key, void *next_key); 198 int (*map_lookup_elem)(struct bpf_offloaded_map *map, void *key, void *value); 199 int (*map_update_elem)(struct bpf_offloaded_map *map, void *key, void *value, u64 flags); 200 int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); 201}; 202 203struct bpf_offloaded_map { 204 struct bpf_map map; 205 struct net_device *netdev; 206 const struct bpf_map_dev_ops *dev_ops; 207 void *dev_priv; 208 struct list_head offloads; 209}; 210 211static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) 212{ 213 return container_of(map, struct bpf_offloaded_map, map); 214} 215 216static inline bool bpf_map_offload_neutral(const struct bpf_map *map) 217{ 218 return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; 219} 220 221static inline bool bpf_map_support_seq_show(const struct bpf_map *map) 222{ 223 return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && map->ops->map_seq_show_elem; 224} 225 226int map_check_no_btf(const struct bpf_map *map, const struct btf *btf, const struct btf_type *key_type, 227 const struct btf_type *value_type); 228 229bool bpf_map_meta_equal(const struct bpf_map *meta0, const struct bpf_map *meta1); 230 231extern const struct bpf_map_ops bpf_map_offload_ops; 232 233/* bpf_type_flag contains a set of flags that are applicable to the values of 234 * arg_type, ret_type and reg_type. For example, a pointer value may be null, 235 * or a memory is read-only. We classify types into two categories: base types 236 * and extended types. Extended types are base types combined with a type flag. 237 * 238 * Currently there are no more than 32 base types in arg_type, ret_type and 239 * reg_types. 240 */ 241#define BPF_BASE_TYPE_BITS 8 242 243enum bpf_type_flag { 244 /* PTR may be NULL. */ 245 PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), 246 247 /* MEM is read-only. When applied on bpf_arg, it indicates the arg is 248 * compatible with both mutable and immutable memory. 249 */ 250 MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), 251 252 /* MEM was "allocated" from a different helper, and cannot be mixed 253 * with regular non-MEM_ALLOC'ed MEM types. 
254 */ 255 MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS), 256 257 __BPF_TYPE_LAST_FLAG = MEM_ALLOC, 258}; 259 260/* Max number of base types. */ 261#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) 262 263/* Max number of all types. */ 264#define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) 265 266/* function argument constraints */ 267enum bpf_arg_type { 268 ARG_DONTCARE = 0, /* unused argument in helper function */ 269 270 /* the following constraints used to prototype 271 * bpf_map_lookup/update/delete_elem() functions 272 */ 273 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ 274 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ 275 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ 276 ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ 277 278 /* the following constraints used to prototype bpf_memcmp() and other 279 * functions that access data on eBPF program stack 280 */ 281 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 282 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, 283 * helper function must fill all bytes or clear 284 * them in error case. 285 */ 286 287 ARG_CONST_SIZE, /* number of bytes accessed from memory */ 288 ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ 289 290 ARG_PTR_TO_CTX, /* pointer to context */ 291 ARG_ANYTHING, /* any (initialized) argument is ok */ 292 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ 293 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ 294 ARG_PTR_TO_INT, /* pointer to int */ 295 ARG_PTR_TO_LONG, /* pointer to long */ 296 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ 297 ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ 298 ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ 299 ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ 300 ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ 301 ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ 302 __BPF_ARG_TYPE_MAX, 303 304 /* Extended arg_types. */ 305 ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, 306 ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, 307 ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, 308 ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, 309 ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM, 310 311 /* This must be the last entry. Its purpose is to ensure the enum is 312 * wide enough to hold the higher bits reserved for bpf_type_flag. 313 */ 314 __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, 315}; 316static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); 317 318/* type of values returned from helper functions */ 319enum bpf_return_type { 320 RET_INTEGER, /* function returns integer */ 321 RET_VOID, /* function doesn't return anything */ 322 RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ 323 RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ 324 RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ 325 RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ 326 RET_PTR_TO_ALLOC_MEM, /* returns a pointer to dynamically allocated memory */ 327 RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ 328 RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ 329 __BPF_RET_TYPE_MAX, 330 331 /* Extended ret_types. 
*/ 332 RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, 333 RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, 334 RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, 335 RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, 336 RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM, 337 RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, 338 339 /* This must be the last entry. Its purpose is to ensure the enum is 340 * wide enough to hold the higher bits reserved for bpf_type_flag. 341 */ 342 __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, 343}; 344static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); 345 346/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs 347 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL 348 * instructions after verifying 349 */ 350struct bpf_func_proto { 351 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 352 bool gpl_only; 353 bool pkt_access; 354 enum bpf_return_type ret_type; 355 union { 356 struct { 357 enum bpf_arg_type arg1_type; 358 enum bpf_arg_type arg2_type; 359 enum bpf_arg_type arg3_type; 360 enum bpf_arg_type arg4_type; 361 enum bpf_arg_type arg5_type; 362 }; 363 enum bpf_arg_type arg_type[5]; 364 }; 365 union { 366 struct { 367 u32 *arg1_btf_id; 368 u32 *arg2_btf_id; 369 u32 *arg3_btf_id; 370 u32 *arg4_btf_id; 371 u32 *arg5_btf_id; 372 }; 373 u32 *arg_btf_id[5]; 374 }; 375 int *ret_btf_id; /* return value btf_id */ 376 bool (*allowed)(const struct bpf_prog *prog); 377}; 378 379/* bpf_context is intentionally undefined structure. Pointer to bpf_context is 380 * the first argument to eBPF programs. 381 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' 382 */ 383struct bpf_context; 384 385enum bpf_access_type { BPF_READ = 1, BPF_WRITE = 2 }; 386 387/* types of values stored in eBPF registers */ 388/* Pointer types represent: 389 * pointer 390 * pointer + imm 391 * pointer + (u16) var 392 * pointer + (u16) var + imm 393 * if (range > 0) then [ptr, ptr + range - off) is safe to access 394 * if (id > 0) means that some 'var' was added 395 * if (off > 0) means that 'imm' was added 396 */ 397enum bpf_reg_type { 398 NOT_INIT = 0, /* nothing was written into register */ 399 SCALAR_VALUE, /* reg doesn't contain a valid pointer */ 400 PTR_TO_CTX, /* reg points to bpf_context */ 401 CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ 402 PTR_TO_MAP_VALUE, /* reg points to map element value */ 403 PTR_TO_STACK, /* reg == frame_pointer + offset */ 404 PTR_TO_PACKET_META, /* skb->data - meta_len */ 405 PTR_TO_PACKET, /* reg points to skb->data */ 406 PTR_TO_PACKET_END, /* skb->data + headlen */ 407 PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ 408 PTR_TO_SOCKET, /* reg points to struct bpf_sock */ 409 PTR_TO_SOCK_COMMON, /* reg points to sock_common */ 410 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ 411 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ 412 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ 413 /* PTR_TO_BTF_ID points to a kernel struct that does not need 414 * to be null checked by the BPF program. This does not imply the 415 * pointer is _not_ null and in practice this can easily be a null 416 * pointer when reading pointer chains. The assumption is program 417 * context will handle null pointer dereference typically via fault 418 * handling. 
The verifier must keep this in mind and can make no 419 * assumptions about null or non-null when doing branch analysis. 420 * Further, when passed into helpers the helpers can not, without 421 * additional context, assume the value is non-null. 422 */ 423 PTR_TO_BTF_ID, 424 /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not 425 * been checked for null. Used primarily to inform the verifier 426 * an explicit null check is required for this struct. 427 */ 428 PTR_TO_MEM, /* reg points to valid memory region */ 429 PTR_TO_BUF, /* reg points to a read/write buffer */ 430 PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ 431 __BPF_REG_TYPE_MAX, 432 433 /* Extended reg_types. */ 434 PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, 435 PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, 436 PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, 437 PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, 438 PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, 439 440 /* This must be the last entry. Its purpose is to ensure the enum is 441 * wide enough to hold the higher bits reserved for bpf_type_flag. 442 */ 443 __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, 444}; 445static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); 446 447/* The information passed from prog-specific *_is_valid_access 448 * back to the verifier. 449 */ 450struct bpf_insn_access_aux { 451 enum bpf_reg_type reg_type; 452 union { 453 int ctx_field_size; 454 u32 btf_id; 455 }; 456 struct bpf_verifier_log *log; /* for verbose logs */ 457}; 458 459static inline void bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) 460{ 461 aux->ctx_field_size = size; 462} 463 464struct bpf_prog_ops { 465 int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); 466}; 467 468struct bpf_verifier_ops { 469 /* return eBPF function prototype for verification */ 470 const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id, const struct bpf_prog *prog); 471 472 /* return true if 'size' wide access at offset 'off' within bpf_context 473 * with 'type' (read or write) is allowed 474 */ 475 bool (*is_valid_access)(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, 476 struct bpf_insn_access_aux *info); 477 int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); 478 int (*gen_ld_abs)(const struct bpf_insn *orig, struct bpf_insn *insn_buf); 479 u32 (*convert_ctx_access)(enum bpf_access_type type, const struct bpf_insn *src, struct bpf_insn *dst, 480 struct bpf_prog *prog, u32 *target_size); 481 int (*btf_struct_access)(struct bpf_verifier_log *log, const struct btf_type *t, int off, int size, 482 enum bpf_access_type atype, u32 *next_btf_id); 483}; 484 485struct bpf_prog_offload_ops { 486 /* verifier basic callbacks */ 487 int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); 488 int (*finalize)(struct bpf_verifier_env *env); 489 /* verifier optimization callbacks (called after .finalize) */ 490 int (*replace_insn)(struct bpf_verifier_env *env, u32 off, struct bpf_insn *insn); 491 int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); 492 /* program management callbacks */ 493 int (*prepare)(struct bpf_prog *prog); 494 int (*translate)(struct bpf_prog *prog); 495 void (*destroy)(struct bpf_prog *prog); 496}; 497 498struct bpf_prog_offload { 499 struct bpf_prog *prog; 500 struct net_device *netdev; 501 struct bpf_offload_dev 
				*offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type { BPF_CGROUP_STORAGE_SHARED, BPF_CGROUP_STORAGE_PERCPU, __BPF_CGROUP_STORAGE_MAX };

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(BPF_TWO * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *        fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *        orig_call = original callback addr or direct function addr
 *        fentry = a set of programs to run before calling original function
 *        fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags, struct bpf_tramp_progs *tprogs,
				void *orig_call);
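/* Illustrative sketch (not part of this header): a caller updating a
 * trampoline would typically pick flags based on which program kinds are
 * attached, roughly along these lines. Variable names (im, image, tr,
 * tprogs) are hypothetical here:
 *
 *	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
 *	int err;
 *
 *	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
 *	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
 *		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *
 *	err = arch_prepare_bpf_trampoline(im, image, image_end,
 *					  &tr->func.model, flags, tprogs,
 *					  tr->func.addr);
 */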
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
void notrace __bpf_prog_enter_sleepable(void);
void notrace __bpf_prog_exit_sleepable(void);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind.
*/ 638 int progs_cnt[BPF_TRAMP_MAX]; 639 /* Executable image of trampoline */ 640 struct bpf_tramp_image *cur_image; 641 u64 selector; 642}; 643 644struct bpf_attach_target_info { 645 struct btf_func_model fmodel; 646 long tgt_addr; 647 const char *tgt_name; 648 const struct btf_type *tgt_type; 649}; 650 651#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ 652 653struct bpf_dispatcher_prog { 654 struct bpf_prog *prog; 655 refcount_t users; 656}; 657 658struct bpf_dispatcher { 659 /* dispatcher mutex */ 660 struct mutex mutex; 661 void *func; 662 struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; 663 int num_progs; 664 void *image; 665 u32 image_off; 666 struct bpf_ksym ksym; 667}; 668 669static __always_inline unsigned int bpf_dispatcher_nop_func(const void *ctx, const struct bpf_insn *insnsi, 670 unsigned int (*bpf_func)(const void *, 671 const struct bpf_insn *)) 672{ 673 return bpf_func(ctx, insnsi); 674} 675#ifdef CONFIG_BPF_JIT 676int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); 677int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr); 678struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); 679void bpf_trampoline_put(struct bpf_trampoline *tr); 680#define BPF_DISPATCHER_INIT(_name) \ 681 { \ 682 .mutex = __MUTEX_INITIALIZER(_name.mutex), .func = &_name##_func, .progs = {}, .num_progs = 0, .image = NULL, \ 683 .image_off = 0, \ 684 .ksym = { \ 685 .name = #_name, \ 686 .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ 687 }, \ 688 } 689 690#define DEFINE_BPF_DISPATCHER(name) \ 691 noinline unsigned int bpf_dispatcher_##name##_func( \ 692 const void *ctx, const struct bpf_insn *insnsi, \ 693 unsigned int (*bpf_func)(const void *, const struct bpf_insn *)) \ 694 { \ 695 return bpf_func(ctx, insnsi); \ 696 } \ 697 EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ 698 struct bpf_dispatcher bpf_dispatcher_##name = BPF_DISPATCHER_INIT(bpf_dispatcher_##name); 699#define DECLARE_BPF_DISPATCHER(name) \ 700 unsigned int bpf_dispatcher_##name##_func(const void *ctx, const struct bpf_insn *insnsi, \ 701 unsigned int (*bpf_func)(const void *, const struct bpf_insn *)); \ 702 extern struct bpf_dispatcher bpf_dispatcher_##name; 703#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func 704#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) 705void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to); 706/* Called only from JIT-enabled code, so there's no need for stubs. 
*/ 707void *bpf_jit_alloc_exec_page(void); 708void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); 709void bpf_image_ksym_del(struct bpf_ksym *ksym); 710void bpf_ksym_add(struct bpf_ksym *ksym); 711void bpf_ksym_del(struct bpf_ksym *ksym); 712int bpf_jit_charge_modmem(u32 pages); 713void bpf_jit_uncharge_modmem(u32 pages); 714#else 715static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) 716{ 717 return -ENOTSUPP; 718} 719static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr) 720{ 721 return -ENOTSUPP; 722} 723static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info) 724{ 725 return ERR_PTR(-EOPNOTSUPP); 726} 727static inline void bpf_trampoline_put(struct bpf_trampoline *tr) 728{ 729} 730#define DEFINE_BPF_DISPATCHER(name) 731#define DECLARE_BPF_DISPATCHER(name) 732#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func 733#define BPF_DISPATCHER_PTR(name) NULL 734static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to) 735{ 736} 737static inline bool is_bpf_image_address(unsigned long address) 738{ 739 return false; 740} 741#endif 742 743struct bpf_func_info_aux { 744 u16 linkage; 745 bool unreliable; 746}; 747 748enum bpf_jit_poke_reason { 749 BPF_POKE_REASON_TAIL_CALL, 750}; 751 752/* Descriptor of pokes pointing /into/ the JITed image. */ 753struct bpf_jit_poke_descriptor { 754 void *tailcall_target; 755 void *tailcall_bypass; 756 void *bypass_addr; 757 void *aux; 758 union { 759 struct { 760 struct bpf_map *map; 761 u32 key; 762 } tail_call; 763 }; 764 bool tailcall_target_stable; 765 u8 adj_off; 766 u16 reason; 767 u32 insn_idx; 768}; 769 770/* reg_type info for ctx arguments */ 771struct bpf_ctx_arg_aux { 772 u32 offset; 773 enum bpf_reg_type reg_type; 774 u32 btf_id; 775}; 776 777struct bpf_prog_aux { 778 atomic64_t refcnt; 779 u32 used_map_cnt; 780 u32 max_ctx_offset; 781 u32 max_pkt_offset; 782 u32 max_tp_access; 783 u32 stack_depth; 784 u32 id; 785 u32 func_cnt; /* used by non-func prog as the number of func progs */ 786 u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ 787 u32 attach_btf_id; /* in-kernel BTF type id to attach to */ 788 u32 ctx_arg_info_size; 789 u32 max_rdonly_access; 790 u32 max_rdwr_access; 791 const struct bpf_ctx_arg_aux *ctx_arg_info; 792 struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ 793 struct bpf_prog *dst_prog; 794 struct bpf_trampoline *dst_trampoline; 795 enum bpf_prog_type saved_dst_prog_type; 796 enum bpf_attach_type saved_dst_attach_type; 797 bool verifier_zext; /* Zero extensions has been inserted by verifier. */ 798 bool offload_requested; 799 bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ 800 bool func_proto_unreliable; 801 bool sleepable; 802 bool tail_call_reachable; 803 struct hlist_node tramp_hlist; 804 /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ 805 const struct btf_type *attach_func_proto; 806 /* function name for valid attach_btf_id */ 807 const char *attach_func_name; 808 struct bpf_prog **func; 809 void *jit_data; /* JIT specific data. 
arch dependent */ 810 struct bpf_jit_poke_descriptor *poke_tab; 811 u32 size_poke_tab; 812 struct bpf_ksym ksym; 813 const struct bpf_prog_ops *ops; 814 struct bpf_map **used_maps; 815 struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ 816 struct bpf_prog *prog; 817 struct user_struct *user; 818 u64 load_time; /* ns since boottime */ 819 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 820 char name[BPF_OBJ_NAME_LEN]; 821#ifdef CONFIG_SECURITY 822 void *security; 823#endif 824 struct bpf_prog_offload *offload; 825 struct btf *btf; 826 struct bpf_func_info *func_info; 827 struct bpf_func_info_aux *func_info_aux; 828 /* bpf_line_info loaded from userspace. linfo->insn_off 829 * has the xlated insn offset. 830 * Both the main and sub prog share the same linfo. 831 * The subprog can access its first linfo by 832 * using the linfo_idx. 833 */ 834 struct bpf_line_info *linfo; 835 /* jited_linfo is the jited addr of the linfo. It has a 836 * one to one mapping to linfo: 837 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. 838 * Both the main and sub prog share the same jited_linfo. 839 * The subprog can access its first jited_linfo by 840 * using the linfo_idx. 841 */ 842 void **jited_linfo; 843 u32 func_info_cnt; 844 u32 nr_linfo; 845 /* subprog can use linfo_idx to access its first linfo and 846 * jited_linfo. 847 * main prog always has linfo_idx == 0 848 */ 849 u32 linfo_idx; 850 u32 num_exentries; 851 struct exception_table_entry *extable; 852 struct bpf_prog_stats __percpu *stats; 853 union { 854 struct work_struct work; 855 struct rcu_head rcu; 856 }; 857}; 858 859struct bpf_array_aux { 860 /* 'Ownership' of prog array is claimed by the first program that 861 * is going to use this map or by the first program which FD is 862 * stored in the map to make sure that all callers and callees have 863 * the same prog type and JITed flag. 864 */ 865 struct { 866 spinlock_t lock; 867 enum bpf_prog_type type; 868 bool jited; 869 } owner; 870 /* Programs with direct jumps into programs part of this array. 
*/ 871 struct list_head poke_progs; 872 struct bpf_map *map; 873 struct mutex poke_mutex; 874 struct work_struct work; 875}; 876 877struct bpf_link { 878 atomic64_t refcnt; 879 u32 id; 880 enum bpf_link_type type; 881 const struct bpf_link_ops *ops; 882 struct bpf_prog *prog; 883 struct work_struct work; 884}; 885 886struct bpf_link_ops { 887 void (*release)(struct bpf_link *link); 888 void (*dealloc)(struct bpf_link *link); 889 int (*detach)(struct bpf_link *link); 890 int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog); 891 void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); 892 int (*fill_link_info)(const struct bpf_link *link, struct bpf_link_info *info); 893}; 894 895struct bpf_link_primer { 896 struct bpf_link *link; 897 struct file *file; 898 int fd; 899 u32 id; 900}; 901 902struct bpf_struct_ops_value; 903struct btf_type; 904struct btf_member; 905 906#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 907struct bpf_struct_ops { 908 const struct bpf_verifier_ops *verifier_ops; 909 int (*init)(struct btf *btf); 910 int (*check_member)(const struct btf_type *t, const struct btf_member *member); 911 int (*init_member)(const struct btf_type *t, const struct btf_member *member, void *kdata, const void *udata); 912 int (*reg)(void *kdata); 913 void (*unreg)(void *kdata); 914 const struct btf_type *type; 915 const struct btf_type *value_type; 916 const char *name; 917 struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; 918 u32 type_id; 919 u32 value_id; 920}; 921 922#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 923#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 924const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 925void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); 926bool bpf_struct_ops_get(const void *kdata); 927void bpf_struct_ops_put(const void *kdata); 928int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value); 929static inline bool bpf_try_module_get(const void *data, struct module *owner) 930{ 931 if (owner == BPF_MODULE_OWNER) { 932 return bpf_struct_ops_get(data); 933 } else { 934 return try_module_get(owner); 935 } 936} 937static inline void bpf_module_put(const void *data, struct module *owner) 938{ 939 if (owner == BPF_MODULE_OWNER) { 940 bpf_struct_ops_put(data); 941 } else { 942 module_put(owner); 943 } 944} 945#else 946static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) 947{ 948 return NULL; 949} 950static inline void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) 951{ 952} 953static inline bool bpf_try_module_get(const void *data, struct module *owner) 954{ 955 return try_module_get(owner); 956} 957static inline void bpf_module_put(const void *data, struct module *owner) 958{ 959 module_put(owner); 960} 961static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value) 962{ 963 return -EINVAL; 964} 965#endif 966 967struct bpf_array { 968 struct bpf_map map; 969 u32 elem_size; 970 u32 index_mask; 971 struct bpf_array_aux *aux; 972 union { 973 char value[0] __aligned(8); 974 void *ptrs[0] __aligned(8); 975 void __percpu *pptrs[0] __aligned(8); 976 }; 977}; 978 979#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 
1M insns */ 980#define MAX_TAIL_CALL_CNT 32 981 982#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | BPF_F_RDONLY_PROG | BPF_F_WRONLY | BPF_F_WRONLY_PROG) 983 984#define BPF_MAP_CAN_READ BIT(0) 985#define BPF_MAP_CAN_WRITE BIT(1) 986 987static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) 988{ 989 u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 990 991 /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is 992 * not possible. 993 */ 994 if (access_flags & BPF_F_RDONLY_PROG) { 995 return BPF_MAP_CAN_READ; 996 } else if (access_flags & BPF_F_WRONLY_PROG) { 997 return BPF_MAP_CAN_WRITE; 998 } else { 999 return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; 1000 } 1001} 1002 1003static inline bool bpf_map_flags_access_ok(u32 access_flags) 1004{ 1005 return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); 1006} 1007 1008struct bpf_event_entry { 1009 struct perf_event *event; 1010 struct file *perf_file; 1011 struct file *map_file; 1012 struct rcu_head rcu; 1013}; 1014 1015bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); 1016int bpf_prog_calc_tag(struct bpf_prog *fp); 1017const char *kernel_type_name(u32 btf_type_id); 1018 1019const struct bpf_func_proto *bpf_get_trace_printk_proto(void); 1020 1021typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); 1022typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, const struct bpf_insn *src, struct bpf_insn *dst, 1023 struct bpf_prog *prog, u32 *target_size); 1024 1025u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, 1026 bpf_ctx_copy_t ctx_copy); 1027 1028/* an array of programs to be executed under rcu_lock. 1029 * 1030 * Typical usage: 1031 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); 1032 * 1033 * the structure returned by bpf_prog_array_alloc() should be populated 1034 * with program pointers and the last pointer must be NULL. 1035 * The user has to keep refcnt on the program and make sure the program 1036 * is removed from the array before bpf_prog_put(). 1037 * The 'struct bpf_prog_array *' should only be replaced with xchg() 1038 * since other cpus are walking the array of pointers in parallel. 
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, __u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {
};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	struct bpf_prog_array_item *prog_item;
};

#define I_BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
	({ \
		struct bpf_prog_array_item *_item; \
		struct bpf_prog *_prog; \
		struct bpf_prog_array *_array; \
		struct bpf_run_ctx *old_run_ctx; \
		struct bpf_cg_run_ctx run_ctx; \
		u32 _ret = 1; \
		migrate_disable(); \
		rcu_read_lock(); \
		_array = rcu_dereference(array); \
		if (unlikely((check_non_null) && !_array)) \
			goto _out; \
		_item = &_array->items[0]; \
		old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); \
		while ((_prog = READ_ONCE(_item->prog))) { \
			run_ctx.prog_item = _item; \
			_ret &= func(_prog, ctx); \
			_item++; \
		} \
		bpf_reset_run_ctx(old_run_ctx); \
_out: \
		rcu_read_unlock(); \
		migrate_enable(); \
		_ret; \
	})

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet and 1 to keep it). This macro changes the behavior so the low
 * order bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit.
This could be 1105 * used by TCP to call tcp_enter_cwr() 1106 * 1107 * Hence, new allowed return values of CGROUP EGRESS BPF programs are: 1108 * 0: drop packet 1109 * 1: keep packet 1110 * 2: drop packet and cn 1111 * 3: keep packet and cn 1112 * 1113 * This macro then converts it to one of the NET_XMIT or an error 1114 * code that is then interpreted as drop packet (and no cn): 1115 * 0: NET_XMIT_SUCCESS skb should be transmitted 1116 * 1: NET_XMIT_DROP skb should be dropped and cn 1117 * 2: NET_XMIT_CN skb should be transmitted and cn 1118 * 3: -EPERM skb should be dropped 1119 */ 1120#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ 1121 ( { \ 1122 struct bpf_prog_array_item *_item; \ 1123 struct bpf_prog *_prog; \ 1124 struct bpf_prog_array *_array; \ 1125 struct bpf_run_ctx *old_run_ctx; \ 1126 struct bpf_cg_run_ctx run_ctx; \ 1127 u32 ret; \ 1128 u32 _ret = 1; \ 1129 u32 _cn = 0; \ 1130 migrate_disable(); \ 1131 rcu_read_lock(); \ 1132 _array = rcu_dereference(array); \ 1133 _item = &_array->items[0]; \ 1134 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); \ 1135 while ((_prog = READ_ONCE(_item->prog))) { \ 1136 run_ctx.prog_item = _item; \ 1137 ret = func(_prog, ctx); \ 1138 _ret &= (ret & 1); \ 1139 _cn |= (ret & 2); \ 1140 _item++; \ 1141 } \ 1142 bpf_reset_run_ctx(old_run_ctx); \ 1143 rcu_read_unlock(); \ 1144 migrate_enable(); \ 1145 if (_ret) \ 1146 _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ 1147 else \ 1148 _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ 1149 _ret; \ 1150 }) 1151 1152#define BPF_PROG_RUN_ARRAY(array, ctx, func) I_BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) 1153 1154#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) I_BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) 1155 1156#ifdef CONFIG_BPF_SYSCALL 1157DECLARE_PER_CPU(int, bpf_prog_active); 1158extern struct mutex bpf_stats_enabled_mutex; 1159 1160/* 1161 * Block execution of BPF programs attached to instrumentation (perf, 1162 * kprobes, tracepoints) to prevent deadlocks on map operations as any of 1163 * these events can happen inside a region which holds a map bucket lock 1164 * and can deadlock on it. 1165 * 1166 * Use the preemption safe inc/dec variants on RT because migrate disable 1167 * is preemptible on RT and preemption in the middle of the RMW operation 1168 * might lead to inconsistent state. Use the raw variants for non RT 1169 * kernels as migrate_disable() maps to preempt_disable() so the slightly 1170 * more expensive save operation can be avoided. 
1171 */ 1172static inline void bpf_disable_instrumentation(void) 1173{ 1174 migrate_disable(); 1175 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1176 this_cpu_inc(bpf_prog_active); 1177 } else { 1178 __this_cpu_inc(bpf_prog_active); 1179 } 1180} 1181 1182static inline void bpf_enable_instrumentation(void) 1183{ 1184 if (IS_ENABLED(CONFIG_PREEMPT_RT)) { 1185 this_cpu_dec(bpf_prog_active); 1186 } else { 1187 __this_cpu_dec(bpf_prog_active); 1188 } 1189 migrate_enable(); 1190} 1191 1192static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) 1193{ 1194 struct bpf_run_ctx *old_ctx; 1195 1196 old_ctx = current->bpf_ctx; 1197 current->bpf_ctx = new_ctx; 1198 return old_ctx; 1199} 1200 1201static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) 1202{ 1203 current->bpf_ctx = old_ctx; 1204} 1205 1206extern const struct file_operations bpf_map_fops; 1207extern const struct file_operations bpf_prog_fops; 1208extern const struct file_operations bpf_iter_fops; 1209 1210#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ 1211 extern const struct bpf_prog_ops _name##_prog_ops; \ 1212 extern const struct bpf_verifier_ops _name##_verifier_ops; 1213#define BPF_MAP_TYPE(_id, _ops) extern const struct bpf_map_ops _ops; 1214#define BPF_LINK_TYPE(_id, _name) 1215#include <linux/bpf_types.h> 1216#undef BPF_PROG_TYPE 1217#undef BPF_MAP_TYPE 1218#undef BPF_LINK_TYPE 1219 1220extern const struct bpf_prog_ops bpf_offload_prog_ops; 1221extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; 1222extern const struct bpf_verifier_ops xdp_analyzer_ops; 1223 1224struct bpf_prog *bpf_prog_get(u32 ufd); 1225struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, bool attach_drv); 1226void bpf_prog_add(struct bpf_prog *prog, int i); 1227void bpf_prog_sub(struct bpf_prog *prog, int i); 1228void bpf_prog_inc(struct bpf_prog *prog); 1229struct bpf_prog *__must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1230void bpf_prog_put(struct bpf_prog *prog); 1231int __bpf_prog_charge(struct user_struct *user, u32 pages); 1232void __bpf_prog_uncharge(struct user_struct *user, u32 pages); 1233 1234void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1235void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1236 1237struct bpf_map *bpf_map_get(u32 ufd); 1238struct bpf_map *bpf_map_get_with_uref(u32 ufd); 1239struct bpf_map *__bpf_map_get(struct fd f); 1240void bpf_map_inc(struct bpf_map *map); 1241void bpf_map_inc_with_uref(struct bpf_map *map); 1242struct bpf_map *__must_check bpf_map_inc_not_zero(struct bpf_map *map); 1243void bpf_map_put_with_uref(struct bpf_map *map); 1244void bpf_map_put(struct bpf_map *map); 1245int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); 1246void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); 1247int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); 1248void bpf_map_charge_finish(struct bpf_map_memory *mem); 1249void bpf_map_charge_move(struct bpf_map_memory *dst, struct bpf_map_memory *src); 1250void *bpf_map_area_alloc(u64 size, int numa_node); 1251void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); 1252void bpf_map_area_free(void *base); 1253void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 1254int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); 1255int generic_map_update_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); 1256int generic_map_delete_batch(struct bpf_map 
*map, const union bpf_attr *attr, union bpf_attr __user *uattr); 1257struct bpf_map *bpf_map_get_curr_or_next(u32 *id); 1258struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); 1259 1260extern int sysctl_unprivileged_bpf_disabled; 1261 1262static inline bool bpf_allow_ptr_leaks(void) 1263{ 1264 return perfmon_capable(); 1265} 1266 1267static inline bool bpf_allow_uninit_stack(void) 1268{ 1269 return perfmon_capable(); 1270} 1271 1272static inline bool bpf_allow_ptr_to_map_access(void) 1273{ 1274 return perfmon_capable(); 1275} 1276 1277static inline bool bpf_bypass_spec_v1(void) 1278{ 1279 return perfmon_capable(); 1280} 1281 1282static inline bool bpf_bypass_spec_v4(void) 1283{ 1284 return perfmon_capable(); 1285} 1286 1287int bpf_map_new_fd(struct bpf_map *map, int flags); 1288int bpf_prog_new_fd(struct bpf_prog *prog); 1289 1290void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, 1291 struct bpf_prog *prog); 1292int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); 1293int bpf_link_settle(struct bpf_link_primer *primer); 1294void bpf_link_cleanup(struct bpf_link_primer *primer); 1295void bpf_link_inc(struct bpf_link *link); 1296void bpf_link_put(struct bpf_link *link); 1297int bpf_link_new_fd(struct bpf_link *link); 1298struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd); 1299struct bpf_link *bpf_link_get_from_fd(u32 ufd); 1300 1301int bpf_obj_pin_user(u32 ufd, const char __user *pathname); 1302int bpf_obj_get_user(const char __user *pathname, int flags); 1303 1304#define BPF_ITER_FUNC_PREFIX "bpf_iter_" 1305#define DEFINE_BPF_ITER_FUNC(target, args...) \ 1306 extern int bpf_iter_##target(args); \ 1307 int __init bpf_iter_##target(args) \ 1308 { \ 1309 return 0; \ 1310 } 1311 1312struct bpf_iter_aux_info { 1313 struct bpf_map *map; 1314}; 1315 1316typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, union bpf_iter_link_info *linfo, 1317 struct bpf_iter_aux_info *aux); 1318typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); 1319typedef void (*bpf_iter_show_fdinfo_t)(const struct bpf_iter_aux_info *aux, struct seq_file *seq); 1320typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); 1321 1322#define BPF_ITER_CTX_ARG_MAX 2 1323struct bpf_iter_reg { 1324 const char *target; 1325 bpf_iter_attach_target_t attach_target; 1326 bpf_iter_detach_target_t detach_target; 1327 bpf_iter_show_fdinfo_t show_fdinfo; 1328 bpf_iter_fill_link_info_t fill_link_info; 1329 u32 ctx_arg_info_size; 1330 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; 1331 const struct bpf_iter_seq_info *seq_info; 1332}; 1333 1334struct bpf_iter_meta { 1335 __bpf_md_ptr(struct seq_file *, seq); 1336 u64 session_id; 1337 u64 seq_num; 1338}; 1339 1340struct bpf_iter__bpf_map_elem { 1341 __bpf_md_ptr(struct bpf_iter_meta *, meta); 1342 __bpf_md_ptr(struct bpf_map *, map); 1343 __bpf_md_ptr(void *, key); 1344 __bpf_md_ptr(void *, value); 1345}; 1346 1347int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); 1348void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); 1349bool bpf_iter_prog_supported(struct bpf_prog *prog); 1350int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 1351int bpf_iter_new_fd(struct bpf_link *link); 1352bool bpf_link_is_iter(struct bpf_link *link); 1353struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); 1354int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); 
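/* Illustrative sketch of how an iterator target registers itself. All names
 * with a "foo" prefix are hypothetical and not part of this header; real
 * targets live in kernel/bpf/ and net/ (e.g. task_iter.c). The target's
 * seq_file callbacks walk the kernel objects and call bpf_iter_run_prog()
 * for each element:
 *
 *	DEFINE_BPF_ITER_FUNC(foo, struct bpf_iter_meta *meta, struct foo *foo)
 *
 *	static const struct bpf_iter_seq_info foo_seq_info = {
 *		.seq_ops		= &foo_seq_ops,
 *		.init_seq_private	= foo_init_seq_priv,
 *		.fini_seq_private	= foo_fini_seq_priv,
 *		.seq_priv_size		= sizeof(struct foo_iter_seq_priv),
 *	};
 *
 *	static struct bpf_iter_reg foo_reg_info = {
 *		.target			= "foo",
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__foo, foo), PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &foo_seq_info,
 *	};
 *
 *	static int __init foo_iter_init(void)
 *	{
 *		return bpf_iter_reg_target(&foo_reg_info);
 *	}
 */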
1355void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq); 1356int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); 1357 1358int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); 1359int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); 1360int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, u64 flags); 1361int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags); 1362 1363int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); 1364 1365int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); 1366int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1367int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); 1368int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 1369 1370int bpf_get_file_flag(int flags); 1371int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, size_t actual_size); 1372 1373/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and 1374 * forced to use 'long' read/writes to try to atomically copy long counters. 1375 * Best-effort only. No barriers here, since it _will_ race with concurrent 1376 * updates from BPF programs. Called from bpf syscall and mostly used with 1377 * size 8 or 16 bytes, so ask compiler to inline it. 1378 */ 1379static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) 1380{ 1381 const long *lsrc = src; 1382 long *ldst = dst; 1383 1384 size /= sizeof(long); 1385 while (size--) { 1386 *ldst++ = *lsrc++; 1387 } 1388} 1389 1390/* verify correctness of eBPF program */ 1391int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, union bpf_attr __user *uattr); 1392 1393#ifndef CONFIG_BPF_JIT_ALWAYS_ON 1394void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); 1395#endif 1396 1397struct btf *bpf_get_btf_vmlinux(void); 1398 1399/* Map specifics */ 1400struct xdp_buff; 1401struct sk_buff; 1402 1403struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); 1404struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); 1405void __dev_flush(void); 1406int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, struct net_device *dev_rx); 1407int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); 1408int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); 1409bool dev_map_can_have_prog(struct bpf_map *map); 1410 1411struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); 1412void __cpu_map_flush(void); 1413int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); 1414bool cpu_map_prog_allowed(struct bpf_map *map); 1415 1416/* Return map's numa specified by userspace */ 1417static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) 1418{ 1419 return (attr->map_flags & BPF_F_NUMA_NODE) ? 
attr->numa_node : NUMA_NO_NODE; 1420} 1421 1422struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); 1423int array_map_alloc_check(union bpf_attr *attr); 1424 1425int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); 1426int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); 1427int bpf_prog_test_run_tracing(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); 1428int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); 1429int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); 1430bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, 1431 struct bpf_insn_access_aux *info); 1432int btf_struct_access(struct bpf_verifier_log *log, const struct btf_type *t, int off, int size, 1433 enum bpf_access_type atype, u32 *next_btf_id); 1434bool btf_struct_ids_match(struct bpf_verifier_log *log, int off, u32 id, u32 need_type_id); 1435 1436int btf_distill_func_proto(struct bpf_verifier_log *log, struct btf *btf, const struct btf_type *func_proto, 1437 const char *func_name, struct btf_func_model *m); 1438 1439struct bpf_reg_state; 1440int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *regs); 1441int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *reg); 1442int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, struct btf *btf, 1443 const struct btf_type *t); 1444 1445struct bpf_prog *bpf_prog_by_id(u32 id); 1446struct bpf_link *bpf_link_by_id(u32 id); 1447 1448const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); 1449 1450static inline bool unprivileged_ebpf_enabled(void) 1451{ 1452 return !sysctl_unprivileged_bpf_disabled; 1453} 1454 1455#else /* !CONFIG_BPF_SYSCALL */ 1456static inline struct bpf_prog *bpf_prog_get(u32 ufd) 1457{ 1458 return ERR_PTR(-EOPNOTSUPP); 1459} 1460 1461static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, bool attach_drv) 1462{ 1463 return ERR_PTR(-EOPNOTSUPP); 1464} 1465 1466static inline void bpf_prog_add(struct bpf_prog *prog, int i) 1467{ 1468} 1469 1470static inline void bpf_prog_sub(struct bpf_prog *prog, int i) 1471{ 1472} 1473 1474static inline void bpf_prog_put(struct bpf_prog *prog) 1475{ 1476} 1477 1478static inline void bpf_prog_inc(struct bpf_prog *prog) 1479{ 1480} 1481 1482static inline struct bpf_prog *__must_check bpf_prog_inc_not_zero(struct bpf_prog *prog) 1483{ 1484 return ERR_PTR(-EOPNOTSUPP); 1485} 1486 1487static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) 1488{ 1489 return 0; 1490} 1491 1492static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) 1493{ 1494} 1495 1496static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, 1497 struct bpf_prog *prog) 1498{ 1499} 1500 1501static inline int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) 1502{ 1503 return -EOPNOTSUPP; 1504} 1505 1506static inline int bpf_link_settle(struct bpf_link_primer *primer) 1507{ 1508 return -EOPNOTSUPP; 1509} 1510 1511static inline void bpf_link_cleanup(struct bpf_link_primer *primer) 1512{ 1513} 1514 1515static inline void bpf_link_inc(struct bpf_link *link) 1516{ 1517} 1518 
static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}
static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp, struct net_device *dev_rx)
{
	return 0;
}

static inline int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx)
{
	return 0;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline bool unprivileged_ebpf_enabled(void)
{
	return false;
}

#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux, struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

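/*
 * Hardware offload: bpf_prog_offload_*() and bpf_map_offload_*() back
 * programs and maps that were created device-bound (i.e. with an ifindex),
 * and the bpf_offload_dev_*() calls are the driver-facing registration
 * interface built around struct bpf_prog_offload_ops.  Illustrative
 * driver-side sketch only; "my_bpf_ops", "priv" and "netdev" are made-up
 * names:
 *
 *	offdev = bpf_offload_dev_create(&my_bpf_ops, priv);
 *	if (!IS_ERR(offdev))
 *		err = bpf_offload_dev_netdev_register(offdev, netdev);
 */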
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info, struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map, void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

void unpriv_ebpf_notify(int new_state);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_STREAM_PARSER */

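/*
 * sk_reuseport: bpf_sk_reuseport_detach() is called when a socket leaves a
 * reuseport group, and the bpf_fd_reuseport_array_*() helpers provide the
 * syscall-side lookup/update for BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, where
 * userspace updates an element with a socket fd as the value.
 */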
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);

const struct bpf_func_proto *tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);

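/*
 * The *_proto objects above are handed to the verifier by each program
 * type's get_func_proto() callback.  Illustrative sketch only; the name
 * "example_func_proto" is made up, and real implementations live in the
 * per-program-type code (e.g. net/core/filter.c, kernel/trace/bpf_trace.c):
 *
 *	static const struct bpf_func_proto *
 *	example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_get_prandom_u32:
 *			return &bpf_get_prandom_u32_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */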
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf,
				struct bpf_prog *prog, u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si,
					      struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf,
				    struct bpf_prog *prog, u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf,
				    struct bpf_prog *prog, u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si,
						  struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si,
						  struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

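/*
 * bpf_arch_text_poke() live-patches a single call or jump site: @t selects
 * call vs. jump, @addr1 is the expected old target (NULL if the site is
 * currently a nop) and @addr2 is the new target (NULL to turn the site back
 * into a nop).  BPF trampoline attachment and direct tail-call patching are
 * built on top of it.  Illustrative call only ("ip" and "new_tramp" are
 * placeholders):
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_tramp);
 */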
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *addr1, void *addr2);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#endif /* _LINUX_BPF_H */