Lines Matching defs:map

32 * Different map implementations will rely on rcu in map methods
37 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
41 return (unsigned long) map->ops->map_lookup_elem(map, key);
53 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
58 return map->ops->map_update_elem(map, key, value, flags);
72 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
76 return map->ops->map_delete_elem(map, key);
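The three helpers above are the generic element accessors; each just forwards to the map type's ops table. A minimal sketch of how they are reached from a BPF program, assuming a libbpf-style build (the map name, key choice, and tracepoint hook are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);
	__type(value, __u64);
} counts SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int count_writes(void *ctx)
{
	__u32 key = bpf_get_current_pid_tgid() >> 32;
	__u64 init = 1, *val;

	val = bpf_map_lookup_elem(&counts, &key);	/* dispatches to map_lookup_elem */
	if (val)
		__sync_fetch_and_add(val, 1);
	else
		bpf_map_update_elem(&counts, &key, &init, BPF_NOEXIST);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The NULL check after the lookup is mandatory: the verifier rejects any dereference of the returned pointer without it.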
88 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
90 return map->ops->map_push_elem(map, value, flags);
103 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
105 return map->ops->map_pop_elem(map, value);
116 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
118 return map->ops->map_peek_elem(map, value);
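push/pop/peek are only wired up for the key-less queue and stack map types. A sketch against BPF_MAP_TYPE_QUEUE (names illustrative; recent libbpf accepts BTF map definitions without a key for these types):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 256);
	__type(value, __u64);		/* queue/stack maps have no key */
} fifo SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_read")
int queue_demo(void *ctx)
{
	__u64 now = bpf_ktime_get_ns();
	__u64 out;

	/* fails when full unless BPF_EXIST is passed to drop the oldest entry */
	bpf_map_push_elem(&fifo, &now, BPF_ANY);
	if (!bpf_map_peek_elem(&fifo, &out))
		bpf_printk("oldest (still queued): %llu", out);
	bpf_map_pop_elem(&fifo, &out);	/* removes the oldest entry */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";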
129 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
132 return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
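bpf_map_lookup_percpu_elem differs from a plain lookup on a per-CPU map in that it can address another CPU's slot. A sketch that sums one counter across CPUs (the loop bound of 64 is an assumed upper limit; the verifier needs a constant):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} percpu_hits SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_close")
int sum_all_cpus(void *ctx)
{
	__u32 key = 0, cpu;
	__u64 sum = 0;

	for (cpu = 0; cpu < 64; cpu++) {
		__u64 *v = bpf_map_lookup_percpu_elem(&percpu_hits, &key, cpu);

		if (!v)
			break;	/* NULL once cpu reaches nr_cpu_ids */
		sum += *v;
	}
	bpf_printk("total: %llu", sum);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";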
373 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
379 lock = src + map->record->spin_lock_off;
381 lock = dst + map->record->spin_lock_off;
384 copy_map_value(map, dst, src);
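copy_map_value_locked is the path user space hits with BPF_F_LOCK: the element's embedded bpf_spin_lock is taken around the copy so the whole value is read or written atomically. Program side, the same lock is taken explicitly. A sketch with illustrative names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct locked_val {
	struct bpf_spin_lock lock;
	__u64 counter;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct locked_val);
} vals SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_openat")
int bump(void *ctx)
{
	__u32 key = 0;
	struct locked_val *v = bpf_map_lookup_elem(&vals, &key);

	if (!v)
		return 0;
	bpf_spin_lock(&v->lock);
	v->counter++;		/* consistent with BPF_F_LOCK copies */
	bpf_spin_unlock(&v->lock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

User space pairs this with bpf_map_lookup_elem_flags(fd, &key, &out, BPF_F_LOCK) to obtain a consistent snapshot.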
634 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
640 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
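bpf_event_output_data is the kernel body behind the perf-event output helper for program types that emit raw data; from the program it is invoked as bpf_perf_event_output against a BPF_MAP_TYPE_PERF_EVENT_ARRAY. A sketch (event layout illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event {
	__u32 pid;
	__u32 uid;
};

SEC("tracepoint/syscalls/sys_enter_unlinkat")
int emit_event(void *ctx)
{
	struct event e = {
		.pid = bpf_get_current_pid_tgid() >> 32,
		.uid = (__u32)bpf_get_current_uid_gid(),
	};

	/* BPF_F_CURRENT_CPU selects the executing CPU's ring buffer */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";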
1081 /* BPF map elements can contain 'struct bpf_timer'.
1082 * Such map owns all of its BPF timers.
1083 * 'struct bpf_timer' is allocated as part of map element allocation
1090 * If user space reference to a map goes to zero at this point
1095 * freeing the timers when inner map is replaced or deleted by user space.
1099 struct bpf_map *map;
1121 struct bpf_map *map = t->map;
1139 if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1140 struct bpf_array *array = container_of(map, struct bpf_array, map);
1146 key = value - round_up(map->key_size, 8);
1149 callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1157 BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map,
1184 t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
1189 t->value = (void *)timer - map->record->timer_off;
1190 t->map = map;
1196 /* Guarantee the order between timer->timer and map->usercnt. So
1202 if (!atomic64_read(&map->usercnt)) {
1239 if (!atomic64_read(&t->map->usercnt)) {
1366 * by ops->map_release_uref when the user space reference to a map reaches zero.
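The fragments above cover the timer machinery: a 'struct bpf_timer' embedded in a map value is initialized against its owning map, the callback is dispatched with (map, key, value) as shown at the callback_fn call, and arming fails once map->usercnt has dropped to zero. Program side the flow is init, set_callback, start; a sketch following the selftests' conventions (hook and names illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define CLOCK_MONOTONIC 1	/* uapi value; assumed here rather than pulled from time headers */

struct elem {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, __u32 *key, struct elem *val)
{
	/* invoked via the callback_fn dispatch quoted above */
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int arm_timer(void *ctx)
{
	__u32 key = 0;
	struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

	if (!e)
		return 0;
	bpf_timer_init(&e->timer, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&e->timer, timer_cb);
	bpf_timer_start(&e->timer, 1000000000ULL /* 1s */, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";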
1836 * section, and end up doing map ops that call bpf_list_head_free for
1837 * the same map value again.
1970 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2016 /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
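These init/teardown fragments back the BPF-managed linked list: bpf_list_head_free walks and frees whatever a program left on the list, and the 0-initialization check lets the kernel skip fields the map already zeroed. Program side, nodes come from bpf_obj_new and are pushed under the paired spin lock; a sketch following the selftests' bpf_experimental.h conventions (names illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"	/* from tools/testing/selftests/bpf */

struct node_data {
	long data;
	struct bpf_list_node node;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(node_data, node);

SEC("tc")
int push_node(void *ctx)
{
	struct node_data *n = bpf_obj_new(typeof(*n));

	if (!n)
		return 0;
	bpf_spin_lock(&glock);
	bpf_list_push_back(&ghead, &n->node);	/* the list now owns the node */
	bpf_spin_unlock(&glock);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";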
2119 * kfunc which is not stored in a map as a kptr, must be released by calling
2142 * this kfunc which is not stored in a map as a kptr, must be released by
2166 * map, must be released by calling bpf_cgroup_release().
2186 * kfunc which is not subsequently stored in a map, must be released by calling
2225 * stored in a map, or released with bpf_task_release().
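The comments above all state one contract: a pointer acquired from these kfuncs is an owned reference, and any copy not stored into a map as a kptr must be released with the matching release kfunc. A sketch for the task variant, with the kfunc declarations spelled as in the selftests:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(on_new_task, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;
	/* ... inspect the task through the owned reference ... */
	bpf_task_release(acquired);	/* mandatory unless stored as a kptr */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";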