Lines Matching defs:storage (Linux kernel BPF cgroup local storage, kernel/bpf/local_storage.c; the number prefixed to each line is the source line in that file)
76 struct bpf_cgroup_storage *storage;
78 storage = container_of(node, struct bpf_cgroup_storage, node);
80 switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
90 return storage;
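
Source lines 76-90 are fragments of cgroup_storage_lookup(), which descends the map's red-black tree and three-way-compares the requested key against each node's key. A minimal sketch of how those fragments likely fit together is shown below (kernel context; <linux/rbtree.h>, <linux/spinlock.h> and <linux/bpf-cgroup.h> assumed included here and in the later sketches). The `locked` parameter, the `map->lock` spinlock, and the -1/0/1 return convention of bpf_cgroup_storage_key_cmp() are assumptions inferred from the listing, not guaranteed to match this exact source revision.

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/bpf-cgroup.h>

/* Sketch: rbtree descent keyed by bpf_cgroup_storage_key. */
static struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, void *key,
		      bool locked)
{
	struct rb_root *root = &map->root;	/* assumed field name */
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);	/* assumed lock */

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
		case -1:			/* key sorts left of this node */
			node = node->rb_left;
			break;
		case 1:				/* key sorts right of this node */
			node = node->rb_right;
			break;
		default:			/* exact match */
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}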
101 struct bpf_cgroup_storage *storage)
112 switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
124 rb_link_node(&storage->node, parent, new);
125 rb_insert_color(&storage->node, root);
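
Lines 101-125 belong to the matching insert path: the same comparator steers the descent, a duplicate key is rejected, and the new node is spliced in with the standard rb_link_node()/rb_insert_color() pattern. A hedged reconstruction, with the error code and field names assumed:

/* Sketch: insert a storage element, keeping the tree keyed and balanced. */
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(map, &storage->key,
						   &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;		/* key already present */
		}
	}

	/* Attach the new node at the found leaf position, then rebalance. */
	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}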
133 struct bpf_cgroup_storage *storage;
135 storage = cgroup_storage_lookup(map, key, false);
136 if (!storage)
139 return &READ_ONCE(storage->buf)->data[0];
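
Lines 133-139 are the shared-storage lookup_elem callback: it reuses cgroup_storage_lookup() and hands back a pointer into the RCU-managed buffer. A short sketch; the map_to_storage() conversion helper (container_of() from bpf_map to bpf_cgroup_storage_map) is an assumption carried through the later sketches as well:

/* Sketch: map_ops->map_lookup_elem for shared cgroup storage. */
static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	/* buf may be swapped by a concurrent update; READ_ONCE pins one snapshot. */
	return &READ_ONCE(storage->buf)->data[0];
}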
145 struct bpf_cgroup_storage *storage;
155 storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
157 if (!storage)
161 copy_map_value_locked(map, storage->buf->data, value, false);
175 new = xchg(&storage->buf, new);
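
Lines 145-175 come from the update path. With BPF_F_LOCK the value is copied in place under the in-value spin lock via copy_map_value_locked(); otherwise a fresh buffer is filled and published atomically with xchg(), and the old buffer is released only after an RCU grace period. A sketch under those assumptions; the flag checks, kfree_rcu() on the old buffer, and check_and_init_map_lock() call are inferred rather than quoted:

/* Sketch: map_ops->map_update_elem for shared cgroup storage. */
static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		/* In-place copy, serialized by the bpf_spin_lock in the value. */
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = kmalloc_node(sizeof(struct bpf_storage_buffer) + map->value_size,
			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
			   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_lock(map, new->data);

	/* Publish the new buffer; readers still holding the old one are
	 * protected by RCU, so free it only after a grace period. */
	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}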
185 struct bpf_cgroup_storage *storage;
190 storage = cgroup_storage_lookup(map, key, false);
191 if (!storage) {
203 per_cpu_ptr(storage->percpu_buf, cpu), size);
214 struct bpf_cgroup_storage *storage;
222 storage = cgroup_storage_lookup(map, key, false);
223 if (!storage) {
236 bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
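
Lines 185-236 are from the per-CPU copy and update helpers. Copy-out flattens every CPU's slot into one caller buffer, advancing by the 8-byte-rounded value size; the update helper (lines 214-236) is the mirror image, writing the same flattened buffer back into each CPU's slot. A sketch of the copy side, with the RCU read lock and the round_up() treated as assumptions:

int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* Per-CPU areas are zero-filled, so copying the rounded size
	 * does not leak kernel data. */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

The update side swaps the two bpf_long_memcpy() arguments so each CPU's slot receives the caller's value, as line 236 shows.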
248 struct bpf_cgroup_storage *storage;
256 storage = cgroup_storage_lookup(map, key, true);
257 if (!storage)
260 storage = list_next_entry(storage, list_map);
261 if (!storage)
264 storage = list_first_entry(&map->list,
272 *next = storage->key;
275 *next = storage->key.cgroup_inode_id;
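
Lines 248-275 are from the get_next_key callback, which iterates elements through the per-map list rather than the rbtree: given a key, look it up and advance with list_next_entry(); with no key, start from list_first_entry(). The returned key is either the full (cgroup id, attach type) pair or just the cgroup id, depending on the map's key format. A sketch; the attach_type_isolated() helper, the lock, and the explicit end-of-list comparison are assumptions (the listed source checks the pointer returned by list_next_entry() instead):

/* Sketch: map_ops->map_get_next_key, iterating via the per-map list. */
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list_map);
		if (&storage->list_map == &map->list)	/* walked past the tail */
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list_map);
	}

	spin_unlock_bh(&map->lock);

	if (attach_type_isolated(&map->map)) {
		/* keyed by (cgroup id, attach type) */
		struct bpf_cgroup_storage_key *next = _next_key;
		*next = storage->key;
	} else {
		/* keyed by cgroup id only */
		__u64 *next = _next_key;
		*next = storage->key.cgroup_inode_id;
	}
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}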
336 struct bpf_cgroup_storage *storage, *stmp;
340 list_for_each_entry_safe(storage, stmp, storages, list_map) {
341 bpf_cgroup_storage_unlink(storage);
342 bpf_cgroup_storage_free(storage);
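
Lines 336-342 sit in the map teardown path: every storage element still linked into the map is unlinked and freed with the _safe list iterator, since each pass removes the entry it is standing on from the very list being walked. A hedged sketch; the cgroup-level locking, the trailing sanity checks, and the final kfree() are assumptions:

/* Sketch: map_ops->map_free for cgroup storage. */
static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct list_head *storages = &map->list;
	struct bpf_cgroup_storage *storage, *stmp;

	cgroup_lock();	/* assumed: serialize against cgroup attach/detach */

	/* _safe variant: each iteration unlinks the current entry. */
	list_for_each_entry_safe(storage, stmp, storages, list_map) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	cgroup_unlock();

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}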
420 struct bpf_cgroup_storage *storage;
424 storage = cgroup_storage_lookup(map_to_storage(map), key, false);
425 if (!storage) {
435 &READ_ONCE(storage->buf)->data[0], m);
442 per_cpu_ptr(storage->percpu_buf, cpu),
497 struct bpf_cgroup_storage *storage;
512 storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
514 if (!storage)
520 storage->buf = kmalloc_node(size, flags, map->numa_node);
521 if (!storage->buf)
523 check_and_init_map_lock(map, storage->buf->data);
525 storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
526 if (!storage->percpu_buf)
530 storage->map = (struct bpf_cgroup_storage_map *)map;
532 return storage;
536 kfree(storage);
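
Lines 497-536 are from bpf_cgroup_storage_alloc(), which allocates one storage element for a program's map of the given type: a kmalloc'ed shared buffer for BPF_CGROUP_STORAGE_SHARED maps, or a zero-filled per-CPU area otherwise. A trimmed sketch; memlock accounting is omitted, and the bpf_cgroup_storage_calculate_size() helper and GFP flags are assumptions:

/* Sketch: allocate one storage element for prog's map of the given type. */
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	const gfp_t gfp = __GFP_ZERO | GFP_USER;
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
			       gfp, map->numa_node);
	if (!storage)
		goto enomem;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = kmalloc_node(size, gfp, map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_lock(map, storage->buf->data);
	} else {
		/* zeroed per-CPU area, 8-byte aligned */
		storage->percpu_buf = __alloc_percpu_gfp(size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	kfree(storage);		/* kfree(NULL) is a no-op */
	return ERR_PTR(-ENOMEM);
}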
542 struct bpf_cgroup_storage *storage =
545 kfree(storage->buf);
546 kfree(storage);
551 struct bpf_cgroup_storage *storage =
554 free_percpu(storage->percpu_buf);
555 kfree(storage);
558 void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
564 if (!storage)
567 map = &storage->map->map;
574 call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
576 call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
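
Lines 542-576 cover the free path: bpf_cgroup_storage_free() defers the actual release to one of two RCU callbacks, so readers that picked up a pointer under rcu_read_lock() never see it freed underneath them. The shared callback frees the buffer plus the element, the per-CPU callback uses free_percpu(). A sketch; the cgroup_storage_type() helper is assumed and memlock uncharging is omitted:

static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	struct bpf_map *map;

	if (!storage)
		return;

	map = &storage->map->map;

	/* Defer the free until after an RCU grace period. */
	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}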
579 void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
585 if (!storage)
588 storage->key.attach_type = type;
589 storage->key.cgroup_inode_id = cgroup_id(cgroup);
591 map = storage->map;
594 WARN_ON(cgroup_storage_insert(map, storage));
595 list_add(&storage->list_map, &map->list);
596 list_add(&storage->list_cg, &cgroup->bpf.storages);
600 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
605 if (!storage)
608 map = storage->map;
612 rb_erase(&storage->node, root);
614 list_del(&storage->list_map);
615 list_del(&storage->list_cg);
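
Lines 579-615 show the link/unlink pair that ties an element to both its map and its cgroup once the attach point is known: link fills in the key from the attach type and cgroup_id(), inserts the node into the map's rbtree, and adds it to the map's and the cgroup's lists; unlink reverses all three. A sketch, with the lock name assumed:

void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	/* The key is only fully known at attach time. */
	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup_id(cgroup);

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list_map, &map->list);
	list_add(&storage->list_cg, &cgroup->bpf.storages);
	spin_unlock_bh(&map->lock);
}

void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list_map);
	list_del(&storage->list_cg);
	spin_unlock_bh(&map->lock);
}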