Lines matching definitions/uses of identifier `element` (cross-reference listing; numbers are line numbers in the original source file)

24 static void poison_error(mempool_t *pool, void *element, size_t size,
32 pr_err("BUG: mempool element poison mismatch\n");
34 pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
36 pr_cont("%x ", *(u8 *)(element + i));
41 static void __check_element(mempool_t *pool, void *element, size_t size)
43 u8 *obj = element;
50 poison_error(pool, element, size, i);
57 static void check_element(mempool_t *pool, void *element)
61 __check_element(pool, element, (size_t)pool->pool_data);
63 __check_element(pool, element, kmem_cache_size(pool->pool_data));
67 void *addr = kmap_atomic((struct page *)element);
74 static void __poison_element(void *element, size_t size)
76 u8 *obj = element;
82 static void poison_element(mempool_t *pool, void *element)
86 __poison_element(element, (size_t)pool->pool_data);
88 __poison_element(element, kmem_cache_size(pool->pool_data));
92 void *addr = kmap_atomic((struct page *)element);
99 static inline void check_element(mempool_t *pool, void *element)
102 static inline void poison_element(mempool_t *pool, void *element)
107 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
110 kasan_slab_free_mempool(element);
112 kasan_poison_pages(element, (unsigned long)pool->pool_data,
116 static void kasan_unpoison_element(mempool_t *pool, void *element)
119 kasan_unpoison_range(element, (size_t)pool->pool_data);
121 kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
123 kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
127 static __always_inline void add_element(mempool_t *pool, void *element)
130 poison_element(pool, element);
131 kasan_poison_element(pool, element);
132 pool->elements[pool->curr_nr++] = element;
137 void *element = pool->elements[--pool->curr_nr];
140 kasan_unpoison_element(pool, element);
141 check_element(pool, element);
142 return element;
159 void *element = remove_element(pool);
160 pool->free(element, pool->pool_data);
205 void *element;
207 element = pool->alloc(gfp_mask, pool->pool_data);
208 if (unlikely(!element)) {
212 add_element(pool, element);
224 * @alloc_fn: user-defined element-allocation function.
225 * @free_fn: user-defined element-freeing function.
246 * @alloc_fn: user-defined element-allocation function.
247 * @free_fn: user-defined element-freeing function.
306 void *element;
316 element = remove_element(pool);
318 pool->free(element, pool->pool_data);
347 element = pool->alloc(GFP_KERNEL, pool->pool_data);
348 if (!element)
352 add_element(pool, element);
355 pool->free(element, pool->pool_data); /* Raced */
367 * mempool_alloc - allocate an element from a specific memory pool
378 * Return: pointer to the allocated element or %NULL on error.
382 void *element;
398 element = pool->alloc(gfp_temp, pool->pool_data);
399 if (likely(element != NULL))
400 return element;
404 element = remove_element(pool);
412 kmemleak_update_trace(element);
413 return element;
432 /* Let's wait for someone else to return an element to @pool */
450 * mempool_free - return an element to the pool.
451 * @element: pool element pointer.
457 void mempool_free(void *element, mempool_t *pool)
461 if (unlikely(element == NULL))
466 * for @element and the following @pool->curr_nr. This ensures
468 * allocation of @element. This is necessary for fringe cases
469 * where @element was passed to this task without going through
487 * allocation of @element, any task which decremented curr_nr below
490 * to min_nr after the allocation of @element, the elements
500 add_element(pool, element);
507 pool->free(element, pool->pool_data);
522 void mempool_free_slab(void *element, void *pool_data)
525 kmem_cache_free(mem, element);
540 void mempool_kfree(void *element, void *pool_data)
542 kfree(element);
557 void mempool_free_pages(void *element, void *pool_data)
560 __free_pages(element, order);