Lines matching refs: gfp_mask (mm/mempool.c)
187 gfp_t gfp_mask, int node_id)
197 gfp_mask, node_id);
207 element = pool->alloc(gfp_mask, pool->pool_data);
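Lines 187-207 are from the mempool initialization path (mempool_init_node()), where the caller's gfp_mask is forwarded to pool->alloc() to pre-fill the pool with min_nr elements. A minimal sketch of that pre-fill loop, reconstructed around the fragment at line 207 (add_element() stands in for the internal bookkeeping step; its exact shape is an assumption):

    while (pool->curr_nr < pool->min_nr) {
            void *element;

            element = pool->alloc(gfp_mask, pool->pool_data);
            if (unlikely(!element)) {
                    mempool_exit(pool);         /* undo the partial pre-fill */
                    return -ENOMEM;
            }
            add_element(pool, element);         /* assumed internal helper */
    }
    return 0;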
268 gfp_t gfp_mask, int node_id)
272 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
277 gfp_mask, node_id)) {
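Lines 268-277 show the other half of creation: the same gfp_mask that pre-fills the pool also allocates the mempool_t itself, NUMA-aware, via kzalloc_node(). A hedged sketch of how these fragments fit together:

    mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                                   mempool_free_t *free_fn, void *pool_data,
                                   gfp_t gfp_mask, int node_id)
    {
            mempool_t *pool;

            pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
            if (!pool)
                    return NULL;
            if (mempool_init_node(pool, min_nr, alloc_fn, free_fn,
                                  pool_data, gfp_mask, node_id)) {
                    kfree(pool);                /* init failed: drop the shell */
                    return NULL;
            }
            return pool;
    }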
370 * @gfp_mask: the usual allocation bitmask.
380 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
387 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
388 might_alloc(gfp_mask);
390 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */
391 gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */
392 gfp_mask |= __GFP_NOWARN; /* failures are OK */
394 gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);
420 if (gfp_temp != gfp_mask) {
422 gfp_temp = gfp_mask;
427 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
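Lines 380-427 are the heart of mempool_alloc(): the caller's gfp_mask is first hardened with __GFP_NOMEMALLOC, __GFP_NORETRY and __GFP_NOWARN, then a first attempt runs with __GFP_DIRECT_RECLAIM and __GFP_IO masked off so it fails fast; only after that does the full mask get retried, and only sleeping callers (__GFP_DIRECT_RECLAIM set) go on to wait for a freed element. A sketch of that control flow, with the pool lock and wait-queue handling elided:

    gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
    gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

repeat_alloc:
    element = pool->alloc(gfp_temp, pool->pool_data);
    if (likely(element != NULL))
            return element;

    /* ... else try to take an element from the pre-allocated reserve ... */

    if (gfp_temp != gfp_mask) {
            gfp_temp = gfp_mask;        /* relaxed attempt failed: retry in full */
            goto repeat_alloc;
    }
    if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
            return NULL;                /* caller may not sleep: give up */

    /* ... otherwise sleep until an element is freed back, then retry ... */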
514 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
518 return kmem_cache_alloc(mem, gfp_mask);
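mempool_alloc_slab() (514-518) simply forwards gfp_mask to kmem_cache_alloc(), with pool_data carrying the backing kmem_cache. A hedged caller-side sketch (cache name, object size and reserve count are placeholders):

    struct kmem_cache *my_cache;
    mempool_t *pool;
    void *obj;

    my_cache = kmem_cache_create("my_objs", 256, 0, 0, NULL);
    pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab, my_cache);
    /* equivalently: pool = mempool_create_slab_pool(16, my_cache); */

    obj = mempool_alloc(pool, GFP_NOIO);    /* this gfp_mask reaches kmem_cache_alloc() */
    mempool_free(obj, pool);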
533 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
536 return kmalloc(size, gfp_mask);
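For mempool_kmalloc() (533-536), pool_data encodes the buffer size, which is passed next to gfp_mask into kmalloc(). The usual setup is the kmalloc-pool helper from linux/mempool.h (element count and size below are illustrative):

    /* 16 pre-allocated 512-byte buffers */
    mempool_t *pool = mempool_create_kmalloc_pool(16, 512);

    void *buf = mempool_alloc(pool, GFP_KERNEL);
    mempool_free(buf, pool);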
550 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
553 return alloc_pages(gfp_mask, order);
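mempool_alloc_pages() (550-553) treats pool_data as a page order and hands gfp_mask straight to alloc_pages(). The matching helper (counts again illustrative):

    /* reserve of 8 order-0 pages */
    mempool_t *pool = mempool_create_page_pool(8, 0);

    struct page *page = mempool_alloc(pool, GFP_NOFS);
    mempool_free(page, pool);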