Lines matching defs:gfp in mm/percpu.c

473  * @gfp: allocation flags
483 static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
489 return kzalloc(size, gfp);
491 return __vmalloc(size, gfp | __GFP_ZERO);
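
The three matches above (lines 483-491) come from pcpu_mem_zalloc(), the helper that backs all of the allocator's metadata: small requests are served from the slab, larger ones from vmalloc, and both paths return zeroed memory honoring the caller's gfp. A minimal reconstruction of that dispatch, assuming the PAGE_SIZE cutoff and slab_is_available() guard found in this era of mm/percpu.c:

static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	/* metadata can only be allocated once the slab allocator is up */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);	/* kzalloc zeroes for us */
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}
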
1396 static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
1401 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1410 sizeof(chunk->alloc_map[0]), gfp);
1415 sizeof(chunk->bound_map[0]), gfp);
1420 sizeof(chunk->md_blocks[0]), gfp);
1428 sizeof(struct obj_cgroup *), gfp);
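
Lines 1396-1428 show pcpu_alloc_chunk() threading the same gfp through every piece of chunk metadata: the chunk struct itself, the alloc_map and bound_map bitmaps, the md_blocks array, and (for memcg-aware chunks) the obj_cgroup vector. A condensed sketch of that pattern, with the md_blocks and obj_cgroup steps elided into a comment; the unwind labels mirror the style of the real function:

static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	int region_bits;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->nr_pages = pcpu_unit_pages;
	region_bits = pcpu_chunk_map_bits(chunk);

	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
					   sizeof(chunk->alloc_map[0]), gfp);
	if (!chunk->alloc_map)
		goto alloc_map_fail;

	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
					   sizeof(chunk->bound_map[0]), gfp);
	if (!chunk->bound_map)
		goto bound_map_fail;

	/* md_blocks (line 1420) and, when pcpu_is_memcg_chunk(type), the
	 * obj_cgroup vector (line 1428) are allocated the same way with @gfp */

	return chunk;

bound_map_fail:
	pcpu_mem_free(chunk->alloc_map);
alloc_map_fail:
	pcpu_mem_free(chunk);
	return NULL;
}
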
1535 int page_start, int page_end, gfp_t gfp);
1539 gfp_t gfp);
1582 static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1587 if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
1594 if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1645 pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
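
Lines 1582-1645 are the memcg accounting hook and its !CONFIG_MEMCG_KMEM stub. The hook only engages when kernel memcg accounting is enabled and the caller passed __GFP_ACCOUNT, and it charges the current obj_cgroup for the request size multiplied across all possible CPUs, failing the allocation up front if the charge is refused. A hedged reconstruction based on the v5.9-era code these line numbers and the enum pcpu_chunk_type return suggest:

static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
						      struct obj_cgroup **objcgp)
{
	struct obj_cgroup *objcg;

	/* unaccounted allocations (no __GFP_ACCOUNT) go to root chunks */
	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
		return PCPU_CHUNK_ROOT;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return PCPU_CHUNK_ROOT;

	/* a per-cpu area costs @size bytes on every possible CPU */
	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
		obj_cgroup_put(objcg);
		return PCPU_FAIL_ALLOC;
	}

	*objcgp = objcg;
	return PCPU_CHUNK_MEMCG;
}
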
1666 * @gfp: allocation flags
1668 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1669 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1677 gfp_t gfp)
1693 gfp = current_gfp_context(gfp);
1695 pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1696 is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1697 do_warn = !(gfp & __GFP_NOWARN);
1719 type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
1730 if (gfp & __GFP_NOFAIL) {
1842 kmemleak_alloc_percpu(ptr, size, gfp);
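
Lines 1666-1842 are pcpu_alloc() itself: the kernel-doc at 1668-1669 states the contract (no GFP_KERNEL means the allocation is atomic, __GFP_NOWARN silences failure warnings), 1693-1697 derive the internal flags, 1730 lets __GFP_NOFAIL keep retrying, and 1842 registers the result with kmemleak using the same gfp. An annotated copy of the flag derivation; the comments are mine, not from the source:

	gfp = current_gfp_context(gfp);	/* honor PF_MEMALLOC_NOFS/NOIO scopes */

	/* only this whitelist ever reaches the backing page/slab allocators */
	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	/* anything short of full GFP_KERNEL (GFP_ATOMIC, GFP_NOWAIT, ...)
	 * takes the non-blocking path: no page population, no new chunks,
	 * only carving from already-populated per-cpu space */
	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;

	/* __GFP_NOWARN suppresses the allocation-failure warning */
	do_warn = !(gfp & __GFP_NOWARN);
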
1880 * @gfp: allocation flags
1883 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1884 * be called from any context but is a lot more likely to fail. If @gfp
1891 void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1893 return pcpu_alloc(size, align, false, gfp);
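
Lines 1880-1893 are the exported wrapper, __alloc_percpu_gfp(), which callers normally reach through the alloc_percpu_gfp() macro. A usage sketch from the caller's side; the struct and function names below are illustrative, not from percpu.c:

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

struct foo_stats {			/* hypothetical per-cpu payload */
	u64 hits;
	u64 misses;
};

static struct foo_stats __percpu *foo_stats_alloc(bool in_atomic)
{
	/*
	 * GFP_KERNEL: may sleep and may populate or create chunks on demand.
	 * GFP_ATOMIC (GFP_KERNEL bits not all set): callable from atomic
	 * context, but only succeeds from already-populated per-cpu space,
	 * so it is much more likely to fail; __GFP_NOWARN keeps it quiet.
	 */
	gfp_t gfp = in_atomic ? GFP_ATOMIC | __GFP_NOWARN : GFP_KERNEL;

	return alloc_percpu_gfp(struct foo_stats, gfp);
}

static void foo_stats_free(struct foo_stats __percpu *stats)
{
	free_percpu(stats);		/* NULL-safe, like kfree() */
}
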
1944 /* gfp flags passed to underlying allocators */
1945 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2028 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2045 chunk = pcpu_create_chunk(type, gfp);
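
The last three matches (1944-2045) come from the background balance path, which repopulates free pages and grows the chunk pool from a workqueue. Its gfp is fixed at GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN: the worker may sleep, but it deliberately fails quietly rather than applying reclaim pressure, since an atomic pcpu_alloc() that finds no populated space will simply schedule it again. An annotated fragment loosely condensing the loop around lines 2028-2045; the control flow here is abbreviated by me:

	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;

	/* walk unpopulated regions of partially populated chunks ... */
	ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);	/* line 2028 */
	if (!ret)
		nr_to_pop -= nr;	/* progress; account the new pages */
	else
		nr_to_pop = 0;		/* fail quietly; the work can rerun later */

	/* ... and if pages are still wanted after all chunks were visited,
	 * grow the pool with the same soft gfp */
	if (nr_to_pop)
		chunk = pcpu_create_chunk(type, gfp);		/* line 2045 */
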