Lines matching defs:gfp (occurrences of the gfp parameter in the kernel page allocator)
4259 * @gfp: GFP flags for the allocation
4277 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
4309 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4329 gfp &= gfp_allowed_mask;
4330 alloc_gfp = gfp;
4331 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4333 gfp = alloc_gfp;
4340 !__cpuset_zone_allowed(zone, gfp)) {
4352 alloc_flags, gfp)) {
4392 prep_new_page(page, 0, gfp, 0);
4413 page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
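
The matches above (4259-4413) are the bulk allocator path, __alloc_pages_bulk(), which hands back up to nr_pages order-0 pages in one call and falls back to a single __alloc_pages() call (the match at 4413) when it cannot make progress. A minimal caller-side sketch follows; fill_page_array() is a hypothetical helper, and it assumes the alloc_pages_bulk_array() wrapper from include/linux/gfp.h, which feeds __alloc_pages_bulk() with the local node and no nodemask.

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: fill a zeroed array with nr order-0 pages. */
    static unsigned long fill_page_array(struct page **pages, unsigned long nr)
    {
            unsigned long got;

            /* The bulk path only populates NULL slots, so the array must start zeroed. */
            got = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);

            /* Top up whatever the bulk path could not provide, one page at a time. */
            while (got < nr) {
                    pages[got] = alloc_page(GFP_KERNEL);
                    if (!pages[got])
                            break;
                    got++;
            }
            return got;     /* caller releases each page with __free_page() */
    }

The bulk interface returns the number of pages it placed in the array rather than an error code, so partial success is the expected outcome under memory pressure.
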
4429 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4441 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4444 gfp &= gfp_allowed_mask;
4452 gfp = current_gfp_context(gfp);
4453 alloc_gfp = gfp;
4454 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
4462 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
4469 alloc_gfp = gfp;
4481 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
4482 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
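
Matches 4429-4482 are __alloc_pages(), the core entry point: the mask is filtered through gfp_allowed_mask and current_gfp_context() (which applies the task's memalloc_nofs/noio scope), and __GFP_ACCOUNT allocations are charged to the current memory cgroup after a successful allocation (4481-4482); the charge-failure path frees the page and returns NULL. A minimal caller-side sketch, assuming only the public alloc_pages()/__free_pages() interface:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: four contiguous pages charged to the caller's memcg. */
    static struct page *grab_accounted_pages(void)
    {
            /* GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT, the bit the
             * memcg_kmem_online() checks at 4309 and 4481 test for. */
            struct page *page = alloc_pages(GFP_KERNEL_ACCOUNT, 2);

            if (!page)
                    return NULL;    /* allocation or memcg charge failure */
            return page;            /* release with __free_pages(page, 2) */
    }
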
4494 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
4497 struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
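
__folio_alloc() (4494-4497) is a thin wrapper that forces __GFP_COMP, so the result is always a compound page, returned as a struct folio. A rough sketch of the usual caller path, assuming the folio_alloc() wrapper declared in include/linux/gfp.h and folio_address()/folio_put() from include/linux/mm.h:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: allocate a folio of the given order and return
     * its kernel mapping; the folio itself is handed back through *out. */
    static void *grab_folio_buffer(unsigned int order, struct folio **out)
    {
            struct folio *folio = folio_alloc(GFP_KERNEL, order);

            if (!folio)
                    return NULL;
            *out = folio;                   /* drop the reference with folio_put() */
            return folio_address(folio);    /* lowmem mapping of the first page */
    }
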
4587 gfp_t gfp = gfp_mask;
4597 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
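
The last two matches (4587, 4597) appear to come from the page-fragment cache refill path: the incoming gfp_mask is copied into gfp so the caller's original mask survives an opportunistic high-order attempt, and the order-0 alloc_pages_node() call at 4597 is the fallback when that attempt fails. A rough sketch of the pattern, with refill_cache() and high_order as illustrative names only:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: try a large compound page first, settle for order 0. */
    static struct page *refill_cache(gfp_t gfp_mask, unsigned int high_order)
    {
            gfp_t gfp = gfp_mask;   /* keep the caller's mask for the fallback */
            struct page *page;

            /* Opportunistic attempt: no reclaim retries, no failure warnings,
             * and no dipping into memory reserves. */
            page = alloc_pages_node(NUMA_NO_NODE,
                                    gfp_mask | __GFP_COMP | __GFP_NOWARN |
                                    __GFP_NORETRY | __GFP_NOMEMALLOC,
                                    high_order);
            if (unlikely(!page))
                    page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
            return page;
    }
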