Lines Matching defs:pcp

85 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
1295 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1296 * moved from pcp lists to free lists.
1313 * moving from pcp lists to free list in order to reduce overhead. With
1315 * to the pcp lists.
1352 struct per_cpu_pages *pcp)
1365 count = min(pcp->count, count);
1380 list = &pcp->lists[migratetype];
1389 /* must delete to avoid corrupting pcp list */
1391 pcp->count--;
1405 * prefetch buddy for the first pcp->batch nr of pages.
1407 if (prefetch_nr++ < pcp->batch)
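
These matches appear to come from the Linux kernel's mm/page_alloc.c in a pre-5.14 kernel, where struct per_cpu_pages still lives inside struct per_cpu_pageset and the percpu_pagelist_fraction sysctl still exists. The fragments around lines 1352-1407 above are the bulk-free path: free_pcppages_bulk() returns up to count pages from one CPU's pcp lists to the buddy free lists, clamping the request to pcp->count, unlinking each page from its pcp list (the "must delete to avoid corrupting pcp list" comment) while decrementing pcp->count, and prefetching the buddy only for the first pcp->batch pages so the prefetch cost stays bounded. Below is a minimal userspace C model of that loop; pcp_model, MODEL_PCPTYPES and free_pcppages_bulk_model() are made-up stand-ins, not the kernel's types.

#include <stdio.h>

#define MODEL_PCPTYPES 3    /* stand-in for MIGRATE_PCPTYPES */

/*
 * Simplified stand-in for the kernel's struct per_cpu_pages; the
 * per-migratetype page lists are modelled as plain counters.
 */
struct pcp_model {
    unsigned int count;     /* pages currently on this CPU's lists */
    unsigned int high;      /* trim threshold                      */
    unsigned int batch;     /* chunk size used for refills/frees   */
    unsigned int lists[MODEL_PCPTYPES];
};

/* Return up to 'count' pages from the per-CPU lists to the global pool. */
static unsigned int free_pcppages_bulk_model(struct pcp_model *pcp,
                                             unsigned int count)
{
    unsigned int freed = 0, prefetch_nr = 0, mt = 0;

    if (count > pcp->count)
        count = pcp->count;             /* count = min(pcp->count, count) */

    while (freed < count) {
        mt = (mt + 1) % MODEL_PCPTYPES; /* round-robin over the lists */
        if (!pcp->lists[mt])
            continue;

        pcp->lists[mt]--;               /* unlink the page from its pcp list */
        pcp->count--;
        freed++;

        /*
         * The kernel prefetches the page's buddy at this point, but
         * only for the first pcp->batch pages of the run.
         */
        if (prefetch_nr < pcp->batch)
            prefetch_nr++;              /* prefetch_buddy(page) in the kernel */
    }
    return freed;
}

int main(void)
{
    struct pcp_model pcp = {
        .count = 9, .high = 12, .batch = 4, .lists = { 3, 3, 3 },
    };
    unsigned int freed = free_pcppages_bulk_model(&pcp, 6);

    printf("freed %u page(s), %u left on the pcp lists\n", freed, pcp.count);
    return 0;
}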
2228 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2229 * also checked when pcp lists are refilled from the free lists.
2246 * when pcp lists are being refilled from the free lists. With debug_pagealloc
2247 * enabled, they are also checked when being allocated from the pcp lists.
2952 * pages added to the pcp list.
2968 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2974 batch = READ_ONCE(pcp->batch);
2975 to_drain = min(pcp->count, batch);
2977 free_pcppages_bulk(zone, to_drain, pcp);
2993 struct per_cpu_pages *pcp;
2998 pcp = &pset->pcp;
2999 if (pcp->count)
3000 free_pcppages_bulk(zone, pcp->count, pcp);
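
The fragments around lines 2968-3000 above are the drain helpers: drain_zone_pages() trims at most one batch per call (to_drain = min(pcp->count, READ_ONCE(pcp->batch))) so periodic or remote draining stays cheap, while drain_pages_zone() hands back everything the CPU holds for the zone. Reusing the pcp_model stand-in and free_pcppages_bulk_model() from the sketch above, the two policies differ only in how much they ask the bulk-free helper to do:

/* Drain at most one batch: the bounded, periodic drain policy. */
static void drain_zone_pages_model(struct pcp_model *pcp)
{
    unsigned int batch = pcp->batch;    /* READ_ONCE(pcp->batch) in the kernel */
    unsigned int to_drain = batch < pcp->count ? batch : pcp->count;

    if (to_drain > 0)
        free_pcppages_bulk_model(pcp, to_drain);
}

/* Drain everything this CPU holds for the zone: the full drain policy. */
static void drain_pages_zone_model(struct pcp_model *pcp)
{
    if (pcp->count)
        free_pcppages_bulk_model(pcp, pcp->count);
}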
3096 struct per_cpu_pageset *pcp;
3101 pcp = per_cpu_ptr(zone->pageset, cpu);
3102 if (pcp->pcp.count)
3106 pcp = per_cpu_ptr(z->pageset, cpu);
3107 if (pcp->pcp.count) {
3203 struct per_cpu_pages *pcp;
3210 * We only track unmovable, reclaimable and movable on pcp lists.
3225 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3226 list_add(&page->lru, &pcp->lists[migratetype]);
3227 pcp->count++;
3228 if (pcp->count >= pcp->high) {
3229 unsigned long batch = READ_ONCE(pcp->batch);
3230 free_pcppages_bulk(zone, batch, pcp);
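
Lines 3203-3230 above are the order-0 free fast path (free_unref_page_commit() in this kernel): the page is pushed onto the pcp list for its migratetype (only unmovable, reclaimable and movable pages are kept on pcp lists), pcp->count is bumped, and once the count reaches pcp->high one batch worth of pages is handed back to the buddy allocator. In the same simplified model:

/* Free one order-0 "page" of the given migratetype to the per-CPU lists. */
static void free_unref_page_commit_model(struct pcp_model *pcp, unsigned int mt)
{
    pcp->lists[mt % MODEL_PCPTYPES]++;  /* list_add(&page->lru, &pcp->lists[mt]) */
    pcp->count++;

    /* Past the high watermark: return one batch to the buddy free lists. */
    if (pcp->count >= pcp->high) {
        unsigned int batch = pcp->batch;    /* READ_ONCE(pcp->batch) */

        free_pcppages_bulk_model(pcp, batch);
    }
}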
3409 struct per_cpu_pages *pcp,
3416 pcp->count += rmqueue_bulk(zone, 0,
3417 pcp->batch, list,
3425 pcp->count--;
3436 struct per_cpu_pages *pcp;
3442 pcp = &this_cpu_ptr(zone->pageset)->pcp;
3443 list = &pcp->lists[migratetype];
3444 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
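
Lines 3409-3444 above are the matching allocation fast path: rmqueue_pcplist() selects the pcp list for the requested migratetype, and __rmqueue_pcplist() takes the first page from it, refilling the list with pcp->batch pages from the buddy allocator (rmqueue_bulk()) whenever it is found empty, and decrementing pcp->count for the page it hands out. A sketch in the same model, with the buddy allocator reduced to a hypothetical counter of free pages:

/*
 * Allocate one order-0 "page" of the given migratetype from the per-CPU
 * lists, refilling from a modelled buddy free-page pool when the list is
 * empty. Returns 1 on success, 0 if the pool is exhausted.
 */
static int rmqueue_pcplist_model(struct pcp_model *pcp, unsigned int mt,
                                 unsigned long *buddy_free_pages)
{
    unsigned int *list = &pcp->lists[mt % MODEL_PCPTYPES];

    if (*list == 0) {
        /* pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, ...) */
        unsigned int got = pcp->batch;

        if (got > *buddy_free_pages)
            got = (unsigned int)*buddy_free_pages;
        *buddy_free_pages -= got;
        *list += got;
        pcp->count += got;

        if (*list == 0)
            return 0;               /* buddy pool exhausted as well */
    }

    (*list)--;                      /* list_del() of the first entry */
    pcp->count--;
    return 1;
}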
5101 if (order == 0) /* Via pcp? */
5591 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5682 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5725 K(this_cpu_read(zone->pageset->pcp.count)),
6519 * pcp->high and pcp->batch values are related and dependent on one another:
6524 * Any new users of pcp->batch and pcp->high should ensure they can cope with
6531 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6535 pcp->batch = 1;
6539 pcp->high = high;
6542 pcp->batch = batch;
6548 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
6553 struct per_cpu_pages *pcp;
6558 pcp = &p->pcp;
6560 INIT_LIST_HEAD(&pcp->lists[migratetype]);
6580 pageset_update(&p->pcp, high, batch);
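
Lines 6519-6542 above are pageset_update(), which enforces the documented rule that pcp->batch must never exceed pcp->high even though readers take no lock: batch is first dropped to a failsafe value of 1, then high is written, then the real batch, with a write barrier (smp_wmb() in the kernel) between each step so the stores become visible in that order. A rough userspace analogue of the ordering, using C11 relaxed atomics plus release fences in place of smp_wmb():

#include <stdatomic.h>

struct pcp_limits {
    atomic_ulong high;      /* drain threshold        */
    atomic_ulong batch;     /* refill/free chunk size */
};

/*
 * Publish new high/batch values so that a lockless reader never
 * observes batch > high: failsafe batch first, then high, then the
 * real batch, with each store ordered before the next.
 */
static void pageset_update_model(struct pcp_limits *pcp, unsigned long high,
                                 unsigned long batch)
{
    atomic_store_explicit(&pcp->batch, 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);   /* smp_wmb() in the kernel */

    atomic_store_explicit(&pcp->high, high, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);   /* smp_wmb() in the kernel */

    atomic_store_explicit(&pcp->batch, batch, memory_order_relaxed);
}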
6584 struct per_cpu_pageset *pcp)
6587 pageset_set_high(pcp,
6591 pageset_set_batch(pcp, zone_batchsize(zone));
6596 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
6598 pageset_init(pcp);
6599 pageset_set_high_and_batch(zone, pcp);
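
Lines 6584-6599 above show how each zone's pageset gets its limits: if the percpu_pagelist_fraction sysctl is non-zero, pcp->high is sized from the zone (zone_managed_pages(zone) / percpu_pagelist_fraction) and batch is derived from that high (high / 4, additionally capped by pageset_set_high() in this kernel); otherwise batch comes from zone_batchsize() and high defaults to six batches, the 6 * batch visible in pageset_set_batch() at line 6548. A compact sketch of that decision, using the pcp_limits/pageset_update_model stand-ins above and taking the zone size, fraction and batch size as plain parameters:

/* Pick high/batch for one zone's per-CPU pageset (simplified). */
static void set_high_and_batch_model(struct pcp_limits *pcp,
                                     unsigned long managed_pages,
                                     unsigned long fraction,
                                     unsigned long zone_batchsize)
{
    if (fraction) {
        /* Sysctl set: high from the zone size, batch derived from high. */
        unsigned long high = managed_pages / fraction;
        unsigned long batch = high / 4 ? high / 4 : 1;

        pageset_update_model(pcp, high, batch);   /* the kernel also caps batch */
    } else {
        /* Default: batch from zone_batchsize(), high = 6 * batch. */
        unsigned long batch = zone_batchsize ? zone_batchsize : 1;

        pageset_update_model(pcp, 6 * batch, batch);
    }
}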
6631 struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6632 memset(pcp->vm_numa_stat_diff, 0,
6633 sizeof(pcp->vm_numa_stat_diff));
8276 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8294 /* Sanity checking to avoid pcp imbalance */
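
The last two matches (lines 8276-8294) are from the sysctl handler for /proc/sys/vm/percpu_pagelist_fraction: writing a non-zero value N makes every zone set each CPU's pcp->high to zone_managed_pages / N, and the "sanity checking to avoid pcp imbalance" refers to rejecting non-zero values below a minimum fraction (8 in this kernel), since a tiny fraction would let a single CPU's lists absorb most of a zone. As a worked example under those assumptions: on a zone with 1,000,000 managed pages, writing 8 gives pcp->high = 1,000,000 / 8 = 125,000 pages per CPU, with batch derived from high but capped to a small value by pageset_set_high(); writing 0 restores the default zone_batchsize()-based sizing, which on a 4 KiB-page system typically works out to batch = 63 and high = 6 * 63 = 378.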