Lines Matching defs:pcp

64 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
111 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
568 if (pcp_allowed_order(order)) /* Via pcp? */
1191 struct per_cpu_pages *pcp,
1203 count = min(pcp->count, count);
1219 list = &pcp->lists[pindex];
1230 /* must delete to avoid corrupting pcp list */
1233 pcp->count -= nr_pages;
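
The matches above (lines 1191-1233) are from free_pcppages_bulk(), the routine that hands cached pages back from a pcp to the buddy allocator. Below is a condensed sketch of how those fragments fit together. The round-robin selection of a non-empty list is abstracted into a hypothetical next_nonempty_pindex() helper, and tracing, isolation handling and VM counters are omitted; only the count clamp, the list_del() and the pcp->count accounting come directly from the matches.

/*
 * Condensed sketch, not the verbatim upstream body: return up to 'count'
 * pages from the pcp lists to the buddy free lists, starting near 'pindex'.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                               struct per_cpu_pages *pcp, int pindex)
{
        unsigned long flags;

        /* Never try to free more pages than the pcp currently holds. */
        count = min(pcp->count, count);

        spin_lock_irqsave(&zone->lock, flags);
        while (count > 0) {
                struct list_head *list;
                struct page *page;
                unsigned int order;

                /* Hypothetical helper: pick the next non-empty pcp list. */
                pindex = next_nonempty_pindex(pcp, pindex);
                list = &pcp->lists[pindex];
                order = pindex_to_order(pindex);

                page = list_last_entry(list, struct page, pcp_list);
                /* must delete to avoid corrupting pcp list */
                list_del(&page->pcp_list);
                count -= 1 << order;
                pcp->count -= 1 << order;

                /* Merge the page back into the buddy free lists. */
                __free_one_page(page, page_to_pfn(page), zone, order,
                                get_pcppage_migratetype(page), FPI_NONE);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}

Note that the callers seen below already hold pcp->lock when this runs; zone->lock is only taken here, around the hand-back to the buddy lists.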
2199 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2203 batch = READ_ONCE(pcp->batch);
2204 to_drain = min(pcp->count, batch);
2206 spin_lock(&pcp->lock);
2207 free_pcppages_bulk(zone, to_drain, pcp, 0);
2208 spin_unlock(&pcp->lock);
2218 struct per_cpu_pages *pcp;
2220 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2221 if (pcp->count) {
2222 spin_lock(&pcp->lock);
2223 free_pcppages_bulk(zone, pcp->count, pcp, 0);
2224 spin_unlock(&pcp->lock);
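
Lines 2199-2224 cover the two drain helpers almost completely. Assembled, with only the local declarations, the to_drain > 0 check and the second function's name (drain_pages_zone() upstream) filled in as assumptions:

/* Drain at most one batch of pages from a (typically remote) pcp. */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        int to_drain, batch;

        batch = READ_ONCE(pcp->batch);
        to_drain = min(pcp->count, batch);
        if (to_drain > 0) {
                spin_lock(&pcp->lock);
                free_pcppages_bulk(zone, to_drain, pcp, 0);
                spin_unlock(&pcp->lock);
        }
}

/* Drain everything the given CPU has cached for this zone. */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
        struct per_cpu_pages *pcp;

        pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
        if (pcp->count) {
                spin_lock(&pcp->lock);
                free_pcppages_bulk(zone, pcp->count, pcp, 0);
                spin_unlock(&pcp->lock);
        }
}

The batch-limited variant is the one used for opportunistic, periodic draining; the full drain is what backs drain_all_pages().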
2259 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2291 struct per_cpu_pages *pcp;
2297 * The pcp.count check is racy, some callers need a
2302 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2303 if (pcp->count)
2307 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2308 if (pcp->count) {
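
Lines 2259-2308 come from the CPU-selection pass in __drain_all_pages(): pcp->count is read without the pcp lock, purely as a cheap filter for deciding which CPUs get drain work queued. A sketch of that loop fragment follows; the cpus_with_pcps bookkeeping and the force_all_cpus branch (which exists precisely because this check is racy) are reconstructed from the upstream structure and should be read as assumptions.

        /*
         * Fragment of __drain_all_pages(): pick the CPUs worth draining.
         * The pcp->count reads are racy; callers that need a guarantee
         * pass force_all_cpus instead of relying on them.
         */
        for_each_online_cpu(cpu) {
                struct per_cpu_pages *pcp;
                struct zone *z;
                bool has_pcps = false;

                if (force_all_cpus) {
                        has_pcps = true;
                } else if (zone) {
                        /* Drain request is for one specific zone. */
                        pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
                        if (pcp->count)
                                has_pcps = true;
                } else {
                        /* Any populated zone with cached pages counts. */
                        for_each_populated_zone(z) {
                                pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
                                if (pcp->count) {
                                        has_pcps = true;
                                        break;
                                }
                        }
                }

                if (has_pcps)
                        cpumask_set_cpu(cpu, &cpus_with_pcps);
                else
                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
        }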
2354 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
2357 int batch = READ_ONCE(pcp->batch);
2361 return pcp->count;
2367 /* Leave at least pcp->batch pages on the list */
2375 batch <<= pcp->free_factor;
2377 pcp->free_factor++;
2383 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2386 int high = READ_ONCE(pcp->high);
2396 * stored on pcp lists
2398 return min(READ_ONCE(pcp->batch) << 2, high);
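
nr_pcp_free() (lines 2354-2377) decides how many pages to bulk-free once a pcp has overflowed, and nr_pcp_high() (lines 2383-2398) computes the effective overflow threshold. The sketch below is condensed: the clamp() bounds, the boot-pageset special case (omitted entirely), the early returns and the ZONE_RECLAIM_ACTIVE test are reconstructed from the upstream structure and are assumptions; the free_factor doubling and the batch << 2 cap come straight from the matches.

static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
{
        int min_nr_free, max_nr_free;
        int batch = READ_ONCE(pcp->batch);

        /* Batch-freeing of high-order pages: drop everything. */
        if (unlikely(free_high))
                return pcp->count;

        /* Leave at least pcp->batch pages on the list */
        min_nr_free = batch;
        max_nr_free = high - batch;

        /*
         * Repeated frees with no intervening allocation double the
         * effective batch each time.
         */
        batch <<= pcp->free_factor;
        if (batch < max_nr_free)
                pcp->free_factor++;

        return clamp(batch, min_nr_free, max_nr_free);
}

static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
                       bool free_high)
{
        int high = READ_ONCE(pcp->high);

        if (unlikely(!high || free_high))
                return 0;

        if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags))
                return high;

        /*
         * Reclaim is active: limit the number of pages that can be
         * stored on pcp lists
         */
        return min(READ_ONCE(pcp->batch) << 2, high);
}

free_factor therefore grows while a CPU keeps freeing without allocating, making each flush larger, and, as the rmqueue_pcplist() matches further down show, it is halved again on every allocation.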
2401 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2411 list_add(&page->pcp_list, &pcp->lists[pindex]);
2412 pcp->count += 1 << order;
2420 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);
2422 high = nr_pcp_high(pcp, zone, free_high);
2423 if (pcp->count >= high) {
2424 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
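
free_unref_page_commit() (lines 2401-2424) is the common tail of the pcp free path: queue the page on the right list, bump pcp->count, and trim the pcp back if it has grown past the high watermark. Only the local declarations and the pindex computation are filled in below; the VM event accounting is omitted.

static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
                                   struct page *page, int migratetype,
                                   unsigned int order)
{
        int high, pindex;
        bool free_high;

        pindex = order_to_pindex(migratetype, order);
        list_add(&page->pcp_list, &pcp->lists[pindex]);
        pcp->count += 1 << order;

        /*
         * Heavy freeing of costly-order pages with no allocations in
         * between triggers an immediate flush of the whole pcp.
         */
        free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER);

        high = nr_pcp_high(pcp, zone, free_high);
        if (pcp->count >= high)
                free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high),
                                   pcp, pindex);
}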
2429 * Free a pcp page
2434 struct per_cpu_pages *pcp;
2443 * We only track unmovable, reclaimable and movable on pcp lists.
2460 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2461 if (pcp) {
2462 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
2463 pcp_spin_unlock(pcp);
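
Its caller, free_unref_page() (lines 2429-2463), is the pattern described by the locking comment matched at line 111: look up the CPU-local pcp, then take its lock, using a trylock so that a failure (for instance an IRQ freeing a page while this CPU already holds its pcp lock) simply falls back to freeing straight into the buddy lists. In the sketch the page preparation, the migratetype special cases and the pcp_trylock_prepare()/finish() bookkeeping are elided, and the fallback call to free_one_page() is an assumption consistent with upstream.

void free_unref_page(struct page *page, unsigned int order)
{
        struct per_cpu_pages *pcp;
        struct zone *zone = page_zone(page);
        int pcpmigratetype = get_pcppage_migratetype(page);

        /* ... page preparation and isolate/CMA/HIGHATOMIC handling elided ... */

        pcp = pcp_spin_trylock(zone->per_cpu_pageset);
        if (pcp) {
                free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
                pcp_spin_unlock(pcp);
        } else {
                /* pcp lock unavailable: free directly to the buddy lists. */
                free_one_page(zone, page, page_to_pfn(page), order,
                              pcpmigratetype, FPI_NONE);
        }
}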
2477 struct per_cpu_pages *pcp = NULL;
2509 * Either different zone requiring a different pcp lock or
2514 if (pcp) {
2515 pcp_spin_unlock(pcp);
2526 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2527 if (unlikely(!pcp)) {
2539 * to the MIGRATE_MOVABLE pcp list.
2545 free_unref_page_commit(zone, pcp, page, migratetype, 0);
2549 if (pcp) {
2550 pcp_spin_unlock(pcp);
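
free_unref_page_list() (lines 2477-2550) applies the same commit path to a whole list of order-0 pages, keeping one pcp lock across consecutive pages and only dropping and retaking it when the zone, and therefore the pcp, changes or the trylock fails. The sketch compresses the loop: page preparation, isolate handling and the periodic relock that bounds lock hold time are left out, and those elisions are assumptions about the surrounding code.

void free_unref_page_list(struct list_head *list)
{
        struct per_cpu_pages *pcp = NULL;
        struct zone *locked_zone = NULL;
        struct page *page, *next;
        int migratetype;

        list_for_each_entry_safe(page, next, list, lru) {
                struct zone *zone = page_zone(page);

                list_del(&page->lru);
                migratetype = get_pcppage_migratetype(page);

                /*
                 * Either different zone requiring a different pcp lock or
                 * a failed trylock: drop the old lock, take the new one.
                 */
                if (zone != locked_zone) {
                        if (pcp)
                                pcp_spin_unlock(pcp);
                        pcp = pcp_spin_trylock(zone->per_cpu_pageset);
                        if (unlikely(!pcp)) {
                                /* Fall back to a direct buddy free. */
                                free_one_page(zone, page, page_to_pfn(page),
                                              0, migratetype, FPI_NONE);
                                locked_zone = NULL;
                                continue;
                        }
                        locked_zone = zone;
                }

                /* Anything the pcp does not track goes to the MOVABLE list. */
                if (unlikely(migratetype >= MIGRATE_PCPTYPES))
                        migratetype = MIGRATE_MOVABLE;

                free_unref_page_commit(zone, pcp, page, migratetype, 0);
        }

        if (pcp)
                pcp_spin_unlock(pcp);
}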
2713 struct per_cpu_pages *pcp,
2720 int batch = READ_ONCE(pcp->batch);
2736 pcp->count += alloced << order;
2743 pcp->count -= 1 << order;
2754 struct per_cpu_pages *pcp;
2761 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2762 if (!pcp) {
2772 pcp->free_factor >>= 1;
2773 list = &pcp->lists[order_to_pindex(migratetype, order)];
2774 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
2775 pcp_spin_unlock(pcp);
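
On the allocation side, rmqueue_pcplist() (lines 2754-2775) is the mirror image: trylock the pcp, decay free_factor so that a later burst of frees starts from a small batch again, and pull a page from the list for this order and migratetype. The NULL return on trylock failure and the omission of the vmstat accounting are the only liberties taken here.

static struct page *rmqueue_pcplist(struct zone *preferred_zone,
                                    struct zone *zone, unsigned int order,
                                    int migratetype, unsigned int alloc_flags)
{
        struct per_cpu_pages *pcp;
        struct list_head *list;
        struct page *page;

        pcp = pcp_spin_trylock(zone->per_cpu_pageset);
        if (!pcp)
                return NULL;    /* caller falls back to the buddy path */

        /* Allocations decay the free-side batch scaling factor. */
        pcp->free_factor >>= 1;

        list = &pcp->lists[order_to_pindex(migratetype, order)];
        page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
        pcp_spin_unlock(pcp);

        return page;
}

This is the fast path that the pcp_allowed_order() check matched at line 568 feeds into; a NULL return sends the caller down the ordinary buddy allocation path instead.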
4286 struct per_cpu_pages *pcp;
4366 pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4367 if (!pcp)
4371 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4381 pcp, pcp_list);
4385 pcp_spin_unlock(pcp);
4400 pcp_spin_unlock(pcp);
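
The bulk allocator (lines 4286-4400) reuses the same machinery but holds the pcp lock across the whole request: one trylock, then repeated __rmqueue_pcplist() calls against a single order-0 list. A heavily trimmed sketch of that inner loop; nr_pages, nr_populated, page_array, gfp, alloc_flags and ac are the surrounding function's variables, and the error handling, zone selection and statistics are omitted.

        /* __alloc_pages_bulk(), pcp portion: one lock, many order-0 pages. */
        pcp = pcp_spin_trylock(zone->per_cpu_pageset);
        if (!pcp)
                goto failed;    /* fall back to single-page allocations */

        pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];

        while (nr_populated < nr_pages) {
                page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
                                         pcp, pcp_list);
                if (unlikely(!page))
                        break;

                prep_new_page(page, 0, gfp, 0);
                page_array[nr_populated++] = page;
        }

        pcp_spin_unlock(pcp);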
5116 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5306 * By default, the high value of the pcp is based on the zone
5324 * onlined. For memory nodes that have no CPUs, split pcp->high across
5326 * prematurely due to pages stored on pcp lists.
5346 * pcp->high and pcp->batch values are related and generally batch is lower
5347 * than high. They are also related to pcp->count such that count is lower
5353 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
5355 * pcp->count field on the local CPU with interrupts disabled.
5361 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5364 WRITE_ONCE(pcp->batch, batch);
5365 WRITE_ONCE(pcp->high, high);
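
pageset_update() (lines 5361-5365) is the single writer referred to by the comment block above: high and batch can change while other CPUs are reading them, so the writes use WRITE_ONCE() and pair with the READ_ONCE() calls visible in drain_zone_pages(), nr_pcp_free() and nr_pcp_high(), while pcp->count is only ever touched by the local CPU with its pcp locked. Annotated copy of the two-line body; the note about the serializing mutex is an assumption about the surrounding code.

/*
 * Writers are serialized upstream (pcp_batch_high_lock); WRITE_ONCE()
 * only guards against store/load tearing, since readers take no lock
 * and must tolerate high/batch changing underneath them.
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
                           unsigned long batch)
{
        WRITE_ONCE(pcp->batch, batch);
        WRITE_ONCE(pcp->high, high);
}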
5368 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
5372 memset(pcp, 0, sizeof(*pcp));
5375 spin_lock_init(&pcp->lock);
5377 INIT_LIST_HEAD(&pcp->lists[pindex]);
5385 pcp->high = BOOT_PAGESET_HIGH;
5386 pcp->batch = BOOT_PAGESET_BATCH;
5387 pcp->free_factor = 0;
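
per_cpu_pages_init() (lines 5368-5387) resets a pageset to a safe boot-time state: zeroed counters, an initialized lock, empty lists and conservative BOOT_PAGESET_HIGH / BOOT_PAGESET_BATCH limits that a later pageset_update() replaces with the real zone-derived values. The NR_PCP_LISTS loop bound and the pzstats memset are assumptions consistent with the matches.

static void per_cpu_pages_init(struct per_cpu_pages *pcp,
                               struct per_cpu_zonestat *pzstats)
{
        int pindex;

        memset(pcp, 0, sizeof(*pcp));
        memset(pzstats, 0, sizeof(*pzstats));

        spin_lock_init(&pcp->lock);
        for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
                INIT_LIST_HEAD(&pcp->lists[pindex]);

        /*
         * Safe defaults for a boot pageset; overwritten once the real
         * zone high/batch values are computed (see pageset_update()).
         */
        pcp->high = BOOT_PAGESET_HIGH;
        pcp->batch = BOOT_PAGESET_BATCH;
        pcp->free_factor = 0;
}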
5393 struct per_cpu_pages *pcp;
5397 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5398 pageset_update(pcp, high, batch);
5433 struct per_cpu_pages *pcp;
5436 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5438 per_cpu_pages_init(pcp, pzstats);
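
The remaining setup matches (lines 5393-5438) wire the pieces together per CPU: each zone's pageset is initialized with per_cpu_pages_init() and the computed high/batch values are then pushed to every CPU through pageset_update(). Roughly, as fragments of those two loops; cpu, zone, high, batch and pzstats are the surrounding functions' variables, and the loop macro is an assumption.

        /* Push newly computed high/batch values to every CPU's pcp. */
        for_each_possible_cpu(cpu) {
                pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
                pageset_update(pcp, high, batch);
        }

        /* Zone setup: reset each CPU's pageset to boot-safe defaults. */
        for_each_possible_cpu(cpu) {
                pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
                pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
                per_cpu_pages_init(pcp, pzstats);
        }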
5596 "mm/page_alloc:pcp",
5940 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
5958 /* Sanity checking to avoid pcp imbalance */
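
The last two matches (lines 5940-5958) are from the handler for the vm.percpu_pagelist_high_fraction sysctl, which rescales pcp->high for every populated zone when the value changes. A rough sketch of its shape; the proc parsing, the pcp_batch_high_lock serialization and the MIN_PERCPU_PAGELIST_HIGH_FRACTION floor are reconstructed from the upstream structure and should be treated as assumptions.

static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
                int write, void *buffer, size_t *length, loff_t *ppos)
{
        struct zone *zone;
        int old = percpu_pagelist_high_fraction;
        int ret;

        mutex_lock(&pcp_batch_high_lock);
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (ret || !write)
                goto out;

        /* Sanity checking to avoid pcp imbalance */
        if (percpu_pagelist_high_fraction &&
            percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
                percpu_pagelist_high_fraction = old;
                ret = -EINVAL;
                goto out;
        }

        if (percpu_pagelist_high_fraction == old)
                goto out;

        /* Recompute pcp->high (and batch) for every zone. */
        for_each_populated_zone(zone)
                zone_set_pageset_high_and_batch(zone, 0);
out:
        mutex_unlock(&pcp_batch_high_lock);
        return ret;
}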