Lines Matching defs:high

92 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
116 * interfered with and a high priority task cannot preempt the allocator.
238 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
239 * 1G machine -> (16M dma, 784M normal, 224M high)
645 * have trouble finding a high-order free page.
754 * At a high level, all that happens here is marking the table entry
1397 int low, int high, int migratetype)
1399 unsigned long size = 1 << high;
1401 while (high > low) {
1402 high--;
1412 if (set_page_guard(zone, &page[size], high, migratetype))
1415 add_to_free_list(&page[size], zone, high, migratetype);
1416 set_buddy_order(&page[size], high);
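
The fragments at 1397-1416 are the heart of expand(): a free block of order high is halved repeatedly until it reaches the requested order low, and the upper half of each split goes back on the free list at the new order. A minimal user-space sketch of that arithmetic, assuming a toy add_to_free_list() stand-in (the real function also handles guard pages and buddy metadata):

#include <stdio.h>

/* Toy stand-in for the kernel's free-list helper. */
static void add_to_free_list(unsigned long pfn, int order)
{
    printf("free block: pfn %lu, order %d\n", pfn, order);
}

/* Split a block of 2^high pages at 'pfn' down to order 'low'.
 * Mirrors expand(): each iteration halves the block and frees
 * the upper half at the decremented order. */
static void expand_sketch(unsigned long pfn, int low, int high)
{
    unsigned long size = 1UL << high;

    while (high > low) {
        high--;
        size >>= 1;
        /* upper half of the current block becomes free */
        add_to_free_list(pfn + size, high);
    }
    /* pages [pfn, pfn + 2^low) are handed to the caller */
}

int main(void)
{
    expand_sketch(0, 0, 3); /* order-3 block split for an order-0 request */
    return 0;
}

Running expand_sketch(0, 0, 3) frees buddies at pfn 4 (order 2), pfn 2 (order 1) and pfn 1 (order 0), leaving page 0 for the caller.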
1748 * high watermark may be uninitialised if fragmentation occurs
1884 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1921 * potentially hurts the reliability of high-order allocations when under
1943 * is really high.
2354 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
2359 /* Free everything if batch freeing high-order pages. */
2364 if (unlikely(high < batch))
2369 max_nr_free = high - batch;
2386 int high = READ_ONCE(pcp->high);
2388 if (unlikely(!high || free_high))
2392 return high;
2398 return min(READ_ONCE(pcp->batch) << 2, high);
2405 int high;
2415 * As high-order pages other than THPs stored on PCP can contribute
2422 high = nr_pcp_high(pcp, zone, free_high);
2423 if (pcp->count >= high) {
2424 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex);
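
Fragments 2354-2424 implement the pcplist flush policy: once pcp->count reaches the high mark, free_pcppages_bulk() returns a batch-derived number of pages to the buddy allocator rather than draining the whole list. A deliberately simplified model of that sizing decision (the real nr_pcp_free() also scales the step by a free factor and clamps it between batch and high - batch):

#include <stdbool.h>
#include <stdio.h>

struct pcp_model { int count, high, batch; };

/* Roughly how many pages to flush once count >= high. */
static int nr_to_flush(const struct pcp_model *pcp, bool free_high)
{
    if (free_high)
        return pcp->count;  /* batch-freeing high-order pages: drain */
    if (pcp->high < pcp->batch)
        return 1;           /* pcplist effectively disabled */
    /* flush down to high - batch, keeping some pages cached */
    return pcp->count - (pcp->high - pcp->batch);
}

int main(void)
{
    struct pcp_model pcp = { .count = 260, .high = 256, .batch = 64 };

    if (pcp.count >= pcp.high)  /* mirrors the check at 2423 */
        printf("flush %d pages\n", nr_to_flush(&pcp, false));
    return 0;
}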
2586 * emulate a high-order watermark check with a raised order-0
2587 * watermark, because we already know our high-order page
2686 * failing a high-order atomic allocation in the
2786 * Use pcplists for THP or "cheap" high-order allocations.
2845 * watermark then subtract the high-atomic reserves. This will
2864 * Return true if free base pages are above 'mark'. For high-order checks it
2910 * are not met, then a high-order request also cannot go ahead
2920 /* For a high-order request, check at least one suitable page is free */
2973 /* reserved may overestimate high-atomic reserves. */
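
Fragments 2845-2973 describe the watermark check (__zone_watermark_ok()): free pages, minus reserves such as the high-atomic pool, must exceed the watermark plus the zone's lowmem reserve, and a high-order request additionally needs at least one suitable free page. A sketch of the order-0 part only, with simplified stand-in fields:

#include <stdbool.h>
#include <stdio.h>

struct zone_model {
    long nr_free;        /* free pages in the zone */
    long nr_highatomic;  /* high-atomic reserve, may overestimate */
    long lowmem_reserve; /* reserve for this allocation's class index */
};

/* Order-0 check: usable free pages must stay above
 * mark + lowmem reserve after discounting atomic reserves. */
static bool watermark_ok(const struct zone_model *z, long mark)
{
    long usable = z->nr_free - z->nr_highatomic;

    return usable > mark + z->lowmem_reserve;
}

int main(void)
{
    struct zone_model z = { .nr_free = 1024, .nr_highatomic = 128,
                            .lowmem_reserve = 256 };
    printf("order-0 ok: %d\n", watermark_ok(&z, 512));
    return 0;
}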
3113 * lowmem reserves and high watermark so that kswapd
3207 * If this is a high-order atomic allocation then check
3332 * Go through the zonelist yet one more time, keep very high watermark
3400 /* Try memory compaction for high-order allocations before reclaim */
3692 * pages are pinned on the per-cpu lists or in high alloc reserves.
3838 * their order will become available due to high fragmentation so
4001 * movable high-order allocations, do that as well, as compaction will
4033 * bursty high order allocations,
4065 * ignored. These allocations are high priority and system rather than
4104 * Do not retry costly high order allocations unless they are
4190 * reserves normally used for high priority non-blocking
4510 * you need to access high mem.
4792 * nr_free_zone_pages - count number of pages beyond high watermark
4796 * high watermark within all zones at or below a given zone index. For each
4801 * Return: number of pages beyond high watermark.
4815 unsigned long high = high_wmark_pages(zone);
4816 if (size > high)
4817 sum += size - high;
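
Lines 4792-4817 define nr_free_zone_pages(): for each zone at or below the given index, only managed pages above the zone's high watermark are counted. The same arithmetic as a standalone sketch:

#include <stdio.h>

struct zone_model { unsigned long managed_pages, high_wmark; };

/* Sum of pages beyond the high watermark across the given zones,
 * mirroring the loop body at 4815-4817. */
static unsigned long nr_free_zone_pages_sketch(const struct zone_model *zones,
                                               int nr_zones)
{
    unsigned long sum = 0;

    for (int i = 0; i < nr_zones; i++) {
        unsigned long size = zones[i].managed_pages;
        unsigned long high = zones[i].high_wmark;

        if (size > high)
            sum += size - high;
    }
    return sum;
}

int main(void)
{
    struct zone_model zones[] = {
        { .managed_pages = 4096, .high_wmark = 512 },
        { .managed_pages = 1024, .high_wmark = 2048 }, /* contributes 0 */
    };
    printf("%lu pages beyond high watermark\n",
           nr_free_zone_pages_sketch(zones, 2));
    return 0;
}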
4824 * nr_free_buffer_pages - count number of pages beyond high watermark
4826 * nr_free_buffer_pages() counts the number of pages which are beyond the high
4829 * Return: number of pages beyond high watermark within ZONE_DMA and
5225 /* Get the number of free pages beyond high watermark in all zones. */
5290 * fragmented and becoming unavailable for high-order allocations.
5300 int high;
5306 * By default, the high value of the pcp is based on the zone
5313 * If percpu_pagelist_high_fraction is configured, the high
5321 * Split the high value across all online CPUs local to the zone. Note
5324 * onlined. For memory nodes that have no CPUs, split pcp->high across
5331 high = total_pages / nr_split_cpus;
5334 * Ensure high is at least batch*4. The multiple is based on the
5335 * historical relationship between high and batch.
5337 high = max(high, batch << 2);
5339 return high;
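
Lines 5290-5339 derive a zone's default pcp->high: a budget based on the zone's size (or on total_pages divided by percpu_pagelist_high_fraction when that sysctl is set) is split across the CPUs local to the zone, floored at batch * 4 per the historical high/batch relationship. As a standalone calculation:

#include <stdio.h>

/* Split a zone's budget of cacheable pages across its local CPUs,
 * keeping high >= batch * 4 as at 5331-5337. */
static int zone_highsize_sketch(unsigned long total_pages,
                                int nr_split_cpus, int batch)
{
    int high = total_pages / nr_split_cpus;

    if (high < batch << 2)
        high = batch << 2;
    return high;
}

int main(void)
{
    /* e.g. 1/8 of a 1M-page zone shared by 16 local CPUs */
    printf("pcp high = %d\n",
           zone_highsize_sketch((1UL << 20) / 8, 16, 63));
    return 0;
}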
5346 * pcp->high and pcp->batch values are related and generally batch is lower
5347 * than high. They are also related to pcp->count such that count is lower
5348 * than high, and as soon as it reaches high, the pcplist is flushed.
5353 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
5361 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
5365 WRITE_ONCE(pcp->high, high);
5380 * Set batch and high values safe for a boot pageset. A true percpu
5385 pcp->high = BOOT_PAGESET_HIGH;
5390 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
5398 pageset_update(pcp, high, batch);
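
Lines 5346-5398 spell out the update protocol: pcp->high and pcp->batch are published with WRITE_ONCE() and read with READ_ONCE() (see 2386 and 2398) so lockless readers never observe a torn value, while a mutex serializes the updaters themselves (line 92). A user-space analogue using C11 relaxed atomics in place of the kernel macros:

#include <stdatomic.h>
#include <stdio.h>

struct pcp_model {
    atomic_int high;
    atomic_int batch;
};

/* Publish new limits; relaxed atomic stores play the role of
 * WRITE_ONCE(): no tearing, no ordering between the two fields. */
static void pageset_update_sketch(struct pcp_model *pcp, int high, int batch)
{
    atomic_store_explicit(&pcp->batch, batch, memory_order_relaxed);
    atomic_store_explicit(&pcp->high, high, memory_order_relaxed);
}

int main(void)
{
    struct pcp_model pcp;

    pageset_update_sketch(&pcp, 256, 64);
    printf("high=%d batch=%d\n",
           atomic_load_explicit(&pcp.high, memory_order_relaxed),
           atomic_load_explicit(&pcp.batch, memory_order_relaxed));
    return 0;
}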
5403 * Calculate and set new high and batch values for all per-cpu pagesets of a
5446 * page high values need to be recalculated.
5627 /* we treat the high watermark as reserved pages. */
5743 * Ensures that the watermark[min,low,high] values for each zone are set
5757 * and high limits or the limits may be inappropriate.
5940 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6388 * Effectively disable pcplists for the zone by setting the high limit to 0
6391 * will be drained, or observe the new high limit and skip the pcplist.
6500 struct page *target, int low, int high,
6503 unsigned long size = 1 << high;
6506 while (high > low) {
6507 high--;
6519 if (set_page_guard(zone, current_buddy, high, migratetype))
6523 add_to_free_list(current_buddy, zone, high, migratetype);
6524 set_buddy_order(current_buddy, high);
6681 /* How much to accept to get to high watermark? */