Lines Matching defs:nr_pages (mm/memcontrol.c). The leading number on each hit below is its line number in the kernel's memory cgroup code; the hits are grouped here in source order.
262 unsigned int nr_pages);
268 unsigned int nr_pages;
293 nr_pages = nr_bytes >> PAGE_SHIFT;
295 if (nr_pages)
296 obj_cgroup_uncharge_pages(objcg, nr_pages);
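
The hits at 262-296 are the objcg release/uncharge path: leftover charged bytes are converted to whole pages with a right shift by PAGE_SHIFT, and only a nonzero page count is handed to obj_cgroup_uncharge_pages(). Below is a minimal userspace sketch of that floor conversion, not kernel code; PAGE_SHIFT of 12 (4 KiB pages) is an assumption, and uncharge_pages() is a made-up stand-in.

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* stand-in for obj_cgroup_uncharge_pages(): just report the call */
static void uncharge_pages(unsigned int nr_pages)
{
        printf("uncharge %u page(s)\n", nr_pages);
}

int main(void)
{
        unsigned long nr_bytes = 3 * PAGE_SIZE + 100;   /* leftover charge */
        unsigned int nr_pages = nr_bytes >> PAGE_SHIFT; /* floor: 3 pages */

        if (nr_pages)           /* sub-page remainders never reach the call */
                uncharge_pages(nr_pages);
        return 0;
}
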
474 unsigned long nr_pages = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON,
478 unsigned long nr_pages = page_counter_read(&memcg->memory);
483 if (nr_pages > soft_limit)
484 excess = nr_pages - soft_limit;
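
Lines 478-484 compute how far a group's usage is over its soft limit: the excess is usage minus soft_limit when usage is larger, zero otherwise (line 474 reads the active-anon LRU size in a neighbouring helper). A hedged sketch of that clamp, with the page counters reduced to plain page counts:

#include <stdio.h>

/* usage and soft_limit are page counts, as read from the page counters */
static unsigned long soft_limit_excess(unsigned long nr_pages,
                                       unsigned long soft_limit)
{
        unsigned long excess = 0;

        if (nr_pages > soft_limit)      /* only over-limit groups have excess */
                excess = nr_pages - soft_limit;
        return excess;
}

int main(void)
{
        printf("%lu\n", soft_limit_excess(1000, 750)); /* 250 pages over */
        printf("%lu\n", soft_limit_excess(500, 750));  /* 0: under the limit */
        return 0;
}
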
964 int nr_pages)
967 if (nr_pages > 0)
971 nr_pages = -nr_pages; /* for event */
974 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
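
Lines 964-974 are the charge-statistics helper: the sign of nr_pages says whether pages were charged or uncharged (and picks the PGPGIN or PGPGOUT event), but the per-CPU nr_page_events counter only tracks magnitude, so a negative delta is flipped before the add. A small model, with the per-CPU state reduced to a single integer:

#include <stdio.h>

static unsigned long nr_page_events;    /* stands in for the per-CPU counter */

static void charge_statistics(int nr_pages)
{
        if (nr_pages > 0)
                printf("charged %d page(s)\n", nr_pages);
        else {
                printf("uncharged %d page(s)\n", -nr_pages);
                nr_pages = -nr_pages;   /* for event: count magnitude only */
        }
        nr_page_events += nr_pages;
}

int main(void)
{
        charge_statistics(4);
        charge_statistics(-4);
        printf("events: %lu\n", nr_page_events);  /* 8: both directions count */
        return 0;
}
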
1404 * @nr_pages: positive when adding or negative when removing
1410 int zid, int nr_pages)
1427 if (nr_pages < 0)
1428 *lru_size += nr_pages;
1433 __func__, lruvec, lru, nr_pages, size)) {
1438 if (nr_pages > 0)
1439 *lru_size += nr_pages;
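
The mem_cgroup_update_lru_size() hits (1404-1439) show an ordering trick: a negative nr_pages is applied before the underflow check and a positive one after it, so the sanity check always sees the smaller of the two values. A sketch of that ordering, with the WARN reduced to a printf and the lruvec/zone plumbing omitted:

#include <stdio.h>

static void update_lru_size(long *lru_size, int nr_pages)
{
        long size;

        if (nr_pages < 0)               /* removal: shrink first ... */
                *lru_size += nr_pages;

        size = *lru_size;
        if (size < 0) {                 /* ... so underflow is caught here */
                printf("bogus lru_size %ld, resetting to 0\n", size);
                *lru_size = 0;
        }

        if (nr_pages > 0)               /* addition: grow only after the check */
                *lru_size += nr_pages;
}

int main(void)
{
        long lru_size = 2;

        update_lru_size(&lru_size, -3); /* trips the sanity check */
        update_lru_size(&lru_size, 5);
        printf("lru_size = %ld\n", lru_size);   /* 5 */
        return 0;
}
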
2212 unsigned int nr_pages;
2235 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2247 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2255 * @nr_pages: how many pages to charge.
2258 * stock, and at least @nr_pages are available in that stock. Failure to
2263 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2269 if (nr_pages > MEMCG_CHARGE_BATCH)
2275 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2276 stock->nr_pages -= nr_pages;
2295 if (stock->nr_pages) {
2296 page_counter_uncharge(&old->memory, stock->nr_pages);
2298 page_counter_uncharge(&old->memsw, stock->nr_pages);
2299 stock->nr_pages = 0;
2333 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2343 stock->nr_pages += nr_pages;
2345 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2349 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2354 __refill_stock(memcg, nr_pages);
2384 if (memcg && stock->nr_pages &&
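
The cluster from 2212 through 2384 is the per-CPU charge stock: consume_stock() satisfies small charges (at most MEMCG_CHARGE_BATCH pages) from a cached pre-charge without touching the page counters, drain_stock() returns unused pre-charge to the counters, and __refill_stock()/refill_stock() stash already-charged pages, draining the stash once it outgrows one batch. The following is a single-threaded userspace model of that cache, not the kernel code: the page counter is a plain usage field, locking, memsw and the per-CPU machinery are left out, and CHARGE_BATCH of 64 is an assumed stand-in for MEMCG_CHARGE_BATCH.

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 64U        /* assumed value of MEMCG_CHARGE_BATCH */

struct counter { unsigned long usage; };        /* stands in for page_counter */

struct stock {
        struct counter *cached; /* which group the pre-charge belongs to */
        unsigned int nr_pages;  /* pre-charged pages not yet handed out */
};

/* serve a small charge from the cached pre-charge: no counter traffic */
static bool consume_stock(struct stock *stock, struct counter *memcg,
                          unsigned int nr_pages)
{
        if (nr_pages > CHARGE_BATCH)
                return false;
        if (stock->cached == memcg && stock->nr_pages >= nr_pages) {
                stock->nr_pages -= nr_pages;
                return true;
        }
        return false;
}

/* return any unused pre-charge to the owning counter */
static void drain_stock(struct stock *stock)
{
        if (stock->cached && stock->nr_pages) {
                stock->cached->usage -= stock->nr_pages;
                stock->nr_pages = 0;
        }
        stock->cached = NULL;
}

/* stash already-charged pages; drain if the stash outgrows one batch */
static void refill_stock(struct stock *stock, struct counter *memcg,
                         unsigned int nr_pages)
{
        if (stock->cached != memcg) {
                drain_stock(stock);
                stock->cached = memcg;
        }
        stock->nr_pages += nr_pages;
        if (stock->nr_pages > CHARGE_BATCH)
                drain_stock(stock);
}

int main(void)
{
        struct counter memcg = { .usage = CHARGE_BATCH };
        struct stock stock = { .cached = &memcg, .nr_pages = CHARGE_BATCH };

        /* a small charge is served from the stash */
        printf("consume 8 -> %d\n", consume_stock(&stock, &memcg, 8));

        /* the same 8 pages come back on uncharge; the stash absorbs them */
        refill_stock(&stock, &memcg, 8);
        printf("stock %u, usage %lu\n", stock.nr_pages, memcg.usage);

        /* one page more than a batch and the whole stash drains back */
        memcg.usage += 1;       /* that extra page was charged directly */
        refill_stock(&stock, &memcg, 1);
        printf("stock %u, usage %lu\n", stock.nr_pages, memcg.usage);
        return 0;
}
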
2414 unsigned int nr_pages,
2429 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2553 unsigned int nr_pages,
2581 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2593 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2598 if (likely(!nr_pages))
2615 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2622 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2625 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
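
Lines 2553 through 2625 belong to the memory.high throttling path: mem_cgroup_handle_over_high() picks up the number of pages this task pushed the group over its high limit (current->memcg_nr_pages_over_high), reclaims them, and then sleeps for a penalty derived from both the memory and the swap overage. The scaling at line 2581 weights the base penalty by the task's share of one charge batch:

        penalty = penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH

As a worked example, assuming a 64-page MEMCG_CHARGE_BATCH and a base penalty of 200 jiffies, a task that was 16 pages over would be asked to sleep 200 * 16 / 64 = 50 jiffies; both numbers are made up purely for illustration.
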
2668 unsigned int nr_pages)
2670 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2682 if (consume_stock(memcg, nr_pages))
2697 if (batch > nr_pages) {
2698 batch = nr_pages;
2721 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2725 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2745 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2770 get_order(nr_pages * PAGE_SIZE))) {
2797 page_counter_charge(&memcg->memory, nr_pages);
2799 page_counter_charge(&memcg->memsw, nr_pages);
2804 if (batch > nr_pages)
2805 refill_stock(memcg, batch - nr_pages);
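
The block from 2668 to 2805 is try_charge_memcg() itself: it first tries the per-CPU stock, then charges a whole batch (batch = max(MEMCG_CHARGE_BATCH, nr_pages)) against the page counters, falls back to the exact request size and to direct reclaim when the counter is at its limit, and finally parks the surplus batch - nr_pages back into the stock. Below is a heavily simplified success-path sketch under the same assumptions as the stock model above; reclaim, retries, OOM and the memsw counter are all omitted, and the per-CPU stock is reduced to a single integer.

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 64U                /* assumed MEMCG_CHARGE_BATCH */

struct group {
        unsigned long usage, max;       /* page_counter, reduced to two fields */
        unsigned int stock;             /* per-CPU pre-charge, reduced to one int */
};

static bool try_charge(struct group *g, unsigned int nr_pages)
{
        unsigned int batch = CHARGE_BATCH > nr_pages ? CHARGE_BATCH : nr_pages;

        /* fast path: the request fits in the cached pre-charge */
        if (nr_pages <= CHARGE_BATCH && g->stock >= nr_pages) {
                g->stock -= nr_pages;
                return true;
        }

        /* slow path: charge a whole batch; fall back to the exact size
           if the batch does not fit (reclaim/OOM handling omitted) */
        if (g->usage + batch > g->max) {
                batch = nr_pages;
                if (g->usage + batch > g->max)
                        return false;   /* the real code would reclaim here */
        }
        g->usage += batch;

        /* park the surplus so the next small charge hits the fast path */
        if (batch > nr_pages)
                g->stock += batch - nr_pages;
        return true;
}

int main(void)
{
        struct group g = { .usage = 0, .max = 256, .stock = 0 };

        try_charge(&g, 1);              /* charges a 64-page batch, stocks 63 */
        printf("usage %lu stock %u\n", g.usage, g.stock);
        try_charge(&g, 1);              /* served entirely from the stock */
        printf("usage %lu stock %u\n", g.usage, g.stock);
        return 0;
}
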
2858 unsigned int nr_pages)
2863 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2866 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2871 page_counter_uncharge(&memcg->memory, nr_pages);
2873 page_counter_uncharge(&memcg->memsw, nr_pages);
3090 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3092 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3094 if (nr_pages > 0)
3095 page_counter_charge(&memcg->kmem, nr_pages);
3097 page_counter_uncharge(&memcg->kmem, -nr_pages);
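
The definition of memcg_account_kmem() at 3090-3097 (the earlier hits at 2235 and 2247 are declarations) takes a signed page count: the MEMCG_KMEM vmstat is adjusted by the signed value as-is, while the legacy kmem page counter only understands charge or uncharge, so the sign is folded into that decision. In sketch form:

#include <stdio.h>

struct group {
        long kmem_stat;                 /* signed vmstat, MEMCG_KMEM */
        unsigned long kmem_counter;     /* legacy kmem page_counter */
};

static void account_kmem(struct group *g, int nr_pages)
{
        g->kmem_stat += nr_pages;       /* stat takes the signed delta as-is */

        if (nr_pages > 0)               /* counter only knows charge/uncharge */
                g->kmem_counter += nr_pages;
        else
                g->kmem_counter -= -nr_pages;
}

int main(void)
{
        struct group g = { 0, 0 };

        account_kmem(&g, 8);
        account_kmem(&g, -3);
        printf("stat %ld counter %lu\n", g.kmem_stat, g.kmem_counter);  /* 5 5 */
        return 0;
}
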
3105 * @nr_pages: number of pages to uncharge
3108 unsigned int nr_pages)
3114 memcg_account_kmem(memcg, -nr_pages);
3115 refill_stock(memcg, nr_pages);
3124 * @nr_pages: number of pages to charge
3129 unsigned int nr_pages)
3136 ret = try_charge_memcg(memcg, gfp, nr_pages);
3140 memcg_account_kmem(memcg, nr_pages);
3182 unsigned int nr_pages = 1 << order;
3188 obj_cgroup_uncharge_pages(objcg, nr_pages);
3286 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3289 if (nr_pages) {
3294 memcg_account_kmem(memcg, -nr_pages);
3295 __refill_stock(memcg, nr_pages);
3362 unsigned int nr_pages = 0;
3378 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3386 if (nr_pages)
3387 obj_cgroup_uncharge_pages(objcg, nr_pages);
3392 unsigned int nr_pages, nr_bytes;
3421 nr_pages = size >> PAGE_SHIFT;
3425 nr_pages += 1;
3427 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
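
The objcg hits from 3182 to 3427 are the kernel-memory byte accounting: __memcg_kmem_uncharge_page() uncharges 1 << order pages, the drain/flush paths (3286-3387) convert accumulated byte credit back into whole pages, and obj_cgroup_charge() (3392-3427) rounds a byte-sized charge up to whole pages, keeping the unused tail of the last page as byte credit in the per-CPU objcg stock. A sketch of that round-up arithmetic, again assuming 4 KiB pages and using a made-up helper name:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* round a byte-sized object charge up to whole pages; whatever is left of
   the last page goes back into the objcg byte stock as credit */
static unsigned int bytes_to_charge_pages(size_t size, size_t *credit)
{
        unsigned int nr_pages = size >> PAGE_SHIFT;
        size_t nr_bytes = size & (PAGE_SIZE - 1);

        if (nr_bytes)
                nr_pages += 1;
        *credit = nr_bytes ? PAGE_SIZE - nr_bytes : 0;
        return nr_pages;
}

int main(void)
{
        size_t credit;
        unsigned int pages = bytes_to_charge_pages(5000, &credit);

        /* 5000 bytes -> 2 pages charged, 3192 bytes of credit stocked */
        printf("%u page(s), %zu byte(s) of credit\n", pages, credit);
        return 0;
}
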
3880 unsigned long nr_pages;
3884 ret = page_counter_memparse(buf, "-1", &nr_pages);
3896 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3899 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3909 ret = memcg_update_tcp_max(memcg, nr_pages);
3917 WRITE_ONCE(memcg->soft_limit, nr_pages);
5842 unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5862 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5863 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5866 -nr_pages);
5868 nr_pages);
5872 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5873 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5876 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5877 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5881 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5882 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5890 -nr_pages);
5892 nr_pages);
5899 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5900 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5904 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5905 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5934 mem_cgroup_charge_statistics(to, nr_pages);
5936 mem_cgroup_charge_statistics(from, -nr_pages);
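
The long run of paired __mod_lruvec_state() calls at 5842-5936 is mem_cgroup_move_account(): every per-node stat the folio contributes is subtracted from the source group and added to the destination with the same nr_pages, so global totals are unchanged by the move, and the charge statistics are adjusted the same way at the end. The whole pattern reduces to a transfer helper like this trivial sketch:

#include <stdio.h>

/* move a folio's contribution to one stat between groups; the sum of the
   two counters is invariant */
static void transfer_stat(long *from, long *to, long nr_pages)
{
        *from -= nr_pages;
        *to += nr_pages;
}

int main(void)
{
        long from_anon = 512, to_anon = 0;
        long nr_pages = 4;              /* folio_nr_pages() of the moved folio */

        transfer_stat(&from_anon, &to_anon, nr_pages);
        printf("from %ld to %ld\n", from_anon, to_anon);  /* 508 and 4 */
        return 0;
}
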
6571 unsigned long nr_pages = page_counter_read(&memcg->memory);
6574 if (nr_pages <= high)
6586 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6620 unsigned long nr_pages = page_counter_read(&memcg->memory);
6622 if (nr_pages <= max)
6635 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
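
Lines 6571-6635 are the memory.high and memory.max write handlers: after the new limit is stored, current usage is re-read and, if it is above the new value, try_to_free_mem_cgroup_pages() is asked for exactly the difference (nr_pages - high, or nr_pages - max). A sketch of that reclaim-target arithmetic with reclaim stubbed out; the real handlers loop a bounded number of times and memory.max can fall back to the OOM killer.

#include <stdio.h>

/* pretend reclaim: free up to 'want' pages, report how many were freed */
static unsigned long reclaim_pages(unsigned long *usage, unsigned long want)
{
        unsigned long freed = want < *usage ? want : *usage;

        *usage -= freed;
        return freed;
}

/* shrink usage toward a newly written limit */
static void apply_new_limit(unsigned long *usage, unsigned long limit)
{
        while (*usage > limit) {
                if (!reclaim_pages(usage, *usage - limit))
                        break;          /* nothing reclaimable: stop trying */
        }
}

int main(void)
{
        unsigned long usage = 1000;

        apply_new_limit(&usage, 640);   /* reclaim exactly the 360-page excess */
        printf("usage now %lu\n", usage);
        return 0;
}
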
7068 long nr_pages = folio_nr_pages(folio);
7071 ret = try_charge(memcg, gfp, nr_pages);
7079 mem_cgroup_charge_statistics(memcg, nr_pages);
7204 long nr_pages;
7241 nr_pages = folio_nr_pages(folio);
7244 ug->nr_memory += nr_pages;
7245 ug->nr_kmem += nr_pages;
7252 ug->nr_memory += nr_pages;
7306 long nr_pages = folio_nr_pages(new);
7312 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7328 page_counter_charge(&memcg->memory, nr_pages);
7330 page_counter_charge(&memcg->memsw, nr_pages);
7337 mem_cgroup_charge_statistics(memcg, nr_pages);
7377 * @nr_pages: number of pages to charge
7380 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7383 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7389 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7395 page_counter_charge(&memcg->tcpmem, nr_pages);
7401 if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7402 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7412 * @nr_pages: number of pages to uncharge
7414 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7417 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7421 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7423 refill_stock(memcg, nr_pages);
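
mem_cgroup_charge_skmem() and mem_cgroup_uncharge_skmem() (7377-7423) have two modes: on the legacy setup the dedicated tcpmem counter is tried first and, on failure, the group is marked as under socket pressure and the charge either fails or, for callers that cannot tolerate failure, is pushed through over the limit; otherwise the pages go through the normal try_charge() path and are tracked in the MEMCG_SOCK stat, with uncharge refilling the per-CPU stock. A sketch of the force-charge convention on the tcpmem side only; the gfp handling is reduced to a nofail flag, which is an assumption of this model:

#include <stdbool.h>
#include <stdio.h>

struct counter { unsigned long usage, max; };

/* try the dedicated tcpmem counter; when the caller cannot tolerate a
   failure, the charge is pushed through over the limit anyway */
static bool charge_skmem_tcpmem(struct counter *tcpmem,
                                unsigned int nr_pages, bool nofail)
{
        if (tcpmem->usage + nr_pages <= tcpmem->max) {
                tcpmem->usage += nr_pages;
                return true;
        }
        /* over the limit: the kernel marks the group as under socket
           pressure at this point */
        if (nofail) {
                tcpmem->usage += nr_pages;
                return true;
        }
        return false;
}

static void uncharge_skmem_tcpmem(struct counter *tcpmem,
                                  unsigned int nr_pages)
{
        tcpmem->usage -= nr_pages;
}

int main(void)
{
        struct counter tcpmem = { .usage = 30, .max = 32 };

        printf("%d\n", charge_skmem_tcpmem(&tcpmem, 4, false)); /* 0: refused */
        printf("%d\n", charge_skmem_tcpmem(&tcpmem, 4, true));  /* 1: forced */
        uncharge_skmem_tcpmem(&tcpmem, 4);
        printf("usage %lu\n", tcpmem.usage);    /* back to 30 */
        return 0;
}
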
7586 unsigned int nr_pages = folio_nr_pages(folio);
7608 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7616 if (nr_pages > 1)
7617 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7618 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7620 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7628 * @nr_pages: the amount of swap space to uncharge
7630 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7635 id = swap_cgroup_record(entry, 0, nr_pages);
7641 page_counter_uncharge(&memcg->memsw, nr_pages);
7643 page_counter_uncharge(&memcg->swap, nr_pages);
7645 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7646 mem_cgroup_id_put_many(memcg, nr_pages);
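
The final group (7586-7646) is swap accounting: when a folio of nr_pages pages is swapped out, the swap counter is charged, the owning group's ID is recorded for every swap entry, and the group's ID reference count ends up roughly one reference per swapped page (the code takes nr_pages - 1 extra references on top of the one already held); __mem_cgroup_uncharge_swap() undoes the counter charge, the stat and the references per freed entry. A reference-counting sketch of that pairing, with everything except the two counters stripped away:

#include <stdio.h>

struct group { unsigned long id_refs, swap_usage; };

/* charge a folio's worth of swap: one counter charge, one id ref per page */
static void charge_swap(struct group *g, unsigned int nr_pages)
{
        g->swap_usage += nr_pages;
        g->id_refs += nr_pages;
}

/* uncharge comes back in smaller pieces as individual entries are freed */
static void uncharge_swap(struct group *g, unsigned int nr_pages)
{
        g->swap_usage -= nr_pages;
        g->id_refs -= nr_pages;
}

int main(void)
{
        struct group g = { .id_refs = 0, .swap_usage = 0 };

        charge_swap(&g, 8);             /* an 8-page folio goes to swap */
        uncharge_swap(&g, 1);           /* one entry freed */
        uncharge_swap(&g, 7);           /* the rest freed later */
        printf("refs %lu usage %lu\n", g.id_refs, g.swap_usage);  /* 0 0 */
        return 0;
}
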