Lines Matching defs:nr_pages
261 unsigned int nr_pages;
286 nr_pages = nr_bytes >> PAGE_SHIFT;
290 if (nr_pages)
291 __memcg_kmem_uncharge(memcg, nr_pages);
673 unsigned long nr_pages = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON,
677 unsigned long nr_pages = page_counter_read(&memcg->memory);
682 if (nr_pages > soft_limit)
683 excess = nr_pages - soft_limit;
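The matches at lines 677-683 compute how far usage exceeds the soft limit: read the memory counter and subtract the limit, leaving the excess at zero while usage is at or below it. A minimal userspace sketch of that arithmetic, with hypothetical names, follows.

#include <stdio.h>

/* Hypothetical model: usage and soft limit are both counted in pages. */
static unsigned long soft_limit_excess(unsigned long nr_pages,
                                       unsigned long soft_limit)
{
        /* excess stays zero while usage is at or below the limit */
        return nr_pages > soft_limit ? nr_pages - soft_limit : 0;
}

int main(void)
{
        printf("%lu\n", soft_limit_excess(1500, 1024)); /* 476 */
        printf("%lu\n", soft_limit_excess(800, 1024));  /* 0 */
        return 0;
}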
961 int nr_pages)
964 if (nr_pages > 0)
968 nr_pages = -nr_pages; /* for event */
971 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
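The matches at lines 961-971 show the sign convention for event accounting: charges pass a positive nr_pages, uncharges a negative one, and the nr_page_events counter always receives the magnitude. A small sketch of that handling, assuming a single global counter in place of the per-CPU state:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the per-CPU nr_page_events counter. */
static unsigned long nr_page_events;

static void charge_statistics(int nr_pages)
{
        /* direction-sensitive counters would use the signed value here */
        nr_page_events += abs(nr_pages); /* events only count the magnitude */
}

int main(void)
{
        charge_statistics(4);            /* charge */
        charge_statistics(-4);           /* uncharge */
        printf("%lu\n", nr_page_events); /* 8 */
        return 0;
}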
1404 * @nr_pages: positive when adding or negative when removing
1411 int zid, int nr_pages)
1427 if (nr_pages < 0)
1428 *lru_size += nr_pages;
1433 __func__, lruvec, lru, nr_pages, size)) {
1438 if (nr_pages > 0)
1439 *lru_size += nr_pages;
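The matches at lines 1404-1439 update an LRU size with a signed nr_pages ("positive when adding or negative when removing"): the negative delta is applied before the underflow check and the positive one after it, so the check always runs on the smaller intermediate value. A hedged single-threaded sketch, with a plain warning standing in for the kernel's once-only report:

#include <stdio.h>

static void update_lru_size(long *lru_size, int nr_pages)
{
        long size;

        if (nr_pages < 0)               /* removal: apply before the check */
                *lru_size += nr_pages;

        size = *lru_size;
        if (size < 0) {                 /* should never happen; clamp if it does */
                fprintf(stderr, "lru_size %ld went negative, clamping\n", size);
                *lru_size = 0;
        }

        if (nr_pages > 0)               /* addition: apply after the check */
                *lru_size += nr_pages;
}

int main(void)
{
        long lru_size = 4;

        update_lru_size(&lru_size, -3); /* removing 3 pages */
        update_lru_size(&lru_size, 2);  /* adding 2 pages */
        printf("%ld\n", lru_size);      /* 3 */
        return 0;
}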
2233 unsigned int nr_pages;
2266 * @nr_pages: how many pages to charge.
2269 * stock, and at least @nr_pages are available in that stock. Failure to
2274 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2280 if (nr_pages > MEMCG_CHARGE_BATCH)
2286 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2287 stock->nr_pages -= nr_pages;
2306 if (stock->nr_pages) {
2307 page_counter_uncharge(&old->memory, stock->nr_pages);
2309 page_counter_uncharge(&old->memsw, stock->nr_pages);
2310 stock->nr_pages = 0;
2340 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2353 stock->nr_pages += nr_pages;
2355 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2386 if (memcg && stock->nr_pages &&
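Lines 2233-2386 cover the per-CPU charge stock: consume_stock() serves small requests from pages that were pre-charged locally, drain_stock() returns any unused pre-charge to the page counters, and refill_stock() parks surplus pages and drains once the stock grows past MEMCG_CHARGE_BATCH. A single-CPU userspace model of that caching follows; counter, cached_id and CHARGE_BATCH are illustrative stand-ins, and the real code is per-CPU with local interrupts disabled, which this sketch ignores.

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 64           /* stand-in for MEMCG_CHARGE_BATCH */

static unsigned long counter;     /* stand-in for the shared page counter */

struct stock {
        int cached_id;            /* which group the stock belongs to */
        unsigned int nr_pages;    /* pre-charged pages held locally */
};

static struct stock stock;

static bool consume_stock(int id, unsigned int nr_pages)
{
        if (nr_pages > CHARGE_BATCH)
                return false;     /* large requests always hit the counter */
        if (stock.cached_id == id && stock.nr_pages >= nr_pages) {
                stock.nr_pages -= nr_pages;
                return true;      /* served locally, counter untouched */
        }
        return false;
}

static void drain_stock(void)
{
        counter -= stock.nr_pages; /* uncharge the unused pre-charge */
        stock.nr_pages = 0;
}

static void refill_stock(int id, unsigned int nr_pages)
{
        if (stock.cached_id != id) {
                drain_stock();    /* the stock caches one group at a time */
                stock.cached_id = id;
        }
        stock.nr_pages += nr_pages;
        if (stock.nr_pages > CHARGE_BATCH)
                drain_stock();    /* keep the local cache bounded */
}

int main(void)
{
        counter = 64;                          /* a batch charged up front */
        refill_stock(1, 64);                   /* park it in the stock */
        printf("%d\n", consume_stock(1, 10));  /* 1: served from the stock */
        printf("%lu %u\n", counter, stock.nr_pages); /* 64 54 */
        return 0;
}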
2454 unsigned int nr_pages,
2469 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2592 unsigned int nr_pages,
2620 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2632 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2637 if (likely(!nr_pages))
2654 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2661 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2664 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
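The penalty scaling at line 2620, used by the calculate_high_delay() calls at lines 2661-2664, scales a base penalty by the size of the current charge: penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH, so a task that charged a full batch pays the full delay and smaller charges pay proportionally less. A small sketch of that scaling; CHARGE_BATCH mirrors MEMCG_CHARGE_BATCH, while MAX_PENALTY is an illustrative ceiling rather than the kernel's constant.

#include <stdio.h>

#define CHARGE_BATCH 64   /* stand-in for MEMCG_CHARGE_BATCH */
#define MAX_PENALTY  200  /* illustrative clamp, in jiffies */

static unsigned long scale_penalty(unsigned long penalty_jiffies,
                                   unsigned int nr_pages)
{
        /* scale the per-batch penalty by this task's share of a batch */
        unsigned long p = penalty_jiffies * nr_pages / CHARGE_BATCH;

        return p > MAX_PENALTY ? MAX_PENALTY : p;
}

int main(void)
{
        printf("%lu\n", scale_penalty(100, 64)); /* 100: a full batch */
        printf("%lu\n", scale_penalty(100, 16)); /* 25: a quarter batch */
        return 0;
}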
2707 unsigned int nr_pages)
2709 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2723 if (consume_stock(memcg, nr_pages))
2738 if (batch > nr_pages) {
2739 batch = nr_pages;
2770 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2774 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2794 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2822 get_order(nr_pages * PAGE_SIZE));
2837 page_counter_charge(&memcg->memory, nr_pages);
2839 page_counter_charge(&memcg->memsw, nr_pages);
2844 if (batch > nr_pages)
2845 refill_stock(memcg, batch - nr_pages);
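The charge path at lines 2707-2845 charges in batches: it first tries consume_stock(), then charges max(MEMCG_CHARGE_BATCH, nr_pages) against the page counter, retries with the exact size when the batch does not fit, and finally hands the surplus back through refill_stock(). A compressed userspace sketch of that batching; the real path also reclaims, throttles and can declare OOM, and counter/stock_pages here are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define CHARGE_BATCH 64   /* stand-in for MEMCG_CHARGE_BATCH */

struct counter {
        unsigned long usage;
        unsigned long max;
};

static bool counter_try_charge(struct counter *c, unsigned long n)
{
        if (c->usage + n > c->max)
                return false;
        c->usage += n;
        return true;
}

static unsigned long stock_pages; /* local pre-charge, refilled below */

static bool try_charge(struct counter *c, unsigned int nr_pages)
{
        unsigned int batch = nr_pages > CHARGE_BATCH ? nr_pages : CHARGE_BATCH;

        if (stock_pages >= nr_pages) {           /* consume_stock() fast path */
                stock_pages -= nr_pages;
                return true;
        }
        if (!counter_try_charge(c, batch)) {
                batch = nr_pages;                /* retry with the exact size */
                if (!counter_try_charge(c, batch))
                        return false;            /* real code reclaims here */
        }
        if (batch > nr_pages)
                stock_pages += batch - nr_pages; /* refill_stock() */
        return true;
}

int main(void)
{
        struct counter memory = { .usage = 0, .max = 128 };

        try_charge(&memory, 1);
        printf("usage=%lu stock=%lu\n", memory.usage, stock_pages); /* usage=64 stock=63 */
        try_charge(&memory, 1);
        printf("usage=%lu stock=%lu\n", memory.usage, stock_pages); /* usage=64 stock=62 */
        return 0;
}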
2893 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2898 page_counter_uncharge(&memcg->memory, nr_pages);
2900 page_counter_uncharge(&memcg->memsw, nr_pages);
3067 * @nr_pages: number of pages to charge
3072 unsigned int nr_pages)
3077 ret = try_charge(memcg, gfp, nr_pages);
3082 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3090 page_counter_charge(&memcg->kmem, nr_pages);
3093 cancel_charge(memcg, nr_pages);
3102 * @nr_pages: number of pages to uncharge
3104 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
3107 page_counter_uncharge(&memcg->kmem, nr_pages);
3109 refill_stock(memcg, nr_pages);
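The kmem paths at lines 3067-3109 charge two counters in sequence: try_charge() accounts the pages to the memory counters, page_counter_try_charge() then accounts them to kmem, and cancel_charge() rolls the first step back if the second fails. A sketch of that charge-then-rollback pattern with illustrative counters:

#include <stdbool.h>
#include <stdio.h>

struct counter {
        unsigned long usage;
        unsigned long max;
};

static bool counter_try_charge(struct counter *c, unsigned long n)
{
        if (c->usage + n > c->max)
                return false;
        c->usage += n;
        return true;
}

static void counter_uncharge(struct counter *c, unsigned long n)
{
        c->usage -= n;
}

static int kmem_charge(struct counter *memory, struct counter *kmem,
                       unsigned int nr_pages)
{
        if (!counter_try_charge(memory, nr_pages))
                return -1;
        if (!counter_try_charge(kmem, nr_pages)) {
                counter_uncharge(memory, nr_pages); /* the cancel_charge() step */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct counter memory = { 0, 128 }, kmem = { 0, 16 };

        printf("%d\n", kmem_charge(&memory, &kmem, 8));  /* 0: fits both */
        printf("%d\n", kmem_charge(&memory, &kmem, 32)); /* -1: kmem is full */
        printf("%lu\n", memory.usage);                   /* 8: rolled back */
        return 0;
}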
3146 unsigned int nr_pages = 1 << order;
3152 __memcg_kmem_uncharge(memcg, nr_pages);
3188 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3191 if (nr_pages) {
3201 __memcg_kmem_uncharge(memcg, nr_pages);
3262 unsigned int nr_pages, nr_bytes;
3285 nr_pages = size >> PAGE_SHIFT;
3289 nr_pages += 1;
3291 ret = __memcg_kmem_charge(memcg, gfp, nr_pages);
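Lines 3262-3291 convert an object size in bytes to pages before calling __memcg_kmem_charge(): whole pages via size >> PAGE_SHIFT, plus one extra page when any bytes are left over. A tiny sketch of that round-up, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int bytes_to_pages(unsigned long size)
{
        unsigned int nr_pages = size >> PAGE_SHIFT;

        if (size & (PAGE_SIZE - 1)) /* partial page left over */
                nr_pages += 1;
        return nr_pages;
}

int main(void)
{
        printf("%u\n", bytes_to_pages(4096)); /* 1 */
        printf("%u\n", bytes_to_pages(5000)); /* 2 */
        return 0;
}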
3874 unsigned long nr_pages;
3878 ret = page_counter_memparse(buf, "-1", &nr_pages);
3890 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3893 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3899 ret = memcg_update_kmem_max(memcg, nr_pages);
3902 ret = memcg_update_tcp_max(memcg, nr_pages);
3907 memcg->soft_limit = nr_pages;
5755 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5782 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5783 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5791 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5792 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5795 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5796 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5800 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5801 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5809 -nr_pages);
5811 nr_pages);
5817 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5818 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5846 mem_cgroup_charge_statistics(to, page, nr_pages);
5848 mem_cgroup_charge_statistics(from, page, -nr_pages);
6438 unsigned long nr_pages = page_counter_read(&memcg->memory);
6441 if (nr_pages <= high)
6453 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6487 unsigned long nr_pages = page_counter_read(&memcg->memory);
6489 if (nr_pages <= max)
6502 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
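The limit writers around lines 6438-6502 shrink usage down to a newly written value in a loop: re-read the counter, stop once it is at or below the target, otherwise ask reclaim for exactly the overage and retry a bounded number of times (the hard-limit path can fall back to the OOM killer). A userspace sketch of that loop, where reclaim_pages() is a stand-in for try_to_free_mem_cgroup_pages() that only makes partial progress:

#include <stdbool.h>
#include <stdio.h>

static unsigned long usage = 1000; /* stand-in for page_counter_read() */

static unsigned long reclaim_pages(unsigned long nr)
{
        unsigned long freed = nr < 300 ? nr : 300; /* partial progress */

        usage -= freed;
        return freed;
}

static bool shrink_to(unsigned long limit, int max_retries)
{
        while (max_retries--) {
                unsigned long nr_pages = usage;

                if (nr_pages <= limit)
                        return true;
                if (!reclaim_pages(nr_pages - limit))
                        break; /* no progress; the hard-limit path may OOM-kill */
        }
        return usage <= limit;
}

int main(void)
{
        bool ok = shrink_to(200, 10);

        printf("%d usage=%lu\n", ok, usage); /* 1 usage=200 */
        return 0;
}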
6880 unsigned int nr_pages = thp_nr_pages(page);
6913 ret = try_charge(memcg, gfp_mask, nr_pages);
6921 mem_cgroup_charge_statistics(memcg, page, nr_pages);
6944 mem_cgroup_uncharge_swap(entry, nr_pages);
6955 unsigned long nr_pages;
6971 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages);
6973 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages);
6981 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
6991 unsigned long nr_pages;
7015 nr_pages = compound_nr(page);
7016 ug->nr_pages += nr_pages;
7021 ug->nr_kmem += nr_pages;
7106 unsigned int nr_pages;
7128 nr_pages = thp_nr_pages(newpage);
7130 page_counter_charge(&memcg->memory, nr_pages);
7132 page_counter_charge(&memcg->memsw, nr_pages);
7138 mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
7178 * @nr_pages: number of pages to charge
7180 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7183 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7190 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7194 page_counter_charge(&memcg->tcpmem, nr_pages);
7203 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7205 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
7208 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
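mem_cgroup_charge_skmem() at lines 7183-7208 tries the tcpmem counter first and, when the limit is hit, still charges the pages but returns false so the networking caller can enter memory pressure and back off; the cgroup2 branch does the equivalent with try_charge() and a __GFP_NOFAIL retry. A sketch of that try-then-force pattern with an illustrative counter:

#include <stdbool.h>
#include <stdio.h>

struct counter {
        unsigned long usage;
        unsigned long max;
};

static bool counter_try_charge(struct counter *c, unsigned long n)
{
        if (c->usage + n > c->max)
                return false;
        c->usage += n;
        return true;
}

static void counter_charge(struct counter *c, unsigned long n)
{
        c->usage += n; /* unconditional; may exceed the limit */
}

static bool charge_skmem(struct counter *tcpmem, unsigned int nr_pages)
{
        if (counter_try_charge(tcpmem, nr_pages))
                return true;           /* charge fit within the limit */
        counter_charge(tcpmem, nr_pages);
        return false;                  /* over the limit: caller throttles */
}

int main(void)
{
        struct counter tcpmem = { 0, 8 };

        printf("%d\n", charge_skmem(&tcpmem, 8)); /* 1 */
        printf("%d\n", charge_skmem(&tcpmem, 4)); /* 0: forced over the limit */
        printf("%lu\n", tcpmem.usage);            /* 12 */
        return 0;
}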
7215 * @nr_pages: number of pages to uncharge
7217 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7220 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7224 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7226 refill_stock(memcg, nr_pages);
7376 unsigned int nr_pages = thp_nr_pages(page);
7398 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7406 if (nr_pages > 1)
7407 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7408 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7410 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7418 * @nr_pages: the amount of swap space to uncharge
7420 void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7425 id = swap_cgroup_record(entry, 0, nr_pages);
7431 page_counter_uncharge(&memcg->swap, nr_pages);
7433 page_counter_uncharge(&memcg->memsw, nr_pages);
7435 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7436 mem_cgroup_id_put_many(memcg, nr_pages);