Lines Matching refs:stock

277 	 * 1) CPU0: objcg == stock->cached_objcg
281 * the stock is flushed,
284 * 92 bytes are added to stock->nr_bytes
285 * 6) CPU0: stock is flushed,
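
The 277-285 fragments step through a race between a stock flush on one CPU and an object free on another, both of which feed objcg->nr_charged_bytes. The hand-off they rely on shows up again in the drain/refill fragments further down; below is a minimal sketch of both sides. The atomic_add() and atomic_xchg() calls are assumptions, since the listing only shows the neighbouring lines (3212, 3311, 3371).

        /* Flush side (near 3311, sketch): park the sub-page remainder
         * in the shared per-objcg counter. */
        atomic_add(nr_bytes, &old->nr_charged_bytes);
        stock->nr_bytes = 0;

        /* Refill side (near 3371, sketch): when the stock is re-targeted
         * at this objcg, pull the parked remainder back into the per-CPU
         * stock. The atomic_xchg() half is assumed. */
        stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
                        ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
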
2232 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2233 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2238 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2242 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2258 * stock, and at least @nr_pages are available in that stock. Failure to
2259 * service an allocation will refill the stock.
2265 struct memcg_stock_pcp *stock;
2274 stock = this_cpu_ptr(&memcg_stock);
2275 if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2276 stock->nr_pages -= nr_pages;
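
The 2258-2276 fragments are the fast path that serves a charge from the per-CPU stock. A simplified sketch of how those lines plausibly fit together; the batch-size bail-out and the locking around the per-CPU access are assumptions, not taken from the listing.

static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
        struct memcg_stock_pcp *stock;
        bool ret = false;

        /* Assumed: requests larger than one batch bypass the stock. */
        if (nr_pages > MEMCG_CHARGE_BATCH)
                return ret;

        /* Assumed: IRQs off / local lock held around the per-CPU access;
         * the listing does not show the locking. */
        stock = this_cpu_ptr(&memcg_stock);
        if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
                stock->nr_pages -= nr_pages;    /* serve from the stock */
                ret = true;
        }

        return ret;
}
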
2288 static void drain_stock(struct memcg_stock_pcp *stock)
2290 struct mem_cgroup *old = READ_ONCE(stock->cached);
2295 if (stock->nr_pages) {
2296 page_counter_uncharge(&old->memory, stock->nr_pages);
2298 page_counter_uncharge(&old->memsw, stock->nr_pages);
2299 stock->nr_pages = 0;
2303 WRITE_ONCE(stock->cached, NULL);
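
drain_stock() (2288-2303) hands the cached pages back to the page counters and forgets the cached memcg. A sketch; the swap-accounting guard on the memsw uncharge and the css reference drop are assumptions.

static void drain_stock(struct memcg_stock_pcp *stock)
{
        struct mem_cgroup *old = READ_ONCE(stock->cached);

        if (!old)
                return;

        if (stock->nr_pages) {
                /* Give the stocked charge back to the page counters. */
                page_counter_uncharge(&old->memory, stock->nr_pages);
                if (do_memsw_account())         /* assumed guard */
                        page_counter_uncharge(&old->memsw, stock->nr_pages);
                stock->nr_pages = 0;
        }

        css_put(&old->css);     /* assumed: drop the ref taken when caching */
        WRITE_ONCE(stock->cached, NULL);
}
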
2308 struct memcg_stock_pcp *stock;
2314 * drain_stock races is that we always operate on local CPU stock
2319 stock = this_cpu_ptr(&memcg_stock);
2320 old = drain_obj_stock(stock);
2321 drain_stock(stock);
2322 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
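
drain_local_stock() (2308-2322) is the work item that empties the local CPU's stock, object stock first, then the page stock, and finally clears FLUSHING_CACHED_CHARGE so another flush can be queued. A sketch; the IRQ/locking discipline and the deferred obj_cgroup_put() are assumptions.

static void drain_local_stock(struct work_struct *dummy)
{
        struct memcg_stock_pcp *stock;
        struct obj_cgroup *old;

        /* Assumed: IRQs off / local lock held; per the comment at 2314,
         * only the local CPU's stock is ever touched here. */
        stock = this_cpu_ptr(&memcg_stock);
        old = drain_obj_stock(stock);
        drain_stock(stock);
        clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

        if (old)
                obj_cgroup_put(old);    /* assumed: released after unlocking */
}
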
2335 struct memcg_stock_pcp *stock;
2337 stock = this_cpu_ptr(&memcg_stock);
2338 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2339 drain_stock(stock);
2341 WRITE_ONCE(stock->cached, memcg);
2343 stock->nr_pages += nr_pages;
2345 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2346 drain_stock(stock);
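
The refill path (2335-2346) caches an uncharged remainder on the local CPU: if the stock currently belongs to a different memcg it is drained first, the pages are added, and an over-full stock is drained immediately. A sketch; the css_get() on the newly cached memcg is an assumption.

static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
        struct memcg_stock_pcp *stock;

        stock = this_cpu_ptr(&memcg_stock);
        if (READ_ONCE(stock->cached) != memcg) {        /* reset if necessary */
                drain_stock(stock);
                css_get(&memcg->css);           /* assumed reference */
                WRITE_ONCE(stock->cached, memcg);
        }
        stock->nr_pages += nr_pages;

        /* Don't let one CPU hoard more than a batch worth of pages. */
        if (stock->nr_pages > MEMCG_CHARGE_BATCH)
                drain_stock(stock);
}
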
2378 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2383 memcg = READ_ONCE(stock->cached);
2384 if (memcg && stock->nr_pages &&
2387 else if (obj_stock_flush_required(stock, root_memcg))
2392 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2394 drain_local_stock(&stock->work);
2396 schedule_work_on(cpu, &stock->work);
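
The 2378-2396 loop inspects every CPU's stock and flushes those caching charges for root_memcg or one of its descendants, synchronously on the local CPU and via schedule_work_on() elsewhere; test_and_set_bit() on FLUSHING_CACHED_CHARGE prevents duplicate work items. A sketch of the loop body; the descendant test, curcpu, and the surrounding serialization (mutex, RCU, migration disabling) are assumptions filled in around the visible lines.

        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                struct mem_cgroup *memcg;
                bool flush = false;

                memcg = READ_ONCE(stock->cached);
                if (memcg && stock->nr_pages &&
                    mem_cgroup_is_descendant(memcg, root_memcg)) /* assumed test */
                        flush = true;
                else if (obj_stock_flush_required(stock, root_memcg))
                        flush = true;

                /* Only one flusher per stock at a time. */
                if (flush &&
                    !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                        if (cpu == curcpu)  /* assumed: curcpu = smp_processor_id() */
                                drain_local_stock(&stock->work);
                        else
                                schedule_work_on(cpu, &stock->work);
                }
        }
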
2405 struct memcg_stock_pcp *stock;
2407 stock = &per_cpu(memcg_stock, cpu);
2408 drain_stock(stock);
3196 struct memcg_stock_pcp *stock;
3202 stock = this_cpu_ptr(&memcg_stock);
3205 * Save vmstat data in stock and skip vmstat array update unless
3209 if (READ_ONCE(stock->cached_objcg) != objcg) {
3210 old = drain_obj_stock(stock);
3212 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3214 WRITE_ONCE(stock->cached_objcg, objcg);
3215 stock->cached_pgdat = pgdat;
3216 } else if (stock->cached_pgdat != pgdat) {
3218 struct pglist_data *oldpg = stock->cached_pgdat;
3220 if (stock->nr_slab_reclaimable_b) {
3222 stock->nr_slab_reclaimable_b);
3223 stock->nr_slab_reclaimable_b = 0;
3225 if (stock->nr_slab_unreclaimable_b) {
3227 stock->nr_slab_unreclaimable_b);
3228 stock->nr_slab_unreclaimable_b = 0;
3230 stock->cached_pgdat = pgdat;
3233 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3234 : &stock->nr_slab_unreclaimable_b;
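
In 3196-3234, slab vmstat deltas are batched in the per-CPU stock: a new objcg drains and re-targets the object stock, a new pgdat flushes the two cached slab counters to the old node, and otherwise the delta simply accumulates in nr_slab_reclaimable_b / nr_slab_unreclaimable_b. The hypothetical helper below (flush_stock_vmstat() is not a name from the listing) restates the pgdat-switch branch; mod_objcg_mlstate() and the field names come from the visible lines.

static void flush_stock_vmstat(struct memcg_stock_pcp *stock,
                               struct obj_cgroup *objcg,
                               struct pglist_data *new_pgdat)
{
        struct pglist_data *oldpg = stock->cached_pgdat;

        /* Flush both cached slab counters to the node they belong to. */
        if (stock->nr_slab_reclaimable_b) {
                mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
                                  stock->nr_slab_reclaimable_b);
                stock->nr_slab_reclaimable_b = 0;
        }
        if (stock->nr_slab_unreclaimable_b) {
                mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
                                  stock->nr_slab_unreclaimable_b);
                stock->nr_slab_unreclaimable_b = 0;
        }
        stock->cached_pgdat = new_pgdat;        /* re-target the stock */
}
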
3261 struct memcg_stock_pcp *stock;
3267 stock = this_cpu_ptr(&memcg_stock);
3268 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3269 stock->nr_bytes -= nr_bytes;
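
consume_obj_stock() (3261-3269) is the byte-granular twin of consume_stock(): it only succeeds when the local stock already caches this objcg and holds enough bytes. A sketch, with the locking assumed.

static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
{
        struct memcg_stock_pcp *stock;
        bool ret = false;

        /* Assumed: per-CPU access protected by IRQs off / local lock. */
        stock = this_cpu_ptr(&memcg_stock);
        if (objcg == READ_ONCE(stock->cached_objcg) &&
            stock->nr_bytes >= nr_bytes) {
                stock->nr_bytes -= nr_bytes;
                ret = true;
        }

        return ret;
}
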
3278 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3280 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3285 if (stock->nr_bytes) {
3286 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3287 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3302 * On the next attempt to refill obj stock it will be moved
3303 * to a per-cpu stock (probably, on another CPU), see
3311 stock->nr_bytes = 0;
3315 * Flush the vmstat data in the current stock
3317 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3318 if (stock->nr_slab_reclaimable_b) {
3319 mod_objcg_mlstate(old, stock->cached_pgdat,
3321 stock->nr_slab_reclaimable_b);
3322 stock->nr_slab_reclaimable_b = 0;
3324 if (stock->nr_slab_unreclaimable_b) {
3325 mod_objcg_mlstate(old, stock->cached_pgdat,
3327 stock->nr_slab_unreclaimable_b);
3328 stock->nr_slab_unreclaimable_b = 0;
3330 stock->cached_pgdat = NULL;
3333 WRITE_ONCE(stock->cached_objcg, NULL);
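
drain_obj_stock() (3278-3333) splits the cached bytes into whole pages, which go back to the owning memcg's page stock, and a sub-page remainder, which is parked in objcg->nr_charged_bytes; it then flushes the cached slab vmstat data, clears cached_objcg, and returns the old objcg so the caller can drop the reference later. A sketch of the byte-splitting part; get_mem_cgroup_from_objcg(), the css handling, and the exact refill call are assumptions around the visible lines.

        if (stock->nr_bytes) {
                unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
                unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

                if (nr_pages) {
                        /* Assumed: resolve the owning memcg and hand the
                         * whole pages back to the per-CPU page stock. */
                        struct mem_cgroup *memcg = get_mem_cgroup_from_objcg(old);

                        __refill_stock(memcg, nr_pages);
                        css_put(&memcg->css);
                }

                /* Park the sub-page remainder in the shared per-objcg
                 * counter; a later refill pulls it back (see 3302-3303). */
                atomic_add(nr_bytes, &old->nr_charged_bytes);
                stock->nr_bytes = 0;
        }
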
3341 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3344 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
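
obj_stock_flush_required() (3341-3344) is the predicate used by the drain-all loop at 2387: a remote stock needs flushing when its cached objcg belongs to a memcg under root_memcg. A sketch; obj_cgroup_memcg() and the descendant test are assumptions consistent with 2384-2387.

static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
                                     struct mem_cgroup *root_memcg)
{
        struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
        struct mem_cgroup *memcg;

        if (objcg) {
                memcg = obj_cgroup_memcg(objcg);        /* assumed helper */
                if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
                        return true;
        }

        return false;
}
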
3359 struct memcg_stock_pcp *stock;
3366 stock = this_cpu_ptr(&memcg_stock);
3367 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3368 old = drain_obj_stock(stock);
3370 WRITE_ONCE(stock->cached_objcg, objcg);
3371 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3375 stock->nr_bytes += nr_bytes;
3377 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3378 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3379 stock->nr_bytes &= (PAGE_SIZE - 1);
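
refill_obj_stock() (3359-3379) re-targets the stock at objcg when needed (pulling back any bytes parked in objcg->nr_charged_bytes), adds the freed bytes, and, when uncharging is allowed and more than a page has accumulated, trims the excess down to whole pages. A sketch; obj_cgroup_uncharge_pages(), the allow_uncharge reset, and the reference handling are assumptions.

static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
                             bool allow_uncharge)
{
        struct memcg_stock_pcp *stock;
        struct obj_cgroup *old = NULL;
        unsigned int nr_pages = 0;

        stock = this_cpu_ptr(&memcg_stock);
        if (READ_ONCE(stock->cached_objcg) != objcg) {  /* reset if necessary */
                old = drain_obj_stock(stock);
                obj_cgroup_get(objcg);          /* assumed reference */
                WRITE_ONCE(stock->cached_objcg, objcg);
                /* Pull back any remainder parked on the objcg. */
                stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
                                ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
                allow_uncharge = true;  /* assumed: allow trimming on a switch */
        }
        stock->nr_bytes += nr_bytes;

        /* Keep at most a page's worth of bytes cached per CPU. */
        if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
                nr_pages = stock->nr_bytes >> PAGE_SHIFT;
                stock->nr_bytes &= (PAGE_SIZE - 1);
        }

        if (old)
                obj_cgroup_put(old);
        if (nr_pages)
                obj_cgroup_uncharge_pages(objcg, nr_pages);     /* assumed helper */
}
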
3406 * grab some new pages. The stock's nr_bytes will be flushed to
3409 * The stock's nr_bytes may contain enough pre-charged bytes
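
The 3406/3409 comment fragments belong to a charge path that falls back to page-granular charging when the per-CPU object stock cannot serve the request. The sketch below shows how such a path could fit together, consistent with those comments; obj_cgroup_charge_pages() and the overall function shape are assumptions, only consume_obj_stock() and refill_obj_stock() come from the listing.

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
{
        unsigned int nr_pages, nr_bytes;
        int ret;

        /* Fast path: serve the bytes from the per-CPU object stock. */
        if (consume_obj_stock(objcg, size))
                return 0;

        /* Slow path: charge whole pages (rounded up), then give the unused
         * part of the last page back to the stock as pre-charged bytes. */
        nr_pages = size >> PAGE_SHIFT;
        nr_bytes = size & (PAGE_SIZE - 1);
        if (nr_bytes)
                nr_pages += 1;

        ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);    /* assumed helper */
        if (!ret && nr_bytes)
                refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);

        return ret;
}
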