Lines matching refs:stock in mm/memcontrol.c
270 * 1) CPU0: objcg == stock->cached_objcg
274 * the stock is flushed,
277 * 92 bytes are added to stock->nr_bytes
278 * 6) CPU0: stock is flushed,
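
The four matches above (lines 270-278) all fall inside one comment, evidently in obj_cgroup_release(), explaining why objcg->nr_charged_bytes can hold at most PAGE_SIZE - 1 leftover bytes while objects are live. A hedged reconstruction of the full sequence from the same-era source (wording approximate; the original numbering really does skip step 4):

         * 1) CPU0: objcg == stock->cached_objcg
         * 2) CPU1: we do a small allocation (e.g. 92 bytes),
         *          PAGE_SIZE is charged
         * 3) CPU1: a process from another memcg is allocating something,
         *          the stock is flushed,
         *          objcg->nr_charged_bytes = PAGE_SIZE - 92
         * 5) CPU0: we release this object,
         *          92 bytes are added to stock->nr_bytes
         * 6) CPU0: stock is flushed,
         *          92 bytes are added to objcg->nr_charged_bytes

The result is nr_charged_bytes == PAGE_SIZE: a full page's worth of bytes parked on a dying objcg, which the release path then uncharges as one whole page.
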
2248 static void drain_obj_stock(struct memcg_stock_pcp *stock);
2249 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2253 static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
2256 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
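
Lines 2248-2256 are the forward declarations of the two object-stock helpers plus their fallbacks. A sketch of the surrounding block, assuming the usual CONFIG_MEMCG_KMEM guard (the #ifdef/#else/#endif lines themselves do not contain "stock", so they do not appear among the matches):

        #ifdef CONFIG_MEMCG_KMEM
        static void drain_obj_stock(struct memcg_stock_pcp *stock);
        static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
                                             struct mem_cgroup *root_memcg);
        #else
        static inline void drain_obj_stock(struct memcg_stock_pcp *stock)
        {
        }
        static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
                                             struct mem_cgroup *root_memcg)
        {
                return false;
        }
        #endif

With kmem accounting compiled out, drain_obj_stock() is a no-op and obj_stock_flush_required() always reports false, so the page-stock paths below never touch the object stock.
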
2269 * stock, and at least @nr_pages are available in that stock. Failure to
2270 * service an allocation will refill the stock.
2276 struct memcg_stock_pcp *stock;
2285 stock = this_cpu_ptr(&memcg_stock);
2286 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2287 stock->nr_pages -= nr_pages;
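
Lines 2269-2287 belong to consume_stock(), the per-cpu fast path for charging: if the local stock already caches @memcg and holds enough pre-charged pages, the charge is served without touching the page counters. A reconstruction of the whole function from the same-era source (the early bail-out for oversized requests is filled in from memory and may differ slightly between versions):

        /**
         * consume_stock: Try to consume stocked charge on this cpu.
         * @memcg: memcg to consume from.
         * @nr_pages: how many pages to charge.
         *
         * The charges will only happen if @memcg matches the current cpu's memcg
         * stock, and at least @nr_pages are available in that stock. Failure to
         * service an allocation will refill the stock.
         *
         * returns true if successful, false otherwise.
         */
        static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
        {
                struct memcg_stock_pcp *stock;
                unsigned long flags;
                bool ret = false;

                /* oversized requests go straight to the page counters */
                if (nr_pages > MEMCG_CHARGE_BATCH)
                        return ret;

                local_irq_save(flags);

                stock = this_cpu_ptr(&memcg_stock);
                if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
                        stock->nr_pages -= nr_pages;
                        ret = true;
                }

                local_irq_restore(flags);

                return ret;
        }
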
2299 static void drain_stock(struct memcg_stock_pcp *stock)
2301 struct mem_cgroup *old = stock->cached;
2306 if (stock->nr_pages) {
2307 page_counter_uncharge(&old->memory, stock->nr_pages);
2309 page_counter_uncharge(&old->memsw, stock->nr_pages);
2310 stock->nr_pages = 0;
2314 stock->cached = NULL;
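
drain_stock() (lines 2299-2314) returns the cached pre-charge to the page counters and drops the stock's reference on the cached memcg. The gaps in the matches (2302-2305, 2311-2313) are, judging by the same-era source, an early return for an empty stock and the css_put(); a sketch under that assumption:

        static void drain_stock(struct memcg_stock_pcp *stock)
        {
                struct mem_cgroup *old = stock->cached;

                if (!old)
                        return;

                if (stock->nr_pages) {
                        page_counter_uncharge(&old->memory, stock->nr_pages);
                        if (do_memsw_account())
                                page_counter_uncharge(&old->memsw, stock->nr_pages);
                        stock->nr_pages = 0;
                }

                css_put(&old->css);
                stock->cached = NULL;
        }
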
2319 struct memcg_stock_pcp *stock;
2324 * that we always operate on local CPU stock here with IRQ disabled
2328 stock = this_cpu_ptr(&memcg_stock);
2329 drain_obj_stock(stock);
2330 drain_stock(stock);
2331 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
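
drain_local_stock() (lines 2319-2331) is the work callback that flushes the local CPU's stock: it drains the object stock first, then the page stock, and only then clears the FLUSHING_CACHED_CHARGE bit so another drain can be queued. Reconstructed from the same-era source:

        static void drain_local_stock(struct work_struct *dummy)
        {
                struct memcg_stock_pcp *stock;
                unsigned long flags;

                /*
                 * The only protection from memory hotplug vs. drain_stock races is
                 * that we always operate on local CPU stock here with IRQ disabled
                 */
                local_irq_save(flags);

                stock = this_cpu_ptr(&memcg_stock);
                drain_obj_stock(stock);
                drain_stock(stock);
                clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

                local_irq_restore(flags);
        }
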
2342 struct memcg_stock_pcp *stock;
2347 stock = this_cpu_ptr(&memcg_stock);
2348 if (stock->cached != memcg) { /* reset if necessary */
2349 drain_stock(stock);
2351 stock->cached = memcg;
2353 stock->nr_pages += nr_pages;
2355 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2356 drain_stock(stock);
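
refill_stock() (lines 2342-2356) caches leftover pre-charge on the local CPU, swapping the cached memcg if it differs and draining once the stock exceeds MEMCG_CHARGE_BATCH (32 pages in this era). The unmatched line 2350 is, in the same-era source, the css_get() that pins the newly cached memcg; a sketch on that basis:

        static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
        {
                struct memcg_stock_pcp *stock;
                unsigned long flags;

                local_irq_save(flags);

                stock = this_cpu_ptr(&memcg_stock);
                if (stock->cached != memcg) { /* reset if necessary */
                        drain_stock(stock);
                        css_get(&memcg->css);
                        stock->cached = memcg;
                }
                stock->nr_pages += nr_pages;

                if (stock->nr_pages > MEMCG_CHARGE_BATCH)
                        drain_stock(stock);

                local_irq_restore(flags);
        }
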
2380 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2385 memcg = stock->cached;
2386 if (memcg && stock->nr_pages &&
2389 if (obj_stock_flush_required(stock, root_memcg))
2394 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2396 drain_local_stock(&stock->work);
2398 schedule_work_on(cpu, &stock->work);
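
Lines 2380-2398 are the heart of drain_all_stock(): for every online CPU it checks, under RCU, whether the cached page stock or object stock belongs to @root_memcg's subtree, and if so flushes it, inline for the current CPU or via schedule_work_on() for remote ones. A reconstruction of the full function; the mutex_trylock()/get_cpu() scaffolding around the loop is filled in from the same-era source:

        static void drain_all_stock(struct mem_cgroup *root_memcg)
        {
                int cpu, curcpu;

                /* If someone's already draining, avoid adding more workers. */
                if (!mutex_trylock(&percpu_charge_mutex))
                        return;

                curcpu = get_cpu();
                for_each_online_cpu(cpu) {
                        struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                        struct mem_cgroup *memcg;
                        bool flush = false;

                        rcu_read_lock();
                        memcg = stock->cached;
                        if (memcg && stock->nr_pages &&
                            mem_cgroup_is_descendant(memcg, root_memcg))
                                flush = true;
                        if (obj_stock_flush_required(stock, root_memcg))
                                flush = true;
                        rcu_read_unlock();

                        if (flush &&
                            !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
                                if (cpu == curcpu)
                                        drain_local_stock(&stock->work);
                                else
                                        schedule_work_on(cpu, &stock->work);
                        }
                }
                put_cpu();
                mutex_unlock(&percpu_charge_mutex);
        }

The test_and_set_bit() on FLUSHING_CACHED_CHARGE keeps a second drainer from queueing duplicate work; drain_local_stock() clears the bit once the flush completes.
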
2407 struct memcg_stock_pcp *stock;
2410 stock = &per_cpu(memcg_stock, cpu);
2411 drain_stock(stock);
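
memcg_hotplug_cpu_dead() (lines 2407-2411) drains a dead CPU's stock directly rather than via the work item, which is safe because the CPU is offline and can no longer race on its own per-cpu data. A minimal sketch of the stock-related part; kernels of this era also flush the dead CPU's per-cpu memcg stats in this callback, omitted here:

        static int memcg_hotplug_cpu_dead(unsigned int cpu)
        {
                struct memcg_stock_pcp *stock;

                stock = &per_cpu(memcg_stock, cpu);
                drain_stock(stock);

                /* ... this era also flushes the dead CPU's per-cpu vmstats here ... */

                return 0;
        }
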
3163 struct memcg_stock_pcp *stock;
3169 stock = this_cpu_ptr(&memcg_stock);
3170 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3171 stock->nr_bytes -= nr_bytes;
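
consume_obj_stock() (lines 3163-3171) mirrors consume_stock() at byte granularity for slab objects: a hit requires the cached objcg to match and enough stocked bytes to cover the request. Reconstructed from the same-era source:

        static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
        {
                struct memcg_stock_pcp *stock;
                unsigned long flags;
                bool ret = false;

                local_irq_save(flags);

                stock = this_cpu_ptr(&memcg_stock);
                if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
                        stock->nr_bytes -= nr_bytes;
                        ret = true;
                }

                local_irq_restore(flags);

                return ret;
        }
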
3180 static void drain_obj_stock(struct memcg_stock_pcp *stock)
3182 struct obj_cgroup *old = stock->cached_objcg;
3187 if (stock->nr_bytes) {
3188 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3189 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3207 * On the next attempt to refill obj stock it will be moved
3208 * to a per-cpu stock (probably, on another CPU), see
3216 stock->nr_bytes = 0;
3220 stock->cached_objcg = NULL;
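
drain_obj_stock() (lines 3180-3220) splits the stocked bytes into whole pages, which are uncharged immediately, and a sub-page remainder, which is flushed to the shared objcg->nr_charged_bytes counter (the comment at 3207-3208 belongs to that step). A reconstruction; the css_tryget() retry loop around the uncharge is filled in from the same-era source and fits the gap in line numbers, but may differ by version:

        static void drain_obj_stock(struct memcg_stock_pcp *stock)
        {
                struct obj_cgroup *old = stock->cached_objcg;

                if (!old)
                        return;

                if (stock->nr_bytes) {
                        unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
                        unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);

                        if (nr_pages) {
                                struct mem_cgroup *memcg;

                                rcu_read_lock();
        retry:
                                memcg = obj_cgroup_memcg(old);
                                if (unlikely(!css_tryget(&memcg->css)))
                                        goto retry;
                                rcu_read_unlock();

                                __memcg_kmem_uncharge(memcg, nr_pages);
                                css_put(&memcg->css);
                        }

                        /*
                         * The leftover is flushed to the centralized per-memcg value.
                         * On the next attempt to refill obj stock it will be moved
                         * to a per-cpu stock (probably, on another CPU), see
                         * refill_obj_stock().
                         */
                        atomic_add(nr_bytes, &old->nr_charged_bytes);
                        stock->nr_bytes = 0;
                }

                obj_cgroup_put(old);
                stock->cached_objcg = NULL;
        }

For example, a stock of 5000 bytes on a 4K-page system drains as one uncharged page (4096 bytes) plus 904 bytes added to nr_charged_bytes.
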
3223 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3228 if (stock->cached_objcg) {
3229 memcg = obj_cgroup_memcg(stock->cached_objcg);
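
obj_stock_flush_required() (lines 3223-3229) is the predicate drain_all_stock() consults at line 2389: a flush is needed only when the cached objcg's memcg lies in @root_memcg's subtree. Reconstructed:

        static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
                                             struct mem_cgroup *root_memcg)
        {
                struct mem_cgroup *memcg;

                if (stock->cached_objcg) {
                        memcg = obj_cgroup_memcg(stock->cached_objcg);
                        if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
                                return true;
                }

                return false;
        }
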
3239 struct memcg_stock_pcp *stock;
3244 stock = this_cpu_ptr(&memcg_stock);
3245 if (stock->cached_objcg != objcg) { /* reset if necessary */
3246 drain_obj_stock(stock);
3248 stock->cached_objcg = objcg;
3249 stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
3251 stock->nr_bytes += nr_bytes;
3253 if (stock->nr_bytes > PAGE_SIZE)
3254 drain_obj_stock(stock);
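
refill_obj_stock() (lines 3239-3254) is where the atomic_xchg() at 3249 pulls any bytes parked in objcg->nr_charged_bytes back into the local stock when an objcg becomes the cached one, closing the loop described in the comments at lines 270-278 and 3207-3208. A reconstruction; the obj_cgroup_get() at the unmatched line 3247 is filled in from the same-era source:

        static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
        {
                struct memcg_stock_pcp *stock;
                unsigned long flags;

                local_irq_save(flags);

                stock = this_cpu_ptr(&memcg_stock);
                if (stock->cached_objcg != objcg) { /* reset if necessary */
                        drain_obj_stock(stock);
                        obj_cgroup_get(objcg);
                        stock->cached_objcg = objcg;
                        stock->nr_bytes = atomic_xchg(&objcg->nr_charged_bytes, 0);
                }
                stock->nr_bytes += nr_bytes;

                if (stock->nr_bytes > PAGE_SIZE)
                        drain_obj_stock(stock);

                local_irq_restore(flags);
        }

Note the asymmetry with the page stock: the object stock drains once it exceeds a single page's worth of bytes, whereas the page stock holds up to MEMCG_CHARGE_BATCH pages.
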