Lines Matching defs:size
354 * The current size of the caches array is stored in memcg_nr_cache_ids. It
407 int size, int old_size)
421 new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
427 memset((void *)new->map + old_size, 0, size - old_size);
457 int nid, size, ret = 0;
463 size = memcg_shrinker_map_size;
465 map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
480 int size, old_size, ret = 0;
483 size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
485 if (size <= old_size)
495 ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
503 memcg_shrinker_map_size = size;
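
The matches above (the symbols suggest mm/memcontrol.c) trace the memcg shrinker-map resize path: memcg_expand_shrinker_maps() at 480-503 derives the bitmap size from the highest shrinker id by rounding the bit count up to whole longs, and reallocates only on growth; memcg_expand_one_shrinker_map() at 407-427 then allocates the larger map and zeroes just the new tail. A minimal userspace sketch of the same sizing rule (the kernel allocates a fresh map and copies, because readers walk the old one under RCU; realloc() here is only for brevity):

#include <stdlib.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Grow `map` so that bit `new_id` fits; zero only the newly added tail. */
static unsigned long *expand_map(unsigned long *map, size_t *old_size, int new_id)
{
        size_t size = DIV_ROUND_UP((size_t)new_id + 1, BITS_PER_LONG) *
                      sizeof(unsigned long);
        unsigned long *new;

        if (size <= *old_size)          /* already large enough */
                return map;

        new = realloc(map, size);
        if (!new)
                return NULL;            /* caller still owns the old map */
        memset((char *)new + *old_size, 0, size - *old_size);
        *old_size = size;
        return new;
}
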
963 /* pagein of a big page is an event. So, ignore page size */
1415 long size;
1430 size = *lru_size;
1431 if (WARN_ONCE(size < 0,
1433 __func__, lruvec, lru, nr_pages, size)) {
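
The fragment at 1415-1433 is the defensive accounting pattern from mem_cgroup_update_lru_size(): after the delta is applied, a negative size can only mean an accounting bug, so it is reported once and the counter is clamped to zero so later arithmetic stays sane. A hedged userspace equivalent, with a static flag standing in for WARN_ONCE():

#include <stdio.h>

/* Apply a delta to an LRU-style counter; warn once and clamp on underflow. */
static void update_lru_size(long *lru_size, long nr_pages)
{
        static int warned;
        long size;

        *lru_size += nr_pages;
        size = *lru_size;
        if (size < 0) {
                if (!warned) {          /* poor man's WARN_ONCE() */
                        fprintf(stderr, "lru_size went negative: %ld (delta %ld)\n",
                                size, nr_pages);
                        warned = 1;
                }
                *lru_size = 0;          /* clamp so subsequent math is safe */
        }
}
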
1602 u64 size;
1604 size = memcg_page_state(memcg, memory_stats[i].idx);
1605 size *= memory_stats[i].ratio;
1606 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1609 size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) +
1611 seq_buf_printf(&s, "slab %llu\n", size);
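
Lines 1602-1611 come from memory_stat_format(): each entry in the memory_stats[] table names a counter, and a per-entry ratio converts the counter's native unit (pages or bytes) to bytes before printing; "slab" is then derived as the sum of the reclaimable and unreclaimable slab counters. A self-contained sketch of that table-driven formatting, with made-up counter names and values:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's per-memcg counters. */
enum { STAT_ANON, STAT_FILE, STAT_SLAB_RECL, STAT_SLAB_UNRECL, NR_STATS };
static unsigned long state[NR_STATS] = { 12, 300, 5120, 7168 };

static const struct {
        const char *name;
        int idx;
        unsigned int ratio;     /* converts the counter's unit to bytes */
} memory_stats[] = {
        { "anon", STAT_ANON, 4096 },                    /* page-based counter */
        { "file", STAT_FILE, 4096 },
        { "slab_reclaimable", STAT_SLAB_RECL, 1 },      /* byte-based counter */
        { "slab_unreclaimable", STAT_SLAB_UNRECL, 1 },
};

int main(void)
{
        unsigned long long size;
        size_t i;

        for (i = 0; i < sizeof(memory_stats) / sizeof(memory_stats[0]); i++) {
                size = (unsigned long long)state[memory_stats[i].idx] *
                       memory_stats[i].ratio;
                printf("%s %llu\n", memory_stats[i].name, size);
        }
        /* "slab" is derived: the sum of both slab counters, already in bytes */
        size = state[STAT_SLAB_RECL] + state[STAT_SLAB_UNRECL];
        printf("slab %llu\n", size);
        return 0;
}
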
2645 * The allocating task should reclaim at least the batch size, but for
3022 int id, size;
3039 size = 2 * (id + 1);
3040 if (size < MEMCG_CACHES_MIN_SIZE)
3041 size = MEMCG_CACHES_MIN_SIZE;
3042 else if (size > MEMCG_CACHES_MAX_SIZE)
3043 size = MEMCG_CACHES_MAX_SIZE;
3045 err = memcg_update_all_list_lrus(size);
3047 memcg_nr_cache_ids = size;
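
Lines 3022-3047 are memcg_alloc_cache_id(): when a new id lands past the end of the current arrays, the target length doubles past it (2 * (id + 1)) and is clamped between MEMCG_CACHES_MIN_SIZE and MEMCG_CACHES_MAX_SIZE, which amortizes the cost of memcg_update_all_list_lrus() over many id allocations. The sizing rule in isolation (the bound values below are placeholders, not necessarily the kernel's):

#define CACHES_MIN_SIZE 4               /* placeholder bounds */
#define CACHES_MAX_SIZE (1 << 16)

/* Array length needed to hold `id`, growing geometrically within bounds. */
static int next_cache_array_size(int id)
{
        int size = 2 * (id + 1);        /* double past the new id */

        if (size < CACHES_MIN_SIZE)
                size = CACHES_MIN_SIZE;
        else if (size > CACHES_MAX_SIZE)
                size = CACHES_MAX_SIZE;
        return size;
}
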
3259 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3265 if (consume_obj_stock(objcg, size))
3285 nr_pages = size >> PAGE_SHIFT;
3286 nr_bytes = size & (PAGE_SIZE - 1);
3299 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3301 refill_obj_stock(objcg, size);
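
Lines 3259-3301 belong to the byte-granular objcg charge path: obj_cgroup_charge() first tries the per-CPU stock (consume_obj_stock), and on a miss splits the request into whole pages (size >> PAGE_SHIFT) and a sub-page remainder (size & (PAGE_SIZE - 1)); the pages are charged and the unused slice of the last page refills the stock. The split itself, hard-coding 4 KiB pages for the sketch:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        size_t size = 10000;                        /* bytes to charge */
        size_t nr_pages = size >> PAGE_SHIFT;       /* 2 whole pages */
        size_t nr_bytes = size & (PAGE_SIZE - 1);   /* 1808 trailing bytes */

        /* The kernel charges nr_pages, plus one more page if nr_bytes != 0,
         * and parks the remainder of that extra page in the per-CPU stock. */
        printf("%zu pages + %zu bytes\n", nr_pages, nr_bytes);
        return 0;
}
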
4276 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4338 int i, size, ret;
4359 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4362 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4367 new->size = size;
4372 flex_array_size(new, entries, size - 1));
4375 new->entries[size - 1].eventfd = eventfd;
4376 new->entries[size - 1].threshold = threshold;
4379 sort(new->entries, size, sizeof(*new->entries),
4384 for (i = 0; i < size; i++) {
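
Lines 4338-4384 are the threshold-registration path: a replacement array one entry larger is sized with struct_size(), the old entries are copied with flex_array_size(), the new eventfd/threshold pair is appended at index size - 1, and the whole array is re-sorted so the linear scan at 4276 can stop at the first threshold above the current usage. A userspace sketch of that grow-append-sort pattern (the kernel publishes the new array with rcu_assign_pointer() rather than returning it):

#include <stdlib.h>
#include <string.h>

struct entry { unsigned long threshold; void *eventfd; };
struct thresholds { int size; struct entry entries[]; }; /* flexible array member */

static int cmp(const void *a, const void *b)
{
        const struct entry *x = a, *y = b;

        return (x->threshold > y->threshold) - (x->threshold < y->threshold);
}

/* Return a copy of `old` with one more entry appended and re-sorted. */
static struct thresholds *add_entry(const struct thresholds *old,
                                    unsigned long threshold, void *eventfd)
{
        int size = old ? old->size + 1 : 1;
        struct thresholds *new = malloc(sizeof(*new) +
                                        size * sizeof(new->entries[0]));

        if (!new)
                return NULL;
        new->size = size;
        if (old)        /* copy existing entries (flex_array_size() in the kernel) */
                memcpy(new->entries, old->entries,
                       (size - 1) * sizeof(new->entries[0]));
        new->entries[size - 1].threshold = threshold;
        new->entries[size - 1].eventfd = eventfd;
        qsort(new->entries, size, sizeof(new->entries[0]), cmp);
        return new;
}
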
4429 int i, j, size, entries;
4449 size = entries = 0;
4450 for (i = 0; i < thresholds->primary->size; i++) {
4452 size++;
4464 if (!size) {
4470 new->size = size;
4474 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
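
Lines 4429-4474 are the unregister side of the same structure: a first pass counts the surviving entries, an empty result drops the array outright, and otherwise a smaller array is allocated and filled by a two-index compacting copy. A sketch reusing struct entry and struct thresholds from the example above:

/* Return a copy of `old` without entries matching `eventfd`; NULL if empty. */
static struct thresholds *del_entry(const struct thresholds *old, void *eventfd)
{
        struct thresholds *new;
        int i, j, size = 0;

        for (i = 0; i < old->size; i++)         /* first pass: count survivors */
                if (old->entries[i].eventfd != eventfd)
                        size++;
        if (!size)
                return NULL;                    /* nothing left: drop the array */

        new = malloc(sizeof(*new) + size * sizeof(new->entries[0]));
        if (!new)
                return NULL;
        new->size = size;
        for (i = 0, j = 0; i < old->size; i++)  /* second pass: compacting copy */
                if (old->entries[i].eventfd != eventfd)
                        new->entries[j++] = old->entries[i];
        return new;
}
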
5349 unsigned int size;
5354 size = sizeof(struct mem_cgroup);
5355 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5357 memcg = kzalloc(size, GFP_KERNEL);
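
Lines 5349-5357 show mem_cgroup_alloc() sizing struct mem_cgroup together with its trailing array of nr_node_ids per-node pointers, so a single zeroed allocation covers both the struct and nodeinfo[]. The same single-allocation layout with illustrative type names:

#include <stdlib.h>

struct pernode { long stat[4]; };       /* illustrative per-node payload */

struct group {
        long usage;                     /* ... other per-group fields ... */
        struct pernode *nodeinfo[];     /* one pointer per NUMA node */
};

/* kzalloc() equivalent: struct plus trailing pointer array, zeroed. */
static struct group *group_alloc(int nr_node_ids)
{
        size_t size = sizeof(struct group) +
                      nr_node_ids * sizeof(struct pernode *);

        return calloc(1, size);
}
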
6570 u64 size;
6574 size = lruvec_page_state(lruvec, memory_stats[i].idx);
6575 size *= memory_stats[i].ratio;
6576 seq_printf(m, " N%d=%llu", nid, size);
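
The per-node matches at 6570-6576 (what looks like memory_numa_stat_show()) apply the same ratio scaling as the memory_stat_format() sketch above; the only difference is that each counter is read per lruvec and emitted as space-separated " N<nid>=<bytes>" pairs rather than one line per stat.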