Lines Matching refs:lru

22 static inline bool list_lru_memcg_aware(struct list_lru *lru)
24 return lru->memcg_aware;
27 static void list_lru_register(struct list_lru *lru)
29 if (!list_lru_memcg_aware(lru))
33 list_add(&lru->list, &memcg_list_lrus);
37 static void list_lru_unregister(struct list_lru *lru)
39 if (!list_lru_memcg_aware(lru))
43 list_del(&lru->list);
47 static int lru_shrinker_id(struct list_lru *lru)
49 return lru->shrinker_id;
53 list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
55 if (list_lru_memcg_aware(lru) && idx >= 0) {
56 struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
60 return &lru->node[nid].lru;
64 list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
67 struct list_lru_node *nlru = &lru->node[nid];
68 struct list_lru_one *l = &nlru->lru;
71 if (!list_lru_memcg_aware(lru))
78 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
85 static void list_lru_register(struct list_lru *lru)
89 static void list_lru_unregister(struct list_lru *lru)
93 static int lru_shrinker_id(struct list_lru *lru)
98 static inline bool list_lru_memcg_aware(struct list_lru *lru)
104 list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
106 return &lru->node[nid].lru;
110 list_lru_from_kmem(struct list_lru *lru, int nid, void *ptr,
115 return &lru->node[nid].lru;
119 bool list_lru_add(struct list_lru *lru, struct list_head *item)
122 struct list_lru_node *nlru = &lru->node[nid];
128 l = list_lru_from_kmem(lru, nid, item, &memcg);
133 lru_shrinker_id(lru));
143 bool list_lru_del(struct list_lru *lru, struct list_head *item)
146 struct list_lru_node *nlru = &lru->node[nid];
151 l = list_lru_from_kmem(lru, nid, item, NULL);
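
The add/del pair above is the producer side of the API: objects are parked on the lru when they become unreferenced and pulled back off on reuse, and when the first object lands on a per-memcg list the memcg's shrinker bit is set (line 133) so the shrinker will visit it. A minimal usage sketch, assuming a hypothetical cached-object type my_object; the names are illustrative, not from this file:

    #include <linux/list_lru.h>

    /* Hypothetical cached object; only the embedded list_head matters here. */
    struct my_object {
            struct list_head lru;
            /* ... payload ... */
    };

    static struct list_lru my_lru;  /* initialised in the sketch after line 583 */

    /* Object became unreferenced: park it for the shrinker to find. */
    static void my_object_park(struct my_object *obj)
    {
            /*
             * list_lru_add() derives the node from the item's page and, for
             * memcg-aware lrus, picks the per-memcg list via
             * list_lru_from_kmem(); it returns false if the item was already
             * on a list.
             */
            list_lru_add(&my_lru, &obj->lru);
    }

    /* Object is in use again: take it back off the lru. */
    static void my_object_unpark(struct my_object *obj)
    {
            /* Returns true only if the item was actually on a list. */
            list_lru_del(&my_lru, &obj->lru);
    }
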
178 unsigned long list_lru_count_one(struct list_lru *lru,
185 l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
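
These counters back the ->count_objects() side of a shrinker: list_lru_count_one() reports one node/memcg pair, list_lru_count_node() (line 196) the per-node total. A sketch of the count callback for the hypothetical cache above; list_lru.h also provides list_lru_shrink_count(), which simply passes sc->nid and sc->memcg for you:

    #include <linux/shrinker.h>

    static unsigned long my_count_objects(struct shrinker *s,
                                          struct shrink_control *sc)
    {
            /* Parked objects on this node, for this memcg (NULL = root list). */
            return list_lru_count_one(&my_lru, sc->nid, sc->memcg);
    }
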
196 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
200 nlru = &lru->node[nid];
206 __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
210 struct list_lru_node *nlru = &lru->node[nid];
216 l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
240 * If the lru lock has been dropped, our list
254 * The lru lock has been dropped, our list traversal is
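
The truncated comments at lines 240 and 254 describe the walker's contract with the isolate callback: the callback runs under nlru->lock, and if it drops that lock it must return LRU_REMOVED_RETRY or LRU_RETRY so the walk restarts instead of trusting a list that may have changed. A minimal callback sketch under that contract, assuming the list_lru_walk_cb signature of this kernel (item, per-memcg list, the node's spinlock, opaque cb_arg) and a hypothetical my_object_is_busy() helper:

    static enum lru_status my_isolate(struct list_head *item,
                                      struct list_lru_one *list,
                                      spinlock_t *lock, void *cb_arg)
    {
            struct list_head *dispose = cb_arg;
            struct my_object *obj = container_of(item, struct my_object, lru);

            /* Leave busy objects alone; the walker moves on to the next one. */
            if (my_object_is_busy(obj))     /* hypothetical helper */
                    return LRU_SKIP;

            /*
             * Unlink from the lru (lock still held by the walker) and collect
             * the object on a private list so it can be freed outside the lock.
             */
            list_lru_isolate_move(list, item, dispose);
            return LRU_REMOVED;
    }
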
268 list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
272 struct list_lru_node *nlru = &lru->node[nid];
276 ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
284 list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
288 struct list_lru_node *nlru = &lru->node[nid];
292 ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
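
list_lru_walk_one() takes nlru->lock with spin_lock(); the _irq variant (line 284) does the same walk under spin_lock_irq() for callers that may race with IRQ-context users. A shrinker's ->scan_objects() typically drives one of them; list_lru.h also offers list_lru_shrink_walk() as the sc-based wrapper. A sketch continuing the hypothetical shrinker (my_cachep is the cache introduced in the allocation sketch after line 497):

    #include <linux/slab.h>

    static unsigned long my_scan_objects(struct shrinker *s,
                                         struct shrink_control *sc)
    {
            LIST_HEAD(dispose);
            unsigned long freed;

            /* Isolate up to sc->nr_to_scan objects from this node/memcg. */
            freed = list_lru_walk_one(&my_lru, sc->nid, sc->memcg,
                                      my_isolate, &dispose, &sc->nr_to_scan);

            /* Free the isolated objects now that no lru lock is held. */
            while (!list_empty(&dispose)) {
                    struct my_object *obj =
                            list_first_entry(&dispose, struct my_object, lru);

                    list_del_init(&obj->lru);
                    kmem_cache_free(my_cachep, obj);
            }
            return freed;
    }
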
298 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
304 isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
308 if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
312 xa_for_each(&lru->xa, index, mlru) {
313 struct list_lru_node *nlru = &lru->node[nid];
316 isolated += __list_lru_walk_one(lru, nid, index,
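
list_lru_walk_node() first walks the node's root list and then, for memcg-aware lrus, every per-memcg list found by xa_for_each (line 312), decrementing *nr_to_walk as it goes; the list_lru_walk() helper in list_lru.h wraps the same call in a loop over all nodes. A drain-everything sketch reusing my_isolate:

    /* Drop every parked object on every node, e.g. before module unload. */
    static void my_cache_drain(void)
    {
            unsigned long nr = ULONG_MAX;
            int nid;

            for_each_node_state(nid, N_NORMAL_MEMORY) {
                    LIST_HEAD(dispose);

                    list_lru_walk_node(&my_lru, nid, my_isolate, &dispose, &nr);
                    /* free "dispose" exactly as in my_scan_objects() above */
            }
    }
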
353 static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
355 struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
360 * is under lru->node[nid]->lock, which can serve as a RCU
367 static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
370 xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
371 lru->memcg_aware = memcg_aware;
374 static void memcg_destroy_list_lru(struct list_lru *lru)
376 XA_STATE(xas, &lru->xa, 0);
379 if (!list_lru_memcg_aware(lru))
390 static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
393 struct list_lru_node *nlru = &lru->node[nid];
403 src = list_lru_from_memcg_idx(lru, nid, src_idx);
406 dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
412 set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
419 static void memcg_reparent_list_lru(struct list_lru *lru,
425 memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);
427 memcg_list_lru_free(lru, src_idx);
433 struct list_lru *lru;
459 list_for_each_entry(lru, &memcg_list_lrus, list)
460 memcg_reparent_list_lru(lru, src_idx, parent);
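
Reparenting is what keeps parked objects visible after their memcg dies: memcg_reparent_list_lrus() walks every lru on memcg_list_lrus (maintained by list_lru_register() above) and, for each node, moves the dying memcg's objects onto the parent's list before memcg_list_lru_free() erases the xarray slot. The fragments at lines 393-412 only show part of the per-node move; the following is a reconstruction of its gist and may differ in detail from the exact source:

    static void reparent_one_node(struct list_lru *lru, int nid,
                                  int src_idx, struct mem_cgroup *dst_memcg)
    {
            struct list_lru_node *nlru = &lru->node[nid];
            struct list_lru_one *src, *dst;

            /* add/del may run under an IRQ-safe lock, so disable IRQs here too */
            spin_lock_irq(&nlru->lock);

            src = list_lru_from_memcg_idx(lru, nid, src_idx);
            dst = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(dst_memcg));
            if (src && src->nr_items) {
                    list_splice_init(&src->list, &dst->list);
                    dst->nr_items += src->nr_items;
                    src->nr_items = 0;
                    /* make sure the parent's shrinker visits this node */
                    set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
            }

            spin_unlock_irq(&nlru->lock);
    }
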
465 struct list_lru *lru)
469 return idx < 0 || xa_load(&lru->xa, idx);
472 int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
481 XA_STATE(xas, &lru->xa, 0);
483 if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
497 if (memcg_list_lru_allocated(memcg, lru))
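
memcg_list_lru_alloc() is what guarantees that the xa_load() at line 56 finds an entry: it populates lru->xa for the memcg (and its ancestors) before the first object is parked. Callers rarely invoke it directly; allocating the object with kmem_cache_alloc_lru() both charges it to the current memcg and pre-allocates the per-memcg list. A sketch, assuming a hypothetical my_cachep kmem_cache created with SLAB_ACCOUNT:

    #include <linux/slab.h>

    static struct kmem_cache *my_cachep;    /* created with SLAB_ACCOUNT */

    static struct my_object *my_object_alloc(gfp_t gfp)
    {
            /*
             * kmem_cache_alloc_lru() charges the object and, via
             * memcg_list_lru_alloc(), makes sure the per-memcg list exists
             * in my_lru.xa before a later list_lru_add() needs it.
             */
            return kmem_cache_alloc_lru(my_cachep, &my_lru, gfp);
    }
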
550 static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
554 static void memcg_destroy_list_lru(struct list_lru *lru)
559 int __list_lru_init(struct list_lru *lru, bool memcg_aware,
566 lru->shrinker_id = shrinker->id;
568 lru->shrinker_id = -1;
571 lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
572 if (!lru->node)
576 spin_lock_init(&lru->node[i].lock);
578 lockdep_set_class(&lru->node[i].lock, key);
579 init_one_lru(&lru->node[i].lru);
582 memcg_init_list_lru(lru, memcg_aware);
583 list_lru_register(lru);
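
__list_lru_init() is normally reached through the list_lru_init()/list_lru_init_memcg() wrappers; a memcg-aware lru must be paired with a shrinker that already has an id, since line 566 copies shrinker->id, which is why callers such as fs/super.c prealloc the shrinker first. A sketch tying the hypothetical pieces together (the name argument to prealloc_shrinker() exists only on kernels with shrinker names; adjust for older trees):

    static struct shrinker my_shrinker = {
            .count_objects  = my_count_objects,
            .scan_objects   = my_scan_objects,
            .seeks          = DEFAULT_SEEKS,
            .flags          = SHRINKER_MEMCG_AWARE | SHRINKER_NUMA_AWARE,
    };

    static int __init my_cache_init(void)
    {
            int err;

            /* Allocate the shrinker id first; __list_lru_init() copies it. */
            err = prealloc_shrinker(&my_shrinker, "my-cache");
            if (err)
                    return err;

            err = list_lru_init_memcg(&my_lru, &my_shrinker);
            if (err) {
                    free_prealloc_shrinker(&my_shrinker);
                    return err;
            }

            /* creation of my_cachep omitted */
            register_shrinker_prepared(&my_shrinker);
            return 0;
    }
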
589 void list_lru_destroy(struct list_lru *lru)
592 if (!lru->node)
595 list_lru_unregister(lru);
597 memcg_destroy_list_lru(lru);
598 kfree(lru->node);
599 lru->node = NULL;
602 lru->shrinker_id = -1;
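
list_lru_destroy() unregisters the lru, tears down the per-memcg xarray and frees the node array; the early return when lru->node is NULL (line 592) makes it safe to call on an lru whose init failed or never ran. The shrinker and the parked objects have to go first, since the count and walk paths dereference lru->node. A teardown sketch mirroring the init above:

    static void __exit my_cache_exit(void)
    {
            unregister_shrinker(&my_shrinker);      /* stop concurrent walks */
            my_cache_drain();                       /* free everything parked */
            list_lru_destroy(&my_lru);
            kmem_cache_destroy(my_cachep);
    }
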