/kernel/linux/linux-6.6/fs/btrfs/
lru_cache.c
    17   INIT_LIST_HEAD(&cache->lru_list);  in btrfs_lru_cache_init()
    57   list_move_tail(&entry->lru_list, &cache->lru_list);  in btrfs_lru_cache_lookup()
    79   list_del(&entry->lru_list);  in btrfs_lru_cache_remove()
    137  lru_entry = list_first_entry(&cache->lru_list,  in btrfs_lru_cache_store()
    139      lru_list);  in btrfs_lru_cache_store()
    143  list_add_tail(&new_entry->lru_list, &cache->lru_list);  in btrfs_lru_cache_store()
    161  list_for_each_entry_safe(entry, tmp, &cache->lru_list, lru_list)  in btrfs_lru_cache_clear()
    [all...]

lru_cache.h
    18   struct list_head lru_list;  member
    42   struct list_head lru_list;  member
    51   list_for_each_entry_safe_reverse((entry), (tmp), &(cache)->lru_list, lru_list)
    61   return list_first_entry_or_null(&cache->lru_list,  in btrfs_lru_cache_lru_entry()
    62       struct btrfs_lru_cache_entry, lru_list);  in btrfs_lru_cache_lru_entry()
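
Taken together, these hits trace the usual LRU pattern built on struct list_head: entries are appended at the tail, a lookup hit moves the entry back to the tail, and the head is therefore always the least-recently-used victim. The sketch below is a minimal, self-contained illustration of that pattern, not the btrfs code itself; struct my_cache, struct my_entry, cache_lookup() and cache_store() are invented names, and the real lru_cache.c pairs the list with a separate index structure so lookups do not have to scan the list.

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical entry/cache types mirroring the lru_list members above. */
    struct my_entry {
    	u64 key;
    	struct list_head lru_list;	/* linked into my_cache->lru_list */
    };

    struct my_cache {
    	struct list_head lru_list;	/* head = least recently used entry */
    	unsigned int size;
    	unsigned int limit;
    };

    static void cache_init(struct my_cache *cache, unsigned int limit)
    {
    	INIT_LIST_HEAD(&cache->lru_list);
    	cache->size = 0;
    	cache->limit = limit;
    }

    /* A hit refreshes the entry by moving it to the tail (most recently used). */
    static struct my_entry *cache_lookup(struct my_cache *cache, u64 key)
    {
    	struct my_entry *entry;

    	list_for_each_entry(entry, &cache->lru_list, lru_list) {
    		if (entry->key == key) {
    			list_move_tail(&entry->lru_list, &cache->lru_list);
    			return entry;
    		}
    	}
    	return NULL;
    }

    /* Insert a new entry, evicting the head (oldest) once the cache is full. */
    static int cache_store(struct my_cache *cache, u64 key, gfp_t gfp)
    {
    	struct my_entry *new_entry;

    	if (cache->size >= cache->limit) {
    		struct my_entry *victim;

    		victim = list_first_entry(&cache->lru_list,
    					  struct my_entry, lru_list);
    		list_del(&victim->lru_list);
    		kfree(victim);
    		cache->size--;
    	}

    	new_entry = kmalloc(sizeof(*new_entry), gfp);
    	if (!new_entry)
    		return -ENOMEM;
    	new_entry->key = key;
    	list_add_tail(&new_entry->lru_list, &cache->lru_list);
    	cache->size++;
    	return 0;
    }
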
zstd.c
    50   struct list_head lru_list;  member
    78   struct list_head lru_list;  member
    102  * This scans the lru_list and attempts to reclaim any workspace that hasn't
    114  if (list_empty(&wsm.lru_list)) {  in zstd_reclaim_timer_fn()
    119  list_for_each_prev_safe(pos, next, &wsm.lru_list) {  in zstd_reclaim_timer_fn()
    121      lru_list);  in zstd_reclaim_timer_fn()
    132  list_del(&victim->lru_list);  in zstd_reclaim_timer_fn()
    141  if (!list_empty(&wsm.lru_list))  in zstd_reclaim_timer_fn()
    186  INIT_LIST_HEAD(&wsm.lru_list);  in zstd_init_workspace_manager()
    211  list_del(&workspace->lru_list);  in zstd_cleanup_workspace_manager()
    [all...]
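
The zstd hits show the other common direction of traversal: a periodic reclaim pass walks the list from the idle end with list_for_each_prev_safe() and frees workspaces that have not been used for a while, re-arming the timer only if anything is left. A rough, self-contained sketch of that shape follows; struct zstd_ws, reclaim_timer_fn(), the wsm singleton and the idle threshold are stand-ins for this example, not the actual btrfs symbols beyond what the hits above show.

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/timer.h>

    /* Hypothetical workspace type; only the fields needed for the sketch. */
    struct zstd_ws {
    	void *mem;
    	unsigned long last_used;	/* jiffies of last release */
    	struct list_head lru_list;
    };

    /* wsm.lock, wsm.lru_list and wsm.timer are initialised elsewhere (elided). */
    static struct {
    	spinlock_t lock;
    	struct list_head lru_list;	/* head = most recently returned */
    	struct timer_list timer;
    } wsm;

    #define WS_RECLAIM_JIFFIES (307 * HZ)	/* arbitrary idle threshold for the sketch */

    static void reclaim_timer_fn(struct timer_list *timer)
    {
    	unsigned long reclaim_threshold = jiffies - WS_RECLAIM_JIFFIES;
    	struct list_head *pos, *next;

    	spin_lock(&wsm.lock);

    	if (list_empty(&wsm.lru_list)) {
    		spin_unlock(&wsm.lock);
    		return;			/* nothing cached, let the timer lapse */
    	}

    	/* Walk from the tail, where the longest-idle workspaces sit. */
    	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
    		struct zstd_ws *victim = container_of(pos, struct zstd_ws,
    						      lru_list);

    		if (time_after(victim->last_used, reclaim_threshold))
    			break;		/* everything further in is newer */

    		list_del(&victim->lru_list);
    		kfree(victim->mem);
    		kfree(victim);
    	}

    	/* Re-arm only while there is still something left to age out. */
    	if (!list_empty(&wsm.lru_list))
    		mod_timer(&wsm.timer, jiffies + WS_RECLAIM_JIFFIES);

    	spin_unlock(&wsm.lock);
    }
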
/kernel/linux/linux-5.10/include/linux/ |
mm_inline.h
    29   enum lru_list lru, enum zone_type zid,  in __update_lru_size()
    40   enum lru_list lru, enum zone_type zid,  in update_lru_size()
    50   struct lruvec *lruvec, enum lru_list lru)  in add_page_to_lru_list()
    57   struct lruvec *lruvec, enum lru_list lru)  in add_page_to_lru_list_tail()
    64   struct lruvec *lruvec, enum lru_list lru)  in del_page_from_lru_list()
    78   static inline enum lru_list page_lru_base_type(struct page *page)  in page_lru_base_type()
    97   static __always_inline enum lru_list page_off_lru(struct page *page)  in page_off_lru()
    99   enum lru_list lru;  in page_off_lru()
    121  static __always_inline enum lru_list page_lru(struct page *page)  in page_lru()
    123  enum lru_list lr  in page_lru()
    [all...]
/kernel/linux/linux-5.10/fs/btrfs/ |
zstd.c
    50   struct list_head lru_list;  member
    78   struct list_head lru_list;  member
    100  * This scans the lru_list and attempts to reclaim any workspace that hasn't
    110  if (list_empty(&wsm.lru_list)) {  in zstd_reclaim_timer_fn()
    115  list_for_each_prev_safe(pos, next, &wsm.lru_list) {  in zstd_reclaim_timer_fn()
    117      lru_list);  in zstd_reclaim_timer_fn()
    128  list_del(&victim->lru_list);  in zstd_reclaim_timer_fn()
    137  if (!list_empty(&wsm.lru_list))  in zstd_reclaim_timer_fn()
    182  INIT_LIST_HEAD(&wsm.lru_list);  in zstd_init_workspace_manager()
    207  list_del(&workspace->lru_list);  in zstd_cleanup_workspace_manager()
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/ |
sched_policy.c
    54   struct list_head lru_list;  member
    108  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in gvt_balance_timeslice()
    113  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in gvt_balance_timeslice()
    122  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in gvt_balance_timeslice()
    186  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in find_busy_vgpu()
    227  /* Move the last used vGPU to the tail of lru_list */  in tbs_sched_func()
    228  list_del_init(&vgpu_data->lru_list);  in tbs_sched_func()
    229  list_add_tail(&vgpu_data->lru_list,  in tbs_sched_func()
    321  INIT_LIST_HEAD(&data->lru_list);  in tbs_sched_init_vgpu()
    347  if (!list_empty(&vgpu_data->lru_list))  in tbs_sched_start_schedule()
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/ |
sched_policy.c
    54   struct list_head lru_list;  member
    108  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in gvt_balance_timeslice()
    113  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in gvt_balance_timeslice()
    122  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in gvt_balance_timeslice()
    186  vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);  in find_busy_vgpu()
    227  /* Move the last used vGPU to the tail of lru_list */  in tbs_sched_func()
    228  list_del_init(&vgpu_data->lru_list);  in tbs_sched_func()
    229  list_add_tail(&vgpu_data->lru_list,  in tbs_sched_func()
    321  INIT_LIST_HEAD(&data->lru_list);  in tbs_sched_init_vgpu()
    347  if (!list_empty(&vgpu_data->lru_list))  in tbs_sched_start_schedule()
    [all...]
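
Both kernel branches use this lru_list the same way: vGPUs waiting for a timeslice sit on a list, the scheduler scans from the least recently run end for one with work pending, and after a vGPU runs it is rotated to the tail so every vGPU eventually gets picked again. A minimal sketch of that rotation, with invented vgpu_sched_data fields and function names standing in for the real structures:

    #include <linux/list.h>

    /* Hypothetical per-vGPU scheduling data; mirrors the lru_list member above. */
    struct vgpu_sched_data {
    	bool active;			/* stand-in for "has pending workload" */
    	struct list_head lru_list;	/* position in the round-robin queue */
    };

    /* Pick the first runnable vGPU, scanning from the least recently run end. */
    static struct vgpu_sched_data *find_next_vgpu(struct list_head *lru_head)
    {
    	struct vgpu_sched_data *vgpu_data;

    	list_for_each_entry(vgpu_data, lru_head, lru_list) {
    		if (vgpu_data->active)
    			return vgpu_data;
    	}
    	return NULL;
    }

    /* After a vGPU has used its timeslice, rotate it to the tail of the queue. */
    static void rotate_vgpu(struct list_head *lru_head,
    			struct vgpu_sched_data *vgpu_data)
    {
    	list_del_init(&vgpu_data->lru_list);
    	list_add_tail(&vgpu_data->lru_list, lru_head);
    }
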
/kernel/linux/linux-5.10/fs/xfs/ |
xfs_mru_cache.c
    132  struct list_head *lru_list;  in _xfs_mru_cache_migrate()  local
    145  lru_list = mru->lists + mru->lru_grp;  in _xfs_mru_cache_migrate()
    146  if (!list_empty(lru_list))  in _xfs_mru_cache_migrate()
    147  list_splice_init(lru_list, mru->reap_list.prev);  in _xfs_mru_cache_migrate()
    171  lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);  in _xfs_mru_cache_migrate()
    172  if (!list_empty(lru_list))  in _xfs_mru_cache_migrate()
/kernel/linux/linux-6.6/fs/xfs/ |
xfs_mru_cache.c
    132  struct list_head *lru_list;  in _xfs_mru_cache_migrate()  local
    145  lru_list = mru->lists + mru->lru_grp;  in _xfs_mru_cache_migrate()
    146  if (!list_empty(lru_list))  in _xfs_mru_cache_migrate()
    147  list_splice_init(lru_list, mru->reap_list.prev);  in _xfs_mru_cache_migrate()
    171  lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count);  in _xfs_mru_cache_migrate()
    172  if (!list_empty(lru_list))  in _xfs_mru_cache_migrate()
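
Unlike the single-list users above, the XFS MRU cache keeps an array of time-bucketed lists and, on each migration pass, splices whole expired buckets onto a reap list in one operation instead of moving entries one by one. A condensed sketch of that splice follows; struct mru_cache and mru_retire_groups() are trimmed-down stand-ins for the real bookkeeping.

    #include <linux/list.h>

    /* Hypothetical, trimmed-down MRU bookkeeping: one list per time group. */
    struct mru_cache {
    	struct list_head *lists;	/* grp_count time-bucketed lists */
    	struct list_head reap_list;	/* expired elements awaiting free */
    	unsigned int grp_count;
    	unsigned int lru_grp;		/* index of the oldest group */
    };

    /*
     * Retire 'expired' whole groups: each expired group's elements are spliced
     * onto the tail of the reap list, then the slot is reused as the newest group.
     */
    static void mru_retire_groups(struct mru_cache *mru, unsigned int expired)
    {
    	while (expired--) {
    		struct list_head *lru_list = mru->lists + mru->lru_grp;

    		if (!list_empty(lru_list))
    			list_splice_init(lru_list, mru->reap_list.prev);

    		mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count;
    	}
    }
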
/kernel/linux/linux-6.6/include/linux/ |
mm_inline.h
    38   enum lru_list lru, enum zone_type zid,  in __update_lru_size()
    52   enum lru_list lru, enum zone_type zid,  in update_lru_size()
    86   static __always_inline enum lru_list folio_lru_list(struct folio *folio)  in folio_lru_list()
    88   enum lru_list lru;  in folio_lru_list()
    184  enum lru_list lru = type * LRU_INACTIVE_FILE;  in lru_gen_update_size()
    324  enum lru_list lru = folio_lru_list(folio);  in lruvec_add_folio()
    338  enum lru_list lru = folio_lru_list(folio);  in lruvec_add_folio_tail()
    352  enum lru_list lru = folio_lru_list(folio);  in lruvec_del_folio()
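
In both branches the pattern is the same: the page or folio carries a list_head, page_lru()/folio_lru_list() derives which enum lru_list it belongs on from its flags, and the add/del helpers pair the list operation with an accounting update. A stripped-down sketch of that add/del pair; the my_lruvec and my_folio types, the lru_kind field and the plain counters are simplifications invented for this example, the real helpers update per-zone statistics instead.

    #include <linux/list.h>

    /* Simplified stand-ins; the real lruvec and folio carry far more state. */
    enum lru_list {
    	LRU_INACTIVE_ANON,
    	LRU_ACTIVE_ANON,
    	LRU_INACTIVE_FILE,
    	LRU_ACTIVE_FILE,
    	LRU_UNEVICTABLE,
    	NR_LRU_LISTS
    };

    struct my_lruvec {
    	struct list_head lists[NR_LRU_LISTS];
    	unsigned long nr_pages[NR_LRU_LISTS];	/* toy accounting */
    };

    struct my_folio {
    	struct list_head lru;
    	enum lru_list lru_kind;		/* real code derives this from flags */
    	unsigned long nr_pages;
    };

    static inline void my_lruvec_add_folio(struct my_lruvec *lruvec,
    					struct my_folio *folio)
    {
    	enum lru_list lru = folio->lru_kind;

    	lruvec->nr_pages[lru] += folio->nr_pages;
    	list_add(&folio->lru, &lruvec->lists[lru]);
    }

    static inline void my_lruvec_del_folio(struct my_lruvec *lruvec,
    					struct my_folio *folio)
    {
    	enum lru_list lru = folio->lru_kind;

    	list_del(&folio->lru);
    	lruvec->nr_pages[lru] -= folio->nr_pages;
    }
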
/kernel/linux/linux-5.10/net/netfilter/ |
xt_recent.c
    68   struct list_head lru_list;  member
    84   struct list_head lru_list;  member
    147  list_del(&e->lru_list);  in recent_entry_remove()
    163  e = list_entry(t->lru_list.next, struct recent_entry, lru_list);  in recent_entry_reap()
    186  e = list_entry(t->lru_list.next, struct recent_entry, lru_list);  in recent_entry_init()
    204  list_add_tail(&e->lru_list, &t->lru_list);  in recent_entry_init()
    215  list_move_tail(&e->lru_list,  in recent_entry_update()
    [all...]
/kernel/linux/linux-6.6/net/netfilter/ |
xt_recent.c
    68   struct list_head lru_list;  member
    84   struct list_head lru_list;  member
    147  list_del(&e->lru_list);  in recent_entry_remove()
    163  e = list_entry(t->lru_list.next, struct recent_entry, lru_list);  in recent_entry_reap()
    186  e = list_entry(t->lru_list.next, struct recent_entry, lru_list);  in recent_entry_init()
    204  list_add_tail(&e->lru_list, &t->lru_list);  in recent_entry_init()
    215  list_move_tail(&e->lru_list,  in recent_entry_update()
    [all...]
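
xt_recent is the classic capped-table variant: new entries go to the tail of the per-table lru_list, a fresh sighting moves an entry back to the tail, and when the table is full the head (t->lru_list.next) is reaped. A minimal sketch of that reap/update pair, with the table and entry shapes reduced to what the hits above show; the function bodies here are illustrative, not the exact upstream code.

    #include <linux/list.h>
    #include <linux/slab.h>

    struct recent_entry {
    	struct list_head lru_list;
    	/* address, timestamps, ... elided */
    };

    struct recent_table {
    	struct list_head lru_list;	/* head = oldest entry */
    	unsigned int entries;
    };

    /* Drop the least recently seen entry to make room for a new one. */
    static void recent_entry_reap(struct recent_table *t)
    {
    	struct recent_entry *e;

    	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
    	list_del(&e->lru_list);
    	kfree(e);
    	t->entries--;
    }

    /* A fresh sighting makes the entry the most recently used again. */
    static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
    {
    	list_move_tail(&e->lru_list, &t->lru_list);
    }
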
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/ |
mmu_rb.c
    103  INIT_LIST_HEAD(&h->lru_list);  in hfi1_mmu_rb_register()
    177  list_add_tail(&mnode->list, &handler->lru_list);  in hfi1_mmu_rb_insert()
    193  list_move_tail(&node->list, &handler->lru_list);  in hfi1_mmu_rb_get_first()
    246  * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
    276  list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {  in hfi1_mmu_rb_evict()
    313  /* Remove from rb tree and lru_list. */  in mmu_notifier_range_start()

mmu_rb.h
    83   struct list_head lru_list;  member
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/ |
mmu_rb.c
    64   INIT_LIST_HEAD(&h->lru_list);  in hfi1_mmu_rb_register()
    139  list_add_tail(&mnode->list, &handler->lru_list);  in hfi1_mmu_rb_insert()
    155  list_move_tail(&node->list, &handler->lru_list);  in hfi1_mmu_rb_get_first()
    209  * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
    239  list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {  in hfi1_mmu_rb_evict()
    277  /* Remove from rb tree and lru_list. */  in mmu_notifier_range_start()

mmu_rb.h
    46   struct list_head lru_list;  member
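
The hfi1 driver keeps each registration in two structures at once: an rb-tree (handler->rb_root) for address lookup and an lru_list for eviction order, so eviction walks the list while removal has to take the node out of both. Roughly, the eviction walk looks like the sketch below; the node and handler types are abbreviated and the "busy" test is a stand-in for the driver's real evictability callback.

    #include <linux/list.h>

    struct mmu_rb_node {
    	struct list_head list;		/* position on handler->lru_list */
    	bool busy;			/* stand-in for "still referenced" */
    	/* rb-tree linkage elided */
    };

    struct mmu_rb_handler {
    	struct list_head lru_list;	/* head = least recently used node */
    	/* rb_root elided */
    };

    /*
     * Collect up to 'goal' idle nodes onto del_list, oldest first.  The caller
     * then removes them from the tree and frees them outside the handler lock.
     */
    static unsigned int mmu_rb_evict(struct mmu_rb_handler *handler,
    				 struct list_head *del_list,
    				 unsigned int goal)
    {
    	struct mmu_rb_node *rbnode, *ptr;
    	unsigned int evicted = 0;

    	list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
    		if (evicted >= goal)
    			break;
    		if (rbnode->busy)
    			continue;	/* skip nodes still in use */

    		list_move_tail(&rbnode->list, del_list);
    		evicted++;
    	}
    	return evicted;
    }
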
/kernel/linux/linux-5.10/drivers/md/ |
dm-bufio.c
    72   * are linked to lru[LIST_CLEAN] with their lru_list field.
    75   * lru[LIST_DIRTY] with their lru_list field. When the write
    138  struct list_head lru_list;  member
    510  list_add(&b->lru_list, &c->lru[dirty]);  in __link_buffer()
    528  list_del(&b->lru_list);  in __unlink_buffer()
    547  list_move(&b->lru_list, &c->lru[dirty]);  in __relink_lru()
    802  list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {  in __get_unclaimed_buffer()
    814  list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {  in __get_unclaimed_buffer()
    902  struct dm_buffer, lru_list);  in __alloc_buffer_wait_no_callback()
    903  list_del(&b->lru_list);  in __alloc_buffer_wait_no_callback()
    [all...]
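
dm-bufio is the two-list variant the comments at lines 72-75 describe: every buffer sits on exactly one of lru[LIST_CLEAN] or lru[LIST_DIRTY] via its lru_list member, and state changes simply relink the buffer from one list to the other. A compressed sketch of the link/relink helpers, with the buffer and client structures cut down to those two lists and the names slightly simplified:

    #include <linux/list.h>

    enum { LIST_CLEAN, LIST_DIRTY, LIST_SIZE };

    struct buffer {
    	struct list_head lru_list;	/* on exactly one of client->lru[] */
    	int list_mode;			/* LIST_CLEAN or LIST_DIRTY */
    };

    struct client {
    	struct list_head lru[LIST_SIZE];
    	unsigned long n_buffers[LIST_SIZE];
    };

    static void link_buffer(struct client *c, struct buffer *b, int dirty)
    {
    	c->n_buffers[dirty]++;
    	b->list_mode = dirty;
    	list_add(&b->lru_list, &c->lru[dirty]);	/* newest at the head */
    }

    static void unlink_buffer(struct client *c, struct buffer *b)
    {
    	c->n_buffers[b->list_mode]--;
    	list_del(&b->lru_list);
    }

    /* e.g. after a writeback completes, move the buffer from DIRTY to CLEAN. */
    static void relink_lru(struct client *c, struct buffer *b, int dirty)
    {
    	c->n_buffers[b->list_mode]--;
    	c->n_buffers[dirty]++;
    	b->list_mode = dirty;
    	list_move(&b->lru_list, &c->lru[dirty]);
    }
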
/kernel/linux/linux-5.10/drivers/gpu/drm/vmwgfx/ |
vmwgfx_resource.c
    707  struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];  in vmw_resource_validate()  local
    724  if (list_empty(lru_list) || !res->func->may_evict) {  in vmw_resource_validate()
    733  (list_first_entry(lru_list, struct vmw_resource,  in vmw_resource_validate()
    743  list_add_tail(&evict_res->lru_head, lru_list);  in vmw_resource_validate()
    917  struct list_head *lru_list = &dev_priv->res_lru[type];  in vmw_resource_evict_type()  local
    926  if (list_empty(lru_list))  in vmw_resource_evict_type()
    930  list_first_entry(lru_list, struct vmw_resource,  in vmw_resource_evict_type()
    939  list_add_tail(&evict_res->lru_head, lru_list);  in vmw_resource_evict_type()
/kernel/linux/linux-6.6/drivers/gpu/drm/vmwgfx/ |
vmwgfx_resource.c
    678  struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];  in vmw_resource_validate()  local
    695  if (list_empty(lru_list) || !res->func->may_evict) {  in vmw_resource_validate()
    704  (list_first_entry(lru_list, struct vmw_resource,  in vmw_resource_validate()
    714  list_add_tail(&evict_res->lru_head, lru_list);  in vmw_resource_validate()
    888  struct list_head *lru_list = &dev_priv->res_lru[type];  in vmw_resource_evict_type()  local
    897  if (list_empty(lru_list))  in vmw_resource_evict_type()
    901  list_first_entry(lru_list, struct vmw_resource,  in vmw_resource_evict_type()
    910  list_add_tail(&evict_res->lru_head, lru_list);  in vmw_resource_evict_type()
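
vmwgfx keeps one LRU list per resource type (dev_priv->res_lru[type]) and evicts from the head when validation needs memory; a resource that cannot be evicted right now is simply re-queued at the tail and the loop tries the next candidate. The shape of that loop, heavily reduced, with an invented evict callback and retry cap standing in for the real resource backend and error handling:

    #include <linux/list.h>

    struct resource {
    	struct list_head lru_head;		/* position on the per-type LRU */
    	int (*evict)(struct resource *res);	/* hypothetical backend hook */
    };

    /*
     * Evict resources of one type, oldest first.  An entry that refuses to go
     * is put back at the tail so the loop can make progress on the others.
     */
    static void evict_type(struct list_head *lru_list)
    {
    	struct resource *evict_res;
    	unsigned int err_count = 0;

    	while (!list_empty(lru_list) && err_count < 10) {
    		evict_res = list_first_entry(lru_list, struct resource,
    					     lru_head);
    		list_del_init(&evict_res->lru_head);

    		if (evict_res->evict(evict_res) != 0) {
    			/* Could not evict now: requeue at the tail and retry. */
    			list_add_tail(&evict_res->lru_head, lru_list);
    			err_count++;
    		}
    	}
    }
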
/kernel/linux/linux-5.10/kernel/bpf/ |
bpf_lru_list.c
    326  struct bpf_lru_list *l = &lru->common_lru.lru_list;  in bpf_lru_list_pop_free_to_local()
    538  bpf_lru_list_push_free(&lru->common_lru.lru_list, node);  in bpf_common_lru_push_free()
    568  struct bpf_lru_list *l = &lru->common_lru.lru_list;  in bpf_common_lru_populate()
    682  bpf_lru_list_init(&clru->lru_list);  in bpf_lru_init()

bpf_lru_list.h
    46   struct bpf_lru_list lru_list;  member
/kernel/linux/linux-6.6/kernel/bpf/ |
bpf_lru_list.c
    326  struct bpf_lru_list *l = &lru->common_lru.lru_list;  in bpf_lru_list_pop_free_to_local()
    538  bpf_lru_list_push_free(&lru->common_lru.lru_list, node);  in bpf_common_lru_push_free()
    568  struct bpf_lru_list *l = &lru->common_lru.lru_list;  in bpf_common_lru_populate()
    682  bpf_lru_list_init(&clru->lru_list);  in bpf_lru_init()

bpf_lru_list.h
    47   struct bpf_lru_list lru_list;  member
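
The BPF hash-map LRU is one level up from the other users here: common_lru.lru_list is itself a multi-list structure (free, active and inactive lists behind a lock), and the functions that show up, pop_free_to_local, push_free and populate, move nodes in batches between that shared structure and per-CPU local lists. A very rough sketch of just the free-list half of that traffic, with the shared structure pared down to a locked free list and all names invented for the example:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct lru_node {
    	struct list_head list;
    };

    /* Pared-down stand-in for the shared bpf_lru_list: just a locked free list. */
    struct shared_lru {
    	raw_spinlock_t lock;
    	struct list_head free_list;
    };

    /* Return a node to the shared pool (e.g. after a map element is deleted). */
    static void lru_push_free(struct shared_lru *l, struct lru_node *node)
    {
    	unsigned long flags;

    	raw_spin_lock_irqsave(&l->lock, flags);
    	list_add(&node->list, &l->free_list);
    	raw_spin_unlock_irqrestore(&l->lock, flags);
    }

    /* Refill a per-CPU local free list with up to 'batch' nodes from the pool. */
    static unsigned int lru_pop_free_to_local(struct shared_lru *l,
    					  struct list_head *local_free,
    					  unsigned int batch)
    {
    	unsigned int moved = 0;
    	unsigned long flags;

    	raw_spin_lock_irqsave(&l->lock, flags);
    	while (moved < batch && !list_empty(&l->free_list)) {
    		struct lru_node *node;

    		node = list_first_entry(&l->free_list, struct lru_node, list);
    		list_move(&node->list, local_free);
    		moved++;
    	}
    	raw_spin_unlock_irqrestore(&l->lock, flags);
    	return moved;
    }
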
/kernel/linux/linux-5.10/mm/ |
mmzone.c
    77   enum lru_list lru;  in lruvec_init()
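
This single hit is the initialisation side of the mm_inline.h helpers above: lruvec_init() walks every enum lru_list value and gives each per-lruvec list an empty head. In outline, using the same toy types as the earlier mm_inline sketch so the fragment stays self-contained:

    #include <linux/list.h>

    enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_INACTIVE_FILE,
    		LRU_ACTIVE_FILE, LRU_UNEVICTABLE, NR_LRU_LISTS };

    struct my_lruvec {
    	struct list_head lists[NR_LRU_LISTS];
    };

    static void my_lruvec_init(struct my_lruvec *lruvec)
    {
    	enum lru_list lru;

    	/* Every LRU class starts out as an empty list. */
    	for (lru = 0; lru < NR_LRU_LISTS; lru++)
    		INIT_LIST_HEAD(&lruvec->lists[lru]);
    }
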
/kernel/linux/linux-6.6/include/trace/events/ |
pagemap.h
    37   __field(enum lru_list, lru )