/kernel/linux/linux-6.6/drivers/gpu/drm/ttm/
ttm_device.c
    150  spin_lock(&bdev->lru_lock);  in ttm_device_swapout()
    165  /* ttm_bo_swapout has dropped the lru_lock */  in ttm_device_swapout()
    172  spin_unlock(&bdev->lru_lock);  in ttm_device_swapout()
    219  spin_lock_init(&bdev->lru_lock);  in ttm_device_init()
    246  spin_lock(&bdev->lru_lock);  in ttm_device_fini()
    250  spin_unlock(&bdev->lru_lock);  in ttm_device_fini()
    262  spin_lock(&bdev->lru_lock);  in ttm_device_clear_lru_dma_mappings()
    266  /* Take ref against racing releases once lru_lock is unlocked */  in ttm_device_clear_lru_dma_mappings()
    271  spin_unlock(&bdev->lru_lock);  in ttm_device_clear_lru_dma_mappings()
    277  spin_lock(&bdev->lru_lock);  in ttm_device_clear_lru_dma_mappings()
    [all...]
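Taken together, these ttm_device.c hits trace the life cycle of the per-device lru_lock: spin_lock_init() in ttm_device_init(), a lock/unlock pair around every LRU walk, and a swapout path that must re-take the lock because ttm_bo_swapout() drops it. Below is a minimal sketch of that walk-with-dropped-lock shape; all my_* names are invented for illustration, not TTM API.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lru_lock;        /* protects @lru, as bdev->lru_lock does */
	struct list_head lru;
};

struct my_obj {
	struct list_head lru_entry;
};

/* Called with dev->lru_lock held; returns with it dropped, the way
 * ttm_bo_swapout() returns having released bdev->lru_lock. */
static void my_evict_one(struct my_dev *dev, struct my_obj *obj)
	__releases(&dev->lru_lock)
{
	spin_unlock(&dev->lru_lock);
	/* ... eviction work that may sleep runs unlocked ... */
}

static void my_dev_init(struct my_dev *dev)
{
	spin_lock_init(&dev->lru_lock);   /* as in ttm_device_init() */
	INIT_LIST_HEAD(&dev->lru);
}

static void my_dev_swapout(struct my_dev *dev)
{
	spin_lock(&dev->lru_lock);
	while (!list_empty(&dev->lru)) {
		struct my_obj *obj = list_first_entry(&dev->lru,
						      struct my_obj, lru_entry);

		list_del_init(&obj->lru_entry);
		my_evict_one(dev, obj);        /* drops lru_lock */
		spin_lock(&dev->lru_lock);     /* re-take and restart */
	}
	spin_unlock(&dev->lru_lock);
}

Re-taking the lock and restarting from the list head is what keeps the walk safe: the list may have changed arbitrarily while the lock was dropped.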
ttm_resource.c
    51   * resource order never changes. Should be called with &ttm_device.lru_lock held.
    65   lockdep_assert_held(&pos->first->bo->bdev->lru_lock);  in ttm_lru_bulk_move_tail()
    151  lockdep_assert_held(&bo->bdev->lru_lock);  in ttm_resource_move_to_lru_tail()
    194  spin_lock(&bo->bdev->lru_lock);  in ttm_resource_init()
    200  spin_unlock(&bo->bdev->lru_lock);  in ttm_resource_init()
    219  spin_lock(&bdev->lru_lock);  in ttm_resource_fini()
    222  spin_unlock(&bdev->lru_lock);  in ttm_resource_fini()
    238  spin_lock(&bo->bdev->lru_lock);  in ttm_resource_alloc()
    240  spin_unlock(&bo->bdev->lru_lock);  in ttm_resource_alloc()
    251  spin_lock(&bo->bdev->lru_lock);  in ttm_resource_free()
    [all...]
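The two lockdep_assert_held() hits back up the comment above them: the LRU move helpers do not take the lock themselves, they assert that the caller already holds it. A tiny sketch of that contract, reusing the my_dev/my_obj types from the previous sketch:

#include <linux/list.h>
#include <linux/lockdep.h>

/* Caller must hold dev->lru_lock. With CONFIG_PROVE_LOCKING the
 * assertion splats if it does not; otherwise it compiles to nothing. */
static void my_lru_tail_locked(struct my_dev *dev, struct my_obj *obj)
{
	lockdep_assert_held(&dev->lru_lock);
	list_move_tail(&obj->lru_entry, &dev->lru);
}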
ttm_bo.c
    71   * object. This function must be called with struct ttm_global::lru_lock
    105  spin_lock(&bo->bdev->lru_lock);  in ttm_bo_set_bulk_move()
    111  spin_unlock(&bo->bdev->lru_lock);  in ttm_bo_set_bulk_move()
    203  * the resv object while holding the lru_lock.  in ttm_bo_individualize_resv()
    205  spin_lock(&bo->bdev->lru_lock);  in ttm_bo_individualize_resv()
    207  spin_unlock(&bo->bdev->lru_lock);  in ttm_bo_individualize_resv()
    232  * Must be called with lru_lock and reservation held, this function
    258  spin_unlock(&bo->bdev->lru_lock);  in ttm_bo_cleanup_refs()
    269  spin_lock(&bo->bdev->lru_lock);  in ttm_bo_cleanup_refs()
    279  spin_unlock(&bo->bdev->lru_lock);  in ttm_bo_cleanup_refs()
    [all...]
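ttm_bo_cleanup_refs() is documented as "Must be called with lru_lock and reservation held" and it drops them on every exit path, which is why the hits in it are mostly unlocks. A minimal sketch of an enter-locked, exit-unlocked helper, again with invented my_* names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Entered with dev->lru_lock held; every return path releases it,
 * mirroring the ttm_bo_cleanup_refs() contract. */
static int my_cleanup_refs(struct my_dev *dev, struct my_obj *obj, bool busy)
	__releases(&dev->lru_lock)
{
	if (busy) {
		/* Cannot clean up yet: unlock and report. */
		spin_unlock(&dev->lru_lock);
		return -EBUSY;
	}

	list_del_init(&obj->lru_entry);
	spin_unlock(&dev->lru_lock);

	/* ... final teardown runs unlocked ... */
	return 0;
}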
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/
ttm_bo.c
    347  * the resv object while holding the lru_lock.  in ttm_bo_individualize_resv()
    349  spin_lock(&ttm_bo_glob.lru_lock);  in ttm_bo_individualize_resv()
    351  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_bo_individualize_resv()
    384  * Must be called with lru_lock and reservation held, this function
    409  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_bo_cleanup_refs()
    419  spin_lock(&ttm_bo_glob.lru_lock);  in ttm_bo_cleanup_refs()
    429  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_bo_cleanup_refs()
    438  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_bo_cleanup_refs()
    444  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_bo_cleanup_refs()
    467  spin_lock(&glob->lru_lock);  in ttm_bo_delayed_delete()
    [all...]
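Note the contrast with the 6.6 hits above: the same functions (ttm_bo_individualize_resv, ttm_bo_cleanup_refs) lock ttm_bo_glob.lru_lock here, a single global lock, where 6.6 takes the per-device bo->bdev->lru_lock. A schematic of the two layouts, with invented names:

#include <linux/spinlock.h>

/* v5.10 shape: one global lock (ttm_bo_glob.lru_lock) serializes LRU
 * traffic for every TTM device in the system. */
static struct { spinlock_t lru_lock; } my_glob = {
	.lru_lock = __SPIN_LOCK_UNLOCKED(my_glob.lru_lock),
};

/* v6.x shape: the lock lives in the device, so independent devices
 * no longer contend on one lock (compare the bo->bdev->lru_lock hits
 * in the 6.6 section above). */
struct my_bdev {
	spinlock_t lru_lock;
};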
ttm_execbuf_util.c
    54   spin_lock(&ttm_bo_glob.lru_lock);  in ttm_eu_backoff_reservation()
    61   spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_eu_backoff_reservation()
    157  spin_lock(&ttm_bo_glob.lru_lock);  in ttm_eu_fence_buffer_objects()
    168  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_eu_fence_buffer_objects()
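ttm_eu_fence_buffer_objects() attaches the fence to every reserved object and bumps each to the LRU tail inside one critical section. A sketch of that batch-under-one-lock pattern; the my_exec_* types and the void *fence parameter are illustrative stand-ins:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_exec_entry {
	struct list_head head;       /* link on the caller's validation list */
	struct list_head lru_entry;  /* link on the global LRU */
};

static struct {
	spinlock_t lru_lock;
	struct list_head lru;
} my_exec_glob = {
	.lru_lock = __SPIN_LOCK_UNLOCKED(my_exec_glob.lru_lock),
	.lru      = LIST_HEAD_INIT(my_exec_glob.lru),
};

/* One critical section covers the whole batch: every reserved object
 * gets its fence attached and is requeued before anyone else can see
 * the list in a half-updated state. */
static void my_fence_objects(struct list_head *list, void *fence)
{
	struct my_exec_entry *entry;

	spin_lock(&my_exec_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		/* ... attach @fence to the object ... */
		list_move_tail(&entry->lru_entry, &my_exec_glob.lru);
	}
	spin_unlock(&my_exec_glob.lru_lock);
}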
ttm_resource.c
    103  spin_lock(&glob->lru_lock);  in ttm_resource_manager_force_list_clean()
    106  spin_unlock(&glob->lru_lock);  in ttm_resource_manager_force_list_clean()
    111  spin_lock(&glob->lru_lock);  in ttm_resource_manager_force_list_clean()
    114  spin_unlock(&glob->lru_lock);  in ttm_resource_manager_force_list_clean()
/kernel/linux/linux-5.10/mm/
workingset.c
    430  spin_lock_irq(&page_pgdat(page)->lru_lock);  in workingset_refault()
    432  spin_unlock_irq(&page_pgdat(page)->lru_lock);  in workingset_refault()
    582  spinlock_t *lru_lock,
    583  void *arg) __must_hold(lru_lock)  in __must_hold()
    592  * lru_lock. Because the page cache tree is emptied before  in __must_hold()
    593  * the inode can be destroyed, holding the lru_lock pins any  in __must_hold()
    598  * to reclaim, take the node off-LRU, and drop the lru_lock.  in __must_hold()
    605  spin_unlock_irq(lru_lock);  in __must_hold()
    613  spin_unlock(lru_lock);  in __must_hold()
    633  spin_lock_irq(lru_lock);  in __must_hold()
    [all...]
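These hits are the shadow-node shrinker's list_lru isolate callback: it is entered with the list_lru lock held (hence the __must_hold(lru_lock) annotation), trylocks the tree, takes the node off the LRU, and drops lru_lock before reclaiming. A condensed sketch of that shape; my_isolate and struct my_shadow are invented, the list_lru API is real:

#include <linux/kernel.h>
#include <linux/list_lru.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_shadow {
	struct list_head lru_entry;
	spinlock_t tree_lock;
};

/* list_lru isolate callback: entered with lru_lock held and required
 * to hold it again on return. */
static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *lru,
				  spinlock_t *lru_lock, void *arg)
	__must_hold(lru_lock)
{
	struct my_shadow *node = container_of(item, struct my_shadow, lru_entry);

	/* Only a non-blocking attempt: sleeping under lru_lock is not
	 * allowed, so on contention we back off and ask for a retry. */
	if (!spin_trylock(&node->tree_lock)) {
		spin_unlock_irq(lru_lock);
		cond_resched();
		spin_lock_irq(lru_lock);
		return LRU_RETRY;
	}

	/* Off the LRU while still serialized, then drop lru_lock so
	 * the actual reclaim can run with only tree_lock held. */
	list_lru_isolate(lru, item);
	spin_unlock_irq(lru_lock);

	/* ... reclaim @node ... */
	spin_unlock(&node->tree_lock);

	spin_lock_irq(lru_lock);
	return LRU_REMOVED_RETRY;   /* we dropped and re-took the lock */
}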
swap.c
    86   spin_lock_irqsave(&pgdat->lru_lock, flags);  in __page_cache_release()
    91   spin_unlock_irqrestore(&pgdat->lru_lock, flags);  in __page_cache_release()
    221  spin_unlock_irqrestore(&pgdat->lru_lock, flags);  in pagevec_lru_move_fn()
    223  spin_lock_irqsave(&pgdat->lru_lock, flags);  in pagevec_lru_move_fn()
    230  spin_unlock_irqrestore(&pgdat->lru_lock, flags);  in pagevec_lru_move_fn()
    382  spin_lock_irq(&pgdat->lru_lock);  in activate_page()
    384  spin_unlock_irq(&pgdat->lru_lock);  in activate_page()
    894  spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);  in release_pages()
    904  spin_unlock_irqrestore(&locked_pgdat->lru_lock,  in release_pages()
    925  spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);  in release_pages()
    [all...]
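The pagevec_lru_move_fn() and release_pages() hits are all one batching idiom: while walking a batch of pages, keep the current node's lru_lock held across consecutive pages and do the unlock/lock dance only when a page belongs to a different pgdat. A sketch under assumed my_* types:

#include <linux/spinlock.h>

struct my_pgdat {                 /* stands in for pg_data_t */
	spinlock_t lru_lock;
};

/* Switch locks exactly once per node transition instead of once per
 * item; this is what keeps the heavily contended lru_lock tolerable. */
static void my_move_batch(struct my_pgdat **nodes, unsigned int nr)
{
	struct my_pgdat *locked = NULL;
	unsigned long flags = 0;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct my_pgdat *pgdat = nodes[i];

		if (pgdat != locked) {
			if (locked)
				spin_unlock_irqrestore(&locked->lru_lock, flags);
			locked = pgdat;
			spin_lock_irqsave(&locked->lru_lock, flags);
		}
		/* ... move item i on @locked's LRU ... */
	}
	if (locked)
		spin_unlock_irqrestore(&locked->lru_lock, flags);
}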
vmscan.c
    906   * lru_lock must not be held, interrupts must be enabled.
    1575  * pgdat->lru_lock is heavily contended. Some of the functions that
    1703  * (2) the lru_lock must not be held.
    1717  spin_lock_irq(&pgdat->lru_lock);  in isolate_lru_page()
    1726  spin_unlock_irq(&pgdat->lru_lock);  in isolate_lru_page()
    1805  spin_unlock_irq(&pgdat->lru_lock);  in move_pages_to_lru()
    1807  spin_lock_irq(&pgdat->lru_lock);  in move_pages_to_lru()
    1825  spin_unlock_irq(&pgdat->lru_lock);  in move_pages_to_lru()
    1827  spin_lock_irq(&pgdat->lru_lock);  in move_pages_to_lru()
    1908  spin_lock_irq(&pgdat->lru_lock);  in shrink_inactive_list()
    [all...]
page_idle.c
    42  spin_lock_irq(&pgdat->lru_lock);  in page_idle_get_page()
    47  spin_unlock_irq(&pgdat->lru_lock);  in page_idle_get_page()
mlock.c
    110  * Assumes lru_lock already held and page already pinned.
    202  spin_lock_irq(&pgdat->lru_lock);  in munlock_vma_page()
    214  spin_unlock_irq(&pgdat->lru_lock);  in munlock_vma_page()
    221  spin_unlock_irq(&pgdat->lru_lock);  in munlock_vma_page()
    306  spin_lock_irq(&zone->zone_pgdat->lru_lock);  in __munlock_pagevec()
    326  * pin. We cannot do it under lru_lock however. If it's  in __munlock_pagevec()
    333  spin_unlock_irq(&zone->zone_pgdat->lru_lock);  in __munlock_pagevec()
zswapd.c
    495  spin_lock_irq(&pgdat->lru_lock);  in zswapd_shrink_active_list()
    502  spin_unlock_irq(&pgdat->lru_lock);  in zswapd_shrink_active_list()
    519  spin_lock_irq(&pgdat->lru_lock);  in zswapd_shrink_active_list()
    522  spin_unlock_irq(&pgdat->lru_lock);  in zswapd_shrink_active_list()
/kernel/linux/linux-6.6/mm/
workingset.c
    755  spinlock_t *lru_lock,
    756  void *arg) __must_hold(lru_lock)  in __must_hold()
    765  * lru_lock. Because the page cache tree is emptied before  in __must_hold()
    766  * the inode can be destroyed, holding the lru_lock pins any  in __must_hold()
    771  * to reclaim, take the node off-LRU, and drop the lru_lock.  in __must_hold()
    778  spin_unlock_irq(lru_lock);  in __must_hold()
    787  spin_unlock_irq(lru_lock);  in __must_hold()
    796  spin_unlock(lru_lock);  in __must_hold()
    820  spin_lock_irq(lru_lock);  in __must_hold()
zswap.c
    160  * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
    161  * The only case where lru_lock is not acquired while holding tree.lock is
    175  spinlock_t lru_lock;  (member)
    396  spin_lock(&entry->pool->lru_lock);  in zswap_free_entry()
    398  spin_unlock(&entry->pool->lru_lock);  in zswap_free_entry()
    640  spin_lock(&pool->lru_lock);  in zswap_reclaim_entry()
    642  spin_unlock(&pool->lru_lock);  in zswap_reclaim_entry()
    654  spin_unlock(&pool->lru_lock);  in zswap_reclaim_entry()
    671  spin_lock(&pool->lru_lock);  in zswap_reclaim_entry()
    673  spin_unlock(&pool->lru_lock);  in zswap_reclaim_entry()
    [all...]
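The zswap.c comment spells out a lock-ordering rule: wherever both locks are needed, zswap_tree.lock is taken before zswap_pool.lru_lock, which is what keeps the mixed lock sites in zswap_free_entry() and zswap_reclaim_entry() deadlock-free. A toy illustration of enforcing one global order between two spinlocks; all names are invented:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_tree { spinlock_t lock; };
struct my_pool { spinlock_t lru_lock; struct list_head lru; };

/* Every two-lock path takes tree->lock first and pool->lru_lock
 * second; with a single global order, no pair of paths can wait on
 * each other in a cycle. */
static void my_drop_entry(struct my_tree *tree, struct my_pool *pool,
			  struct list_head *lru_entry)
{
	spin_lock(&tree->lock);
	spin_lock(&pool->lru_lock);
	list_del_init(lru_entry);            /* off the pool LRU */
	spin_unlock(&pool->lru_lock);
	/* ... drop the tree reference while tree->lock is still held ... */
	spin_unlock(&tree->lock);
}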
zswapd.c
    496  spin_lock_irq(&lruvec->lru_lock);  in zswapd_shrink_active_list()
    503  spin_unlock_irq(&lruvec->lru_lock);  in zswapd_shrink_active_list()
    520  spin_lock_irq(&lruvec->lru_lock);  in zswapd_shrink_active_list()
    523  spin_unlock_irq(&lruvec->lru_lock);  in zswapd_shrink_active_list()
mmzone.c
    80  spin_lock_init(&lruvec->lru_lock);  in lruvec_init()
vmscan.c
    1437  * Context: lru_lock must not be held, interrupts must be enabled.
    2223  * lruvec->lru_lock is heavily contended. Some of the functions that
    2352  * (2) The lru_lock must not be held.
    2443  spin_unlock_irq(&lruvec->lru_lock);  in move_folios_to_lru()
    2445  spin_lock_irq(&lruvec->lru_lock);  in move_folios_to_lru()
    2455  * !lru //skip lru_lock  in move_folios_to_lru()
    2466  spin_unlock_irq(&lruvec->lru_lock);  in move_folios_to_lru()
    2468  spin_lock_irq(&lruvec->lru_lock);  in move_folios_to_lru()
    2556  spin_lock_irq(&lruvec->lru_lock);  in shrink_inactive_list()
    2568  spin_unlock_irq(&lruvec->lru_lock);  in shrink_inactive_list()
    [all...]
/kernel/linux/linux-5.10/include/drm/ttm/
ttm_bo_driver.h
    267  * @lru_lock: Spinlock protecting the bo subsystem lru lists.
    280  spinlock_t lru_lock;  (member)
    288  * Protected by the lru_lock.
    307  * lru_lock: Spinlock that protects the buffer+device lru lists and
    537  spin_lock(&ttm_bo_glob.lru_lock);  in ttm_bo_move_to_lru_tail_unlocked()
    539  spin_unlock(&ttm_bo_glob.lru_lock);  in ttm_bo_move_to_lru_tail_unlocked()
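ttm_bo_move_to_lru_tail_unlocked() (here and in the 6.6 ttm_bo.h below) is the classic "_unlocked" wrapper: the caller does not hold lru_lock, so the inline takes and releases it around the locked primitive. A sketch of the convention with invented my_lru_* names:

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct my_lru_dev {
	spinlock_t lru_lock;
	struct list_head lru;
};

struct my_lru_obj {
	struct list_head lru_entry;
};

/* Locked core: the caller already holds dev->lru_lock. */
static void my_move_to_lru_tail(struct my_lru_dev *dev, struct my_lru_obj *obj)
{
	lockdep_assert_held(&dev->lru_lock);
	list_move_tail(&obj->lru_entry, &dev->lru);
}

/* "_unlocked" variant: callable without the lock; it wraps the locked
 * core in its own lock/unlock pair, as the TTM inline does. */
static inline void my_move_to_lru_tail_unlocked(struct my_lru_dev *dev,
						struct my_lru_obj *obj)
{
	spin_lock(&dev->lru_lock);
	my_move_to_lru_tail(dev, obj);
	spin_unlock(&dev->lru_lock);
}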
/kernel/linux/linux-6.6/include/drm/ttm/
ttm_bo.h
    293  spin_lock(&bo->bdev->lru_lock);  in ttm_bo_move_to_lru_tail_unlocked()
    295  spin_unlock(&bo->bdev->lru_lock);  in ttm_bo_move_to_lru_tail_unlocked()
ttm_device.h
    250  * @lru_lock: Protection for the per manager LRU and ddestroy lists.
    252  spinlock_t lru_lock;  (member)
/kernel/linux/linux-5.10/fs/gfs2/
glock.c
    67    static DEFINE_SPINLOCK(lru_lock);
    213   spin_lock(&lru_lock);  in gfs2_glock_add_to_lru()
    223   spin_unlock(&lru_lock);  in gfs2_glock_add_to_lru()
    231   spin_lock(&lru_lock);  in gfs2_glock_remove_from_lru()
    237   spin_unlock(&lru_lock);  in gfs2_glock_remove_from_lru()
    1771  * Must be called under the lru_lock, but may drop and retake this
    1772  * lock. While the lru_lock is dropped, entries may vanish from the
    1778  __releases(&lru_lock)
    1779  __acquires(&lru_lock)
    1806  cond_resched_lock(&lru_lock);  (variable)
    [all...]
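gfs2 (in both the 5.10 and 6.6 trees) keeps its glock LRU behind a file-scope DEFINE_SPINLOCK(lru_lock), and its disposal walk uses cond_resched_lock() so a long scan can yield the CPU without breaking the held-on-entry, held-on-exit contract. A sketch of that shape; the my_* identifiers are invented, and entries sit on a private dispose list so they cannot vanish while the lock is briefly released:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lru_lock);   /* file-scope, like gfs2's lru_lock */

struct my_glock {
	struct list_head lru_entry;
};

/* Must be called under my_lru_lock, but may drop and retake it. */
static void my_dispose_list(struct list_head *dispose)
	__releases(&my_lru_lock)
	__acquires(&my_lru_lock)
{
	while (!list_empty(dispose)) {
		struct my_glock *gl =
			list_first_entry(dispose, struct my_glock, lru_entry);

		list_del_init(&gl->lru_entry);
		/* ... tear down @gl ... */

		/* Between entries, cond_resched_lock() unlocks, yields
		 * the CPU if needed, and relocks, bounding hold time. */
		cond_resched_lock(&my_lru_lock);
	}
}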
/kernel/linux/linux-6.6/include/linux/
memcontrol.h
    1385  spin_lock(&pgdat->__lruvec.lru_lock);  in folio_lruvec_lock()
    1393  spin_lock_irq(&pgdat->__lruvec.lru_lock);  in folio_lruvec_lock_irq()
    1402  spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);  in folio_lruvec_lock_irqsave()
    1670  spin_unlock(&lruvec->lru_lock);  in unlock_page_lruvec()
    1675  spin_unlock_irq(&lruvec->lru_lock);  in unlock_page_lruvec_irq()
    1681  spin_unlock_irqrestore(&lruvec->lru_lock, flags);  in unlock_page_lruvec_irqrestore()
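memcontrol.h exposes three flavours of the per-lruvec lock, plain, _irq, and _irqsave, with matching unlock helpers, so each caller can pick the cheapest variant its context allows. A sketch of the distinction; struct my_lruvec is an invented stand-in:

#include <linux/spinlock.h>

struct my_lruvec {
	spinlock_t lru_lock;
};

/* Plain: for paths where interrupt state need not change, e.g. it is
 * already disabled by the caller. */
static void my_lruvec_lock(struct my_lruvec *lruvec)
{
	spin_lock(&lruvec->lru_lock);
}

/* _irq: process context with interrupts currently enabled; the
 * matching _irq unlock re-enables them unconditionally. */
static void my_lruvec_lock_irq(struct my_lruvec *lruvec)
{
	spin_lock_irq(&lruvec->lru_lock);
}

/* _irqsave: interrupt state unknown; it is saved into *flagsp and
 * restored by the _irqrestore unlock, as in folio_lruvec_lock_irqsave(). */
static void my_lruvec_lock_irqsave(struct my_lruvec *lruvec,
				   unsigned long *flagsp)
{
	spin_lock_irqsave(&lruvec->lru_lock, *flagsp);
}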
/kernel/linux/linux-6.6/fs/gfs2/
glock.c
    72    static DEFINE_SPINLOCK(lru_lock);
    221   spin_lock(&lru_lock);  in gfs2_glock_add_to_lru()
    230   spin_unlock(&lru_lock);  in gfs2_glock_add_to_lru()
    238   spin_lock(&lru_lock);  in gfs2_glock_remove_from_lru()
    244   spin_unlock(&lru_lock);  in gfs2_glock_remove_from_lru()
    1951  * Must be called under the lru_lock, but may drop and retake this
    1952  * lock. While the lru_lock is dropped, entries may vanish from the
    1958  __releases(&lru_lock)
    1959  __acquires(&lru_lock)
    1986  cond_resched_lock(&lru_lock);  (variable)
    [all...]
/kernel/linux/linux-6.6/fs/xfs/
xfs_qm.c
    414  spinlock_t *lru_lock,
    416  __releases(lru_lock) __acquires(lru_lock)  in __releases()
    462  spin_unlock(lru_lock);  in __releases()
    498  spin_lock(lru_lock);  in __releases()
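The xfs_qm.c hits are xfs's dquot isolate callback, annotated __releases(lru_lock) __acquires(lru_lock): sparse can then check that the function really drops the caller's lock for its slow middle section and holds it again on return. A minimal sketch of the annotation pair on an invented function:

#include <linux/spinlock.h>

/* Tells sparse: enters with *lru_lock held, releases it for the slow
 * middle section, and returns holding it again. */
static void my_isolate_slow(spinlock_t *lru_lock)
	__releases(lru_lock) __acquires(lru_lock)
{
	spin_unlock(lru_lock);
	/* ... work that must not run under the spinlock (may sleep) ... */
	spin_lock(lru_lock);
}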
/kernel/linux/linux-5.10/drivers/gpu/drm/qxl/
qxl_release.c
    459  spin_lock(&ttm_bo_glob.lru_lock);  in qxl_release_fence_buffer_objects()
    468  spin_unlock(&ttm_bo_glob.lru_lock);  in qxl_release_fence_buffer_objects()