/kernel/linux/linux-5.10/fs/nilfs2/
btnode.c
  198  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
  199  err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);  in nilfs_btnode_prepare_change_key()
  200  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
  255  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
  256  __xa_erase(&btnc->i_pages, oldkey);  in nilfs_btnode_commit_change_key()
  257  __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);  in nilfs_btnode_commit_change_key()
  258  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
  286  xa_erase_irq(&btnc->i_pages, newkey);  in nilfs_btnode_abort_change_key()
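Note: the btnode.c hits are nilfs2's three-phase key change (prepare/commit/abort) on its b-tree node cache. A minimal sketch of the same i_pages idiom, collapsing prepare and commit into one hypothetical helper (rekey_demo() is illustrative, not nilfs2 code; inode bookkeeping omitted):

#include <linux/pagemap.h>

/*
 * Sketch only: move the entry for @page from @oldkey to @newkey while
 * keeping its dirty tag, under the IRQ-safe i_pages lock.  __xa_insert()
 * returns -EBUSY if @newkey is already occupied, so a collision is seen
 * before anything is torn down.  Note that __xa_insert() may drop and
 * reacquire the lock internally if it has to allocate nodes.
 */
static int rekey_demo(struct address_space *mapping, unsigned long oldkey,
		      unsigned long newkey, struct page *page)
{
	int err;

	xa_lock_irq(&mapping->i_pages);
	err = __xa_insert(&mapping->i_pages, newkey, page, GFP_NOFS);
	if (!err) {
		__xa_erase(&mapping->i_pages, oldkey);
		__xa_set_mark(&mapping->i_pages, newkey, PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irq(&mapping->i_pages);
	return err;
}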
page.c
  324  xa_lock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
  325  p = __xa_erase(&smap->i_pages, offset);  in nilfs_copy_back_pages()
  328  xa_unlock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
  330  xa_lock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
  331  p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);  in nilfs_copy_back_pages()
  340  __xa_set_mark(&dmap->i_pages, offset,  in nilfs_copy_back_pages()
  343  xa_unlock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
  471  xa_lock_irq(&mapping->i_pages);  in __nilfs_clear_page_dirty()
  473  __xa_clear_mark(&mapping->i_pages, page_index(page),  in __nilfs_clear_page_dirty()
  475  xa_unlock_irq(&mapping->i_pages);  in __nilfs_clear_page_dirty()
  [all...]
/kernel/linux/linux-6.6/fs/nilfs2/
btnode.c
  198  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
  199  err = __xa_insert(&btnc->i_pages, newkey, opage, GFP_NOFS);  in nilfs_btnode_prepare_change_key()
  200  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_prepare_change_key()
  255  xa_lock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
  256  __xa_erase(&btnc->i_pages, oldkey);  in nilfs_btnode_commit_change_key()
  257  __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);  in nilfs_btnode_commit_change_key()
  258  xa_unlock_irq(&btnc->i_pages);  in nilfs_btnode_commit_change_key()
  286  xa_erase_irq(&btnc->i_pages, newkey);  in nilfs_btnode_abort_change_key()
page.c
  325  xa_lock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
  326  f = __xa_erase(&smap->i_pages, index);  in nilfs_copy_back_pages()
  329  xa_unlock_irq(&smap->i_pages);  in nilfs_copy_back_pages()
  331  xa_lock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
  332  f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);  in nilfs_copy_back_pages()
  341  __xa_set_mark(&dmap->i_pages, index,  in nilfs_copy_back_pages()
  344  xa_unlock_irq(&dmap->i_pages);  in nilfs_copy_back_pages()
  463  xa_lock_irq(&mapping->i_pages);  in __nilfs_clear_page_dirty()
  465  __xa_clear_mark(&mapping->i_pages, page_index(page),  in __nilfs_clear_page_dirty()
  467  xa_unlock_irq(&mapping->i_pages);  in __nilfs_clear_page_dirty()
  [all...]
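Note: in 6.6 the same nilfs_copy_back_pages() logic is folio-based: erase the slot from the source mapping, store into the destination, re-tag dirty. A condensed sketch of that move (move_slot_demo() is a hypothetical name; error handling trimmed):

#include <linux/pagemap.h>

/* Sketch only: migrate one folio's slot from @smap to @dmap at @index. */
static void move_slot_demo(struct address_space *smap,
			   struct address_space *dmap,
			   pgoff_t index, struct folio *folio)
{
	void *old;

	xa_lock_irq(&smap->i_pages);
	old = __xa_erase(&smap->i_pages, index);	/* unhook from source */
	xa_unlock_irq(&smap->i_pages);
	WARN_ON_ONCE(old != folio);

	xa_lock_irq(&dmap->i_pages);
	old = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
	if (!old) {	/* slot was empty and the store succeeded */
		folio->mapping = dmap;
		if (folio_test_dirty(folio))
			__xa_set_mark(&dmap->i_pages, index,
				      PAGECACHE_TAG_DIRTY);
	}
	xa_unlock_irq(&dmap->i_pages);
}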
/kernel/linux/linux-6.6/arch/nios2/include/asm/ |
cacheflush.h
  54  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  55  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
  57  	xa_lock_irqsave(&mapping->i_pages, flags)
  59  	xa_unlock_irqrestore(&mapping->i_pages, flags)
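Note: flush_dcache_mmap_lock() is just the i_pages lock in disguise; architectures with aliasing D-caches hold it while walking the mapping's rmap interval tree. A condensed sketch, modeled on the flush_dcache_page() rmap walk in arm/parisc (the per-VMA flush call is arch-specific and elided):

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Sketch only: visit every user-space alias of @page in @mapping. */
static void flush_aliases_demo(struct address_space *mapping,
			       struct page *page)
{
	struct vm_area_struct *vma;
	pgoff_t pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* arch-specific: flush the alias mapped by this vma */
	}
	flush_dcache_mmap_unlock(mapping);
}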
/kernel/linux/linux-6.6/arch/parisc/include/asm/ |
cacheflush.h
  55  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  56  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
  58  	xa_lock_irqsave(&mapping->i_pages, flags)
  60  	xa_unlock_irqrestore(&mapping->i_pages, flags)
/kernel/linux/linux-6.6/mm/ |
truncate.c
  34   XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
  46   xa_lock_irq(&mapping->i_pages);  in clear_shadow_entry()
  48   xa_unlock_irq(&mapping->i_pages);  in clear_shadow_entry()
  79   xa_lock_irq(&mapping->i_pages);  in truncate_folio_batch_exceptionals()
  100  xa_unlock_irq(&mapping->i_pages);  in truncate_folio_batch_exceptionals()
  479  xa_lock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
  480  xa_unlock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
  579  xa_lock_irq(&mapping->i_pages);  in invalidate_complete_folio2()
  585  xa_unlock_irq(&mapping->i_pages);  in invalidate_complete_folio2()
  593  xa_unlock_irq(&mapping->i_pages);  in invalidate_complete_folio2()
  [all...]
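Note: truncate.c takes the i_pages lock around xas_store(NULL) when it erases shadow (workingset) entries. A sketch of that core step, following __clear_shadow_entry() (clear_shadow_demo() is a hypothetical wrapper; the inode-LRU bookkeeping the real caller does under i_lock is omitted):

#include <linux/pagemap.h>
#include <linux/swap.h>

/* Sketch only: drop the shadow entry at @index iff it is still @shadow. */
static void clear_shadow_demo(struct address_space *mapping,
			      pgoff_t index, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xa_lock_irq(&mapping->i_pages);
	/* keep the workingset shadow-node accounting in sync */
	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) == shadow)
		xas_store(&xas, NULL);
	xa_unlock_irq(&mapping->i_pages);
}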
swap_state.c
  76   page = xa_load(&address_space->i_pages, idx);  in get_shadow_from_swap_cache()
  91   XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));  in add_to_swap_cache()
  146  XA_STATE(xas, &address_space->i_pages, idx);  in __delete_from_swap_cache()
  238  xa_lock_irq(&address_space->i_pages);  in delete_from_swap_cache()
  240  xa_unlock_irq(&address_space->i_pages);  in delete_from_swap_cache()
  255  XA_STATE(xas, &address_space->i_pages, curr);  in clear_shadow_from_swap_cache()
  259  xa_lock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
  265  xa_unlock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
  684  xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);  in init_swap_address_space()
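Note: add_to_swap_cache() uses the standard XA_STATE + xas_nomem() retry loop: do the store under xas_lock_irq(), and if the XArray needs nodes, drop the lock, preallocate, and retry. The single-slot skeleton of that idiom (store_demo() is hypothetical; the real function iterates over all subpages and adds accounting):

#include <linux/xarray.h>

/* Sketch only: store @entry at @idx with out-of-lock allocation retry. */
static int store_demo(struct xarray *xa, unsigned long idx, void *entry)
{
	XA_STATE(xas, xa, idx);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, entry);
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* allocate, then retry */

	return xas_error(&xas);
}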
workingset.c
  378  * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
  675  * as node->private_list is protected by the i_pages lock.  in workingset_update_node()
  677  mapping = container_of(node->array, struct address_space, i_pages);  in workingset_update_node()
  678  lockdep_assert_held(&mapping->i_pages.xa_lock);  in workingset_update_node()
  764  * the shadow node LRU under the i_pages lock and the  in __must_hold()
  769  * We can then safely transition to the i_pages lock to  in __must_hold()
  774  mapping = container_of(node->array, struct address_space, i_pages);  in __must_hold()
  777  if (!xa_trylock(&mapping->i_pages)) {  in __must_hold()
  786  xa_unlock(&mapping->i_pages);  in __must_hold()
  811  xa_unlock_irq(&mapping->i_pages);  in __must_hold()
  [all...]
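Note: the __must_hold() hits sit inside shadow_lru_isolate(), where the usual nesting (i_pages lock outside the list_lru lock) is inverted: the shrinker already holds the LRU lock, so it may only trylock i_pages and back off on contention. A condensed sketch (isolate_demo() is illustrative; IRQs are assumed already disabled by the caller, as in the real shrinker):

#include <linux/fs.h>
#include <linux/list_lru.h>
#include <linux/xarray.h>

/* Sketch only: recover the mapping from an XArray node and trylock it. */
static enum lru_status isolate_demo(struct xa_node *node)
{
	struct address_space *mapping;

	mapping = container_of(node->array, struct address_space, i_pages);
	if (!xa_trylock(&mapping->i_pages))
		return LRU_RETRY;	/* contended: revisit this node later */

	/* ... prune this node's shadow entries here ... */

	xa_unlock(&mapping->i_pages);
	return LRU_REMOVED;
}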
filemap.c
  82   * ->i_pages lock
  91   * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
  102  * ->i_pages lock (__sync_single_inode)
  113  * ->i_pages lock (try_to_unmap_one)
  117  * ->i_pages lock (page_remove_rmap->set_page_dirty)
  129  XA_STATE(xas, &mapping->i_pages, folio->index);  in page_cache_delete()
  217  * is safe. The caller must hold the i_pages lock.
  256  xa_lock_irq(&mapping->i_pages);  in filemap_remove_folio()
  258  xa_unlock_irq(&mapping->i_pages);  in filemap_remove_folio()
  271  * The function walks over mapping->i_pages and [all...]
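Note: filemap.c's header comment is the canonical lock-ordering map for the i_pages lock, and page_cache_delete() is its write side. A stripped-down sketch of that deletion (remove_demo() is hypothetical; accounting, shadow entries, inode bookkeeping, and freeing are omitted):

#include <linux/pagemap.h>

/* Sketch only: unhook @folio from @mapping under the i_pages lock. */
static void remove_demo(struct address_space *mapping, struct folio *folio)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);

	xa_lock_irq(&mapping->i_pages);
	xas_set_order(&xas, folio->index, folio_order(folio));
	xas_store(&xas, NULL);	/* real code may store a shadow entry */
	xas_init_marks(&xas);	/* clear dirty/towrite tags for the range */
	folio->mapping = NULL;
	xa_unlock_irq(&mapping->i_pages);
}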
/kernel/linux/linux-5.10/arch/parisc/include/asm/ |
cacheflush.h
  58  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  59  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
  61  	xa_lock_irqsave(&mapping->i_pages, flags)
  63  	xa_unlock_irqrestore(&mapping->i_pages, flags)
/kernel/linux/linux-5.10/mm/ |
truncate.c
  37   XA_STATE(xas, &mapping->i_pages, index);  in __clear_shadow_entry()
  49   xa_lock_irq(&mapping->i_pages);  in clear_shadow_entry()
  51   xa_unlock_irq(&mapping->i_pages);  in clear_shadow_entry()
  80   xa_lock_irq(&mapping->i_pages);  in truncate_exceptional_pvec_entries()
  103  xa_unlock_irq(&mapping->i_pages);  in truncate_exceptional_pvec_entries()
  516  xa_lock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
  517  xa_unlock_irq(&mapping->i_pages);  in truncate_inode_pages_final()
  665  xa_lock_irqsave(&mapping->i_pages, flags);  in invalidate_complete_page2()
  671  xa_unlock_irqrestore(&mapping->i_pages, flags);  in invalidate_complete_page2()
  679  xa_unlock_irqrestore(&mapping->i_pages, flags);  in invalidate_complete_page2()
  [all...]
filemap.c
  75   * ->i_pages lock
  83   * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
  93   * ->i_pages lock (__sync_single_inode)
  104  * ->i_pages lock (try_to_unmap_one)
  108  * ->i_pages lock (page_remove_rmap->set_page_dirty)
  123  XA_STATE(xas, &mapping->i_pages, page->index);  in page_cache_delete()
  230  * is safe. The caller must hold the i_pages lock.
  273  xa_lock_irqsave(&mapping->i_pages, flags);  in delete_from_page_cache()
  275  xa_unlock_irqrestore(&mapping->i_pages, flags);  in delete_from_page_cache()
  286  * The function walks over mapping->i_pages and [all...]
swap_state.c
  133  XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));  in add_to_swap_cache()
  189  XA_STATE(xas, &address_space->i_pages, idx);  in __delete_from_swap_cache()
  277  xa_lock_irq(&address_space->i_pages);  in delete_from_swap_cache()
  279  xa_unlock_irq(&address_space->i_pages);  in delete_from_swap_cache()
  295  XA_STATE(xas, &address_space->i_pages, curr);  in clear_shadow_from_swap_cache()
  297  xa_lock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
  305  xa_unlock_irq(&address_space->i_pages);  in clear_shadow_from_swap_cache()
  718  xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);  in init_swap_address_space()
workingset.c
  250  * Returns a shadow entry to be stored in @page->mapping->i_pages in place
  503  * as node->private_list is protected by the i_pages lock.  in workingset_update_node()
  591  * the shadow node LRU under the i_pages lock and the  in __must_hold()
  596  * We can then safely transition to the i_pages lock to  in __must_hold()
  601  mapping = container_of(node->array, struct address_space, i_pages);  in __must_hold()
  604  if (!xa_trylock(&mapping->i_pages)) {  in __must_hold()
  629  xa_unlock_irq(&mapping->i_pages);  in __must_hold()
  640  /* list_lru lock nests inside the IRQ-safe i_pages lock */  in scan_shadow_nodes()
  654  * i_pages lock.
/kernel/linux/linux-5.10/arch/nios2/include/asm/ |
cacheflush.h
  49  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  50  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
/kernel/linux/linux-5.10/arch/csky/abiv1/inc/abi/ |
cacheflush.h
  21  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  22  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
/kernel/linux/linux-5.10/arch/nds32/include/asm/ |
cacheflush.h
  43  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&(mapping)->i_pages)
  44  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&(mapping)->i_pages)
/kernel/linux/linux-6.6/arch/csky/abiv1/inc/abi/ |
cacheflush.h
  19  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  20  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
/kernel/linux/linux-6.6/include/linux/ |
backing-dev.h
  246  * holding either @inode->i_lock, the i_pages lock, or the
  254  !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&  in inode_to_wb()
  277  * holding inode->i_lock, the i_pages lock or wb->list_lock. This
  298  xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_begin()
  301  * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages  in unlocked_inode_to_wb_begin()
  316  xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_end()
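Note: these hits implement the "unlocked" cgroup-writeback access pattern: if the inode is mid-switch between wb's (I_WB_SWITCH), the begin/end pair falls back to the i_pages lock; otherwise plain RCU suffices. Typical caller shape (the stat update is only an example of using the returned wb):

#include <linux/backing-dev.h>

/* Sketch only: access the inode's wb without holding i_lock throughout. */
static void wb_access_demo(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	inc_wb_stat(wb, WB_RECLAIMABLE);	/* example per-wb accounting */
	unlocked_inode_to_wb_end(inode, &cookie);
}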
/kernel/linux/linux-5.10/include/linux/ |
backing-dev.h
  277  * holding either @inode->i_lock, the i_pages lock, or the
  285  !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&  in inode_to_wb()
  297  * holding inode->i_lock, the i_pages lock or wb->list_lock. This
  318  xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_begin()
  321  * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages  in unlocked_inode_to_wb_begin()
  336  xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);  in unlocked_inode_to_wb_end()
/kernel/linux/linux-6.6/fs/netfs/ |
io.c
  26   iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,  in netfs_clear_unread()
  52   iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,  in netfs_read_from_cache()
  121  XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);  in netfs_rreq_unmark_after_write()
  211  iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,  in netfs_rreq_do_write_to_cache()
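Note: netfs treats the mapping's i_pages XArray directly as an I/O buffer via iov_iter_xarray(). netfs_clear_unread(), for instance, zeroes whatever a short read left unfilled; a sketch of that shape (zero_range_demo() and its offsets are illustrative):

#include <linux/pagemap.h>
#include <linux/uio.h>

/* Sketch only: zero @len bytes of @mapping's page cache from @start. */
static void zero_range_demo(struct address_space *mapping,
			    loff_t start, size_t len)
{
	struct iov_iter iter;

	iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, start, len);
	iov_iter_zero(len, &iter);
}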
/kernel/linux/linux-5.10/arch/arm/include/asm/ |
cacheflush.h
  318  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  319  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
/kernel/linux/linux-6.6/arch/arm/include/asm/ |
cacheflush.h
  321  #define flush_dcache_mmap_lock(mapping)    xa_lock_irq(&mapping->i_pages)
  322  #define flush_dcache_mmap_unlock(mapping)  xa_unlock_irq(&mapping->i_pages)
/kernel/linux/linux-5.10/fs/ |
dax.c
  205  * under the i_pages lock, ditto for entry handling in our callers.  in dax_wake_entry()
  218  * If @order is larger than the order of the entry found in i_pages, this
  221  * Must be called with the i_pages lock held.
  253  * The only thing keeping the address space around is the i_pages lock
  254  * (it's cycled in clear_inode() after removing the entries from i_pages)
  425  xas.xa = &mapping->i_pages;  in dax_lock_page()
  450  XA_STATE(xas, &mapping->i_pages, page->index);  in dax_unlock_page()
  519  * the i_pages lock.  in grab_mapping_entry()
  599  XA_STATE(xas, &mapping->i_pages, start_idx);  in dax_layout_busy_page_range()
  662  XA_STATE(xas, &mapping->i_pages, index);  in __dax_invalidate_entry()
  [all...]
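Note: dax.c keeps DAX entries (xa_value entries, not pages) in i_pages and keys all of its entry locking off the same lock. A reduced sketch of the __dax_invalidate_entry() shape shown above (invalidate_demo() is hypothetical; DAX entry locking, disassociation, and nrpages accounting are omitted):

#include <linux/pagemap.h>

/* Sketch only: drop a clean DAX entry at @index; returns 1 if removed. */
static int invalidate_demo(struct address_space *mapping, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;
	int ret = 0;

	xas_lock_irq(&xas);
	entry = xas_load(&xas);
	if (entry && xa_is_value(entry) &&
	    !xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) &&
	    !xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)) {
		xas_store(&xas, NULL);	/* only clean entries may go */
		ret = 1;
	}
	xas_unlock_irq(&xas);
	return ret;
}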