Lines Matching defs:page
33 /* We choose 4096 entries - same as per-zone page wait tables */
37 /* The 'colour' (ie low bits) within a PMD of a page offset. */
56 * and two more to tell us if the entry is a zero page or an empty entry that
121 * DAX page cache entry locking
200 * Look up entry in page cache, wait for it to become unlocked if it
323 static inline bool dax_page_is_shared(struct page *page)
325 return page->mapping == PAGE_MAPPING_DAX_SHARED;
329 * Set page->mapping to the PAGE_MAPPING_DAX_SHARED flag and increase the
332 static inline void dax_page_share_get(struct page *page)
334 if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
336 * Reset the index if the page was already mapped
339 if (page->mapping)
340 page->share = 1;
341 page->mapping = PAGE_MAPPING_DAX_SHARED;
343 page->share++;
346 static inline unsigned long dax_page_share_put(struct page *page)
348 return --page->share;
353 * whether this entry is shared by multiple files. If so, set page->mapping to
354 * PAGE_MAPPING_DAX_SHARED and use page->share as the refcount.
367 struct page *page = pfn_to_page(pfn);
370 dax_page_share_get(page);
372 WARN_ON_ONCE(page->mapping);
373 page->mapping = mapping;
374 page->index = index + i++;
388 struct page *page = pfn_to_page(pfn);
390 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
391 if (dax_page_is_shared(page)) {
392 /* keep the shared flag if this page is still shared */
393 if (dax_page_share_put(page) > 0)
396 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
397 page->mapping = NULL;
398 page->index = 0;
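Taken together, dax_page_share_get()/dax_page_share_put() and the associate/disassociate fragments above form a small reference-count protocol for reflink-shared pages: page->mapping is overloaded with the PAGE_MAPPING_DAX_SHARED marker and page->share counts the file mappings that share the page; the flag is only dropped once the count reaches zero. A minimal, self-contained userspace model of that protocol (hypothetical struct and names, not the kernel code itself):

#include <assert.h>
#include <stdio.h>

#define DAX_SHARED ((void *)1)	/* stand-in for PAGE_MAPPING_DAX_SHARED */

struct model_page {		/* hypothetical model of the struct page fields used */
	void *mapping;		/* owning mapping, or the DAX_SHARED marker */
	unsigned long share;	/* sharer count once the page is shared */
};

/* Mirrors dax_page_share_get(): the first sharer converts the page,
 * later sharers only bump the count. */
static void share_get(struct model_page *p)
{
	if (p->mapping != DAX_SHARED) {
		if (p->mapping)	/* page already had a regular owner */
			p->share = 1;
		p->mapping = DAX_SHARED;
	}
	p->share++;
}

/* Mirrors dax_page_share_put(): returns the remaining sharer count. */
static unsigned long share_put(struct model_page *p)
{
	return --p->share;
}

int main(void)
{
	struct model_page p = { .mapping = (void *)0x1000 };	/* one regular owner */

	share_get(&p);			/* a second file starts sharing the page */
	share_get(&p);			/* and a third */
	assert(p.share == 3);
	assert(share_put(&p) == 2);
	assert(share_put(&p) == 1);	/* one user left; the flag stays until 0 */
	printf("remaining sharers: %lu\n", p.share);
	return 0;
}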
402 static struct page *dax_busy_page(void *entry)
407 struct page *page = pfn_to_page(pfn);
409 if (page_ref_count(page) > 1)
410 return page;
485 * @page: output the dax page corresponding to this dax entry
491 struct page **page)
519 * not output @page.
523 *page = pfn_to_page(dax_to_pfn(entry));
545 * Find page cache entry at given index. If it is a DAX entry, return it
546 * with the entry locked. If the page cache doesn't contain an entry at
660 * dax_layout_busy_page_range - find first pinned page in @mapping
661 * @mapping: address space to scan for a page with ref count > 1
667 * 'onlined' to the page allocator so they are considered idle when
668 * page->count == 1. A filesystem uses this interface to determine if
669 * any page in the mapping is busy, i.e. for DMA, or other
677 struct page *dax_layout_busy_page_range(struct address_space *mapping,
682 struct page *page = NULL;
703 * elevated page count in the iteration and wait, or
704 * get_user_pages_fast() will see that the page it took a reference
705 * against is no longer mapped in the page tables and bail to the
722 page = dax_busy_page(entry);
724 if (page)
735 return page;
739 struct page *dax_layout_busy_page(struct address_space *mapping)
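dax_busy_page() above reports an entry whose page still has a reference count above one, and dax_layout_busy_page()/dax_layout_busy_page_range() scan a mapping for such a page. A hedged sketch (not from fs/dax.c) of how a filesystem typically drives this before truncating a DAX file; the retry loop and the fs_wait_dax_page() helper are hypothetical, and real callers such as XFS hold locks that block new mappings and add their own waiting scheme around the page reference count:

/* Hypothetical caller: scan for busy pages and wait until they go idle. */
static void fs_break_dax_layouts(struct address_space *mapping)
{
	struct page *page;

	while ((page = dax_layout_busy_page(mapping)) != NULL) {
		/*
		 * page_ref_count(page) > 1 here: get_user_pages() users
		 * (for example in-flight DMA) still hold references.
		 * Block until the page becomes idle, then rescan.
		 */
		fs_wait_dax_page(page);		/* hypothetical wait helper */
	}
}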
808 * page cache (usually fs-private i_mmap_sem for writing). Since the
884 /* we are replacing a zero page with block mapping */
901 * Only swap our new entry into the page cache if the current
902 * entry is a zero page or an empty entry. If a normal PTE or
934 * A page got tagged dirty in DAX mapping? Something is seriously
966 /* Lock the entry to serialize with page faults */
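The fragments above (swapping the new entry into the page cache, warning on unexpectedly dirty entries, locking the entry against faults) belong to the dirty-entry writeback path. As a hedged sketch, a DAX-aware filesystem usually reaches that path from its ->writepages method by calling dax_writeback_mapping_range(); the fs_ prefixed names below are hypothetical and the dax_device lookup is simplified:

static int fs_dax_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	/*
	 * fs_daxdev(): hypothetical helper returning this filesystem's
	 * struct dax_device (ext4, for example, keeps it in its sb_info).
	 * dax_writeback_mapping_range() walks the dirty-tagged DAX entries,
	 * flushes the backing pfns to media and clears the dirty tag.
	 */
	return dax_writeback_mapping_range(mapping, fs_daxdev(mapping->host), wbc);
}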
1103 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
1111 * This can be called from two places. Either during DAX write fault (page
1126 /* copy_all is usually in page fault case */
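dax_iomap_copy_around() handles a write that does not cover a whole page/block of a shared (CoW) extent: the bytes around the written range must first be copied from the source extent so the destination block ends up fully populated. A small, self-contained illustration of the range arithmetic (userspace C with made-up values; this is not the kernel function itself):

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long align = 4096;		/* copy granularity  */
	unsigned long long pos = 5000, len = 300;	/* unaligned write   */

	unsigned long long head_start = pos & ~(align - 1);	/* 4096 */
	unsigned long long end = pos + len;			/* 5300 */
	unsigned long long tail_end = ALIGN_UP(end, align);	/* 8192 */

	/* The head [4096, 5000) and the tail [5300, 8192) are copied from
	 * the source extent; only [5000, 5300) comes from the new write. */
	printf("copy head [%llu, %llu)\n", head_start, pos);
	printf("new  data [%llu, %llu)\n", pos, end);
	printf("copy tail [%llu, %llu)\n", end, tail_end);
	return 0;
}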
1181 * page in the file would cause excessive storage usage for workloads with
1182 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1183 * If this page is ever written to we will re-fault and change the mapping to
1210 struct page *zero_page;
1452 * A write can allocate a block for an area which has a hole page mapped
1453 * into the page tables. We have to tear down these mappings so that data
1545 * and evicting any page cache pages in the region under I/O.
1590 * When handling a synchronous page fault, if the inode needs an fsync, we can
1591 * insert the PTE/PMD into the page tables only after that fsync has happened. Skip
1783 * that a PMD range in the page table overlaps exactly with a PMD
1784 * range in the page cache.
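The 'colour' mentioned at line 37 is what this alignment requirement checks: a PMD-sized mapping is only possible when the faulting virtual address and the file offset sit at the same position within a PMD, so that a PMD range in the page table overlaps exactly with a PMD range in the page cache. A small, self-contained illustration of that check (userspace C, constants assumed for x86-64 with 4 KiB pages and 2 MiB PMDs; the concrete address and offset are made up):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PG_PMD_COLOUR	((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1)	/* 511 */

int main(void)
{
	unsigned long address = 0x7f3a00201000UL;	/* faulting virtual address */
	unsigned long pgoff   = 0x201;			/* page offset within the file */

	unsigned long vaddr_colour = (address >> PAGE_SHIFT) & PG_PMD_COLOUR;
	unsigned long file_colour  = pgoff & PG_PMD_COLOUR;

	/* If the colours disagree, the fault falls back to PTE granularity. */
	printf("vaddr colour %#lx, file colour %#lx -> %s\n",
	       vaddr_colour, file_colour,
	       vaddr_colour == file_colour ? "PMD possible" : "fallback to PTE");
	return 0;
}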
1895 * dax_iomap_fault - handle a page fault on a DAX file
1897 * @order: Order of the page to fault in
1902 * When a page fault occurs, filesystems may call this helper in
1904 * has done all the necessary locking for page fault to proceed
1920 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1925 * This function inserts a writeable PTE or PMD entry into the page tables
1926 * for an mmapped DAX file. It also marks the page cache entry as dirty.
1964 * dax_finish_sync_fault - finish synchronous page fault
1969 * This function ensures that the file range touched by the page fault is
1970 * stored persistently on the media and handles insertion of the appropriate page
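dax_iomap_fault(), dax_insert_pfn_mkwrite() and dax_finish_sync_fault() combine into the synchronous (MAP_SYNC) fault flow: the initial fault skips the PTE/PMD insertion and returns VM_FAULT_NEEDDSYNC, and the caller finishes the fault once the touched range is durable. A hedged sketch of the calling pattern in a filesystem fault handler; the fs_ names and the iomap ops instance are hypothetical, locking is elided, and ext4/XFS have the complete versions:

static vm_fault_t fs_dax_fault(struct vm_fault *vmf, unsigned int order)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &fs_iomap_ops);

	/*
	 * For a write fault on a MAP_SYNC mapping with unflushed metadata,
	 * dax_iomap_fault() asks the caller to make the range durable first;
	 * dax_finish_sync_fault() then syncs it and installs the PTE/PMD.
	 */
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, order, pfn);

	return ret;
}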