Lines Matching refs:mapping

191 * @entry may no longer be the entry at the index in the mapping.
342 static void dax_associate_entry(void *entry, struct address_space *mapping,
355 WARN_ON_ONCE(page->mapping);
356 page->mapping = mapping;
361 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
373 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
374 page->mapping = NULL;
405 /* Ensure page->mapping isn't freed while we look at it */
408 struct address_space *mapping = READ_ONCE(page->mapping);
411 if (!mapping || !dax_mapping(mapping))
422 if (S_ISCHR(mapping->host->i_mode))
425 xas.xa = &mapping->i_pages;
427 if (mapping != page->mapping) {
449 struct address_space *mapping = page->mapping;
450 XA_STATE(xas, &mapping->i_pages, page->index);
452 if (S_ISCHR(mapping->host->i_mode))
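
The block above (lines 405-452) is dax_lock_page()/dax_unlock_page(), which pin the DAX entry so that page->mapping cannot be freed while a pfn-based user such as memory-failure handling inspects it. A minimal usage sketch, modelled on the memory_failure_dev_pagemap() path in mm/memory-failure.c with error handling trimmed, so treat it as an illustration rather than the exact upstream code:

	dax_entry_t cookie;

	cookie = dax_lock_page(page);	/* stabilises page->mapping and locks the entry */
	if (!cookie)
		goto out;		/* no DAX entry backs this pfn any more */

	/* ... unmap the poisoned pfn from every user mapping ... */

	dax_unlock_page(page, cookie);
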
488 struct address_space *mapping, unsigned int order)
530 unmap_mapping_pages(mapping,
537 dax_disassociate_entry(entry, mapping, false);
540 mapping->nrexceptional--;
556 mapping->nrexceptional++;
561 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
574 * dax_layout_busy_page_range - find first pinned page in @mapping
575 * @mapping: address space to scan for a page with ref count > 1
583 * any page in the mapping is busy, i.e. for DMA, or other
591 struct page *dax_layout_busy_page_range(struct address_space *mapping,
599 XA_STATE(xas, &mapping->i_pages, start_idx);
607 if (!dax_mapping(mapping) || !mapping_mapped(mapping))
627 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
653 struct page *dax_layout_busy_page(struct address_space *mapping)
655 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
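
dax_layout_busy_page() and dax_layout_busy_page_range() (lines 574-655) exist so a filesystem can find pages still pinned for DMA before it truncates or punches a hole in a DAX file. A hedged sketch of a typical caller, loosely modelled on ext4_break_layouts()/xfs_break_dax_layouts(); the function name is made up, and real callers drop and retake their mmap lock inside the wait body instead of calling schedule() directly:

	static int example_break_dax_layouts(struct inode *inode)
	{
		struct page *page;
		int error;

		do {
			page = dax_layout_busy_page(inode->i_mapping);
			if (!page)
				return 0;	/* nothing pinned, safe to truncate */

			/* wait for the last external (e.g. RDMA) reference to drop */
			error = ___wait_var_event(&page->_refcount,
					atomic_read(&page->_refcount) == 1,
					TASK_INTERRUPTIBLE, 0, 0, schedule());
		} while (error == 0);

		return error;
	}
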
659 static int __dax_invalidate_entry(struct address_space *mapping,
662 XA_STATE(xas, &mapping->i_pages, index);
674 dax_disassociate_entry(entry, mapping, trunc);
676 mapping->nrexceptional--;
685 * Delete DAX entry at @index from @mapping. Wait for it
688 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
690 int ret = __dax_invalidate_entry(mapping, index, true);
706 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
709 return __dax_invalidate_entry(mapping, index, false);
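
__dax_invalidate_entry() (line 659) backs both exported helpers: dax_delete_mapping_entry() is called from truncation, where removal must succeed, while dax_invalidate_mapping_entry_sync() is used by page-cache invalidation and may fail on dirty entries. For context, mm/truncate.c of this era dispatches DAX exceptional entries roughly like this (reproduced from memory, so treat it as a sketch rather than a verbatim quote):

	static int invalidate_exceptional_entry2(struct address_space *mapping,
						 pgoff_t index, void *entry)
	{
		/* Handled by shmem itself */
		if (shmem_mapping(mapping))
			return 1;
		if (dax_mapping(mapping))
			return dax_invalidate_mapping_entry_sync(mapping, index);
		clear_shadow_entry(mapping, index, entry);
		return 1;
	}
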
745 struct address_space *mapping, struct vm_fault *vmf,
751 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
755 /* we are replacing a zero page with block mapping */
757 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
760 unmap_mapping_pages(mapping, index, 1, false);
768 dax_disassociate_entry(entry, mapping, false);
769 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
804 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
812 i_mmap_lock_read(mapping);
813 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
875 i_mmap_unlock_read(mapping);
879 struct address_space *mapping, void *entry)
885 * A page got tagged dirty in DAX mapping? Something is seriously
941 dax_entry_mkclean(mapping, index, pfn);
946 * the pfn mappings are writeprotected and fault waits for mapping
955 trace_dax_writeback_one(mapping->host, index, count);
964 * Flush the mapping to the persistent domain within the byte range of [start,
968 int dax_writeback_mapping_range(struct address_space *mapping,
971 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
972 struct inode *inode = mapping->host;
981 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
986 tag_pages_for_writeback(mapping, xas.xa_index, end_index);
990 ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
992 mapping_set_error(mapping, ret);
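
dax_writeback_mapping_range() (lines 964-992) is the hook a DAX filesystem wires into its ->writepages() method so that fsync()/msync() flush dirty entries from CPU caches to the persistent domain. A minimal sketch, assuming the filesystem cached its dax_device in per-superblock info at mount time (example_sb_info and s_daxdev are placeholders here, not a real API):

	static int example_dax_writepages(struct address_space *mapping,
					  struct writeback_control *wbc)
	{
		struct example_sb_info *sbi = mapping->host->i_sb->s_fs_info;

		/* walks the TOWRITE-tagged entries and calls dax_writeback_one() */
		return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
	}
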
1049 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1050 * If this page is ever written to we will re-fault and change the mapping to
1054 struct address_space *mapping, void **entry,
1057 struct inode *inode = mapping->host;
1062 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1213 struct address_space *mapping = iocb->ki_filp->f_mapping;
1214 struct inode *inode = mapping->host;
1250 * MAP_SYNC on a dax mapping guarantees dirty metadata is
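
The comment at line 1250 refers to the MAP_SYNC contract: on such a mapping a write fault only completes once the metadata needed to reach the faulted block is durable, so userspace may persist its data with CPU cache-flush instructions alone. A userspace sketch of establishing such a mapping, with error handling omitted (older glibc may need <linux/mman.h> for MAP_SYNC):

	#define _GNU_SOURCE
	#include <sys/mman.h>
	#include <stddef.h>

	/* Map a file on a DAX filesystem so that write faults are synchronous. */
	static void *map_sync(int fd, size_t len)
	{
		/*
		 * MAP_SYNC must be paired with MAP_SHARED_VALIDATE so that a
		 * kernel or filesystem that cannot honour it fails with
		 * EOPNOTSUPP instead of silently dropping the flag.
		 */
		return mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	}
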
1264 struct address_space *mapping = vma->vm_file->f_mapping;
1265 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1266 struct inode *inode = mapping->host;
1293 entry = grab_mapping_entry(&xas, mapping, 0);
1368 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1396 ret = dax_load_hole(&xas, mapping, &entry, vmf);
1433 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1436 struct inode *inode = mapping->host;
1449 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1486 struct address_space *mapping = vma->vm_file->f_mapping;
1487 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1492 struct inode *inode = mapping->host;
1546 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1566 * setting up a mapping, so really we're using iomap_begin() as a way
1586 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1690 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1691 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1702 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1718 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
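
dax_insert_pfn_mkwrite() (lines 1690-1718) is reached through dax_finish_sync_fault() after the filesystem has committed the metadata a MAP_SYNC write fault depends on. A hedged sketch of the usual shape of the filesystem side, loosely modelled on the ext4/XFS DAX fault handlers; example_iomap_ops is a placeholder and the journal commit is only indicated by a comment:

	static vm_fault_t example_dax_fault(struct vm_fault *vmf,
					    enum page_entry_size pe_size)
	{
		vm_fault_t ret;
		pfn_t pfn;

		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);

		/*
		 * dax.c returns VM_FAULT_NEEDDSYNC for a write fault on a
		 * MAP_SYNC vma whose iomap carries IOMAP_F_DIRTY metadata.
		 */
		if (ret & VM_FAULT_NEEDDSYNC) {
			/* ... commit/flush the relevant filesystem metadata ... */
			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
		}
		return ret;
	}
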