Lines Matching defs:mapping (definitions and uses of "mapping" in mm/hugetlb.c)
987 * the mapping, in pagecache page units; huge pages here.
1005 * @vma: The user mapping.
1042 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1050 * The private mapping reservation is represented in a subtly different
1051 * manner to a shared mapping. A shared mapping has a region map associated
1054 * after the page is instantiated. A private mapping has a region map
1148 * The VERY common case is inode->mapping == &inode->i_data but,
1158 struct address_space *mapping = vma->vm_file->f_mapping;
1159 struct inode *inode = mapping->host;
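
The comments around 1042-1054 describe the two representations of hugepage reserves, and the locals at 1158-1159 show how the shared case reaches its map. A minimal sketch of the lookup, modeled on vma_resv_map() in mm/hugetlb.c (inode_resv_map(), get_vma_private_data() and HPAGE_RESV_MASK are the helper names used by recent kernels and may differ in older trees):

    static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
    {
            if (vma->vm_flags & VM_MAYSHARE) {
                    /* Shared: the region map hangs off the inode. */
                    struct address_space *mapping = vma->vm_file->f_mapping;
                    struct inode *inode = mapping->host;

                    return inode_resv_map(inode);
            }
            /*
             * Private: the pointer shares vm_private_data with the
             * HPAGE_RESV_* owner flags, so mask them off.
             */
            return (struct resv_map *)(get_vma_private_data(vma) &
                                       ~HPAGE_RESV_MASK);
    }

Only the process that set up the private mapping carries HPAGE_RESV_OWNER, which is why children lose the reserve guarantee after fork().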
1295 * could have been performed on the private mapping.
1532 p->mapping = NULL;
1811 * freed and frees them one-by-one. As the page->mapping pointer is going
1828 struct page, mapping);
1830 page->mapping = NULL;
1867 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1912 folio->mapping = NULL;
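
The cluster at 1811-1912 is the deferred-free path: free_huge_page() can run from contexts that must not sleep on hugetlb locks, so the page is pushed onto a lockless list and a workqueue drains it, reusing the then-idle mapping field as the llist_node (line 1867). A sketch of the worker side, following the shape of free_hpage_workfn() (the name of the final free helper varies by release):

    static LLIST_HEAD(hpage_freelist);

    static void free_hpage_workfn(struct work_struct *work)
    {
            struct llist_node *node = llist_del_all(&hpage_freelist);

            while (node) {
                    /* The node lives inside page->mapping; back-convert. */
                    struct page *page = container_of((struct address_space **)node,
                                                     struct page, mapping);

                    node = node->next;
                    page->mapping = NULL;   /* clear before the page is reused */
                    __free_huge_page(page); /* the real free, with locks taken */
            }
    }

llist_add() at line 1867 returns true only when the list was empty, which is what gates scheduling the work item exactly once per batch.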
2094 * Find and lock address space (mapping) in write mode.
2102 struct address_space *mapping = page_mapping(hpage);
2104 if (!mapping)
2105 return mapping;
2107 if (i_mmap_trylock_write(mapping))
2108 return mapping;
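
Lines 2094-2108 are nearly the whole of hugetlb_page_mapping_lock_write(): resolve the page to its address_space and take i_mmap_rwsem for writing, using a trylock because its callers (memory-failure/migration paths) cannot safely sleep here. Reassembled from the fragments above:

    struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
    {
            struct address_space *mapping = page_mapping(hpage);

            /* Anonymous or truncated pages have no mapping to lock. */
            if (!mapping)
                    return mapping;

            if (i_mmap_trylock_write(mapping))
                    return mapping;

            /* Lock contended: let the caller retry or bail out. */
            return NULL;
    }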
2773 * We know private mapping must have HPAGE_RESV_OWNER set.
2776 * However, a file associated with mapping could have been
3067 * Processes that did not create the mapping will have no
4807 * The change of semantics for shared hugetlb mapping with cpuset is
4843 * HPAGE_RESV_OWNER indicates a private mapping.
5258 struct address_space *mapping = vma->vm_file->f_mapping;
5280 i_mmap_lock_write(mapping);
5310 i_mmap_unlock_write(mapping);
5507 * mapping it owns the reserve page for. The intention is to unmap the page
5516 struct address_space *mapping;
5526 mapping = vma->vm_file->f_mapping;
5529 * Take the mapping lock for the duration of the table walk. As
5530 * this mapping should be shared between all the VMAs,
5533 i_mmap_lock_write(mapping);
5534 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5558 i_mmap_unlock_write(mapping);
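
Lines 5507-5558 implement the unmap-the-others step: when the reserve owner must copy-on-write but no pages are left, every other private mapping of the same file page is found via the inode's interval tree and unmapped so the owner can reuse the page. The walk, condensed (unmap_hugepage_range()'s trailing arguments differ across versions; the interval tree is indexed in PAGE_SIZE units, hence the explicit pgoff arithmetic):

    struct vm_area_struct *iter_vma;
    pgoff_t pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

    i_mmap_lock_write(mapping);
    vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
            if (iter_vma == vma)                    /* skip ourselves */
                    continue;
            if (iter_vma->vm_flags & VM_MAYSHARE)   /* shared VMAs keep it */
                    continue;
            /* Never unmap the reserve owner's own mapping. */
            if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
                    unmap_hugepage_range(iter_vma, address,
                                         address + huge_page_size(h),
                                         page, 0);
    }
    i_mmap_unlock_write(mapping);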
5627 * If the process that created a MAP_PRIVATE mapping is about to
5631 * consumed or not. If reserves were used, a partial faulted mapping
5650 * If a process owning a MAP_PRIVATE mapping fails to COW,
5657 struct address_space *mapping = vma->vm_file->f_mapping;
5672 hash = hugetlb_fault_mutex_hash(mapping, idx);
5761 struct address_space *mapping = vma->vm_file->f_mapping;
5765 folio = filemap_get_folio(mapping, idx);
5772 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5775 struct inode *inode = mapping->host;
5780 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
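
hugetlb_add_to_page_cache() at 5772-5780 is a thin wrapper around __filemap_add_folio() that also charges the inode's block count; this sketch drops the reserve-flag bookkeeping the real function does on success:

    int hugetlb_add_to_page_cache(struct folio *folio,
                                  struct address_space *mapping, pgoff_t idx)
    {
            struct inode *inode = mapping->host;
            struct hstate *h = hstate_inode(inode);
            int err;

            __folio_set_locked(folio);
            err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
            if (unlikely(err)) {
                    __folio_clear_locked(folio);
                    return err;
            }

            /* The cache now owns a reference; account it to the inode. */
            spin_lock(&inode->i_lock);
            inode->i_blocks += blocks_per_huge_page(h);
            spin_unlock(&inode->i_lock);
            return 0;
    }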
5801 struct address_space *mapping,
5830 hash = hugetlb_fault_mutex_hash(mapping, idx);
5854 struct address_space *mapping, pgoff_t idx,
5867 u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
5886 folio = filemap_lock_folio(mapping, idx);
5888 size = i_size_read(mapping->host) >> huge_page_shift(h);
5915 return hugetlb_handle_userfault(vma, mapping, idx, flags,
5945 int err = hugetlb_add_to_page_cache(folio, mapping, idx);
5988 return hugetlb_handle_userfault(vma, mapping, idx, flags,
5995 * If we are going to COW a private mapping later, we examine the
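
The fragments from 5854-5995 trace hugetlb_no_page()'s cache-first flow: under the fault mutex, look the folio up, recheck i_size in case a truncate raced in, and punt missing pages to userfaultfd when the VMA is registered. The skeleton below is heavily condensed; filemap_lock_folio() returns an ERR_PTR on current kernels but NULL on older ones, and haddr is the huge-page-aligned fault address:

    struct folio *folio;
    unsigned long size;

    folio = filemap_lock_folio(mapping, idx);
    if (IS_ERR(folio)) {
            /* Recheck the size: the fault may race with truncation. */
            size = i_size_read(mapping->host) >> huge_page_shift(h);
            if (idx >= size)
                    goto out;

            if (userfaultfd_missing(vma))
                    return hugetlb_handle_userfault(vma, mapping, idx, flags,
                                                    haddr, address,
                                                    VM_UFFD_MISSING);

            /*
             * Otherwise allocate a folio, zero it, and insert it via
             * hugetlb_add_to_page_cache() (line 5945).
             */
    }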
6063 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6068 key[0] = (unsigned long) mapping;
6080 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
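
Lines 6063-6080 show both builds of hugetlb_fault_mutex_hash(). Under CONFIG_SMP the (mapping, index) pair is hashed with jhash2() and masked into the fault-mutex table; the uniprocessor variant at 6080 simply returns 0 because the table has a single slot. The SMP flavor:

    u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
    {
            unsigned long key[2];
            u32 hash;

            key[0] = (unsigned long) mapping;
            key[1] = idx;

            hash = jhash2((u32 *)&key, sizeof(key) / sizeof(u32), 0);

            /* num_fault_mutexes is a power of two, so masking == modulo. */
            return hash & (num_fault_mutexes - 1);
    }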
6097 struct address_space *mapping;
6112 mapping = vma->vm_file->f_mapping;
6114 hash = hugetlb_fault_mutex_hash(mapping, idx);
6148 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
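
hugetlb_fault() (6097-6148) is a consumer of that hash: one mutex per (mapping, index) bucket serializes instantiation, so two CPUs cannot both allocate a huge page for the same hole. The locking shape, with huge_ptep_get()'s arguments as in v6.1-era kernels:

    mapping = vma->vm_file->f_mapping;
    idx = vma_hugecache_offset(h, vma, haddr);

    hash = hugetlb_fault_mutex_hash(mapping, idx);
    mutex_lock(&hugetlb_fault_mutex_table[hash]);

    entry = huge_ptep_get(ptep);
    if (huge_pte_none_mostly(entry))
            /* hugetlb_no_page() drops the mutex on its way out. */
            return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
                                   entry, flags);

    /* Present-PTE paths (CoW, unshare) fall through and end with: */
    mutex_unlock(&hugetlb_fault_mutex_table[hash]);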
6180 * If we are going to COW/unshare the mapping later, we examine the
6195 pagecache_folio = filemap_lock_folio(mapping, idx);
6295 struct address_space *mapping = dst_vma->vm_file->f_mapping;
6328 folio = filemap_lock_folio(mapping, idx);
6409 size = i_size_read(mapping->host) >> huge_page_shift(h);
6420 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6746 * called to make the mapping read-write. Assume !vma is a shm mapping
6758 /* Private mapping. */
6784 * There must be enough pages in the subpool for the mapping. If
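
The comment at 6784 belongs to hugetlb_reserve_pages(): the reservation is first taken from the subpool, and only the portion the subpool cannot cover from its min_size cushion is charged to the global pool. Roughly, with the error paths simplified:

    /*
     * Ask the subpool first; it returns how many pages must still come
     * from the global pool (less than chg if the subpool holds a
     * min_size cushion), or a negative value on failure.
     */
    gbl_reserve = hugepage_subpool_get_pages(spool, chg);
    if (gbl_reserve < 0)
            goto out_err;

    /* Charge only the uncovered remainder globally. */
    if (hugetlb_acct_memory(h, gbl_reserve) < 0) {
            hugepage_subpool_put_pages(spool, chg);
            goto out_err;
    }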
7000 struct address_space *mapping = vma->vm_file->f_mapping;
7008 i_mmap_lock_read(mapping);
7009 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7038 i_mmap_unlock_read(mapping);
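
huge_pmd_share()'s scan (7000-7038) runs under i_mmap_rwsem held for read: every other VMA mapping the same file offset is probed for an already-populated entry whose PMD page this mm can share instead of allocating its own. Condensed, with page_table_shareable() being the real helper that checks alignment and flags:

    struct vm_area_struct *svma;
    unsigned long saddr;
    pte_t *spte = NULL;

    i_mmap_lock_read(mapping);
    vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
            if (svma == vma)
                    continue;

            saddr = page_table_shareable(svma, vma, addr, idx);
            if (!saddr)
                    continue;

            spte = huge_pte_offset(svma->vm_mm, saddr,
                                   vma_mmu_pagesize(svma));
            if (spte) {
                    /* Pin the shared PMD page before installing it. */
                    get_page(virt_to_page(spte));
                    break;
            }
    }
    /*
     * ...then take the page-table lock and point this mm's PUD at the
     * shared page, dropping the pin if someone else raced us to it.
     */
    i_mmap_unlock_read(mapping);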
7045 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
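
The comment at 7045 states the sharing invariant: the PMD page's refcount counts its sharers, so unsharing is a pud_clear() plus a put. In the shape of huge_pmd_unshare() (lock assertions dropped; the signature gained the vma argument in newer kernels):

    int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long addr, pte_t *ptep)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d = p4d_offset(pgd, addr);
            pud_t *pud = pud_offset(p4d, addr);

            if (page_count(virt_to_page(ptep)) == 1)
                    return 0;       /* not shared: caller frees normally */

            /* Detach this mm and drop its reference on the PMD page. */
            pud_clear(pud);
            put_page(virt_to_page(ptep));
            mm_dec_nr_pmds(mm);
            return 1;
    }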
7172 * page in a page table page mapping size. Used to skip non-present