Lines matching refs:pvmw in the Linux kernel's mm/page_vma_mapped.c. Each entry is the source line number followed by the matching line; lines in between that do not mention pvmw are omitted.

10 static inline bool not_found(struct page_vma_mapped_walk *pvmw)
12 page_vma_mapped_walk_done(pvmw);
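The refs:pvmw filter only shows the two lines of not_found() that mention pvmw; the rest of the helper is just braces and a return. Filled in (a minimal sketch, assuming the usual upstream body), it reads:

        static inline bool not_found(struct page_vma_mapped_walk *pvmw)
        {
                /* Drop whatever lock/mapping the walk still holds... */
                page_vma_mapped_walk_done(pvmw);
                /* ...and tell the caller this vma holds no (further) mapping. */
                return false;
        }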
16 static bool map_pte(struct page_vma_mapped_walk *pvmw)
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
19 if (!(pvmw->flags & PVMW_SYNC)) {
20 if (pvmw->flags & PVMW_MIGRATION) {
21 if (!is_swap_pte(*pvmw->pte))
39 if (is_swap_pte(*pvmw->pte)) {
43 entry = pte_to_swp_entry(*pvmw->pte);
46 } else if (!pte_present(*pvmw->pte))
50 pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
51 spin_lock(pvmw->ptl);
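map_pte() maps the PTE page for pvmw->address and takes its spinlock; the branches matched above (lines 19 to 46) weed out entries that cannot possibly be interesting before the lock is taken. A condensed sketch of that control flow, reconstructed around the matched lines (the exact handling of device-private entries differs between kernel releases, so treat the swap-entry branch as an assumption):

        static bool map_pte(struct page_vma_mapped_walk *pvmw)
        {
                pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
                if (!(pvmw->flags & PVMW_SYNC)) {
                        if (pvmw->flags & PVMW_MIGRATION) {
                                /* A migration entry is encoded as a swap-style PTE. */
                                if (!is_swap_pte(*pvmw->pte))
                                        return false;
                        } else if (is_swap_pte(*pvmw->pte)) {
                                swp_entry_t entry;

                                /*
                                 * Un-addressable device-private memory is mapped as a
                                 * special swap entry yet still counts as a mapping of
                                 * the page; every other swap entry is uninteresting.
                                 */
                                entry = pte_to_swp_entry(*pvmw->pte);
                                if (!is_device_private_entry(entry))
                                        return false;
                        } else if (!pte_present(*pvmw->pte)) {
                                return false;
                        }
                } 
                pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
                spin_lock(pvmw->ptl);
                return true;
        }

With PVMW_SYNC the filtering is skipped entirely, so a synchronous caller always ends up holding the PTE lock and can re-check the entry stably.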
68 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
70 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
73 * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
76 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
77 * entry that points to @pvmw->page or any subpage in case of THP.
79 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
80 * @pvmw->page or any subpage in case of THP.
85 static bool check_pte(struct page_vma_mapped_walk *pvmw)
89 if (pvmw->flags & PVMW_MIGRATION) {
91 if (!is_swap_pte(*pvmw->pte))
93 entry = pte_to_swp_entry(*pvmw->pte);
99 } else if (is_swap_pte(*pvmw->pte)) {
103 entry = pte_to_swp_entry(*pvmw->pte);
109 if (!pte_present(*pvmw->pte))
112 pfn = pte_pfn(*pvmw->pte);
115 return pfn_is_match(pvmw->page, pfn);
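The kernel-doc above lists the cases check_pte() must distinguish; the body matched at lines 85 to 115 handles them by reducing each accepted case to a pfn and delegating the final comparison to pfn_is_match(). A sketch of that structure (the pfn lives in the swap entry's offset field; upstream uses small wrapper helpers here whose names have changed across releases, so swp_offset() below stands in for them):

        static bool check_pte(struct page_vma_mapped_walk *pvmw)
        {
                unsigned long pfn;

                if (pvmw->flags & PVMW_MIGRATION) {
                        swp_entry_t entry;

                        /* Caller is looking for a migration entry for the page. */
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                        entry = pte_to_swp_entry(*pvmw->pte);
                        if (!is_migration_entry(entry))
                                return false;
                        pfn = swp_offset(entry);        /* pfn being migrated */
                } else if (is_swap_pte(*pvmw->pte)) {
                        swp_entry_t entry;

                        /* Only device-private entries count as a real mapping. */
                        entry = pte_to_swp_entry(*pvmw->pte);
                        if (!is_device_private_entry(entry))
                                return false;
                        pfn = swp_offset(entry);        /* pfn of the device page */
                } else {
                        if (!pte_present(*pvmw->pte))
                                return false;
                        pfn = pte_pfn(*pvmw->pte);
                }

                /* True if pfn falls within pvmw->page (any subpage of a THP). */
                return pfn_is_match(pvmw->page, pfn);
        }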
118 static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
120 pvmw->address = (pvmw->address + size) & ~(size - 1);
121 if (!pvmw->address)
122 pvmw->address = ULONG_MAX;
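step_forward() is how the walk skips a whole absent range: it rounds pvmw->address up to the next size-aligned boundary, and because that addition can wrap to 0 at the very top of the address space it saturates to ULONG_MAX so the outer `while (pvmw->address < end)` test still terminates. Only the braces are hidden by the filter; filled in, the helper looks like:

        static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
        {
                /* First address of the next size-aligned region. */
                pvmw->address = (pvmw->address + size) & ~(size - 1);
                /* The addition can wrap; saturate so the walk still ends. */
                if (!pvmw->address)
                        pvmw->address = ULONG_MAX;
        }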
126 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
127 * @pvmw->address
128 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
131 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
132 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
135 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
139 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
140 * regardless of which page table level the page is mapped at. @pvmw->pmd is
144 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
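The calling convention described above is the one the rmap walkers rely on: fill in page, vma, address and flags, then call page_vma_mapped_walk() in a loop until it returns false (on the false return the lock has already been dropped and the PTE unmapped for you). A hedged sketch of such a caller, with page, vma and address assumed to be in scope and the loop body purely illustrative:

        struct page_vma_mapped_walk pvmw = {
                .page = page,           /* page (or compound head) to look for */
                .vma = vma,
                .address = address,     /* first candidate user address in vma */
                .flags = 0,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                /*
                 * One mapping found: pvmw.address is the user address and
                 * pvmw.ptl is held. Either pvmw.pte points at the matching
                 * PTE, or pvmw.pte is NULL and pvmw.pmd maps the page as a
                 * huge PMD. Act on it here, then loop: for a PTE-mapped THP
                 * the walk keeps returning further PTEs.
                 */
        }

        /*
         * If the loop is left early (break/return), the caller must call
         * page_vma_mapped_walk_done(&pvmw) itself to drop the lock.
         */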
149 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
151 struct mm_struct *mm = pvmw->vma->vm_mm;
152 struct page *page = pvmw->page;
160 if (pvmw->pmd && !pvmw->pte)
161 return not_found(pvmw);
165 if (pvmw->pte)
166 return not_found(pvmw);
169 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
170 if (!pvmw->pte)
173 pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
174 spin_lock(pvmw->ptl);
175 if (!check_pte(pvmw))
176 return not_found(pvmw);
187 vma_address_end(page, pvmw->vma) :
188 pvmw->address + PAGE_SIZE;
189 if (pvmw->pte)
193 pgd = pgd_offset(mm, pvmw->address);
195 step_forward(pvmw, PGDIR_SIZE);
198 p4d = p4d_offset(pgd, pvmw->address);
200 step_forward(pvmw, P4D_SIZE);
203 pud = pud_offset(p4d, pvmw->address);
205 step_forward(pvmw, PUD_SIZE);
209 pvmw->pmd = pmd_offset(pud, pvmw->address);
215 pmde = READ_ONCE(*pvmw->pmd);
218 pvmw->ptl = pmd_lock(mm, pvmw->pmd);
219 pmde = *pvmw->pmd;
221 if (pvmw->flags & PVMW_MIGRATION)
222 return not_found(pvmw);
224 return not_found(pvmw);
231 !(pvmw->flags & PVMW_MIGRATION))
232 return not_found(pvmw);
236 return not_found(pvmw);
240 spin_unlock(pvmw->ptl);
241 pvmw->ptl = NULL;
248 if ((pvmw->flags & PVMW_SYNC) &&
250 spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
254 step_forward(pvmw, PMD_SIZE);
257 if (!map_pte(pvmw))
260 if (check_pte(pvmw))
264 pvmw->address += PAGE_SIZE;
265 if (pvmw->address >= end)
266 return not_found(pvmw);
268 if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
269 if (pvmw->ptl) {
270 spin_unlock(pvmw->ptl);
271 pvmw->ptl = NULL;
273 pte_unmap(pvmw->pte);
274 pvmw->pte = NULL;
277 pvmw->pte++;
278 if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
279 pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
280 spin_lock(pvmw->ptl);
282 } while (pte_none(*pvmw->pte));
284 if (!pvmw->ptl) {
285 pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
286 spin_lock(pvmw->ptl);
289 } while (pvmw->address < end);
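The matches from lines 264 to 289 are the inner advance loop: step to the next candidate address, bail out at `end`, restart the full page-table descent when a PMD boundary is crossed, and otherwise try the next PTE (taking the PTE lock eagerly under PVMW_SYNC, lazily otherwise). The braces, the `do {` opener and the goto targets do not mention pvmw and are therefore hidden; filled in as an assumption from the upstream source of this era, the loop reads roughly:

        next_pte:
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross a page table boundary? */
                        if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                pte_unmap(pvmw->pte);
                                pvmw->pte = NULL;
                                goto restart;   /* descend pgd/p4d/pud/pmd again */
                        }
                        pvmw->pte++;
                        if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
                                pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                                spin_lock(pvmw->ptl);
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
                goto this_pte;  /* re-run check_pte() on this entry */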
305 struct page_vma_mapped_walk pvmw = {
311 pvmw.address = vma_address(page, vma);
312 if (pvmw.address == -EFAULT)
314 if (!page_vma_mapped_walk(&pvmw))
316 page_vma_mapped_walk_done(&pvmw);
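The last matches come from page_mapped_in_vma() in the same file, which uses the walk as a one-shot, synchronous yes/no test rather than a loop. Reconstructed around the matched lines (the PVMW_SYNC flag and the int return type follow the upstream source of this era, so treat them as assumptions if your tree differs):

        int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
        {
                struct page_vma_mapped_walk pvmw = {
                        .page = page,
                        .vma = vma,
                        .flags = PVMW_SYNC,
                };

                pvmw.address = vma_address(page, vma);
                if (pvmw.address == -EFAULT)
                        return 0;               /* page does not fall inside vma */
                if (!page_vma_mapped_walk(&pvmw))
                        return 0;               /* no mapping found */
                /* Only a boolean answer was wanted, so drop the lock at once. */
                page_vma_mapped_walk_done(&pvmw);
                return 1;
        }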