Lines matching references to pvmw, excerpted from mm/page_vma_mapped.c (Linux kernel); elided spans are marked with "...".

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
{
        pte_t ptent;

        if (pvmw->flags & PVMW_SYNC) {
                /* Use the stricter lookup */
                pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
                                                pvmw->address, &pvmw->ptl);
                *ptlp = pvmw->ptl;
                return !!pvmw->pte;
        }

        /*
         * It is important to return the ptl corresponding to pte,
         * in case *pvmw->pmd changes underneath us; so we need to
         * return it even when choosing not to lock, in case caller
         * proceeds to loop over next ptes, and finds a match later.
         */
        pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
                                          pvmw->address, ptlp);
        if (!pvmw->pte)
                return false;

        ptent = ptep_get(pvmw->pte);

        if (pvmw->flags & PVMW_MIGRATION) {
                if (!is_swap_pte(ptent))
                        return false;
        }
        /* ... device-private swap and !pte_present checks elided ... */
        pvmw->ptl = *ptlp;
        spin_lock(pvmw->ptl);
        return true;
}
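
map_pte() hands the caller the page table lock through *ptlp because the lock must pair with the exact table the returned pte points into. A minimal userspace model of that contract, using hypothetical names (struct table, lookup()) and POSIX threads, not kernel API:

#include <pthread.h>
#include <stdio.h>

/*
 * Userspace model (hypothetical): the lookup returns both the entry and
 * the lock that guards that entry's table, so the caller always locks
 * exactly the object it is about to dereference.
 */
struct table {
        pthread_mutex_t lock;
        long entry[8];
};

static long *lookup(struct table *t, int i, pthread_mutex_t **lockp)
{
        *lockp = &t->lock;      /* the lock paired with this entry */
        return &t->entry[i];
}

int main(void)
{
        struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER, .entry = { 42 } };
        pthread_mutex_t *lock;
        long *e = lookup(&t, 0, &lock);

        pthread_mutex_lock(lock);       /* lock first, then inspect */
        printf("%ld\n", *e);
        pthread_mutex_unlock(lock);
        return 0;
}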

/**
 * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
 * mapped at the @pvmw->pte
 * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
 * for checking
 *
 * pvmw->pte may point to an empty PTE, a swap PTE, or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages).
 *
 * If the PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
 * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages). Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;
        pte_t ptent = ptep_get(pvmw->pte);

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(ptent))
                        return false;
                entry = pte_to_swp_entry(ptent);
                /* ... must be a migration entry; take its pfn ... */
                pfn = swp_offset_pfn(entry);
        } else {
                /* ... swap and present-pte cases elided; both set pfn ... */
        }
        return (pfn - pvmw->pfn) < pvmw->nr_pages;
}
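
The closing comparison in check_pte() is a branchless range check: the subtraction is unsigned, so any pfn below pvmw->pfn wraps to a huge value and fails the test against nr_pages. A standalone sketch, with pfn_in_range() as a hypothetical stand-in:

#include <stdio.h>

/* One unsigned comparison covers both bounds, because (pfn - start)
 * wraps to a huge value whenever pfn < start. */
static int pfn_in_range(unsigned long pfn, unsigned long start,
                        unsigned long nr_pages)
{
        return (pfn - start) < nr_pages;
}

int main(void)
{
        printf("%d\n", pfn_in_range(0x1000, 0x1000, 16));      /* 1: first pfn */
        printf("%d\n", pfn_in_range(0x100f, 0x1000, 16));      /* 1: last pfn  */
        printf("%d\n", pfn_in_range(0x0fff, 0x1000, 16));      /* 0: below     */
        printf("%d\n", pfn_in_range(0x1010, 0x1000, 16));      /* 0: past end  */
        return 0;
}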

/* Returns true if the two ranges overlap.  Careful to not overflow. */
static bool check_pmd(unsigned long pfn, struct page_vma_mapped_walk *pvmw)
{
        if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn)
                return false;
        if (pfn > pvmw->pfn + pvmw->nr_pages - 1)
                return false;
        return true;
}

static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
        /* advance to the next size-aligned boundary; park at ULONG_MAX on wrap */
        pvmw->address = (pvmw->address + size) & ~(size - 1);
        if (!pvmw->address)
                pvmw->address = ULONG_MAX;
}
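
step_forward() advances the cursor to the next size-aligned boundary; the !pvmw->address test catches the wrap past the top of the address space, parking the cursor at ULONG_MAX so that "address < end" loops terminate. A userspace model, with step() as a hypothetical stand-in:

#include <limits.h>
#include <stdio.h>

static unsigned long step(unsigned long addr, unsigned long size)
{
        addr = (addr + size) & ~(size - 1);
        if (!addr)
                addr = ULONG_MAX;       /* wrapped past the top of the address space */
        return addr;
}

int main(void)
{
        /* inside a 2 MiB region: advances to the next 2 MiB boundary */
        printf("%#lx\n", step(0x201234UL, 0x200000UL));         /* 0x400000 */
        /* near the top: the sum wraps to 0, so the cursor parks at ULONG_MAX */
        printf("%#lx\n", step(ULONG_MAX - 0xfffUL, 0x200000UL));
        return 0;
}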

/**
 * page_vma_mapped_walk - check if @pvmw->pfn is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
 * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
 * adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For a PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct vm_area_struct *vma = pvmw->vma;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long end;
        spinlock_t *ptl;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (unlikely(is_vm_hugetlb_page(vma))) {
                struct hstate *hstate = hstate_vma(vma);
                unsigned long size = huge_page_size(hstate);

                /* The only possible mapping was handled on last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);

                /* callers already hold i_mmap_rwsem, so hugetlb_walk() is safe */
                pvmw->pte = hugetlb_walk(vma, pvmw->address, size);
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
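
        /* Not hugetlb: from here the walk descends the regular page tables. */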
        end = vma_address_end(pvmw);
        if (pvmw->pte)
                goto next_pte;
restart:
        do {
                pgd = pgd_offset(mm, pvmw->address);
                if (!pgd_present(*pgd)) {
                        step_forward(pvmw, PGDIR_SIZE);
                        continue;
                }
                p4d = p4d_offset(pgd, pvmw->address);
                if (!p4d_present(*p4d)) {
                        step_forward(pvmw, P4D_SIZE);
                        continue;
                }
                pud = pud_offset(p4d, pvmw->address);
                if (!pud_present(*pud)) {
                        step_forward(pvmw, PUD_SIZE);
                        continue;
                }

                pvmw->pmd = pmd_offset(pud, pvmw->address);
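                /*
                 * A huge or migration pmd is resolved right here at pmd
                 * level; a pmd that was split under us falls through to the
                 * pte loop below; a non-present pmd skips ahead by PMD_SIZE.
                 */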
                pmde = pmdp_get_lockless(pvmw->pmd);    /* racy read; rechecked under the lock */

                if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
                    (pmd_present(pmde) && pmd_devmap(pmde))) {
                        pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                        pmde = *pvmw->pmd;
                        if (!pmd_present(pmde)) {
                                swp_entry_t entry;

                                if (!thp_migration_supported() ||
                                    !(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                entry = pmd_to_swp_entry(pmde);
                                if (!is_migration_entry(entry) ||
                                    !check_pmd(swp_offset_pfn(entry), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
                                if (pvmw->flags & PVMW_MIGRATION)
                                        return not_found(pvmw);
                                if (!check_pmd(pmd_pfn(pmde), pvmw))
                                        return not_found(pvmw);
                                return true;
                        }
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                } else if (!pmd_present(pmde)) {
                        /* If PVMW_SYNC, take and drop the THP pmd lock so we
                         * cannot return prematurely around a racing zap_huge_pmd(). */
                        if ((pvmw->flags & PVMW_SYNC) &&
                            transhuge_vma_suitable(vma, pvmw->address) &&
                            (pvmw->nr_pages >= HPAGE_PMD_NR)) {
                                spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

                                spin_unlock(ptl);
                        }
                        step_forward(pvmw, PMD_SIZE);
                        continue;
                }
                if (!map_pte(pvmw, &ptl)) {
                        if (!pvmw->pte)
                                goto restart;
                        goto next_pte;
                }
this_pte:
                if (check_pte(pvmw))
                        return true;
next_pte:
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= end)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                pte_unmap(pvmw->pte);
                                pvmw->pte = NULL;
                                goto restart;
                        }
                        pvmw->pte++;
                } while (pte_none(ptep_get(pvmw->pte)));

                if (!pvmw->ptl) {
                        pvmw->ptl = ptl;
                        spin_lock(pvmw->ptl);
                }
                goto this_pte;
        } while (pvmw->address < end);

        return false;
}
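
As the kernel-doc above says, a PTE-mapped THP is found one PTE at a time, so callers run page_vma_mapped_walk() in a loop. A sketch of such a caller, modeled loosely on the rmap walkers; visit_mappings() is hypothetical and assumes kernel context:

#include <linux/rmap.h>

/* Hypothetical caller: visit every mapping of a folio's pfn range inside
 * one VMA. Each true return leaves pvmw.ptl held for us. */
static void visit_mappings(struct folio *folio, struct vm_area_struct *vma,
                           unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn = folio_pfn(folio),
                .nr_pages = folio_nr_pages(folio),
                .vma = vma,
                .address = address,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                if (!pvmw.pte) {
                        /* PMD-mapped THP: pvmw.pmd is valid, pte is NULL */
                        continue;
                }
                /* one PTE-level mapping at pvmw.address, pvmw.ptl held */
        }
        /* false return: the walk already dropped ptl and unmapped the pte */
}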

/* page_mapped_in_vma - check whether a page is really mapped in a VMA */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .pfn = page_to_pfn(page),
                .nr_pages = 1,
                .vma = vma,
                .flags = PVMW_SYNC,
        };

        pvmw.address = vma_address(page, vma);
        if (pvmw.address == -EFAULT)
                return 0;
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
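
Note the design choice: page_mapped_in_vma() sets PVMW_SYNC, so map_pte() uses the stricter pte_offset_map_lock() path and the walk takes the extra pmd lock/unlock around a racing huge-pmd zap. Its caller, the memory-failure (hwpoison) path, needs a definitive yes/no rather than a racy snapshot.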