Lines matching defs:pmd in mm/memory.c
190 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
193 pgtable_t token = pmd_pgtable(*pmd);
194 pmd_clear(pmd);
203 pmd_t *pmd;
208 pmd = pmd_offset(pud, addr);
211 if (pmd_none_or_clear_bad(pmd))
213 free_pte_range(tlb, pmd, addr);
214 } while (pmd++, addr = next, addr != end);
227 pmd = pmd_offset(pud, start);
229 pmd_free_tlb(tlb, pmd, start);
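
The free_pte_range()/free_pmd_range() lines above follow the kernel's usual range-walk idiom: compute the end of the current entry's span, skip entries that are empty or bad, and advance with the "while (pmd++, addr = next, addr != end)" tail. Below is a minimal user-space sketch of that idiom, not kernel code; MID_SHIFT, mid_addr_end() and the table layout are hypothetical stand-ins.

/*
 * Illustrative user-space sketch of the walk pattern seen in
 * free_pmd_range(): step through a range one mid-level entry at a
 * time, clamp "next" to the end of the range, and skip entries that
 * are not populated.  All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define MID_SHIFT   21                       /* one entry covers 2 MiB, like a pmd */
#define MID_SIZE    (1UL << MID_SHIFT)
#define MID_MASK    (~(MID_SIZE - 1))
#define NR_ENTRIES  16

/* Analogue of pmd_addr_end(): end of this entry's span, clamped to 'end'. */
static unsigned long mid_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + MID_SIZE) & MID_MASK;
        return boundary < end ? boundary : end;
}

int main(void)
{
        void *table[NR_ENTRIES] = { NULL };
        table[1] = malloc(64);               /* pretend these two entries are populated */
        table[3] = malloc(64);

        unsigned long addr = 0, end = NR_ENTRIES * MID_SIZE, next;
        void **entry = &table[0];            /* analogue of pmd = pmd_offset(pud, addr) */

        do {
                next = mid_addr_end(addr, end);
                if (*entry == NULL)          /* analogue of pmd_none_or_clear_bad() */
                        continue;
                printf("freeing lower table for [%#lx, %#lx)\n", addr, next);
                free(*entry);                /* analogue of free_pte_range() */
                *entry = NULL;
        } while (entry++, addr = next, addr != end);

        return 0;
}

Note that continue inside a do/while still evaluates the controlling expression, which is what lets the kernel loop step both the entry pointer and the address in a single place.
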
411 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
413 spinlock_t *ptl = pmd_lock(mm, pmd);
415 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
431 pmd_populate(mm, pmd, *pte);
437 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
443 pmd_install(mm, pmd, &new);
449 int __pte_alloc_kernel(pmd_t *pmd)
456 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
458 pmd_populate_kernel(&init_mm, pmd, new);
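
pmd_install(), __pte_alloc() and __pte_alloc_kernel() above share a populate-once pattern: the new page table is allocated without the lock held, then the pmd lock is taken and the table is installed only if the entry is still empty ("Has another populated it ?"); the loser of the race discards its copy. A hedged user-space sketch of that pattern follows, with a pthread mutex and a plain pointer slot standing in for the pmd lock and the pmd entry; none of these names are kernel API.

/*
 * Sketch of the populate-once pattern behind pmd_install()/__pte_alloc().
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct lower_table { long entries[512]; };

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct lower_table *slot;             /* analogue of a pmd entry */

/* Analogue of __pte_alloc(): returns 0 on success, -1 on allocation failure. */
static int lower_table_alloc(void)
{
        struct lower_table *new = calloc(1, sizeof(*new));   /* allocate outside the lock */
        if (!new)
                return -1;

        pthread_mutex_lock(&slot_lock);
        if (slot == NULL) {                  /* analogue of pmd_none(): still empty? */
                slot = new;                  /* analogue of pmd_populate() */
                new = NULL;
        }
        pthread_mutex_unlock(&slot_lock);

        free(new);                           /* lost the race: discard our copy */
        return 0;
}

int main(void)
{
        if (lower_table_alloc() == 0 && lower_table_alloc() == 0)
                printf("slot populated exactly once: %p\n", (void *)slot);
        return 0;
}

Allocating before taking the lock matters because the allocation may sleep, while the pmd lock is a spinlock; only the cheap recheck and install happen with the lock held.
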
496 pmd_t *pmd = pmd_offset(pud, addr);
525 pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
527 (long long)pte_val(pte), (long long)pmd_val(*pmd));
658 pmd_t pmd)
660 unsigned long pfn = pmd_pfn(pmd);
682 if (pmd_devmap(pmd))
684 if (is_huge_zero_pmd(pmd))
1397 struct vm_area_struct *vma, pmd_t *pmd,
1411 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1553 pmd_t *pmd;
1556 pmd = pmd_offset(pud, addr);
1559 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1561 __split_huge_pmd(vma, pmd, addr, false, NULL);
1562 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1569 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1570 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1572 * Take and drop THP pmd lock so that we cannot return
1573 * prematurely, while zap_huge_pmd() has cleared *pmd,
1578 if (pmd_none(*pmd)) {
1582 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1584 pmd--;
1585 } while (pmd++, cond_resched(), addr != end);
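
The zap_pmd_range() lines above show a different use of the pmd lock: when another thread may have already cleared the huge pmd but not yet finished tearing it down, taking and immediately dropping the same lock acts as a completion barrier, so this path cannot return while that teardown is still in flight. A small pthread sketch of the idea, with hypothetical names; the shared entry models the pmd and teardown_done models the work the racing zapper finishes before unlocking.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;
static int huge_marker;
static void *_Atomic entry = &huge_marker;   /* non-NULL: "huge entry present" */
static int teardown_done;                    /* written only with entry_lock held */

/* Analogue of the racing zapper: clears the entry, then finishes its
 * teardown work before dropping the lock. */
static void *zapper(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&entry_lock);
        atomic_store(&entry, NULL);          /* entry is cleared here ... */
        usleep(1000);                        /* ... but teardown is not finished yet */
        teardown_done = 1;
        pthread_mutex_unlock(&entry_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, zapper, NULL);

        while (atomic_load(&entry) != NULL)
                ;                            /* lockless check, like pmd_none(*pmd) */

        /* Take and drop the lock so we cannot continue while the zapper has
         * cleared the entry but not yet left its critical section. */
        pthread_mutex_lock(&entry_lock);
        pthread_mutex_unlock(&entry_lock);
        printf("teardown_done = %d (guaranteed to be 1 here)\n", teardown_done);

        pthread_join(t, NULL);
        return 0;
}
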
1775 * could have been expanded for hugetlb pmd sharing.
1810 pmd_t *pmd;
1819 pmd = pmd_alloc(mm, pud, addr);
1820 if (!pmd)
1823 VM_BUG_ON(pmd_trans_huge(*pmd));
1824 return pmd;
1830 pmd_t *pmd = walk_to_pmd(mm, addr);
1832 if (!pmd)
1834 return pte_alloc_map_lock(mm, pmd, addr, ptl);
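
walk_to_pmd() and the __get_locked_pte() line above descend from the top of the page-table hierarchy, allocating missing intermediate levels on the way down (pgd, p4d, pud, pmd) and handing back the slot the caller will lock or fill. The following is a two-level, single-threaded user-space sketch of that allocate-on-demand descent; the table layout, shifts and walk_to_mid() are invented for illustration and carry none of the kernel's locking.

#include <stdio.h>
#include <stdlib.h>

#define TOP_BITS  4
#define MID_BITS  4
#define MID_SHIFT 12

struct mid_table { void *slots[1 << MID_BITS]; };
struct top_table { struct mid_table *tables[1 << TOP_BITS]; };

/* Analogue of walk_to_pmd()/pmd_alloc(): returns the mid-level slot for
 * 'addr', allocating the mid-level table if it does not exist yet. */
static void **walk_to_mid(struct top_table *top, unsigned long addr)
{
        unsigned long ti = (addr >> (MID_SHIFT + MID_BITS)) & ((1 << TOP_BITS) - 1);
        unsigned long mi = (addr >> MID_SHIFT) & ((1 << MID_BITS) - 1);

        if (!top->tables[ti]) {
                top->tables[ti] = calloc(1, sizeof(struct mid_table));
                if (!top->tables[ti])
                        return NULL;         /* analogue of pmd_alloc() failing */
        }
        return &top->tables[ti]->slots[mi];
}

int main(void)
{
        struct top_table top = { 0 };
        void **slot = walk_to_mid(&top, 0x12345);

        if (slot) {
                *slot = &top;                /* the caller installs its entry here */
                printf("installed entry at slot %p\n", (void *)slot);
        }
        return 0;
}
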
1904 pmd_t *pmd = NULL;
1914 pmd = walk_to_pmd(mm, addr);
1915 if (!pmd)
1923 if (pte_alloc(mm, pmd))
1930 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1960 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2343 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2351 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2373 pmd_t *pmd;
2378 pmd = pmd_alloc(mm, pud, addr);
2379 if (!pmd)
2381 VM_BUG_ON(pmd_trans_huge(*pmd));
2384 err = remap_pte_range(mm, pmd, addr, next,
2388 } while (pmd++, addr = next, addr != end);
2570 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2581 pte_alloc_kernel_track(pmd, addr, mask) :
2582 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2587 pte_offset_kernel(pmd, addr) :
2588 pte_offset_map_lock(mm, pmd, addr, &ptl);
2618 pmd_t *pmd;
2625 pmd = pmd_alloc_track(mm, pud, addr, mask);
2626 if (!pmd)
2629 pmd = pmd_offset(pud, addr);
2633 if (pmd_none(*pmd) && !create)
2635 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2637 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2640 pmd_clear_bad(pmd);
2642 err = apply_to_pte_range(mm, pmd, addr, next,
2646 } while (pmd++, addr = next, addr != end);
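
apply_to_pmd_range()/apply_to_pte_range() above implement a visitor walk: with create set (apply_to_page_range()) missing lower tables are allocated, with it clear (apply_to_existing_page_range()) holes are skipped, and a callback runs on each leaf until it returns an error. Below is a compact single-level sketch of the same shape using hypothetical user-space types; it is a model of the pattern, not the kernel interface.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 8

struct table { long *slots[NR_SLOTS]; };

typedef int (*apply_fn)(long *leaf, unsigned long idx, void *data);

static int apply_to_range(struct table *t, unsigned long start, unsigned long end,
                          apply_fn fn, void *data, int create)
{
        for (unsigned long idx = start; idx < end; idx++) {
                if (!t->slots[idx]) {
                        if (!create)
                                continue;    /* hole: nothing to apply to */
                        t->slots[idx] = calloc(1, sizeof(long));
                        if (!t->slots[idx])
                                return -ENOMEM;
                }
                int err = fn(t->slots[idx], idx, data);
                if (err)
                        return err;          /* stop the walk on the first error */
        }
        return 0;
}

/* Example callback: stamp each leaf with its index. */
static int stamp(long *leaf, unsigned long idx, void *data)
{
        (void)data;
        *leaf = (long)idx;
        return 0;
}

int main(void)
{
        struct table t = { 0 };
        int err = apply_to_range(&t, 0, NR_SLOTS, stamp, NULL, 1);
        printf("walk returned %d, slot[3] = %ld\n", err, err ? -1L : *t.slots[3]);
        return 0;
}
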
2849 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2877 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3124 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3248 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3630 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3665 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3758 migration_entry_wait(vma->vm_mm, vmf->pmd,
3775 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3868 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3936 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4119 if (pte_alloc(vma->vm_mm, vmf->pmd))
4139 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4181 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
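
The run of pte_offset_map_lock() call sites above comes from fault handlers that start from an earlier lockless snapshot of the pte (vmf->orig_pte): after mapping and locking the page table they typically must confirm the live entry still matches the snapshot before changing it, and back out to retry the fault if it does not. A user-space sketch of that check-under-lock step, with a mutex and an atomic word as stand-ins for the pte lock and the pte; apply_if_unchanged() is an invented name.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t pte_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned long pte_entry = 0x1000;

/* Returns 1 if the update was applied, 0 if the entry changed under us
 * and the caller should retry the whole fault. */
static int apply_if_unchanged(unsigned long orig, unsigned long newval)
{
        int applied = 0;

        pthread_mutex_lock(&pte_lock);       /* analogue of pte_offset_map_lock() */
        if (atomic_load_explicit(&pte_entry, memory_order_relaxed) == orig) {
                atomic_store_explicit(&pte_entry, newval, memory_order_relaxed);
                applied = 1;                 /* analogue of set_pte_at() */
        }
        pthread_mutex_unlock(&pte_lock);
        return applied;
}

int main(void)
{
        unsigned long orig = atomic_load(&pte_entry);   /* earlier lockless snapshot */
        printf("update %s\n",
               apply_if_unchanged(orig, 0x2000) ? "applied" : "lost race, retry");
        return 0;
}
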
4257 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4298 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4341 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4342 if (unlikely(!pmd_none(*vmf->pmd)))
4355 * deposit and withdraw with pmd lock held
4360 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4362 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4468 if (pmd_none(*vmf->pmd)) {
4476 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4477 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4481 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4577 if (pmd_none(*vmf->pmd)) {
4744 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4881 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4944 /* COW or write-notify handled on pte level: split pmd. */
4945 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5007 if (unlikely(pmd_none(*vmf->pmd))) {
5018 * A regular pmd is established and it can't morph into a huge
5019 * pmd by anon khugepaged, since that takes mmap_lock in write
5021 * it into a huge pmd: just retry later if so.
5023 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
5139 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5140 if (!vmf.pmd)
5147 if (pmd_none(*vmf.pmd) &&
5153 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
5159 pmd_migration_entry_wait(mm, vmf.pmd);
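
The __handle_mm_fault() lines above show the dispatch step at the pmd level: allocate the pmd if necessary, take one lockless snapshot of it with pmdp_get_lockless(), and choose a path from that snapshot (try to create a mapping if it is empty, wait on a migration entry, take the huge-pmd path, or fall through to the pte level), revalidating under the appropriate lock later. A minimal sketch of deciding off a single atomic snapshot; the entry encoding and the huge_path()/pte_path() handlers are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

#define ENTRY_NONE  0UL
#define ENTRY_HUGE  (1UL << 0)               /* pretend bit 0 marks a huge mapping */

static _Atomic unsigned long mid_entry = ENTRY_HUGE | 0x200000UL;

static void huge_path(unsigned long snap)
{
        printf("huge path, snapshot %#lx\n", snap);
}

static void pte_path(unsigned long snap)
{
        printf("pte path, snapshot %#lx\n", snap);
}

int main(void)
{
        /* Analogue of pmdp_get_lockless(): one racy but atomic read. */
        unsigned long snap = atomic_load_explicit(&mid_entry, memory_order_relaxed);

        if (snap == ENTRY_NONE)
                printf("empty: would try to install a huge or pte-level mapping\n");
        else if (snap & ENTRY_HUGE)
                huge_path(snap);             /* analogue of the pmd_trans_huge() branch */
        else
                pte_path(snap);              /* analogue of falling through to the pte level */

        return 0;
}

Working from the snapshot rather than re-reading the live entry keeps every later decision consistent even if the entry changes concurrently; any actual modification still rechecks under the lock.
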
5633 pmd_t *pmd;
5648 pmd = pmd_offset(pud, address);
5649 VM_BUG_ON(pmd_trans_huge(*pmd));
5651 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);