Lines Matching defs:pmd
551 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
554 pmd = pmd_mkwrite(pmd, vma);
555 return pmd;
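Only lines containing "pmd" appear in this listing, so the guard between lines 551 and 554 is filtered out. A minimal sketch of the whole helper, assuming the hidden line is the usual VM_WRITE test as in mainline THP code:

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        /* Only mark the pmd writable when the VMA itself is writable. */
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd, vma);
        return pmd;
}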
679 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
680 if (unlikely(!pmd_none(*vmf->pmd))) {
703 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
704 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
705 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
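Lines 679-705 are the anonymous-fault install sequence: take the pmd lock, recheck that the entry is still empty, deposit the preallocated pte table for a later split, then publish the huge entry. A hypothetical helper sketching that pattern (the name, the prepared 'entry' and the caller-provided 'pgtable' are illustrative, not kernel API):

static bool install_huge_pmd_sketch(struct vm_area_struct *vma,
                                    unsigned long haddr, unsigned long addr,
                                    pmd_t *pmdp, pmd_t entry, pgtable_t pgtable)
{
        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmdp);

        if (unlikely(!pmd_none(*pmdp))) {
                spin_unlock(ptl);
                return false;           /* raced with another fault; caller frees pgtable */
        }
        pgtable_trans_huge_deposit(vma->vm_mm, pmdp, pgtable);
        set_pmd_at(vma->vm_mm, haddr, pmdp, entry);     /* haddr is pmd-aligned */
        update_mmu_cache_pmd(vma, addr, pmdp);
        spin_unlock(ptl);
        return true;
}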
761 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
765 if (!pmd_none(*pmd))
769 pgtable_trans_huge_deposit(mm, pmd, pgtable);
770 set_pmd_at(mm, haddr, pmd, entry);
802 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
804 if (pmd_none(*vmf->pmd)) {
816 haddr, vmf->pmd, zero_page);
817 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
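Lines 761-817 cover the huge zero-page case: the helper installs a read-only huge entry backed by the shared zero page while the fault path holds the pmd lock and has already checked pmd_none(). A rough sketch under those assumptions (the filtered-out lines are assumed to build the entry with mk_pmd()/pmd_mkhuge() as in mainline):

static bool set_huge_zero_pmd_sketch(struct mm_struct *mm,
                                     struct vm_area_struct *vma,
                                     unsigned long haddr, pmd_t *pmdp,
                                     struct page *zero_page, pgtable_t pgtable)
{
        pmd_t entry;

        if (!pmd_none(*pmdp))           /* caller holds the pmd lock */
                return false;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);      /* read-only: writes fault and COW */
        pgtable_trans_huge_deposit(mm, pmdp, pgtable);
        set_pmd_at(mm, haddr, pmdp, entry);
        return true;
}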
836 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
843 ptl = pmd_lock(mm, pmd);
844 if (!pmd_none(*pmd)) {
846 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
847 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
850 entry = pmd_mkyoung(*pmd);
852 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
853 update_mmu_cache_pmd(vma, addr, pmd);
868 pgtable_trans_huge_deposit(mm, pmd, pgtable);
873 set_pmd_at(mm, addr, pmd, entry);
874 update_mmu_cache_pmd(vma, addr, pmd);
883 * vmf_insert_pfn_pmd - insert a pmd size pfn
888 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
921 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
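Lines 836-921 are the pfn-insert path used by vmf_insert_pfn_pmd(). A condensed sketch of the two cases taken under the pmd lock; devmap marking, error handling and the special-page bits are simplified away, and the helper name is illustrative:

static void insert_pfn_pmd_sketch(struct vm_area_struct *vma,
                                  unsigned long addr, pmd_t *pmdp,
                                  pfn_t pfn, pgprot_t prot, bool write,
                                  pgtable_t pgtable)
{
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl = pmd_lock(mm, pmdp);
        pmd_t entry;

        if (!pmd_none(*pmdp)) {
                /* Already mapped: only an upgrade of the same pfn is legal. */
                if (write && pmd_pfn(*pmdp) == pfn_t_to_pfn(pfn)) {
                        entry = pmd_mkyoung(*pmdp);
                        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
                        if (pmdp_set_access_flags(vma, addr, pmdp, entry, 1))
                                update_mmu_cache_pmd(vma, addr, pmdp);
                }
                goto unlock;
        }

        entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
        if (write)
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
        if (pgtable)
                pgtable_trans_huge_deposit(mm, pmdp, pgtable);
        set_pmd_at(mm, addr, pmdp, entry);
        update_mmu_cache_pmd(vma, addr, pmdp);
unlock:
        spin_unlock(ptl);
}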
1010 pmd_t *pmd, bool write)
1014 _pmd = pmd_mkyoung(*pmd);
1018 pmd, _pmd, write))
1019 update_mmu_cache_pmd(vma, addr, pmd);
1023 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1025 unsigned long pfn = pmd_pfn(*pmd);
1030 assert_spin_locked(pmd_lockptr(mm, pmd));
1032 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1035 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1041 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1068 pmd_t pmd;
1085 pmd = *src_pmd;
1088 if (unlikely(is_swap_pmd(pmd))) {
1089 swp_entry_t entry = pmd_to_swp_entry(pmd);
1091 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1095 pmd = swp_entry_to_pmd(entry);
1097 pmd = pmd_swp_mksoft_dirty(pmd);
1099 pmd = pmd_swp_mkuffd_wp(pmd);
1100 set_pmd_at(src_mm, addr, src_pmd, pmd);
1106 pmd = pmd_swp_clear_uffd_wp(pmd);
1107 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1113 if (unlikely(!pmd_trans_huge(pmd))) {
1118 * When page table lock is held, the huge zero pmd should not be
1119 * under splitting since we don't split the page itself, only pmd to
1122 if (is_huge_zero_pmd(pmd)) {
1132 src_page = pmd_page(pmd);
1151 pmd = pmd_clear_uffd_wp(pmd);
1152 pmd = pmd_mkold(pmd_wrprotect(pmd));
1153 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
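Lines 1068-1153 are the fork-time copy. The swap branch (1088-1107) downgrades a writable migration entry to a read-only one and carries soft-dirty/uffd-wp across; the present-THP tail (1151-1153) write-protects and ages both parent and child so the next write COWs. A sketch of that tail under stated assumptions (illustrative name; 'dst_vma' decides whether uffd-wp is kept):

static void copy_present_huge_pmd_sketch(struct mm_struct *dst_mm,
                                         struct mm_struct *src_mm,
                                         pmd_t *dst_pmd, pmd_t *src_pmd,
                                         struct vm_area_struct *dst_vma,
                                         unsigned long addr)
{
        pmd_t pmd = *src_pmd;

        /* Caller holds both pmd locks and a reference on the source page. */
        pmdp_set_wrprotect(src_mm, addr, src_pmd);
        if (!userfaultfd_wp(dst_vma))
                pmd = pmd_clear_uffd_wp(pmd);
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
}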
1279 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1280 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1283 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1298 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1306 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1324 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1361 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1362 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1371 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1376 unsigned long addr, pmd_t pmd)
1384 if (pmd_protnone(pmd))
1388 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1392 if (userfaultfd_huge_pmd_wp(vma, pmd))
1397 page = vm_normal_page_pmd(vma, addr, pmd);
1402 return pmd_dirty(pmd);
1406 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1410 /* If the pmd is writable, we can write to the page. */
1411 if (pmd_write(pmd))
1438 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1440 return !userfaultfd_huge_pmd_wp(vma, pmd);
1445 pmd_t *pmd,
1452 assert_spin_locked(pmd_lockptr(mm, pmd));
1454 page = pmd_page(*pmd);
1458 !can_follow_write_pmd(*pmd, page, vma, flags))
1462 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1465 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
1468 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1479 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1492 pmd_t pmd;
1500 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1501 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1506 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1512 writable = pmd_write(pmd);
1514 can_change_pmd_writable(vma, vmf->address, pmd))
1517 page = vm_normal_page_pmd(vma, haddr, pmd);
1549 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1550 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1566 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1567 pmd = pmd_mkyoung(pmd);
1569 pmd = pmd_mkwrite(pmd, vma);
1570 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1571 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1577 * Return true if we do MADV_FREE successfully on entire pmd page.
1581 pmd_t *pmd, unsigned long addr, unsigned long next)
1591 ptl = pmd_trans_huge_lock(pmd, vma);
1595 orig_pmd = *pmd;
1634 pmdp_invalidate(vma, addr, pmd);
1638 set_pmd_at(mm, addr, pmd, orig_pmd);
1639 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
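Lines 1577-1639 are the MADV_FREE handler for a huge pmd. The tail clears the accessed/dirty bits on the value returned by pmdp_invalidate(), so hardware cannot set them again behind our back before the clean entry is reinstalled. A condensed sketch (the folio lookup and the shared-mapping bail-out are elided; caller holds the pmd lock):

static void madv_free_huge_pmd_sketch(struct mmu_gather *tlb,
                                      struct vm_area_struct *vma,
                                      pmd_t *pmdp, unsigned long addr)
{
        pmd_t orig = *pmdp;

        if (!pmd_young(orig) && !pmd_dirty(orig))
                return;                         /* nothing to clear */
        /* Invalidate first; the returned value is the authoritative old pmd. */
        orig = pmdp_invalidate(vma, addr, pmdp);
        orig = pmd_mkold(pmd_mkclean(orig));
        set_pmd_at(vma->vm_mm, addr, pmdp, orig);
        tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);      /* queue TLB flush */
}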
1650 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1654 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1660 pmd_t *pmd, unsigned long addr)
1667 ptl = __pmd_trans_huge_lock(pmd, vma);
1676 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1679 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1682 zap_deposited_table(tlb->mm, pmd);
1685 zap_deposited_table(tlb->mm, pmd);
1704 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1707 zap_deposited_table(tlb->mm, pmd);
1711 zap_deposited_table(tlb->mm, pmd);
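Lines 1650-1711 tear the huge pmd down under zap. A sketch of the common anonymous case, assuming zap_deposited_table() (line 1650) frees the pte table deposited at fault time; counters, migration entries and the file/devmap cases are elided:

static int zap_huge_pmd_sketch(struct mmu_gather *tlb,
                               struct vm_area_struct *vma,
                               pmd_t *pmdp, unsigned long addr)
{
        spinlock_t *ptl = __pmd_trans_huge_lock(pmdp, vma);
        pmd_t orig;

        if (!ptl)
                return 0;                       /* no longer a huge pmd */
        orig = pmdp_huge_get_and_clear_full(vma, addr, pmdp, tlb->fullmm);
        tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);      /* queue the TLB flush */
        if (vma_is_anonymous(vma))
                zap_deposited_table(tlb->mm, pmdp);     /* free the deposited pte table */
        spin_unlock(ptl);
        return 1;
}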
1728 * With split pmd lock we also need to move preallocated
1737 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1740 if (unlikely(is_pmd_migration_entry(pmd)))
1741 pmd = pmd_swp_mksoft_dirty(pmd);
1742 else if (pmd_present(pmd))
1743 pmd = pmd_mksoft_dirty(pmd);
1745 return pmd;
1752 pmd_t pmd;
1757 * The destination pmd shouldn't be established, free_pgtables()
1775 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1776 if (pmd_present(pmd))
1785 pmd = move_soft_dirty_pmd(pmd);
1786 set_pmd_at(mm, new_addr, new_pmd, pmd);
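Lines 1728-1786 relocate a huge pmd for mremap(). The comment at 1757 notes the destination pmd is guaranteed empty, and with split pmd locks the deposited pte table must travel with the entry. A sketch of the move itself (locking and the conditional/forced TLB flush of mainline are simplified to an unconditional flush):

static void move_huge_pmd_sketch(struct vm_area_struct *vma,
                                 unsigned long old_addr, unsigned long new_addr,
                                 pmd_t *old_pmd, pmd_t *new_pmd)
{
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
        pmd_t pmd;

        /* Caller holds the old (and, if different, the new) pmd lock. */
        pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
        if (pmd_present(pmd))
                flush_tlb_range(vma, old_addr, old_addr + HPAGE_PMD_SIZE);
        /* Move the deposited pte table along with the entry. */
        pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
        pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
        pmd = move_soft_dirty_pmd(pmd);
        set_pmd_at(mm, new_addr, new_pmd, pmd);
}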
1805 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1821 ptl = __pmd_trans_huge_lock(pmd, vma);
1826 if (is_swap_pmd(*pmd)) {
1827 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1831 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1842 if (pmd_swp_soft_dirty(*pmd))
1845 newpmd = *pmd;
1852 if (!pmd_same(*pmd, newpmd))
1853 set_pmd_at(mm, addr, pmd, newpmd);
1866 if (is_huge_zero_pmd(*pmd))
1869 if (pmd_protnone(*pmd))
1872 page = pmd_page(*pmd);
1888 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1896 * pmd_trans_huge(*pmd) == 0 (without ptl)
1897 * // skip the pmd
1899 * // pmd is re-established
1901 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1907 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
1926 set_pmd_at(mm, addr, pmd, entry);
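The comment block at 1888-1901 explains why mprotect must never let the entry go pmd_none() transiently: a concurrent MADV_DONTNEED that tests pmd_trans_huge() without the lock would see the cleared entry, skip it, and the re-established huge pmd would then survive the DONTNEED. The tail therefore uses pmdp_invalidate_ad() (line 1907), which keeps the entry non-none while freezing hardware A/D updates. A sketch with the uffd-wp plumbing reduced to one flag (illustrative names):

static void change_huge_pmd_tail_sketch(struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t *pmdp,
                                        pgprot_t newprot, bool uffd_wp)
{
        pmd_t oldpmd, entry;

        /* Caller holds the pmd lock (see __pmd_trans_huge_lock below). */
        oldpmd = pmdp_invalidate_ad(vma, addr, pmdp);
        entry = pmd_modify(oldpmd, newprot);
        if (uffd_wp)
                entry = pmd_mkuffd_wp(pmd_wrprotect(entry));
        set_pmd_at(vma->vm_mm, addr, pmdp, entry);
}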
1936 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1941 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1944 ptl = pmd_lock(vma->vm_mm, pmd);
1945 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1946 pmd_devmap(*pmd)))
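Lines 1936-1946 are the lock helper that every huge-pmd operation above relies on: it returns the held pmd lock only if the entry still maps a THP (or a huge swap/devmap entry), otherwise it drops the lock and returns NULL. A sketch of the typical caller pattern (the helper name in the middle is a placeholder for the caller's own work):

static bool operate_on_huge_pmd_sketch(struct vm_area_struct *vma, pmd_t *pmdp)
{
        spinlock_t *ptl = __pmd_trans_huge_lock(pmdp, vma);

        if (!ptl)
                return false;           /* not huge any more: fall back to the pte path */
        /* ... operate on *pmdp while holding ptl ... */
        spin_unlock(ptl);
        return true;
}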
2026 unsigned long haddr, pmd_t *pmd)
2036 * Leave pmd empty until pte is filled note that it is fine to delay
2038 * replacing a zero pmd write protected page with a zero pte write
2043 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2045 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2062 smp_wmb(); /* make pte visible before pmd */
2063 pmd_populate(mm, pmd, pgtable);
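Lines 2026-2063 split a huge zero-page mapping: the huge entry is cleared first (a racing fault would at worst reinstall another zero mapping), the deposited table is withdrawn and filled with write-protected zero-page ptes, and only after the smp_wmb() is the table published. A sketch with the uffd-wp propagation and error checks elided (pte_offset_map() may return NULL on recent kernels; ignored here for brevity):

static void split_zero_pmd_sketch(struct vm_area_struct *vma,
                                  unsigned long haddr, pmd_t *pmdp)
{
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
        pmd_t _pmd;
        unsigned long addr;
        pte_t *pte;
        int i;

        pmdp_huge_clear_flush(vma, haddr, pmdp);
        pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
        pmd_populate(mm, &_pmd, pgtable);       /* scratch pmd for the pte walk */

        pte = pte_offset_map(&_pmd, haddr);
        for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
                pte_t entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot);

                entry = pte_mkspecial(entry);
                set_pte_at(mm, addr, pte + i, entry);
        }
        pte_unmap(pte);
        smp_wmb();                              /* make ptes visible before pmd */
        pmd_populate(mm, pmdp, pgtable);
}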
2066 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2082 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2083 && !pmd_devmap(*pmd));
2088 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2094 zap_deposited_table(mm, pmd);
2115 if (is_huge_zero_pmd(*pmd)) {
2125 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2129 * Up to this point the pmd is present and huge and userland has the
2131 * place). If we overwrite the pmd with the not-huge version pointing
2143 * current pmd notpresent (atomically because here the pmd_trans_huge
2144 * must remain set at all times on the pmd until the split is complete
2145 * for this pmd), then we flush the SMP TLB and finally we write the
2146 * non-huge version of the pmd entry with pmd_populate.
2148 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2199 * Withdraw the table only after we mark the pmd entry invalid.
2202 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2262 smp_wmb(); /* make pte visible before pmd */
2263 pmd_populate(mm, pmd, pgtable);
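The comment run at 2129-2148 and 2199 spells out the ordering for splitting a present huge pmd: first make the entry not-present while pmd_trans_huge() stays true (so concurrent faults serialize on the pmd lock rather than seeing an empty pmd), flush the TLB, withdraw the deposited table only after the invalidate, fill it, and publish it last. A skeleton of that ordering with the pte-building loop elided (illustrative name):

static void split_present_pmd_sketch(struct vm_area_struct *vma,
                                     unsigned long haddr, pmd_t *pmdp)
{
        struct mm_struct *mm = vma->vm_mm;
        pgtable_t pgtable;
        pmd_t old_pmd, _pmd;

        /*
         * Invalidate: not-present but still pmd_trans_huge(), TLB flushed,
         * hardware A/D bits frozen in the returned value.
         */
        old_pmd = pmdp_invalidate(vma, haddr, pmdp);

        /* Withdraw the deposited pte table only after the invalidate. */
        pgtable = pgtable_trans_huge_withdraw(mm, pmdp);
        pmd_populate(mm, &_pmd, pgtable);       /* scratch pmd for the pte walk */

        /* ... build HPAGE_PMD_NR ptes from old_pmd into the table ... */

        smp_wmb();                              /* make ptes visible before pmd */
        pmd_populate(mm, pmdp, pgtable);
}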
2266 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2276 ptl = pmd_lock(vma->vm_mm, pmd);
2280 * pmd against. Otherwise we can end up replacing wrong folio.
2285 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2286 is_pmd_migration_entry(*pmd)) {
2289 * guaranteed that pmd is present.
2291 if (folio && folio != page_folio(pmd_page(*pmd)))
2293 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2304 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2306 if (!pmd)
2309 __split_huge_pmd(vma, pmd, address, freeze, folio);
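Lines 2266-2309 are the split entry points. Under the pmd lock the split only proceeds if the entry is still huge (present, devmap, or a migration entry) and, when the caller named a folio, only if the pmd still maps that folio; it may have been zapped and refaulted with a different page in the meantime (comment at 2280). A sketch with the mmu_notifier range setup elided ('haddr' is the pmd-aligned address):

static void split_huge_pmd_entry_sketch(struct vm_area_struct *vma,
                                        pmd_t *pmdp, unsigned long haddr,
                                        bool freeze, struct folio *folio)
{
        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmdp);

        if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp) ||
            is_pmd_migration_entry(*pmdp)) {
                /* Skip if the pmd no longer maps the folio we were given. */
                if (!folio || folio == page_folio(pmd_page(*pmdp)))
                        __split_huge_pmd_locked(vma, pmdp, haddr, freeze);
        }
        spin_unlock(ptl);
}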
2316 * contain an hugepage: check if we need to split an huge pmd.
3215 if (!(pvmw->pmd && !pvmw->pte))
3219 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
3224 set_pmd_at(mm, address, pvmw->pmd, pmdval);
3245 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
3262 if (!(pvmw->pmd && !pvmw->pte))
3265 entry = pmd_to_swp_entry(*pvmw->pmd);
3268 if (pmd_swp_soft_dirty(*pvmw->pmd))
3272 if (pmd_swp_uffd_wp(*pvmw->pmd))
3291 set_pmd_at(mm, haddr, pvmw->pmd, pmde);
3294 update_mmu_cache_pmd(vma, address, pvmw->pmd);
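Lines 3215-3294 install and remove pmd migration entries. The install side invalidates the pmd (3219) before replacing it with a swap-style entry (3245); the restore side decodes that entry and rebuilds a present huge pmd, carrying soft-dirty and uffd-wp across. A rough sketch of the restore path ('new' is the migration target page; mk_huge_pmd() is assumed to be the file's local helper for building a present huge entry, and ageing, rmap and statistics updates are elided):

static void remove_migration_pmd_sketch(struct page_vma_mapped_walk *pvmw,
                                        struct page *new)
{
        struct vm_area_struct *vma = pvmw->vma;
        unsigned long haddr = pvmw->address & HPAGE_PMD_MASK;
        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
        pmd_t pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));

        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
                pmde = pmd_mkwrite(pmde, vma);
        if (pmd_swp_uffd_wp(*pvmw->pmd))
                pmde = pmd_mkuffd_wp(pmde);

        set_pmd_at(vma->vm_mm, haddr, pvmw->pmd, pmde);
        update_mmu_cache_pmd(vma, pvmw->address, pvmw->pmd);
}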