Lines Matching defs:pmd (mm/huge_memory.c)

479 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
482 pmd = pmd_mkwrite(pmd);
483 return pmd;
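
The matches at 479-483 come from maybe_pmd_mkwrite(). A minimal reconstruction of the whole helper follows; the VM_WRITE test is not in the matches and is filled in from context, so treat it as an assumption. The sketches added after each group of matches below are illustrative reconstructions, not verbatim source, and assume the usual mm/huge_memory.c context (<linux/mm.h>, <linux/huge_mm.h>, <linux/swapops.h> and the file's local helpers).

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	/* Only mark the pmd writable when the VMA itself permits writes. */
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}
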
613 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
614 if (unlikely(!pmd_none(*vmf->pmd))) {
639 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
640 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
696 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
700 if (!pmd_none(*pmd))
705 pgtable_trans_huge_deposit(mm, pmd, pgtable);
706 set_pmd_at(mm, haddr, pmd, entry);
739 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
741 if (pmd_none(*vmf->pmd)) {
753 haddr, vmf->pmd, zero_page);
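
Lines 696-753 are the huge-zero-page install path: the fault handler takes the pmd lock (739), rechecks pmd_none (741), and a helper deposits a preallocated page table and writes the entry (700-706). A hedged sketch of that helper (presumably set_huge_zero_page()); the mk_pmd()/pmd_mkhuge() entry construction and the mm_inc_nr_ptes() accounting are assumed from context, only the check, deposit and set_pmd_at() appear in the matches.

static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;

	if (!pmd_none(*pmd))
		return false;			/* raced: pmd already populated */
	entry = mk_pmd(zero_page, vma->vm_page_prot);	/* read-only zero-page mapping */
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);	/* stash the pte table for a later split */
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}
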
773 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
780 ptl = pmd_lock(mm, pmd);
781 if (!pmd_none(*pmd)) {
783 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
784 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
787 entry = pmd_mkyoung(*pmd);
789 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
790 update_mmu_cache_pmd(vma, addr, pmd);
805 pgtable_trans_huge_deposit(mm, pmd, pgtable);
810 set_pmd_at(mm, addr, pmd, entry);
811 update_mmu_cache_pmd(vma, addr, pmd);
820 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
826 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
861 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
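
Lines 773-861 cover insert_pfn_pmd() and its vmf_insert_pfn_pmd_prot() caller. A condensed, hedged sketch of the two branches taken under the pmd lock; the shape of the write-upgrade branch, the pgtable accounting and the cleanup of an unused preallocated table are filled in from context (devmap handling is omitted).

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t entry;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		/* Already mapped: only the huge zero pmd may be upgraded in place. */
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}
		goto out_unlock;
	}

	/* Empty slot: build a fresh huge entry for the pfn and install it. */
	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (write)
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}
	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);	/* preallocated table was not needed */
}
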
952 pmd_t *pmd, int flags)
956 _pmd = pmd_mkyoung(*pmd);
960 pmd, _pmd, flags & FOLL_WRITE))
961 update_mmu_cache_pmd(vma, addr, pmd);
965 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
967 unsigned long pfn = pmd_pfn(*pmd);
971 assert_spin_locked(pmd_lockptr(mm, pmd));
984 if (flags & FOLL_WRITE && !pmd_write(*pmd))
987 if (pmd_present(*pmd) && pmd_devmap(*pmd))
993 touch_pmd(vma, addr, pmd, flags);
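
Lines 952-993 are touch_pmd() plus the checks at the top of follow_devmap_pmd(): GUP marks the pmd young (and dirty for writes) under the pmd lock before returning the page. A sketch of touch_pmd(); the FOLL_WRITE dirtying step is assumed from context.

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);	/* a write-follow also dirties the entry */
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}
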
1019 pmd_t pmd;
1036 pmd = *src_pmd;
1039 if (unlikely(is_swap_pmd(pmd))) {
1040 swp_entry_t entry = pmd_to_swp_entry(pmd);
1042 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1045 pmd = swp_entry_to_pmd(entry);
1047 pmd = pmd_swp_mksoft_dirty(pmd);
1049 pmd = pmd_swp_mkuffd_wp(pmd);
1050 set_pmd_at(src_mm, addr, src_pmd, pmd);
1056 pmd = pmd_swp_clear_uffd_wp(pmd);
1057 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1063 if (unlikely(!pmd_trans_huge(pmd))) {
1068 * When page table lock is held, the huge zero pmd should not be
1069 * under splitting since we don't split the page itself, only pmd to
1072 if (is_huge_zero_pmd(pmd)) {
1082 src_page = pmd_page(pmd);
1110 pmd = pmd_clear_uffd_wp(pmd);
1111 pmd = pmd_mkold(pmd_wrprotect(pmd));
1112 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
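
Lines 1019-1112 are from copy_huge_pmd(). The migration-entry branch (1039-1057) downgrades a writable migration entry so parent and child both see it read-only, re-applies the soft-dirty and uffd-wp bits that live in the swap pmd, and installs the entry on both sides. A hedged sketch; the helper name is hypothetical, the writable-entry helpers are named as in older kernels (is_write_migration_entry()/make_migration_entry_read(), renamed in newer trees), and the dst-side uffd-wp decision is passed in as a stand-in for the real destination-vma check.

static void copy_pmd_migration_entry(struct mm_struct *dst_mm,
		struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd,
		unsigned long addr, pmd_t pmd, bool dst_wants_uffd_wp)
{
	swp_entry_t entry = pmd_to_swp_entry(pmd);

	VM_BUG_ON(!is_pmd_migration_entry(pmd));
	if (is_write_migration_entry(entry)) {
		/* COW: neither parent nor child may keep a writable entry. */
		make_migration_entry_read(&entry);
		pmd = swp_entry_to_pmd(entry);
		if (pmd_swp_soft_dirty(*src_pmd))
			pmd = pmd_swp_mksoft_dirty(pmd);
		if (pmd_swp_uffd_wp(*src_pmd))
			pmd = pmd_swp_mkuffd_wp(pmd);
		set_pmd_at(src_mm, addr, src_pmd, pmd);
	}
	if (!dst_wants_uffd_wp)
		pmd = pmd_swp_clear_uffd_wp(pmd);	/* child has no uffd-wp registration */
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
}
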
1256 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1257 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
1264 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
1265 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
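
Lines 1256-1265 are huge_pmd_set_accessed(): under the pmd lock, revalidate the entry and set the young (and, for write faults, dirty) bits. A hedged reconstruction; the write test, the haddr computation and the void return type are assumed from context.

void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
	pmd_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;		/* pmd changed under us, nothing to do */

	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);
	haddr = vmf->address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
	spin_unlock(vmf->ptl);
}
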
1277 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1285 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1299 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1316 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1317 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1326 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1331 * FOLL_FORCE can write to even unwritable pmd's, but only
1334 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1336 return pmd_write(pmd) ||
1337 ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1342 pmd_t *pmd,
1348 assert_spin_locked(pmd_lockptr(mm, pmd));
1350 if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1354 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1358 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
1361 page = pmd_page(*pmd);
1368 touch_pmd(vma, addr, pmd, flags);
1377 * In most cases the pmd is the only mapping of the page as we
1411 vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
1424 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1425 if (unlikely(!pmd_same(pmd, *vmf->pmd)))
1433 if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
1434 page = pmd_page(*vmf->pmd);
1442 page = pmd_page(pmd);
1453 if (!pmd_savedwrite(pmd))
1488 if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
1516 * change_huge_pmd() released the pmd lock before
1535 vmf->pmd, pmd, vmf->address, page, target_nid);
1545 was_writable = pmd_savedwrite(pmd);
1546 pmd = pmd_modify(pmd, vma->vm_page_prot);
1547 pmd = pmd_mkyoung(pmd);
1549 pmd = pmd_mkwrite(pmd);
1550 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1551 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
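
Lines 1411-1551 are do_huge_pmd_numa_page(). The fallback at 1545-1551 runs when the page is not migrated: drop the PROT_NONE hinting protection and restore write access if the saved-write bit says the entry had it. A sketch of that tail, assuming do_huge_pmd_numa_page()'s locals; only the was_writable conditional is filled in beyond the matches.

	/* Not migrated: restore a usable pmd in place. */
	was_writable = pmd_savedwrite(pmd);
	pmd = pmd_modify(pmd, vma->vm_page_prot);	/* drop the PROT_NONE protection */
	pmd = pmd_mkyoung(pmd);
	if (was_writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
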
1568 * Return true if we do MADV_FREE successfully on entire pmd page.
1572 pmd_t *pmd, unsigned long addr, unsigned long next)
1582 ptl = pmd_trans_huge_lock(pmd, vma);
1586 orig_pmd = *pmd;
1625 pmdp_invalidate(vma, addr, pmd);
1629 set_pmd_at(mm, addr, pmd, orig_pmd);
1630 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
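
Lines 1568-1630 are madvise_free_huge_pmd(). The MADV_FREE core clears the young and dirty bits so reclaim can later discard the page, using pmdp_invalidate() so the pmd is never transiently cleared. A sketch of that step, wrapped in a hypothetical helper; the young/dirty precondition is assumed from context.

static void madvise_free_clear_young_dirty(struct mmu_gather *tlb,
		struct vm_area_struct *vma, struct mm_struct *mm,
		pmd_t *pmd, pmd_t orig_pmd, unsigned long addr)
{
	if (!pmd_young(orig_pmd) && !pmd_dirty(orig_pmd))
		return;
	/* Invalidate first so the pmd is never observed as cleared. */
	pmdp_invalidate(vma, addr, pmd);
	orig_pmd = pmd_mkold(orig_pmd);
	orig_pmd = pmd_mkclean(orig_pmd);
	set_pmd_at(mm, addr, pmd, orig_pmd);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);	/* queue the TLB flush for this entry */
}
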
1641 static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1645 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
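
Lines 1641-1645 are zap_deposited_table(); the matches show the signature and the withdraw, and the remaining two statements (freeing the table and dropping the pte accounting) are filled in from context.

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);		/* release the preallocated pte table */
	mm_dec_nr_ptes(mm);
}
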
1651 pmd_t *pmd, unsigned long addr)
1658 ptl = __pmd_trans_huge_lock(pmd, vma);
1667 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1669 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1672 zap_deposited_table(tlb->mm, pmd);
1677 zap_deposited_table(tlb->mm, pmd);
1697 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1700 zap_deposited_table(tlb->mm, pmd);
1704 zap_deposited_table(tlb->mm, pmd);
1721 * With split pmd lock we also need to move preallocated
1730 static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1733 if (unlikely(is_pmd_migration_entry(pmd)))
1734 pmd = pmd_swp_mksoft_dirty(pmd);
1735 else if (pmd_present(pmd))
1736 pmd = pmd_mksoft_dirty(pmd);
1738 return pmd;
1745 pmd_t pmd;
1750 * The destination pmd shouldn't be established, free_pgtables()
1767 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1768 if (pmd_present(pmd))
1777 pmd = move_soft_dirty_pmd(pmd);
1778 set_pmd_at(mm, new_addr, new_pmd, pmd);
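
Lines 1721-1778 are move_huge_pmd() (mremap): the old entry is cleared and re-planted at the new address, the deposited page table moves with it when split pmd locks require that (the comment at 1721), and the soft-dirty bit is preserved via move_soft_dirty_pmd() (1730-1738). A hedged sketch of the core with both ptl locks assumed held; the wrapper name and the pgtable-move condition are reconstructed, not taken from the matches.

static void move_huge_pmd_entry(struct vm_area_struct *vma, struct mm_struct *mm,
		unsigned long old_addr, unsigned long new_addr,
		pmd_t *old_pmd, pmd_t *new_pmd,
		spinlock_t *old_ptl, spinlock_t *new_ptl)
{
	pmd_t pmd;
	bool force_flush = false;

	pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
	if (pmd_present(pmd))
		force_flush = true;		/* old translation may still be cached */
	VM_BUG_ON(!pmd_none(*new_pmd));

	if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
		/* Move the preallocated pte table along with the huge pmd. */
		pgtable_t pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);

		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
	}
	pmd = move_soft_dirty_pmd(pmd);		/* see lines 1730-1738 */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	if (force_flush)
		flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
}
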
1795 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1807 ptl = __pmd_trans_huge_lock(pmd, vma);
1811 preserve_write = prot_numa && pmd_write(*pmd);
1815 if (is_swap_pmd(*pmd)) {
1816 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1818 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1827 if (pmd_swp_soft_dirty(*pmd))
1829 if (pmd_swp_uffd_wp(*pmd))
1831 set_pmd_at(mm, addr, pmd, newpmd);
1842 if (prot_numa && is_huge_zero_pmd(*pmd))
1845 if (prot_numa && pmd_protnone(*pmd))
1850 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1858 * pmd_trans_huge(*pmd) == 0 (without ptl)
1859 * // skip the pmd
1861 * // pmd is re-established
1863 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1869 entry = pmdp_invalidate(vma, addr, pmd);
1886 set_pmd_at(mm, addr, pmd, entry);
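
Lines 1795-1886 are change_huge_pmd(). The comment block at 1850-1863 explains why the pmd is invalidated rather than cleared: a transiently cleared pmd would let a racing MADV_DONTNEED skip it. A hedged sketch of the resulting update sequence for a present pmd, assuming change_huge_pmd()'s locals (vma, addr, pmd, newprot, preserve_write, mm, entry); the savedwrite step follows the preserve_write match at 1811, and the uffd-wp variants are omitted.

	/* Present-pmd path, ptl held. */
	entry = pmdp_invalidate(vma, addr, pmd);	/* never leaves the pmd none */
	entry = pmd_modify(entry, newprot);
	if (preserve_write)
		entry = pmd_mk_savedwrite(entry);	/* keep write across NUMA prot_none */
	set_pmd_at(mm, addr, pmd, entry);
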
1894 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1899 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1902 ptl = pmd_lock(vma->vm_mm, pmd);
1903 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1904 pmd_devmap(*pmd)))
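
Lines 1894-1904 are __pmd_trans_huge_lock(); the matches show the lock and the positive test, and the unlock/NULL path is reconstructed from the comment at 1894.

spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
		   pmd_devmap(*pmd)))
		return ptl;		/* caller now holds the pmd lock */
	spin_unlock(ptl);
	return NULL;			/* not a huge, swap or devmap pmd */
}
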
1993 unsigned long haddr, pmd_t *pmd)
2001 * Leave pmd empty until pte is filled note that it is fine to delay
2003 * replacing a zero pmd write protected page with a zero pte write
2008 old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
2010 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2024 smp_wmb(); /* make pte visible before pmd */
2025 pmd_populate(mm, pmd, pgtable);
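
Lines 1993-2025 are __split_huge_zero_page_pmd(): the huge pmd is cleared and flushed, the deposited page table is withdrawn and temporarily hung off a local _pmd so pte_offset_map() works, each slot is filled with a zero-page pte, and only then is the real pmd repopulated (hence the smp_wmb at 2024). A hedged reconstruction; the pte construction inside the loop is assumed from context, and any use of the captured old value (line 2008) is omitted.

static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd, old_pmd;
	int i;

	old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd);
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);	/* private pmd just to map the pte page */

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;

		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}
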
2028 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2042 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2043 && !pmd_devmap(*pmd));
2048 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2054 zap_deposited_table(mm, pmd);
2075 if (is_huge_zero_pmd(*pmd)) {
2085 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2089 * Up to this point the pmd is present and huge and userland has the
2091 * place). If we overwrite the pmd with the not-huge version pointing
2103 * current pmd notpresent (atomically because here the pmd_trans_huge
2104 * must remain set at all times on the pmd until the split is complete
2105 * for this pmd), then we flush the SMP TLB and finally we write the
2106 * non-huge version of the pmd entry with pmd_populate.
2108 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2133 * Withdraw the table only after we mark the pmd entry invalid.
2136 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2198 smp_wmb(); /* make pte visible before pmd */
2199 pmd_populate(mm, pmd, pgtable);
2209 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2221 ptl = pmd_lock(vma->vm_mm, pmd);
2225 * pmd against. Otherwise we can end up replacing wrong page.
2230 if (page != pmd_page(*pmd))
2235 if (pmd_trans_huge(*pmd)) {
2237 page = pmd_page(*pmd);
2248 _pmd = *pmd;
2252 if (unlikely(!pmd_same(*pmd, _pmd))) {
2265 } else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
2267 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2280 * 3) Split a huge pmd into pte pointing to the same page. No need
2294 pmd_t *pmd;
2308 pmd = pmd_offset(pud, address);
2310 __split_huge_pmd(vma, pmd, address, freeze, page);
2321 * an huge pmd.
2331 * an huge pmd.
2341 * contain an hugepage: check if we need to split an huge pmd.
2967 if (!(pvmw->pmd && !pvmw->pte))
2971 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
2978 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
2992 if (!(pvmw->pmd && !pvmw->pte))
2995 entry = pmd_to_swp_entry(*pvmw->pmd);
2998 if (pmd_swp_soft_dirty(*pvmw->pmd))
3002 if (pmd_swp_uffd_wp(*pvmw->pmd))
3010 set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
3013 update_mmu_cache_pmd(vma, address, pvmw->pmd);
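
Lines 2967-3013 are the THP migration pair: set_pmd_migration_entry() turns a mapped huge pmd into a swap-style migration pmd (invalidate, encode, set_pmd_at at 2971-2978), and remove_migration_pmd() rebuilds a present pmd from it, restoring soft-dirty, write and uffd-wp state (2995-3013). A hedged sketch of the rebuild, assuming remove_migration_pmd()'s locals (new, vma, mm, mmun_start, address, entry, pmde); mk_huge_pmd(), the writable-entry test and get_page() are inferred from context, and the migration-entry helper name varies across kernel versions.

	/* Rebuild a present huge pmd from the migration entry; rmap and mlock
	 * bookkeeping from the in-tree function are omitted here. */
	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);	/* restore write if the entry allowed it */
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
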