Lines Matching defs:pmd
345 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
348 spinlock_t *ptl = pte_lockptr(mm, pmd);
349 pte_t *ptep = pte_offset_map(pmd, address);
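These first matches appear to come from migration_entry_wait() in mm/migrate.c. A minimal sketch of how the surrounding body reads, assuming the kernel's __migration_entry_wait() helper performs the actual re-check and sleep:

	void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				  unsigned long address)
	{
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		pte_t *ptep = pte_offset_map(pmd, address);

		/* Takes ptl, re-validates the entry, then waits on the page lock. */
		__migration_entry_wait(mm, ptep, ptl);
	}

Note that the pte is mapped before the lock is taken; __migration_entry_wait() acquires ptl itself and unmaps/unlocks before returning.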
361 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
366 ptl = pmd_lock(mm, pmd);
367 if (!is_pmd_migration_entry(*pmd))
369 page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
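The pmd-level variant matched at 361-369 follows the usual wait pattern: take the pmd lock, confirm the entry is still a migration entry, pin the page, then sleep on its page lock. A sketch under those assumptions:

	void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
	{
		spinlock_t *ptl;
		struct page *page;

		ptl = pmd_lock(mm, pmd);
		if (!is_pmd_migration_entry(*pmd))
			goto unlock;
		page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
		/* The page may already be on its way to being freed. */
		if (!get_page_unless_zero(page))
			goto unlock;
		spin_unlock(ptl);
		/* Sleeps until migration unlocks the page, then drops the ref. */
		put_and_wait_on_page_locked(page);
		return;
	unlock:
		spin_unlock(ptl);
	}

put_and_wait_on_page_locked() is assumed here in its single-argument form; newer kernels also take a task state argument.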
1307 * tables or check whether the hugepage is pmd-based or not before
2080 bool pmd_trans_migrating(pmd_t pmd)
2082 struct page *page = pmd_page(pmd);
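pmd_trans_migrating() looks to be a one-liner: NUMA migration keeps the huge page locked for its whole duration, so a page-lock test on pmd_page() is sufficient. Roughly:

	bool pmd_trans_migrating(pmd_t pmd)
	{
		struct page *page = pmd_page(pmd);

		/* The THP stays locked while NUMA migration is in flight. */
		return PageLocked(page);
	}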
2148 pmd_t *pmd, pmd_t entry,
2186 ptl = pmd_lock(mm, pmd);
2187 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2221 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2222 * has already been flushed globally. So no TLB can be currently
2223 * caching this non present pmd mapping. There's no need to clear the
2224 * pmd before doing set_pmd_at(), nor to flush the TLB after
2225 * set_pmd_at(). Clearing the pmd here would introduce a race
2226 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2227 * mmap_lock for reading. If the pmd is set to NULL at any given time,
2228 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2229 * pmd.
2231 set_pmd_at(mm, start, pmd, entry);
2260 ptl = pmd_lock(mm, pmd);
2261 if (pmd_same(*pmd, entry)) {
2263 set_pmd_at(mm, start, pmd, entry);
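Taken together, the matches from 2148 through 2263 trace the THP NUMA-migration update: freeze the page's reference count under the pmd lock, install the replacement entry with set_pmd_at() and no intervening clear (for the reasons in the comment above), and on failure retake the lock and reinstate the original entry only if pmd_same() still holds. A condensed, hypothetical helper illustrating that flow; thp_numa_replace_pmd and new_entry are illustrative names, and all migration bookkeeping is elided:

	static bool thp_numa_replace_pmd(struct mm_struct *mm, unsigned long start,
					 pmd_t *pmd, pmd_t entry, pmd_t new_entry,
					 struct page *page)
	{
		spinlock_t *ptl = pmd_lock(mm, pmd);

		if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
			/* Lost a race; the caller's failure path restores 'entry'. */
			spin_unlock(ptl);
			return false;
		}
		/*
		 * The old pmd is numa/protnone (non present) and the TLB has
		 * already been flushed, so the new mapping can be installed
		 * without clearing the pmd first.
		 */
		set_pmd_at(mm, start, pmd, new_entry);
		spin_unlock(ptl);
		return true;
	}

The failure path at 2260-2263 mirrors the same discipline: it retakes the pmd lock and calls set_pmd_at() only after pmd_same() confirms nobody modified the pmd in the meantime.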
2555 * check them than regular pages, because they can be mapped with a pmd
2906 * pte_offset_map() on pmds where a huge pmd might be created