Lines matching defs: tlb

89 #include <asm/tlb.h>
190 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
195 pte_free_tlb(tlb, token, addr);
196 mm_dec_nr_ptes(tlb->mm);
199 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
213 free_pte_range(tlb, pmd, addr);
229 pmd_free_tlb(tlb, pmd, start);
230 mm_dec_nr_pmds(tlb->mm);
233 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
247 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
263 pud_free_tlb(tlb, pud, start);
264 mm_dec_nr_puds(tlb->mm);
267 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
281 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
297 p4d_free_tlb(tlb, p4d, start);
303 void free_pgd_range(struct mmu_gather *tlb,
353 * (see pte_free_tlb()), flush the tlb if we need
355 tlb_change_page_size(tlb, PAGE_SIZE);
356 pgd = pgd_offset(tlb->mm, addr);
361 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
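
The block above (lines 190-361) is the page-table freeing ladder: free_pgd_range() walks the PGD entries and descends through free_p4d_range(), free_pud_range() and free_pmd_range() down to free_pte_range(); each level hands the just-emptied table page to the gather via p*_free_tlb() and decrements the per-mm counter with mm_dec_nr_*(). A stripped-down sketch of one rung of that ladder, modelled on free_pmd_range() but with the floor/ceiling alignment checks omitted, so not the exact kernel code:

static void free_pmd_range_sketch(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long addr, unsigned long end,
                                  unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start = addr;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                free_pte_range(tlb, pmd, addr);  /* pte_free_tlb() + mm_dec_nr_ptes() */
        } while (pmd++, addr = next, addr != end);

        /* With every PTE table below gone, queue the PMD table itself. */
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}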
365 void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
389 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
404 free_pgd_range(tlb, addr, vma->vm_end,
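
free_pgtables() (line 365) walks the remaining VMAs through the maple tree state and frees the page tables under each one, taking the hugetlb variant where needed (lines 389 and 404). A condensed sketch of that per-VMA dispatch, assuming floor and ceiling have already been computed from the neighbouring VMAs and skipping the VMA-coalescing logic of the real loop:

        /* inside free_pgtables(): tlb, vma, floor, ceiling in scope */
        unsigned long addr = vma->vm_start;

        if (is_vm_hugetlb_page(vma))
                hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                       floor, ceiling);
        else
                free_pgd_range(tlb, addr, vma->vm_end,
                               floor, ceiling);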
1396 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1401 struct mm_struct *mm = tlb->mm;
1409 tlb_change_page_size(tlb, PAGE_SIZE);
1438 tlb->fullmm);
1440 tlb_remove_tlb_entry(tlb, pte, addr);
1455 if (tlb_delay_rmap(tlb)) {
1469 if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) {
1522 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1531 tlb_flush_mmu_tlbonly(tlb);
1532 tlb_flush_rmaps(tlb, vma);
1543 tlb_flush_mmu(tlb);
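
zap_pte_range() (lines 1396-1543) does the per-PTE teardown: a present PTE is cleared with ptep_get_and_clear_full(), its invalidation is queued with tlb_remove_tlb_entry(), and its page is pushed into the gather's batch with __tlb_remove_page(); a full batch, or a delayed-rmap case (lines 1455 and 1531-1532), forces a TLB flush before pages are freed, and tlb_flush_mmu() (line 1543) flushes and drains whatever was batched. A reduced sketch of the heart of that loop, assuming a normal non-huge page and leaving out accounting, swap entries and the delay_rmap handling:

        /* inside the PTE loop: mm, vma, tlb, pte, addr, force_flush in scope */
        pte_t ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
        struct page *page = vm_normal_page(vma, addr, ptent);

        tlb_remove_tlb_entry(tlb, pte, addr);   /* queue the TLB invalidation */
        if (page && unlikely(__tlb_remove_page(tlb, page, 0))) {
                /* Batch full: stop the walk so the caller can flush and retry. */
                force_flush = 1;
                addr += PAGE_SIZE;
                break;
        }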
1548 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1562 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1570 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1582 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1590 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1603 mmap_assert_locked(tlb->mm);
1605 } else if (zap_huge_pud(tlb, vma, pud, addr))
1611 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1619 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1632 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1638 void unmap_page_range(struct mmu_gather *tlb,
1647 tlb_start_vma(tlb, vma);
1653 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1655 tlb_end_vma(tlb, vma);
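
The zap side mirrors the freeing ladder: unmap_page_range() (line 1638) brackets the walk with tlb_start_vma()/tlb_end_vma() and descends through zap_p4d_range(), zap_pud_range() and zap_pmd_range() into zap_pte_range(). A sketch of that top-level walk, close in shape to the listed unmap_page_range() but with the sanity checks dropped:

static void unmap_page_range_sketch(struct mmu_gather *tlb,
                                    struct vm_area_struct *vma,
                                    unsigned long addr, unsigned long end,
                                    struct zap_details *details)
{
        pgd_t *pgd;
        unsigned long next;

        tlb_start_vma(tlb, vma);
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
        } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);
}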
1659 static void unmap_single_vma(struct mmu_gather *tlb,
1695 __unmap_hugepage_range(tlb, vma, start, end,
1699 unmap_page_range(tlb, vma, start, end, details);
1705 * @tlb: address of the caller's struct mmu_gather
1724 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
1743 unmap_single_vma(tlb, vma, start, end, &details,
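
unmap_vmas() (line 1724) drives a single mmu_gather across many VMAs: it iterates the maple tree state and calls unmap_single_vma() for each VMA in the range, which in turn takes the hugetlb path or unmap_page_range() (lines 1695 and 1699). A minimal sketch of that iteration, with the MMU-notifier range setup and hugetlb details omitted; the final boolean is the caller-supplied flag seen at lines 1743 and 1777:

        /* inside unmap_vmas(): tlb, mas, vma, start, end, details in scope */
        do {
                unmap_single_vma(tlb, vma, start, end, &details, false);
        } while ((vma = mas_find(mas, end - 1)) != NULL);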
1764 struct mmu_gather tlb;
1770 tlb_gather_mmu(&tlb, vma->vm_mm);
1777 unmap_single_vma(&tlb, vma, address, end, details, false);
1779 tlb_finish_mmu(&tlb);
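
Lines 1764-1779 show the canonical mmu_gather lifecycle for a single-VMA zap: declare a gather on the stack, arm it against the mm with tlb_gather_mmu(), unmap, then tlb_finish_mmu() to flush the TLB and free the batched pages. A minimal sketch of that pattern; the MMU-notifier setup, lru_add_drain() and locking are omitted, and the function name is made up for the sketch:

static void zap_one_vma_sketch(struct vm_area_struct *vma, unsigned long address,
                               unsigned long size, struct zap_details *details)
{
        const unsigned long end = address + size;
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, vma->vm_mm);   /* arm the gather for this mm */
        update_hiwater_rss(vma->vm_mm);     /* update the RSS high-water mark first */
        unmap_single_vma(&tlb, vma, address, end, details, false);
        tlb_finish_mmu(&tlb);               /* flush the TLB, free batched pages */
}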
2853 * and update local tlb only
2879 /* The PTE changed under us, update local tlb */
5069 * This still avoids useless tlb flushes for .text page faults
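
The last three fragments (lines 2853, 2879 and 5069) are fault-path comments about flushing only the local TLB: when a PTE turns out to have changed under us, only this CPU's cached translation can be stale, so a cross-CPU shootdown is pointless. A minimal sketch of the "PTE changed under us" case, assuming the PTE lock is held and using the generic update_mmu_tlb() helper, not the exact kernel code:

        if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
                /* PTE unchanged: go ahead and install the new mapping. */
        } else {
                /* The PTE changed under us: refresh only the local TLB. */
                update_mmu_tlb(vma, vmf->address, vmf->pte);
        }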