Lines Matching defs:tlb

84 #include <asm/tlb.h>
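This include (these matches appear to come from mm/memory.c) pulls in the mmu_gather machinery, on most architectures via asm-generic/tlb.h; every function listed below batches its TLB invalidation and page freeing through that API. As a reminder of the core calls, assuming the generic implementation of this kernel vintage (where gather/finish still take a start/end range, as lines 1578 and 1584 below show):

/* Core mmu_gather entry points (a reminder, not verbatim kernel text): */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end);
void tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);

/* Per-entry helpers (macros/inlines) used in the zap path below:
 *   tlb_change_page_size(tlb, size)      - flush early if the page size changes
 *   tlb_remove_tlb_entry(tlb, pte, addr) - record the address of a cleared PTE
 *   __tlb_remove_page(tlb, page)         - queue a page for freeing; returns
 *                                          true when the batch is full
 *   tlb_start_vma(tlb, vma) / tlb_end_vma(tlb, vma) - per-VMA bracketing
 */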
217 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
222 pte_free_tlb(tlb, token, addr);
223 mm_dec_nr_ptes(tlb->mm);
226 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
240 free_pte_range(tlb, pmd, addr);
256 pmd_free_tlb(tlb, pmd, start);
257 mm_dec_nr_pmds(tlb->mm);
260 static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
274 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
290 pud_free_tlb(tlb, pud, start);
291 mm_dec_nr_puds(tlb->mm);
294 static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
308 free_pud_range(tlb, p4d, addr, next, floor, ceiling);
324 p4d_free_tlb(tlb, p4d, start);
330 void free_pgd_range(struct mmu_gather *tlb,
380 * (see pte_free_tlb()), flush the tlb if we need
382 tlb_change_page_size(tlb, PAGE_SIZE);
383 pgd = pgd_offset(tlb->mm, addr);
388 free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
392 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
407 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
420 free_pgd_range(tlb, addr, vma->vm_end,
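The lines above are the page-table freeing side: free_pgtables() walks the VMA list and calls free_pgd_range() (or hugetlb_free_pgd_range()) for each run of VMAs, which recurses p4d -> pud -> pmd -> pte. At every level the pattern is the same: walk the entries, descend, and once the range is known to cover the whole lower-level table, detach that table page and hand it to the gather with pXX_free_tlb() so it is only freed after the TLB flush, decrementing the mm's table counters as it goes. A condensed sketch of the PMD level, with the floor/ceiling checks elided (they prevent freeing a table still shared with a neighbouring mapping; see the real free_pmd_range() at line 226); the _sketch name is illustrative:

/* Condensed sketch of the shared per-level shape, PMD level shown. */
static void free_pmd_range_sketch(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long addr, unsigned long end)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long next;
	pmd_t *pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);		/* one level down */
	} while (pmd++, addr = next, addr != end);

	/* Whole PMD table now empty: unhook it and queue it on the gather. */
	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);	/* page freed only after the TLB flush */
	mm_dec_nr_pmds(tlb->mm);
}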
1231 static unsigned long zap_pte_range(struct mmu_gather *tlb,
1236 struct mm_struct *mm = tlb->mm;
1244 tlb_change_page_size(tlb, PAGE_SIZE);
1276 tlb->fullmm);
1277 tlb_remove_tlb_entry(tlb, pte, addr);
1295 if (unlikely(__tlb_remove_page(tlb, page))) {
1318 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1341 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1349 tlb_flush_mmu_tlbonly(tlb);
1360 tlb_flush_mmu(tlb);
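zap_pte_range() is where the batching actually pays off: each present PTE is cleared with ptep_get_and_clear_full(), its address is recorded with tlb_remove_tlb_entry(), and the backing page is queued with __tlb_remove_page(). When the batch fills up, the function flushes the TLB while still holding the PTE lock (tlb_flush_mmu_tlbonly(), line 1349), drops the lock, frees the batched pages (tlb_flush_mmu(), line 1360) and restarts. A condensed sketch of the present-page path; swap/migration entries, rss and rmap accounting, and zap_details filtering are elided, and the _sketch name is illustrative:

static void zap_pte_range_sketch(struct mmu_gather *tlb,
				 struct vm_area_struct *vma, pmd_t *pmd,
				 unsigned long addr, unsigned long end)
{
	struct mm_struct *mm = tlb->mm;
	int force_flush = 0;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;

	tlb_change_page_size(tlb, PAGE_SIZE);
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t ptent = *pte;
		struct page *page;

		if (pte_none(ptent) || !pte_present(ptent))
			continue;
		page = vm_normal_page(vma, addr, ptent);

		/* Clear the PTE and record its address for the batched flush. */
		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
		tlb_remove_tlb_entry(tlb, pte, addr);
		if (!page)
			continue;

		/* Queue the page; a full batch forces an early flush. */
		if (unlikely(__tlb_remove_page(tlb, page))) {
			force_flush = 1;
			break;	/* the real code advances addr and restarts */
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	/* Flush the TLB before dropping the PTE lock ... */
	if (force_flush)
		tlb_flush_mmu_tlbonly(tlb);
	pte_unmap_unlock(start_pte, ptl);
	/* ... then free the batched pages outside it. */
	if (force_flush)
		tlb_flush_mmu(tlb);
}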
1371 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1385 else if (zap_huge_pmd(tlb, vma, pmd, addr))
1391 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1409 next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1417 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1430 mmap_assert_locked(tlb->mm);
1432 } else if (zap_huge_pud(tlb, vma, pud, addr))
1438 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1446 static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1459 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1465 void unmap_page_range(struct mmu_gather *tlb,
1474 tlb_start_vma(tlb, vma);
1480 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1482 tlb_end_vma(tlb, vma);
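The levels above zap_pte_range() are plain table walks: unmap_page_range() brackets the VMA with tlb_start_vma()/tlb_end_vma() (lines 1474 and 1482) and iterates PGD entries, and each level either dispatches a huge entry to zap_huge_pud()/zap_huge_pmd() or descends until the PTE level does the real work. A condensed sketch of one level (PMD shown), with swap/devmap handling elided; the _sketch name is illustrative and the real zap_pmd_range() is at line 1371:

static unsigned long zap_pmd_range_sketch(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				struct zap_details *details)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				/* partial range: split so PTEs can be zapped */
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			else if (zap_huge_pmd(tlb, vma, pmd, addr))
				goto next_pmd;	/* whole huge page zapped */
		}
		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
			goto next_pmd;
		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
next_pmd:
		cond_resched();
	} while (pmd++, addr = next, addr != end);

	return addr;
}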
1486 static void unmap_single_vma(struct mmu_gather *tlb,
1521 __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1525 unmap_page_range(tlb, vma, start, end, details);
1531 * @tlb: address of the caller's struct mmu_gather
1547 void unmap_vmas(struct mmu_gather *tlb,
1557 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1573 struct mmu_gather tlb;
1578 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1582 unmap_single_vma(&tlb, vma, start, range.end, NULL);
1584 tlb_finish_mmu(&tlb, start, range.end);
1600 struct mmu_gather tlb;
1605 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1608 unmap_single_vma(&tlb, vma, address, range.end, details);
1610 tlb_finish_mmu(&tlb, address, range.end);
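The two callers at lines 1573-1610 (zap_page_range() and zap_page_range_single()) show the standard lifecycle around all of the above: a struct mmu_gather lives on the caller's stack, tlb_gather_mmu() arms it, the unmap helpers fill it while PTEs are cleared, and tlb_finish_mmu() performs the final TLB flush and frees everything that was batched. A sketch of that caller pattern, modelled on zap_page_range() with lru/rss housekeeping elided; the function name is illustrative:

void zap_range_sketch(struct vm_area_struct *vma, unsigned long start,
		      unsigned long size)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				start, start + size);
	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);	/* arm batching */
	mmu_notifier_invalidate_range_start(&range);

	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
		unmap_single_vma(&tlb, vma, start, range.end, NULL);

	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb, start, range.end);	/* final flush + free batches */
}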
2665 * and update local tlb only
2691 /* The PTE changed under us, update local tlb */
4580 * This still avoids useless tlb flushes for .text page faults