Lines matching refs:tlb

/* include/asm-generic/tlb.h */
#ifdef CONFIG_MMU_GATHER_TABLE_FREE
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#else
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
#endif
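A hedged sketch of how an architecture-side helper might hand a just-detached page table to the gather; my_free_pte_table() is a hypothetical name, not part of this header:

static inline void my_free_pte_table(struct mmu_gather *tlb, void *table)
{
	/*
	 * With CONFIG_MMU_GATHER_TABLE_FREE the table is queued and freed
	 * only after the TLB flush; without it, the #define above turns
	 * this into an ordinary deferred page free.
	 */
	tlb_remove_table(tlb, table);
}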
extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
				   struct encoded_page *page, int page_size);
#ifdef CONFIG_SMP
#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

/* no-op fallback for configs that flush remote TLBs synchronously */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb) (false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif
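The point of the delayed rmap is ordering: the TLB flush must happen before the deferred rmap removals. A minimal sketch, assuming a simplified zap path (my_zap_finish() is hypothetical):

static void my_zap_finish(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/* invalidate the stale translations first... */
	tlb_flush_mmu_tlbonly(tlb);
	/*
	 * ...then drop the rmaps whose removal was delayed; this is a
	 * no-op unless tlb_delay_rmap() was used while gathering.
	 */
	tlb_flush_rmaps(tlb, vma);
}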
	/* (in struct mmu_gather) we have performed an operation which
	 * requires a complete flush of the tlb */
	unsigned int		need_flush_all : 1;
void tlb_flush_mmu(struct mmu_gather *tlb);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address, unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
}
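To see how the two helpers cooperate, here is a stand-alone user-space model (an illustration only; struct gather and TASK_SIZE below are simplifications of the real mmu_gather):

#include <stdio.h>

#define TASK_SIZE (1UL << 47)	/* illustrative user VA limit */

struct gather { unsigned long start, end; };

/* models __tlb_adjust_range(): grow the range to cover [addr, addr+size) */
static void adjust(struct gather *g, unsigned long addr, unsigned long size)
{
	if (addr < g->start)
		g->start = addr;
	if (addr + size > g->end)
		g->end = addr + size;
}

int main(void)
{
	/* reset state: start > end encodes "nothing to flush yet" */
	struct gather g = { .start = TASK_SIZE, .end = 0 };

	adjust(&g, 0x7000, 0x1000);
	adjust(&g, 0x3000, 0x1000);
	/* the tracked range now covers both unmaps: [0x3000, 0x8000) */
	printf("flush [%#lx, %#lx)\n", g.start, g.end);
	return 0;
}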
/* CONFIG_MMU_GATHER_NO_RANGE variant: any non-empty range gets a full flush */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/* stash the VMA properties that flush_tlb_range() implementations look at */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/* anything calling __tlb_adjust_range() also sets at least one of these */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
		tlb_flush_mmu(tlb);
}
static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags)
{
	return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
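A hedged usage sketch (my_gather_pages() is hypothetical): callers simply queue pages, and the wrapper flushes by itself when the internal batch fills up.

static void my_gather_pages(struct mmu_gather *tlb, struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		tlb_remove_page(tlb, pages[i]);	/* may flush + free a full batch */
}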
static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}
/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_page(tlb, ptdesc_page(pt));
}
static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}
	tlb->page_size = page_size;
#endif
}
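A sketch of the intended calling pattern, loosely modelled on the hugetlb unmap path (my_unmap_huge() is hypothetical): declare the page size once before queueing pages of that size; tlb_remove_huge_tlb_entry() is defined further below.

static void my_unmap_huge(struct mmu_gather *tlb, struct hstate *h,
			  struct page *page, pte_t *ptep, unsigned long addr)
{
	unsigned long sz = huge_page_size(h);

	tlb_change_page_size(tlb, sz);	/* flushes first on a size change */
	tlb_remove_huge_tlb_entry(h, tlb, ptep, addr);
	tlb_remove_page_size(tlb, page, sz);
}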
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;
	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
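An architecture's tlb_flush() can use the unmap shift to pick an invalidation stride. A hedged sketch (my_arch_tlb_flush() and my_invalidate_one() are hypothetical):

extern void my_invalidate_one(unsigned long addr);	/* hypothetical */

static inline void my_arch_tlb_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);
	unsigned long addr;

	/* walk the gathered range at the granularity that was unmapped */
	for (addr = tlb->start; addr < tlb->end; addr += stride)
		my_invalidate_one(addr);
}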
/* In the case of tlb vma handling, we can optimise these away for a full MM flush. */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/* force an intermediate flush for fragile VM_PFNMAP ranges, and at
	 * VMA boundaries when VMAs are not merged into one range */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
		tlb_flush_mmu_tlbonly(tlb);
}
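Putting the pieces together, a hedged sketch of the gather lifecycle (my_unmap_vma() is hypothetical; the real drivers are the munmap/exit paths in mm/):

static void my_unmap_vma(struct vm_area_struct *vma)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);	/* initialise the batch */
	tlb_start_vma(&tlb, vma);		/* record VMA flags, flush caches */

	/*
	 * The page-table walk goes here: each cleared PTE is recorded with
	 * tlb_remove_tlb_entry() and each page queued with tlb_remove_page()
	 * (see the sketch after tlb_remove_tlb_entry below).
	 */

	tlb_end_vma(&tlb, vma);		/* maybe flush at the VMA boundary */
	tlb_finish_mmu(&tlb);		/* final TLB flush, then free pages */
}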
/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set the corresponding cleared_* bit.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/*
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 * Record that ptes were really unmapped by updating the range, so we can
 * later optimise away the tlb invalidate.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
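A hedged sketch of the canonical clear-then-record pattern (my_zap_pte() is hypothetical; compare zap_pte_range() in mm/memory.c). It assumes a present, page-backed pte:

static void my_zap_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
		       pte_t *ptep, unsigned long addr)
{
	pte_t pte;

	/* clear the PTE first; other CPUs may still hold the old translation */
	pte = ptep_get_and_clear_full(vma->vm_mm, addr, ptep, tlb->fullmm);
	/* record the unmap so the invalidate can be batched */
	tlb_remove_tlb_entry(tlb, ptep, addr);
	/* defer freeing the page until after the TLB flush */
	tlb_remove_page(tlb, pfn_to_page(pte_pfn(pte)));
}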
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
/* tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb
 * invalidation; the __ variant is a nop so far, because only x86 needs it. */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)
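The PMD-level analogue of the clear-then-record pattern, sketched under the same caveats (my_zap_huge_pmd() is hypothetical; compare zap_huge_pmd() in mm/huge_memory.c):

static void my_zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    pmd_t *pmdp, unsigned long addr)
{
	pmd_t orig;

	orig = pmdp_huge_get_and_clear_full(vma, addr, pmdp, tlb->fullmm);
	/* record a HPAGE_PMD_SIZE-wide unmap for the later invalidate */
	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);
	/* the folio that 'orig' mapped is then freed after the flush */
	(void)orig;
}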
/* tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation; the __ variant is a nop so far, because only x86 needs it. */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)

#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
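Finally, a sketch of how the table-freeing side is driven, closely modelled on free_pte_range() in mm/memory.c (my_free_pte_range() is a hypothetical name):

static void my_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			      unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);			/* detach the emptied PTE table */
	pte_free_tlb(tlb, token, addr);	/* queue it; sets tlb->freed_tables */
	mm_dec_nr_ptes(tlb->mm);	/* account for the freed table */
}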