Lines matching refs:tlb in include/asm-generic/tlb.h (each entry below is prefixed with its line number in the file)
2 /* include/asm-generic/tlb.h
187 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
195 #define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
246 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
271 * requires a complete flush of the tlb
307 void tlb_flush_mmu(struct mmu_gather *tlb);
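The declarations above are the core of the mmu_gather batching API: callers queue pages through the remove-page helpers, and nothing is actually freed until tlb_flush_mmu() has invalidated the TLB. A minimal caller sketch, assuming the tlb_gather_mmu()/tlb_finish_mmu() signatures contemporary with this header (both later dropped their start/end arguments):

/*
 * Sketch of the mmu_gather lifecycle, modelled on callers such as
 * zap_page_range(); locking and accounting are omitted.
 */
static void unmap_region_sketch(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* begin batching */
	/* ... walk page tables, queue pages with tlb_remove_page() ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLB, then free the pages */
}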
309 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
313 tlb->start = min(tlb->start, address);
314 tlb->end = max(tlb->end, address + range_size);
317 static inline void __tlb_reset_range(struct mmu_gather *tlb)
319 if (tlb->fullmm) {
320 tlb->start = tlb->end = ~0;
322 tlb->start = TASK_SIZE;
323 tlb->end = 0;
325 tlb->freed_tables = 0;
326 tlb->cleared_ptes = 0;
327 tlb->cleared_pmds = 0;
328 tlb->cleared_puds = 0;
329 tlb->cleared_p4ds = 0;
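__tlb_adjust_range() grows the pending flush window with min()/max(), and __tlb_reset_range() seeds the sentinels that make this work: in the fullmm case start and end are both ~0 (the whole address space is going away, and a nonzero end keeps tlb_flush() firing), otherwise start = TASK_SIZE and end = 0 encode an empty range that the first adjustment collapses onto the real address. A runnable user-space model of the accumulation (the TASK_SIZE stand-in value is illustrative):

#include <stdio.h>

/* User-space model of mmu_gather range tracking; not kernel code. */
struct gather { unsigned long start, end; };

#define TASK_SIZE_MODEL 0x7ffffffff000UL	/* stand-in for TASK_SIZE */

static void adjust_range(struct gather *g, unsigned long addr,
			 unsigned long size)
{
	if (addr < g->start)		/* min(), as in __tlb_adjust_range() */
		g->start = addr;
	if (addr + size > g->end)	/* max() */
		g->end = addr + size;
}

int main(void)
{
	/* non-fullmm reset state: start > end means "nothing to flush" */
	struct gather g = { .start = TASK_SIZE_MODEL, .end = 0 };

	adjust_range(&g, 0x2000, 0x1000);
	adjust_range(&g, 0x8000, 0x1000);

	/* prints flush 0x2000..0x9000: one range covering both unmaps */
	printf("flush %#lx..%#lx\n", g.start, g.end);
	return 0;
}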
351 static inline void tlb_flush(struct mmu_gather *tlb)
353 if (tlb->end)
354 flush_tlb_mm(tlb->mm);
358 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
361 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
376 static inline void tlb_flush(struct mmu_gather *tlb)
378 if (tlb->fullmm || tlb->need_flush_all) {
379 flush_tlb_mm(tlb->mm);
380 } else if (tlb->end) {
382 .vm_mm = tlb->mm,
383 .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
384 (tlb->vma_huge ? VM_HUGETLB : 0),
387 flush_tlb_range(&vma, tlb->start, tlb->end);
392 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
405 tlb->vma_huge = is_vm_hugetlb_page(vma);
406 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
412 tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
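By flush time the real VMA may already be unlinked, so tlb_update_vma_flags() latches the only two bits of vma state that flush_tlb_range() implementations are known to inspect, and tlb_flush() above rebuilds a throwaway on-stack vma carrying them; the file's own comment notes that implementations looking at VM_HUGETLB (tile, mips-4k) flush only large pages, and that architectures flushing I-TLB along with D-TLB make VM_EXEC safe to accumulate. A hypothetical backend showing why those bits must survive (the helper names are invented for illustration):

/*
 * Hypothetical flush_tlb_range() backend. It changes behaviour based on
 * vm_flags, which is why tlb_flush() must preserve VM_EXEC/VM_HUGETLB
 * on the stand-in vma. The *_sketch() helpers do not exist.
 */
static void flush_tlb_range_sketch(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	unsigned long stride = (vma->vm_flags & VM_HUGETLB) ?
			       HPAGE_SIZE : PAGE_SIZE;
	unsigned long addr;

	for (addr = start; addr < end; addr += stride)
		invalidate_dtlb_page_sketch(addr);		/* hypothetical */

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_range_sketch(start, end);	/* hypothetical */
}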
418 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
424 if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
425 tlb->cleared_puds || tlb->cleared_p4ds))
428 tlb_flush(tlb);
429 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
430 __tlb_reset_range(tlb);
433 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
436 if (__tlb_remove_page_size(tlb, page, page_size))
437 tlb_flush_mmu(tlb);
440 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
442 return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
449 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
451 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
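tlb_remove_page() is how zap loops hand over each freed page: __tlb_remove_page_size() returns true once its internal batch is full, and the wrapper responds by draining everything with tlb_flush_mmu(). A sketch of the calling pattern, loosely modelled on zap_pte_range() (pte locking and dirty/accessed bookkeeping omitted):

static void zap_one_page_sketch(struct mmu_gather *tlb, pte_t *ptep,
				unsigned long addr, struct page *page)
{
	tlb_remove_tlb_entry(tlb, ptep, addr);	/* record range + cleared_ptes */
	tlb_remove_page(tlb, page);		/* queue page; flushes when full */
}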
454 static inline void tlb_change_page_size(struct mmu_gather *tlb,
458 if (tlb->page_size && tlb->page_size != page_size) {
459 if (!tlb->fullmm && !tlb->need_flush_all)
460 tlb_flush_mmu(tlb);
463 tlb->page_size = page_size;
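tlb_change_page_size() exists for configurations (CONFIG_MMU_GATHER_PAGE_SIZE) where the architecture cannot mix page sizes in one batch: switching sizes mid-gather flushes first. Callers announce the size before queueing anything; a sketch modelled on the THP zap path in zap_huge_pmd():

static void zap_huge_pmd_sketch(struct mmu_gather *tlb, pmd_t *pmdp,
				unsigned long addr, struct page *page)
{
	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);	 /* flush queued 4K pages first */
	tlb_remove_pmd_tlb_entry(tlb, pmdp, addr);	 /* record the huge-pmd range */
	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); /* queue the huge page */
}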
467 static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
469 if (tlb->cleared_ptes)
471 if (tlb->cleared_pmds)
473 if (tlb->cleared_puds)
475 if (tlb->cleared_p4ds)
481 static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
483 return 1UL << tlb_get_unmap_shift(tlb);
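tlb_get_unmap_shift() reports the finest granule actually cleared, which lets arch code stride its invalidation instructions accordingly (arm64's tlb_flush(), for one, uses tlb_get_unmap_size() as the stride for its range flush). A runnable user-space model with x86-64-style shift values:

#include <stdio.h>

/* User-space model of tlb_get_unmap_shift(): pick the smallest cleared
 * level. Shift values are the x86-64 ones (4K/2M/1G/512G). */
struct cleared { int ptes, pmds, puds, p4ds; };

static unsigned int shift_model(const struct cleared *c)
{
	if (c->ptes) return 12;	/* PAGE_SHIFT */
	if (c->pmds) return 21;	/* PMD_SHIFT  */
	if (c->puds) return 30;	/* PUD_SHIFT  */
	if (c->p4ds) return 39;	/* P4D_SHIFT  */
	return 12;
}

int main(void)
{
	/* both a PMD and a PUD entry were cleared: stride by the finer, 2M */
	struct cleared c = { .pmds = 1, .puds = 1 };

	printf("stride = %#lx\n", 1UL << shift_model(&c));
	return 0;
}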
487 * In the case of tlb vma handling, we can optimise these away in the
492 static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
494 if (tlb->fullmm)
497 tlb_update_vma_flags(tlb, vma);
503 static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
505 if (tlb->fullmm)
514 tlb_flush_mmu_tlbonly(tlb);
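tlb_start_vma()/tlb_end_vma() bracket the teardown of each VMA: start latches the vma flags and flushes caches, while end forces the TLB flush so the gathered range never balloons across the unused gap between consecutive VMAs (and so the latched flags can safely be replaced by the next VMA's). The typical shape, modelled on unmap_page_range():

static void unmap_vma_sketch(struct mmu_gather *tlb,
			     struct vm_area_struct *vma)
{
	tlb_start_vma(tlb, vma);  /* latch VM_EXEC/VM_HUGETLB, flush caches */
	/* ... zap this VMA's ptes with tlb_remove_tlb_entry() et al. ... */
	tlb_end_vma(tlb, vma);	  /* flush and reset range at the boundary */
}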
519 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
522 static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
525 __tlb_adjust_range(tlb, address, size);
526 tlb->cleared_ptes = 1;
529 static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
532 __tlb_adjust_range(tlb, address, size);
533 tlb->cleared_pmds = 1;
536 static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
539 __tlb_adjust_range(tlb, address, size);
540 tlb->cleared_puds = 1;
543 static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
546 __tlb_adjust_range(tlb, address, size);
547 tlb->cleared_p4ds = 1;
551 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
555 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
558 * so we can later optimise away the tlb invalidate. This helps when
561 #define tlb_remove_tlb_entry(tlb, ptep, address) \
563 tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
564 __tlb_remove_tlb_entry(tlb, ptep, address); \
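The comment above the macro is the payoff of the whole scheme: only ptes that were really present grow the flush range, so unmapping memory that was never faulted in records nothing and tlb_flush_mmu_tlbonly() can skip the invalidate outright. A loop sketch using real helpers (simplified from zap_pte_range()):

static void zap_pte_run_sketch(struct mmu_gather *tlb, pte_t *ptep,
			       unsigned long addr, unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, ptep++) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			continue;	/* never mapped: nothing to flush */

		ptep_get_and_clear(tlb->mm, addr, ptep);
		tlb_remove_tlb_entry(tlb, ptep, addr);
	}
}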
567 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
571 tlb_flush_p4d_range(tlb, address, _sz); \
573 tlb_flush_pud_range(tlb, address, _sz); \
575 tlb_flush_pmd_range(tlb, address, _sz); \
577 tlb_flush_pte_range(tlb, address, _sz); \
578 __tlb_remove_tlb_entry(tlb, ptep, address); \
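Because hugetlb pages come in several sizes on one kernel (2MB and 1GB on x86-64, for instance), tlb_remove_huge_tlb_entry() compares huge_page_size(h) against P4D/PUD/PMD_SIZE and records the clear at the matching level. A usage sketch modelled on the hugetlb unmap path:

static void zap_hugetlb_pte_sketch(struct mmu_gather *tlb, struct hstate *h,
				   pte_t *ptep, unsigned long addr,
				   struct page *page)
{
	tlb_remove_huge_tlb_entry(h, tlb, ptep, addr);	    /* level picked from h */
	tlb_remove_page_size(tlb, page, huge_page_size(h));
}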
582 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
586 #define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
589 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
591 tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
592 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
596 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
600 #define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
603 #define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
605 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
606 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
628 #define pte_free_tlb(tlb, ptep, address) \
630 tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
631 tlb->freed_tables = 1; \
632 __pte_free_tlb(tlb, ptep, address); \
637 #define pmd_free_tlb(tlb, pmdp, address) \
639 tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
640 tlb->freed_tables = 1; \
641 __pmd_free_tlb(tlb, pmdp, address); \
646 #define pud_free_tlb(tlb, pudp, address) \
648 tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
649 tlb->freed_tables = 1; \
650 __pud_free_tlb(tlb, pudp, address); \
655 #define p4d_free_tlb(tlb, pudp, address) \
657 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
658 tlb->freed_tables = 1; \
659 __p4d_free_tlb(tlb, pudp, address); \
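The *_free_tlb() macros free the page-table pages themselves, and each records its flush one level up: freeing a PTE table means a PMD entry was cleared, hence tlb_flush_pmd_range(), and so on up the tree (p4d_free_tlb() has no pgd-level cleared_ bit, so it only adjusts the range). Setting freed_tables both forces the flush past the guard in tlb_flush_mmu_tlbonly() and tells table-freeing configurations that speculative page-table walkers must be allowed to drain. A teardown sketch modelled on free_pte_range() in mm/memory.c:

static void free_pte_table_sketch(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);	/* page backing the PTE table */

	pmd_clear(pmd);				/* clear the PMD entry */
	pte_free_tlb(tlb, token, addr);		/* queue the table page, set freed_tables */
}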