Lines matching references to end:
431 static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
438 for (addr = start; addr < end; addr += page_size)
452 static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
459 __tlbiel_va_range(start, end, pid, page_size, psize);
463 static inline void __tlbie_va_range(unsigned long start, unsigned long end,
470 for (addr = start; addr < end; addr += page_size)
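Both loops above walk the requested virtual range in page_size steps, issuing one invalidation per step; the tlbiel variant is CPU-local, while the tlbie variant is broadcast to all CPUs. A minimal, self-contained sketch of that stepping pattern, with a made-up do_invalidate() stub standing in for the real per-address instruction wrappers:

#include <stdio.h>

/* Hypothetical stand-in for the per-address tlbiel/tlbie helpers. */
static void do_invalidate(unsigned long addr, unsigned long pid)
{
	printf("invalidate va=0x%lx for pid %lu\n", addr, pid);
}

/* Same loop shape as the __tlbiel_va_range()/__tlbie_va_range() matches. */
static void va_range_sketch(unsigned long start, unsigned long end,
			    unsigned long pid, unsigned long page_size)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += page_size)
		do_invalidate(addr, pid);
}

int main(void)
{
	/* Flush a 64K range of 4K pages for an arbitrary PID. */
	va_range_sketch(0x10000, 0x20000, 42, 0x1000);
	return 0;
}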
520 unsigned long end;
530 _tlbiel_va_range(t->start, t->end, t->pid, t->page_size,
545 static inline void _tlbie_va_range(unsigned long start, unsigned long end,
552 __tlbie_va_range(start, end, pid, page_size, psize);
557 unsigned long start, unsigned long end,
562 struct tlbiel_va_range t = { .start = start, .end = end,
568 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
576 * - flush_tlb_range(vma, start, end) flushes a range of pages
577 * - flush_tlb_kernel_range(start, end) flushes kernel pages
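The two comment lines above come from the file's description of the generic flush interface it implements. A hedged usage sketch in kernel context; the wrapper function and the reason for the flush are assumptions, but both calls use the standard generic-MM signatures:

#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Illustrative only: after changing user PTEs in [start, end) of @vma,
 * flush that range; after changing a kernel-only mapping covering
 * [kstart, kend), flush it with the kernel-range variant.
 */
static void example_range_flushes(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end,
				  unsigned long kstart, unsigned long kend)
{
	flush_tlb_range(vma, start, end);	/* user pages in a VMA */
	flush_tlb_kernel_range(kstart, kend);	/* kernel pages */
}

On radix these end up in the radix__ entry points seen elsewhere in the listing, for example radix__flush_tlb_kernel_range() for the kernel-range case.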
985 void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
993 start, end);
1019 unsigned long start, unsigned long end)
1024 unsigned long nr_pages = (end - start) >> page_shift;
1032 WARN_ON_ONCE(end == TLB_FLUSH_ALL);
1049 if (!flush_pid && (end - start) >= PMD_SIZE)
1063 pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
1082 hend = end & PMD_MASK;
1090 __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
1099 __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
1106 start, end, pid, page_size, mmu_virtual_psize, flush_pwc);
1114 mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
1118 unsigned long end)
1123 return radix__flush_hugetlb_tlb_range(vma, start, end);
1126 __radix__flush_tlb_range(vma->vm_mm, start, end);
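Taken together, the matches from the main user-range path (nr_pages computed as (end - start) >> page_shift, a flush_pid flag, an optional pseries_rpt_invalidate() hand-off, per-page tlbiel/tlbie loops, and a closing mmu_notifier_arch_invalidate_secondary_tlbs() call) suggest the usual trade-off: small ranges are invalidated page by page, while very large ranges fall back to flushing the whole PID. A rough, self-contained sketch of only that size-based choice; the threshold name and value and both stub helpers are hypothetical, not the kernel's actual tuning:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder threshold; the real ceiling is a tunable and this name is made up. */
#define PAGE_FLUSH_CEILING 33UL

/* Stubs standing in for a PID-wide flush and a per-page range flush. */
static void flush_whole_pid(unsigned long pid)
{
	printf("flush every translation for pid %lu\n", pid);
}

static void flush_va_range(unsigned long start, unsigned long end, unsigned long pid)
{
	printf("flush [0x%lx, 0x%lx) page by page for pid %lu\n", start, end, pid);
}

/* Sketch of the decision visible in the __radix__flush_tlb_range() matches. */
static void range_flush_sketch(unsigned long start, unsigned long end,
			       unsigned long pid, unsigned long page_shift)
{
	unsigned long nr_pages = (end - start) >> page_shift;
	bool flush_pid = nr_pages > PAGE_FLUSH_CEILING;

	if (flush_pid)
		flush_whole_pid(pid);
	else
		flush_va_range(start, end, pid);
}

int main(void)
{
	range_flush_sketch(0x10000, 0x20000, 42, 12);	/* 16 pages: per page */
	range_flush_sketch(0, 0x10000000, 42, 12);	/* 64K pages: whole PID */
	return 0;
}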
1190 unsigned long end = tlb->end;
1208 * by batching), then it may end up being better to use
1229 radix__flush_tlb_range_psize(mm, start, end, psize);
1231 radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
1236 unsigned long start, unsigned long end,
1242 unsigned long nr_pages = (end - start) >> page_shift;
1250 WARN_ON_ONCE(end == TLB_FLUSH_ALL);
1272 pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
1291 _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
1293 _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
1296 start, end, pid, page_size, psize, also_pwc);
1300 mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
1304 unsigned long end, int psize)
1306 return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
1310 unsigned long end, int psize)
1312 __radix__flush_tlb_range_psize(mm, start, end, psize, true);
1318 unsigned long pid, end;
1331 end = addr + HPAGE_PMD_SIZE;
1338 _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
1351 addr, end);
1353 _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
1356 addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
1364 unsigned long start, unsigned long end)
1366 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
1371 unsigned long start, unsigned long end)
1373 radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_1G);
1497 static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
1505 for (addr = start; addr < end; addr += page_size)
1511 static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
1519 __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
1529 unsigned long start, unsigned long end)
1548 if (start == 0 && end == -1)
1557 nr_pages = (end - start) >> def->shift;
1570 _tlbie_va_range_lpid(start, end, pid, lpid,