Lines matching 'start' in arch/arm64/include/asm/tlbflush.h (numbered lines are the matches; unnumbered, indented lines give the immediately surrounding context):
188 * flush_tlb_range(vma, start, end)
189 * Invalidate the virtual-address range '[start, end)' on all
    * CPUs for the user address space corresponding to 'vma->vm_mm'.
195 * flush_tlb_kernel_range(start, end)
196 * Same as flush_tlb_range(..., start, end), but applies to
    * kernel mappings rather than a particular user address space.
219 * __flush_tlb_range(vma, start, end, stride, last_level)
220 * Invalidate the virtual-address range '[start, end)' on all
    * CPUs for the user address space corresponding to 'vma->vm_mm'.
    * The invalidation operations are issued at a granularity
    * determined by 'stride' and only affect any walk-cache entries
    * if 'last_level' is false.
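
These comment fragments are the API contract for the user-visible entry points. A minimal caller sketch (demo_after_pte_update() is a hypothetical name, not an in-tree function; a real caller runs in kernel context under the usual mm locking):

    #include <linux/mm_types.h>
    #include <asm/tlbflush.h>

    /* Hypothetical illustration: the PTEs covering [va, va + len) in
     * 'vma' were just changed, so stale entries must be invalidated on
     * every CPU that may cache translations for this address space. */
    static void demo_after_pte_update(struct vm_area_struct *vma,
                                      unsigned long va, unsigned long len)
    {
            flush_tlb_range(vma, va, va + len);
    }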
342 * @start: The start address of the range
343 * @pages: Range as the number of pages from 'start'
367 #define __flush_tlb_range_op(op, start, pages, stride, \
                                 asid, tlb_level, tlbi_user) \
377 addr = __TLBI_VADDR(start, asid); \
381 start += stride; \
388 addr = __TLBI_VADDR_RANGE(start, asid, scale, \
                              num, tlb_level); \
393 start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
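
Lines 377/381 above are the fallback path, one TLBI per 'stride' when FEAT_TLBIRANGE is unavailable or a single page remains; lines 388/393 are the range path, where one instruction covers (num + 1) << (5*scale + 1) pages. A stand-alone user-space sketch of that greedy (scale, num) split; the formula mirrors the kernel's __TLBI_RANGE_PAGES(), and since callers guarantee pages < MAX_TLBI_RANGE_PAGES (2^21), a single pass from scale 3 down to 0 suffices:

    #include <stdio.h>

    /* Pages covered by one ranged TLBI: NUM is a 5-bit field [0, 31],
     * SCALE a 2-bit field [0, 3] (mirrors __TLBI_RANGE_PAGES()). */
    #define RANGE_PAGES(num, scale) \
            ((unsigned long)((num) + 1) << (5 * (scale) + 1))

    int main(void)
    {
            unsigned long pages = 8193;     /* 32 MiB + 4 KiB in 4 KiB pages */
            int scale;

            for (scale = 3; scale >= 0; scale--) {
                    long num = (long)(pages >> (5 * scale + 1)) - 1;

                    if (num < 0)
                            continue;       /* remainder too small for this scale */
                    if (num > 31)
                            num = 31;       /* defensive: NUM field saturates */
                    pages -= RANGE_PAGES(num, scale);
                    printf("scale=%d num=%ld -> %lu pages\n",
                           scale, num, RANGE_PAGES(num, scale));
            }
            printf("%lu page(s) left for the single-entry path\n", pages);
            return 0;
    }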
400 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
401 __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
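
The stage-2 wrapper on lines 400-401 pins asid to 0 and tlbi_user to false, because IPA (guest physical) invalidations carry no ASID and have no user/kernel split. A call sketch, loosely modeled on how KVM's stage-2 code drives this macro (argument names here are illustrative assumptions):

    /* Invalidate stage-2 TLB entries for 'pages' pages of guest IPA
     * space starting at 'ipa'; the trailing 0 is the "level unknown"
     * translation-table-level hint. */
    __flush_s2_tlb_range_op(ipas2e1is, ipa, pages, PAGE_SIZE, 0);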
    static inline void __flush_tlb_range(struct vm_area_struct *vma,
404                                      unsigned long start, unsigned long end,
                                         unsigned long stride, bool last_level,
                                         int tlb_level)
    {
410         start = round_down(start, stride);
            end = round_up(end, stride);
412         pages = (end - start) >> PAGE_SHIFT;

            if ((!system_supports_tlb_range() &&
421              (end - start) >= (MAX_TLBI_OPS * stride)) ||
                pages >= MAX_TLBI_RANGE_PAGES) {
                    flush_tlb_mm(vma->vm_mm);   /* overlong range: flush the whole ASID */
                    return;
            }

            dsb(ishst);                         /* publish PTE updates before invalidating */
            asid = ASID(vma->vm_mm);

            if (last_level)
431                 __flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
            else
433                 __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);

            dsb(ish);                           /* wait for the invalidations to complete */
436         mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
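
A worked example of the fallback threshold, assuming 4 KiB pages and MAX_TLBI_OPS == PTRS_PER_PTE == 512 (configuration-dependent values, stated here as an assumption): MAX_TLBI_OPS * stride = 512 * 4096 bytes = 2 MiB, so without FEAT_TLBIRANGE a PAGE_SIZE-stride flush of 2 MiB or more takes the flush_tlb_mm() path instead of issuing 512 or more individual TLBIs. With range support the limit is pages >= MAX_TLBI_RANGE_PAGES = 2^21 pages, i.e. 8 GiB of 4 KiB pages.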
    static inline void flush_tlb_range(struct vm_area_struct *vma,
440                                    unsigned long start, unsigned long end)
    {
            /* May be invalidating table entries (hugepage collapse, page-table
             * moves), so no leaf-only op and no level hint can be used. */
447         __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
    }
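
Where the mapping granule is known, callers use __flush_tlb_range() directly with a larger stride and a level hint. A sketch modeled on arm64's flush_pmd_tlb_range() definition (PMD-level entries: PMD_SIZE stride, translation-table-level hint 2):

    /* Range known to be mapped by PMD (2 MiB) entries: the loop then
     * advances PMD_SIZE per operation instead of PAGE_SIZE. */
    __flush_tlb_range(vma, addr, addr + PMD_SIZE, PMD_SIZE, false, 2);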
450 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
    {
454         if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
                    flush_tlb_all();    /* overlong range: flush everything */
                    return;
            }

459         start = __TLBI_VADDR(start, 0);     /* TLBI operands encode VA >> 12 */
            end = __TLBI_VADDR(end, 0);

            /* one page per op, expressed in the encoded VA >> 12 units */
463         for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                    __tlbi(vaale1is, addr);
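
flush_tlb_kernel_range() is what the vmalloc/vunmap paths use after tearing down kernel mappings. A minimal hypothetical wrapper (demo_kernel_unmap_flush() is not an in-tree function) to make the calling convention concrete:

    #include <asm/tlbflush.h>

    /* Hypothetical helper: after unmapping 'size' bytes of kernel VA
     * space at 'va', drop stale kernel-mapping TLB entries on all CPUs. */
    static void demo_kernel_unmap_flush(unsigned long va, size_t size)
    {
            flush_tlb_kernel_range(va, va + size);
    }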