Lines Matching defs:end
93 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
109 size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
121 } while (pte += PFN_DOWN(size), addr += size, addr != end);
126 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
136 if ((end - addr) != PMD_SIZE)
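The (end - addr) != PMD_SIZE test above is one of several guards checked before installing a single huge entry. A minimal, hedged sketch of that decision in plain C (HUGE_SIZE and can_map_huge are illustrative names, not kernel APIs; the real helpers also check arch support and any existing lower-level table):

#include <stdbool.h>

#define HUGE_SIZE 0x200000UL                    /* stand-in for PMD_SIZE (2 MiB) */

static bool can_map_huge(unsigned long addr, unsigned long end,
                         unsigned long long phys_addr)
{
        if ((end - addr) != HUGE_SIZE)          /* range must cover exactly one entry */
                return false;
        if (addr & (HUGE_SIZE - 1))             /* virtual start must be aligned */
                return false;
        if (phys_addr & (HUGE_SIZE - 1))        /* physical start must be aligned */
                return false;
        return true;
}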
151 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
162 next = pmd_addr_end(addr, end);
172 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
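The do/while tails above all share the same boundary-clamping walk: next is the smaller of the next table boundary and end, so the loop advances one table entry at a time and terminates exactly at end. A hedged userspace sketch of the pattern (BLOCK_SIZE and block_addr_end() are illustrative stand-ins for PMD_SIZE and pmd_addr_end()):

#include <stdio.h>

#define BLOCK_SIZE 0x200000UL                   /* pretend 2 MiB "PMD" span */
#define BLOCK_MASK (~(BLOCK_SIZE - 1))

/* Analogue of pmd_addr_end(): next block boundary, clamped to end. */
static unsigned long block_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + BLOCK_SIZE) & BLOCK_MASK;
        return boundary < end ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x1f0000, end = 0x610000, next;

        do {
                next = block_addr_end(addr, end);
                printf("handle [%#lx, %#lx)\n", addr, next);
        } while (addr = next, addr != end);     /* same termination as the kernel loops */
        return 0;
}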
176 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
186 if ((end - addr) != PUD_SIZE)
201 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
212 next = pud_addr_end(addr, end);
223 } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
227 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
237 if ((end - addr) != P4D_SIZE)
252 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
263 next = p4d_addr_end(addr, end);
274 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
278 static int vmap_range_noflush(unsigned long addr, unsigned long end,
289 BUG_ON(addr >= end);
294 next = pgd_addr_end(addr, end);
299 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
302 arch_sync_kernel_mappings(start, end);
307 int ioremap_page_range(unsigned long addr, unsigned long end,
312 err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
314 flush_cache_vmap(addr, end);
316 err = kmsan_ioremap_page_range(addr, end, phys_addr, prot,
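ioremap_page_range() applies pgprot_nx(), builds the mapping via vmap_range_noflush(), then flushes the cache for the new range. Drivers normally reach it through ioremap() rather than calling it directly; a hedged sketch of that typical path (the register offset and error handling are illustrative):

#include <linux/io.h>
#include <linux/errno.h>

static void __iomem *regs;

static int example_map_mmio(phys_addr_t base, size_t len)
{
        regs = ioremap(base, len);      /* ends up in ioremap_page_range() */
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x10);       /* hypothetical device register */
        return 0;
}

static void example_unmap_mmio(void)
{
        iounmap(regs);
}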
321 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
330 } while (pte++, addr += PAGE_SIZE, addr != end);
334 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
343 next = pmd_addr_end(addr, end);
356 } while (pmd++, addr = next, addr != end);
359 static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
368 next = pud_addr_end(addr, end);
379 } while (pud++, addr = next, addr != end);
382 static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
390 next = p4d_addr_end(addr, end);
399 } while (p4d++, addr = next, addr != end);
414 void __vunmap_range_noflush(unsigned long start, unsigned long end)
421 BUG_ON(addr >= end);
424 next = pgd_addr_end(addr, end);
430 } while (pgd++, addr = next, addr != end);
433 arch_sync_kernel_mappings(start, end);
436 void vunmap_range_noflush(unsigned long start, unsigned long end)
438 kmsan_vunmap_range_noflush(start, end);
439 __vunmap_range_noflush(start, end);
445 * @end: end of the VM area to unmap (non-inclusive)
451 void vunmap_range(unsigned long addr, unsigned long end)
453 flush_cache_vunmap(addr, end);
454 vunmap_range_noflush(addr, end);
455 flush_tlb_kernel_range(addr, end);
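vunmap_range() is the full-service variant: it flushes the cache for the range, clears the page tables via vunmap_range_noflush(), and then flushes the TLB, so a caller only needs the one call. A hedged usage sketch (the address and size are illustrative):

#include <linux/vmalloc.h>

/* Tear down a kernel-virtual range that was mapped earlier, e.g. by
 * ioremap_page_range(); vunmap_range() handles cache, PTEs and TLB. */
static void example_teardown(unsigned long va, unsigned long size)
{
        vunmap_range(va, va + size);
}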
459 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
484 } while (pte++, addr += PAGE_SIZE, addr != end);
490 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
500 next = pmd_addr_end(addr, end);
503 } while (pmd++, addr = next, addr != end);
508 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
518 next = pud_addr_end(addr, end);
521 } while (pud++, addr = next, addr != end);
526 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
536 next = p4d_addr_end(addr, end);
539 } while (p4d++, addr = next, addr != end);
543 static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
553 BUG_ON(addr >= end);
556 next = pgd_addr_end(addr, end);
562 } while (pgd++, addr = next, addr != end);
565 arch_sync_kernel_mappings(start, end);
579 int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
582 unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
588 return vmap_small_pages_range_noflush(addr, end, prot, pages);
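The nr = (end - addr) >> PAGE_SHIFT above counts the range in base pages; with a page_shift larger than PAGE_SHIFT, those pages are consumed in groups of 1 << (page_shift - PAGE_SHIFT) per mapping, and the small-pages path is only the fallback. A hedged arithmetic sketch of that bookkeeping:

#include <stdio.h>

#define PAGE_SHIFT 12                                        /* 4 KiB base pages */

int main(void)
{
        unsigned long addr = 0, end = 4UL << 20;             /* 4 MiB range */
        unsigned int page_shift = 21;                        /* map with 2 MiB entries */
        unsigned int nr = (end - addr) >> PAGE_SHIFT;        /* 1024 base pages */
        unsigned int step = 1U << (page_shift - PAGE_SHIFT); /* 512 pages per entry */

        printf("%u base pages -> %u large mappings\n", nr, nr / step);
        return 0;
}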
605 int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
608 int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages,
613 return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
619 * @end: end of the VM area to map (non-inclusive)
628 static int vmap_pages_range(unsigned long addr, unsigned long end,
633 err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
634 flush_cache_vmap(addr, end);
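vmap_pages_range() is static here; the usual external route to it is vmap(), with vunmap() undoing the mapping through the vunmap_* helpers above. A hedged sketch of that pairing (error handling abbreviated):

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Present two (possibly discontiguous) pages as one contiguous kernel range. */
static void *map_two_pages(struct page *pages[2])
{
        return vmap(pages, 2, VM_MAP, PAGE_KERNEL);     /* may return NULL */
}

static void unmap_two_pages(void *addr)
{
        vunmap(addr);
}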
879 * we end up with parent rb_node and correct direction, i name
1145 * start end
1149 * start end
1166 * start end
1170 * start end
1724 static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1745 end = max(end,
1749 flush_tlb_kernel_range(start, end);
1782 trace_purge_vmap_area_lazy(start, end, num_purged_areas);
2259 static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2292 end = max(e, end);
2306 if (!__purge_vmap_area_lazy(start, end) && flush)
2307 flush_tlb_kernel_range(start, end);
2326 unsigned long start = ULONG_MAX, end = 0;
2329 _vm_unmap_aliases(start, end, flush);
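Both the lazy purge and the alias flush above widen a single (start, end) pair over every area involved, starting from ULONG_MAX and 0, so one flush_tlb_kernel_range() call can cover the union rather than flushing per area. A hedged userspace sketch of that accumulation:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        unsigned long areas[][2] = { { 0x3000, 0x5000 }, { 0x9000, 0xb000 } };
        unsigned long start = ULONG_MAX, end = 0;

        for (int i = 0; i < 2; i++) {
                if (areas[i][0] < start)
                        start = areas[i][0];
                if (areas[i][1] > end)
                        end = areas[i][1];
        }
        printf("flush [%#lx, %#lx)\n", start, end);      /* one flush over the union */
        return 0;
}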
2378 * the end. Please use this function for short-lived objects.
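The comment fragment above is from vm_map_ram(), which is tuned for short-lived mappings and pairs with vm_unmap_ram(). A hedged usage sketch, assuming the recent three-argument signature (count and node choice are illustrative):

#include <linux/vmalloc.h>
#include <linux/numa.h>

static void *quick_map(struct page **pages, unsigned int count)
{
        return vm_map_ram(pages, count, NUMA_NO_NODE);  /* may return NULL */
}

static void quick_unmap(void *mem, unsigned int count)
{
        vm_unmap_ram(mem, count);       /* count must match the vm_map_ram() call */
}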
2572 unsigned long start, unsigned long end, int node,
2595 va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0);
2619 unsigned long start, unsigned long end,
2622 return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
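__get_vm_area_node() is the common back end; get_vm_area() is the simple public wrapper that reserves a chunk of vmalloc address space without populating it. A hedged sketch (size and flags are illustrative):

#include <linux/vmalloc.h>
#include <linux/errno.h>

static struct vm_struct *area;

static int example_reserve(unsigned long size)
{
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return -ENOMEM;
        /* area->addr now points at the reserved, not-yet-mapped range. */
        return 0;
}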
2725 unsigned long start = ULONG_MAX, end = 0;
2731 * Find the start and end range of the direct mappings to make sure that
2742 end = max(addr + page_size, end);
2753 _vm_unmap_aliases(start, end, flush_dmap);
3212 * @end: vm area range end
3236 unsigned long start, unsigned long end, gfp_t gfp_mask,
3281 VM_UNINITIALIZED | vm_flags, start, end, node,
3975 * out - the VA with the highest aligned end address.
3978 * Returns: determined end address within vmap_area
4016 * does everything top-down and scans free blocks from the end looking
4031 unsigned long base, start, size, end, last_end, orig_start, orig_end;
4038 end = start + sizes[area];
4052 BUG_ON(start2 < end && start < end2);
4079 end = start + sizes[area];
4082 base = pvm_determine_end_from_reverse(&va, align) - end;
4102 if (base + end > va->va_end) {
4103 base = pvm_determine_end_from_reverse(&va, align) - end;
4113 base = pvm_determine_end_from_reverse(&va, align) - end;
4127 end = start + sizes[area];
4128 va = pvm_find_va_enclose_addr(base + end);