Lines matching defs: end

506 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
515 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
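The two matches above are flush_icache_range() and its per-page loop: walk the half-open range [start, end) one page at a time. A minimal user-space sketch of the same stride pattern, assuming sparc64's 8 KB base page size; flush_one_page() is a hypothetical stand-in for the kernel's per-page flush primitive:

    #include <stdio.h>

    #define PAGE_SIZE 8192UL        /* sparc64 base page size */

    /* hypothetical stand-in for the per-page I-cache flush primitive */
    static void flush_one_page(unsigned long kaddr)
    {
            printf("flush page containing %#lx\n", kaddr);
    }

    /* same shape as the loop at line 515: walk [start, end) page by page */
    static void flush_range(unsigned long start, unsigned long end)
    {
            unsigned long kaddr;

            for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
                    flush_one_page(kaddr);
    }

    int main(void)
    {
            flush_range(0x100000, 0x100000 + 3 * PAGE_SIZE);
            return 0;
    }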
733 void __flush_dcache_range(unsigned long start, unsigned long end)
740 for (va = start; va < end; va += 32) {
747 end = __pa(end);
748 for (va = start; va < end; va += 32)
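Lines 733-748 are __flush_dcache_range(): the same bounded stride, but at D-cache line granularity (32 bytes). Note that before the second loop the bounds are converted with __pa() (line 747), evidently because that flush primitive takes physical rather than virtual addresses. A sketch of the stride only, with flush_dcache_line() as a hypothetical stand-in:

    #define DCACHE_LINE_SIZE 32UL

    /* hypothetical stand-in for the per-line D-cache flush/invalidate */
    static void flush_dcache_line(unsigned long va)
    {
            (void)va;       /* would issue the flush for the line at va */
    }

    /* same shape as the loops at lines 740 and 748: one iteration per
     * 32-byte cache line in [start, end) */
    static void flush_dcache_range(unsigned long start, unsigned long end)
    {
            unsigned long va;

            for (va = start; va < end; va += DCACHE_LINE_SIZE)
                    flush_dcache_line(va);
    }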
962 static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
967 for ( ; start < end; start += PAGE_SIZE) {
990 return start > end ? end : start;
993 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
1000 return memblock_nid_range_sun4u(start, end, nid);
1008 ret_end = end;
1029 * end address that we return
1047 ret_end = end;
1057 * particular latency group. As optimization we calculate end value by
1062 ret_end = m_end > end ? end : m_end;
1211 phys_addr_t start, end;
1218 for_each_mem_range(i, &start, &end) {
1219 while (start < end) {
1223 this_end = memblock_nid_range(start, end, &nid);
1226 "start[%llx] end[%lx]\n",
2093 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2100 while (start < end) {
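Lines 2093-2100 are patch_one_ktsb_phys(): here 'start' and 'end' are pointers bounding a table of 32-bit entries, and the while loop rewrites instructions at each site so they carry the physical address 'pa'. A sketch of just the traversal shape; the sparc instruction encodings are omitted, and patch_site() is a hypothetical stand-in for the real rewrite:

    /* hypothetical stand-in: rewrite the instruction word(s) at one patch
     * site so they encode (part of) 'pa' */
    static void patch_site(unsigned int *site, unsigned long pa)
    {
            (void)site;
            (void)pa;
    }

    /* same shape as lines 2093-2100: pointer-bounded walk over a table,
     * patching one entry per iteration */
    static void patch_table(unsigned int *start, unsigned int *end,
                            unsigned long pa)
    {
            while (start < end) {
                    patch_site(start, pa);
                    start++;
            }
    }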
2484 unsigned long start, end;
2487 end = start + pavail[i].reg_size;
2489 if (paddr >= start && paddr < end)
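Lines 2484-2489 show the idiom for testing membership in a (base, size) region list: the exclusive end is base + size, and the check is the half-open comparison paddr >= start && paddr < end. A self-contained sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct region {
            unsigned long phys_addr;        /* base */
            unsigned long reg_size;         /* length in bytes */
    };

    /* half-open membership test, as at lines 2487-2489 */
    static bool paddr_in_regions(unsigned long paddr,
                                 const struct region *r, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    unsigned long start = r[i].phys_addr;
                    unsigned long end = start + r[i].reg_size;

                    if (paddr >= start && paddr < end)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            struct region r[] = { { 0x1000, 0x2000 } }; /* [0x1000, 0x3000) */

            printf("%d %d\n", paddr_in_regions(0x2fff, r, 1),
                   paddr_in_regions(0x3000, r, 1));     /* prints "1 0" */
            return 0;
    }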
2644 void vmemmap_free(unsigned long start, unsigned long end,
3088 code_resource.end = compute_kern_paddr(_etext - 1);
3090 data_resource.end = compute_kern_paddr(_edata - 1);
3092 bss_resource.end = compute_kern_paddr(_end - 1);
3112 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
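Lines 3088-3112 switch conventions: unlike the half-open loops above, struct resource spans are inclusive, [start, end], which is why every end here subtracts one (_etext - 1, base + size - 1). A minimal sketch of the convention:

    struct span {
            unsigned long start;
            unsigned long end;      /* inclusive: the last valid byte */
    };

    /* as at line 3112: a region of 'size' bytes starting at 'base' has
     * its last byte at base + size - 1 */
    static void span_init(struct span *s, unsigned long base,
                          unsigned long size)
    {
            s->start = base;
            s->end = base + size - 1;
    }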
3135 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3137 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3142 if (end > HI_OBP_ADDRESS) {
3143 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3144 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3147 flush_tsb_kernel_range(start, end);
3148 do_flush_tlb_kernel_range(start, end);
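Lines 3135-3148 are flush_tlb_kernel_range() skipping the OBP window: if [start, end) overlaps the hole [LOW_OBP_ADDRESS, HI_OBP_ADDRESS), the pieces below and above the hole are flushed separately; otherwise the whole range is flushed at once. A sketch with placeholder hole bounds (not the kernel's real values) and do_flush() standing in for the paired flush_tsb_kernel_range()/do_flush_tlb_kernel_range() calls:

    #include <stdio.h>

    /* placeholder hole bounds for the sketch only */
    #define LOW_HOLE 0x4000UL
    #define HI_HOLE  0x8000UL

    /* stand-in for the TSB + TLB flush pair issued at each call site */
    static void do_flush(unsigned long start, unsigned long end)
    {
            printf("flush [%#lx, %#lx)\n", start, end);
    }

    /* same shape as lines 3137-3148 */
    static void flush_around_hole(unsigned long start, unsigned long end)
    {
            if (start < HI_HOLE && end > LOW_HOLE) {
                    if (start < LOW_HOLE)
                            do_flush(start, LOW_HOLE);
                    if (end > HI_HOLE)
                            do_flush(HI_HOLE, end);
            } else {
                    do_flush(start, end);
            }
    }

    int main(void)
    {
            flush_around_hole(0x1000, 0xa000);  /* straddles: two flushes */
            flush_around_hole(0x9000, 0xb000);  /* above hole: one flush */
            return 0;
    }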