Lines matching defs:end
491 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
500 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
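A minimal sketch of the page-granular walk on line 500, assuming a hypothetical flush_one_page() helper (sparc64 uses an 8K base page):

    #define PAGE_SIZE 8192UL                         /* sparc64 base page size */

    static void flush_one_page(unsigned long kaddr); /* hypothetical hook */

    void flush_range(unsigned long start, unsigned long end)
    {
        unsigned long kaddr;

        /* half-open [start, end): one flush per page stepped over */
        for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
            flush_one_page(kaddr);
    }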
717 void __flush_dcache_range(unsigned long start, unsigned long end)
724 for (va = start; va < end; va += 32) {
731 end = __pa(end);
732 for (va = start; va < end; va += 32)
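Lines 724 and 732 stride through the range in 32-byte D-cache lines, and line 731 converts the bounds with __pa() so the second loop runs on physical addresses. The stride pattern, sketched with a hypothetical flush_dcache_line():

    #define DCACHE_LINE_SIZE 32UL

    static void flush_dcache_line(unsigned long addr); /* hypothetical hook */

    void dcache_flush_range(unsigned long start, unsigned long end)
    {
        unsigned long va;

        for (va = start; va < end; va += DCACHE_LINE_SIZE)
            flush_dcache_line(va);
    }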
946 static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
951 for ( ; start < end; start += PAGE_SIZE) {
974 return start > end ? end : start;
977 static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
984 return memblock_nid_range_sun4u(start, end, nid);
992 ret_end = end;
1013 * end address that we return
1031 ret_end = end;
1041 * particular latency group. As an optimization, we calculate the end value by
1046 ret_end = m_end > end ? end : m_end;
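The returns on lines 974 and 1046 are both min()-style clamps: whatever boundary the lookup finds (the overshot start, or the latency-group end m_end), it is capped at the caller's end, while lines 992 and 1031 fall back to end itself when no finer boundary is known. The clamp, in isolation:

    #include <stdint.h>

    /* Cap a computed boundary so it never overshoots the caller's end;
     * this is exactly the `x > end ? end : x` shape of lines 974 and 1046. */
    static uint64_t clamp_to_end(uint64_t x, uint64_t end)
    {
        return x > end ? end : x;
    }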
1195 phys_addr_t start, end;
1202 for_each_mem_range(i, &start, &end) {
1203 while (start < end) {
1207 this_end = memblock_nid_range(start, end, &nid);
1210 "start[%llx] end[%lx]\n",
2078 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2085 while (start < end) {
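patch_one_ktsb_phys() walks a region of 32-bit instruction words between two pointers, rewriting each to carry the TSB's physical address. The traversal shape, with a hypothetical patch_insn() doing the per-word rewrite:

    /* Hypothetical: rewrite one instruction word to encode `pa`. */
    static void patch_insn(unsigned int *insn, unsigned long pa);

    static void patch_all(unsigned int *start, unsigned int *end,
                          unsigned long pa)
    {
        while (start < end)
            patch_insn(start++, pa);
    }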
2469 unsigned long start, end;
2472 end = start + pavail[i].reg_size;
2474 if (paddr >= start && paddr < end)
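Lines 2469-2474 are a half-open membership test: paddr belongs to pavail[i] iff start <= paddr < start + reg_size. As a standalone predicate (the phys_addr/reg_size layout is assumed from the listing):

    struct phys_range {
        unsigned long phys_addr;    /* assumed to mirror pavail[i] */
        unsigned long reg_size;
    };

    static int range_contains(const struct phys_range *r, unsigned long paddr)
    {
        unsigned long start = r->phys_addr;
        unsigned long end   = start + r->reg_size;   /* exclusive bound */

        return paddr >= start && paddr < end;
    }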
2630 void vmemmap_free(unsigned long start, unsigned long end,
3054 code_resource.end = compute_kern_paddr(_etext - 1);
3056 data_resource.end = compute_kern_paddr(_edata - 1);
3058 bss_resource.end = compute_kern_paddr(_end - 1);
3078 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
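The "- 1" subtraction on lines 3054-3078 is there because resource bounds are inclusive, unlike the half-open [start, end) loops elsewhere in the file: the end field names the last valid byte. Sketched with a stand-in type for the kernel's struct resource:

    struct resource_bounds {
        unsigned long start;
        unsigned long end;    /* inclusive, unlike the loops above */
    };

    static void fill_bounds(struct resource_bounds *res,
                            unsigned long base, unsigned long size)
    {
        res->start = base;
        res->end   = base + size - 1;   /* last valid byte, not base+size */
    }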
3101 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3103 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3108 if (end > HI_OBP_ADDRESS) {
3109 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3110 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3113 flush_tsb_kernel_range(start, end);
3114 do_flush_tlb_kernel_range(start, end);
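Lines 3103-3114 keep the OBP firmware window mapped: a flush that overlaps [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) is split into the pieces below and above it, and only a non-overlapping range is flushed whole. The control flow, sketched with a hypothetical do_flush() and window constants assumed for illustration:

    #define LOW_OBP_ADDRESS 0x00000000f0000000UL  /* assumed window base */
    #define HI_OBP_ADDRESS  0x0000000100000000UL  /* assumed window end  */

    static void do_flush(unsigned long start, unsigned long end); /* hypothetical */

    void flush_around_obp(unsigned long start, unsigned long end)
    {
        if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
            /* overlaps the firmware window: flush around it */
            if (start < LOW_OBP_ADDRESS)
                do_flush(start, LOW_OBP_ADDRESS);
            if (end > HI_OBP_ADDRESS)
                do_flush(HI_OBP_ADDRESS, end);
        } else {
            do_flush(start, end);
        }
    }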