Lines Matching refs: end  (a standalone sketch of the recurring addr/end walk pattern these hits share follows the listing)
172 static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
191 } while (ptep++, addr += PAGE_SIZE, addr != end);
197 unsigned long end, phys_addr_t phys,
222 next = pte_cont_addr_end(addr, end);
232 } while (addr = next, addr != end);
235 static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
246 next = pmd_addr_end(addr, end);
267 } while (pmdp++, addr = next, addr != end);
273 unsigned long end, phys_addr_t phys,
300 next = pmd_cont_addr_end(addr, end);
310 } while (addr = next, addr != end);
313 static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
340 next = pud_addr_end(addr, end);
364 } while (pudp++, addr = next, addr != end);
375 unsigned long addr, end, next;
387 end = PAGE_ALIGN(virt + size);
390 next = pgd_addr_end(addr, end);
394 } while (pgdp++, addr = next, addr != end);
496 phys_addr_t end, pgprot_t prot, int flags)
498 __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
571 phys_addr_t start, end;
599 for_each_mem_range(i, &start, &end) {
600 if (start >= end)
607 __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
830 static bool pgtable_range_aligned(unsigned long start, unsigned long end,
844 if (end - 1 > ceiling - 1)
850 unsigned long end, bool free_mapped,
867 } while (addr += PAGE_SIZE, addr < end);
871 unsigned long end, bool free_mapped,
878 next = pmd_addr_end(addr, end);
900 } while (addr = next, addr < end);
904 unsigned long end, bool free_mapped,
911 next = pud_addr_end(addr, end);
933 } while (addr = next, addr < end);
937 unsigned long end, bool free_mapped,
944 next = p4d_addr_end(addr, end);
952 } while (addr = next, addr < end);
955 static void unmap_hotplug_range(unsigned long addr, unsigned long end,
970 next = pgd_addr_end(addr, end);
978 } while (addr = next, addr < end);
982 unsigned long end, unsigned long floor,
997 } while (addr += PAGE_SIZE, addr < end);
999 if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
1019 unsigned long end, unsigned long floor,
1026 next = pmd_addr_end(addr, end);
1034 } while (addr = next, addr < end);
1039 if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
1059 unsigned long end, unsigned long floor,
1066 next = pud_addr_end(addr, end);
1074 } while (addr = next, addr < end);
1079 if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
1099 unsigned long end, unsigned long floor,
1106 next = p4d_addr_end(addr, end);
1114 } while (addr = next, addr < end);
1117 static void free_empty_tables(unsigned long addr, unsigned long end,
1124 next = pgd_addr_end(addr, end);
1132 } while (addr = next, addr < end);
1149 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1152 WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1155 return vmemmap_populate_basepages(start, end, node, altmap);
1157 return vmemmap_populate_hugepages(start, end, node, altmap);
1161 void vmemmap_free(unsigned long start, unsigned long end,
1164 WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1166 unmap_hotplug_range(start, end, true, altmap);
1167 free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
1239 unsigned long next, end;
1251 end = addr + PUD_SIZE;
1254 } while (pmdp++, next += PMD_SIZE, next != end);
1265 unsigned long end = start + size;
1268 WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
1270 unmap_hotplug_range(start, end, false, NULL);
1271 free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
1284 * the end physical address. In this case set start to zero
1298 * also be derived from its end points.
1301 mhp_range.end = end_linear_pa;
1366 unsigned long end = start + (1UL << PA_SECTION_SHIFT);
1378 pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
1388 pr_err("Boot memory [%lx %lx] offlined\n", start, end);
1416 phys_addr_t start, end, addr;
1428 for_each_mem_range(i, &start, &end) {
1429 for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
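
Most of the matches above are the per-level page-table walk loops, where end bounds each do-while through a *_addr_end(addr, end) helper that clamps the next level boundary to the end of the range. Below is a minimal standalone sketch of that pattern only, not the kernel code itself; the names SKETCH_PMD_SIZE, sketch_pmd_addr_end and walk_range are hypothetical, and a 2 MiB block size is assumed purely for illustration.

/*
 * Minimal standalone sketch (not the kernel code above) of the walk
 * pattern shared by the matches: clamp each step to the range end,
 * then advance until addr reaches end.  All names are hypothetical;
 * a 2 MiB block size is assumed for illustration.
 */
#include <stdio.h>

#define SKETCH_PMD_SIZE		(1UL << 21)
#define SKETCH_PMD_MASK		(~(SKETCH_PMD_SIZE - 1))

/* Same idea as pmd_addr_end(): next block boundary, clamped to end. */
static unsigned long sketch_pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + SKETCH_PMD_SIZE) & SKETCH_PMD_MASK;

	/* the "- 1" comparison stays correct if boundary wraps to 0 */
	return (boundary - 1 < end - 1) ? boundary : end;
}

static void walk_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = sketch_pmd_addr_end(addr, end);
		printf("chunk [%#lx, %#lx)\n", addr, next);
		/* a real walker would map, unmap or free this chunk here */
	} while (addr = next, addr != end);
}

int main(void)
{
	/* walk an arbitrary range that is not aligned to the block size */
	walk_range(0x40100000UL, 0x40600000UL);
	return 0;
}

Because the helper never returns anything past end, the loops can compare with addr != end (as the creation paths in the matches do) or addr < end (as the teardown paths do) and still terminate exactly at the range end, even when start and end are not aligned to the block size.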