Lines matching defs:start (definitions and uses of `start`; the snippets appear to come from the arm64 page-table code, arch/arm64/mm/mmu.c)

464 static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
467 __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
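The call at line 467 maps the physical range at its linear-map alias: the virtual address is the physical address shifted by a fixed offset. A minimal userspace sketch of that translation, with made-up constants standing in for the kernel's PAGE_OFFSET/PHYS_OFFSET (names and values below are illustrative, not the real macros):

#include <stdint.h>
#include <stdio.h>

#define FAKE_PAGE_OFFSET 0xffff800000000000ULL  /* hypothetical linear-map base */
#define FAKE_PHYS_OFFSET 0x40000000ULL          /* hypothetical DRAM base */

/* Stand-in for __phys_to_virt(): constant-offset translation. */
static uint64_t fake_phys_to_virt(uint64_t phys)
{
        return (phys - FAKE_PHYS_OFFSET) + FAKE_PAGE_OFFSET;
}

int main(void)
{
        uint64_t start = 0x48000000ULL, end = 0x48200000ULL;

        /* Same call shape as line 467: map [start, end) at its linear-map alias. */
        printf("map phys [%#llx, %#llx) at virt %#llx, len %#llx\n",
               (unsigned long long)start, (unsigned long long)end,
               (unsigned long long)fake_phys_to_virt(start),
               (unsigned long long)(end - start));
        return 0;
}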
500 phys_addr_t start, end;
521 memblock_mark_nomap(crashk_res.start,
527 for_each_mem_range(i, &start, &end) {
528 if (start >= end)
535 __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
562 __map_memblock(pgdp, crashk_res.start,
566 memblock_clear_nomap(crashk_res.start,
832 static bool pgtable_range_aligned(unsigned long start, unsigned long end,
836 start &= mask;
837 if (start < floor)
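Lines 832-837 are fragments of an alignment test used before freeing empty page tables: after masking to the table granularity, the range must still lie within [floor, ceiling]. A self-contained approximation of that check (the 2 MiB PMD-style mask and the sample addresses are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool range_aligned(unsigned long start, unsigned long end,
                          unsigned long floor, unsigned long ceiling,
                          unsigned long mask)
{
        start &= mask;
        if (start < floor)
                return false;

        if (ceiling) {
                ceiling &= mask;
                if (!ceiling)
                        return false;
        }

        /* ceiling == 0 means "no ceiling": the wrap to ULONG_MAX lets any end pass. */
        if ((end - 1) > (ceiling - 1))
                return false;
        return true;
}

int main(void)
{
        unsigned long pmd_mask = ~((1UL << 21) - 1);   /* 2 MiB granule, illustrative */

        printf("%d\n", range_aligned(0x40200000UL, 0x40400000UL,
                                     0x40000000UL, 0x80000000UL, pmd_mask));
        return 0;
}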
988 unsigned long i, start = addr;
1001 if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
1016 __flush_tlb_kernel_pgtable(start);
1025 unsigned long i, next, start = addr;
1041 if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
1056 __flush_tlb_kernel_pgtable(start);
1065 unsigned long i, next, start = addr;
1081 if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
1096 __flush_tlb_kernel_pgtable(start);
1140 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1143 return vmemmap_populate_basepages(start, end, node, altmap);
1146 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1149 unsigned long addr = start;
1187 void vmemmap_free(unsigned long start, unsigned long end,
1191 WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
1193 unmap_hotplug_range(start, end, true, altmap);
1194 free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
1477 static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
1479 unsigned long end = start + size;
1482 WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));
1484 unmap_hotplug_range(start, end, false, NULL);
1485 free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
1488 static bool inside_linear_region(u64 start, u64 size)
1496 * mapping the start physical address is actually bigger than
1497 * the end physical address. In this case set start to zero
1513 return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
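The return at line 1513 is a closed-interval containment test on physical addresses. A standalone version, ignoring the CONFIG_RANDOMIZE_BASE wrap-around handling described in the comment at lines 1496-1497; the bounds and sample ranges below are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as line 1513: inclusive-end check that [start, start+size-1] fits. */
static bool inside_range(uint64_t start, uint64_t size,
                         uint64_t start_linear_pa, uint64_t end_linear_pa)
{
        return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
}

int main(void)
{
        /* Illustrative bounds, not the kernel's real linear-map limits. */
        uint64_t lo = 0x40000000ULL, hi = 0x0000007fffffffffULL;

        printf("%d\n", inside_range(0x80000000ULL, 0x10000000ULL, lo, hi));  /* 1 */
        printf("%d\n", inside_range(0x20000000ULL, 0x10000000ULL, lo, hi));  /* 0 */
        return 0;
}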
1516 int arch_add_memory(int nid, u64 start, u64 size,
1521 if (!inside_linear_region(start, size)) {
1522 pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size);
1529 __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
1533 memblock_clear_nomap(start, size);
1535 ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
1539 __phys_to_virt(start), size);
1541 max_pfn = PFN_UP(start + size);
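Lines 1535 and 1541 turn byte addresses into page frame numbers. A small sketch of that arithmetic, assuming 4 KiB pages and a PFN_UP-style round-up (constants chosen for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                      /* 4 KiB pages, assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round-up PFN, in the spirit of the kernel's PFN_UP(). */
static unsigned long pfn_up(uint64_t addr)
{
        return (unsigned long)((addr + PAGE_SIZE - 1) >> PAGE_SHIFT);
}

int main(void)
{
        uint64_t start = 0x80000000ULL, size = 0x10000000ULL;

        /* start >> PAGE_SHIFT and size >> PAGE_SHIFT match the values passed on line 1535. */
        printf("start_pfn=%#llx nr_pages=%#llx max_pfn=%#lx\n",
               (unsigned long long)(start >> PAGE_SHIFT),
               (unsigned long long)(size >> PAGE_SHIFT),
               pfn_up(start + size));
        return 0;
}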
1548 void arch_remove_memory(int nid, u64 start, u64 size,
1551 unsigned long start_pfn = start >> PAGE_SHIFT;
1555 __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);