Lines Matching defs:start
39 static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
43 unsigned long addr = start;
82 flush_tlb_kernel_range(start, start + PMD_SIZE);
98 * The reuse_page is found 'first' in table walk before we start
180 static int vmemmap_remap_range(unsigned long start, unsigned long end,
183 unsigned long addr = start;
187 VM_BUG_ON(!PAGE_ALIGNED(start));
200 flush_tlb_kernel_range(start, end);
268 static inline void reset_struct_pages(struct page *start)
270 struct page *from = start + NR_RESET_STRUCT_PAGE;
273 memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
300 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
303 * @start: start address of the vmemmap virtual address range that we want
311 static int vmemmap_remap_free(unsigned long start, unsigned long end,
321 int nid = page_to_nid((struct page *)start);
345 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
351 * So we need to make sure that @start and @reuse meet the above rules.
353 BUG_ON(start - reuse != PAGE_SIZE);
380 static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
384 unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
385 int nid = page_to_nid((struct page *)start);
403 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
406 * @start: start address of the vmemmap virtual address range that we want
414 static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
425 BUG_ON(start - reuse != PAGE_SIZE);
427 if (alloc_vmemmap_page_list(start, end, &vmemmap_pages))
516 * being at the start of the hotplugged memory region in