Lines matching refs:start
68 * represents the start of the subsection it is within. Note that we have to
77 /* Return the pfn of the start of the subsection. */
96 struct page *start;
98 start = vmemmap_subsection_start(vmemmap_addr);
100 for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
106 if (pfn_valid(page_to_pfn(start)))
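The matched lines above (68-106) come from the powerpc vmemmap code: a vmemmap virtual address is first rounded down to the struct page that begins its subsection, and vmemmap_populated() then walks the candidate range one subsection at a time, treating a single valid pfn as proof that the backing mapping is still in use. Below is a minimal userspace model of that logic; PAGES_PER_SUBSECTION, the struct page size, and the vmemmap base are assumptions for the model, not the kernel's real values.

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGES_PER_SUBSECTION  512UL   /* assumed: 2M subsection, 4K pages */
    #define SIZEOF_STRUCT_PAGE    64UL    /* assumed */

    static const unsigned long vmemmap_base = 0xc00c000000000000UL; /* assumed */

    static bool pfn_valid(unsigned long pfn)   /* stub: no memory present */
    {
        (void)pfn;
        return false;
    }

    /* Round a vmemmap address down to the struct page starting its
     * subsection and return that page's index into the vmemmap array. */
    static unsigned long vmemmap_subsection_start(unsigned long vmemmap_addr)
    {
        unsigned long offset = vmemmap_addr - vmemmap_base;

        return (offset / SIZEOF_STRUCT_PAGE) & ~(PAGES_PER_SUBSECTION - 1);
    }

    /* True if any subsection overlapping [vmemmap_addr, +map_size) still
     * has a valid pfn, i.e. the backing mapping is in use and must stay. */
    static bool vmemmap_populated(unsigned long vmemmap_addr, unsigned long map_size)
    {
        unsigned long end = vmemmap_addr + map_size;
        unsigned long pfn = vmemmap_subsection_start(vmemmap_addr);

        for (; vmemmap_base + pfn * SIZEOF_STRUCT_PAGE < end; pfn += PAGES_PER_SUBSECTION)
            if (pfn_valid(pfn))
                return true;

        return false;
    }

    int main(void)
    {
        unsigned long addr = vmemmap_base + 1000 * SIZEOF_STRUCT_PAGE;

        printf("populated: %d\n", vmemmap_populated(addr, 1UL << 20));
        return 0;
    }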
166 unsigned long start,
178 vmem_back->virt_addr = start;
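Lines 166-178 belong to the routine that records each backing block on a singly linked list, since there is no conventional page table to walk for the vmemmap region. A sketch of the node layout and the push, with malloc() standing in for the kernel's allocator and the NUMA node argument (visible at line 243) accepted but unused:

    #include <errno.h>
    #include <stdlib.h>

    /* Node layout mirroring the kernel's struct vmemmap_backing. */
    struct vmemmap_backing {
        struct vmemmap_backing *list;   /* next entry */
        unsigned long phys;             /* physical address of the backing block */
        unsigned long virt_addr;        /* vmemmap virtual address it maps */
    };

    static struct vmemmap_backing *vmemmap_list;

    /* Record one backing block; malloc() stands in for the kernel allocator. */
    static int vmemmap_list_populate(unsigned long phys, unsigned long start, int node)
    {
        struct vmemmap_backing *vmem_back = malloc(sizeof(*vmem_back));

        (void)node;                     /* NUMA hint used by the real allocator */
        if (!vmem_back)
            return -ENOMEM;

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;   /* line 178 in the listing */
        vmem_back->list = vmemmap_list; /* push at the head */
        vmemmap_list = vmem_back;
        return 0;
    }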
185 static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
189 unsigned long start_pfn = page_to_pfn((struct page *)start);
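Lines 185-189 show altmap_cross_boundary(), which decides whether a candidate block would fall entirely inside the device-provided altmap pfn range. A hedged model of that test: the kernel derives start_pfn from the vmemmap address (line 189), while this model takes it directly, and the field names and inclusive end comparison are assumptions.

    #include <stdbool.h>

    /* Minimal altmap: a device-provided pfn range that backs struct pages. */
    struct vmem_altmap_model {
        unsigned long base_pfn;   /* first pfn the altmap covers */
        unsigned long end_pfn;    /* last pfn the altmap covers (assumed inclusive) */
    };

    /* True when nr_pfn pfns starting at start_pfn would not fit entirely
     * inside the altmap range, so the caller must fall back to a regular
     * allocation instead. */
    static bool altmap_cross_boundary(const struct vmem_altmap_model *altmap,
                                      unsigned long start_pfn, unsigned long nr_pfn)
    {
        if (start_pfn < altmap->base_pfn)
            return true;
        if (start_pfn + nr_pfn - 1 > altmap->end_pfn)
            return true;
        return false;
    }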
200 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
207 start = ALIGN_DOWN(start, page_size);
209 pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
211 for (; start < end; start += page_size) {
221 if (vmemmap_populated(start, page_size))
229 if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
243 if (vmemmap_list_populate(__pa(p), start, node)) {
261 start, start + page_size, p);
263 rc = vmemmap_create_mapping(start, page_size, __pa(p));
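Lines 200-263 are the body of vmemmap_populate(): align the start down to the mapping page size, skip spans whose subsections are already populated, prefer an altmap-backed allocation, record the block on the vmemmap list, and only then create the mapping. A condensed control-flow model follows; the helpers are stubs, the 16M mapping size is assumed, and the altmap_cross_boundary() check from line 229 is folded into altmap_alloc() returning NULL.

    #include <stdlib.h>

    #define MAPPING_SIZE (16UL << 20)   /* assumed: one 16M page per mapping */

    /* Stubs standing in for the kernel helpers named in the listing. */
    static int   vmemmap_populated(unsigned long addr, unsigned long size) { return 0; }
    static void *altmap_alloc(unsigned long size) { return NULL; } /* may fail */
    static int   vmemmap_list_populate(unsigned long pa, unsigned long va, int node) { return 0; }
    static int   vmemmap_create_mapping(unsigned long va, unsigned long size,
                                        unsigned long pa) { return 0; }

    static int populate_model(unsigned long start, unsigned long end, int node)
    {
        unsigned long page_size = MAPPING_SIZE;

        start &= ~(page_size - 1);                /* ALIGN_DOWN(start, page_size) */

        for (; start < end; start += page_size) {
            void *p;

            /* Another mapping already covers this span: nothing to do. */
            if (vmemmap_populated(start, page_size))
                continue;

            /* Prefer the device altmap; fall back to a regular allocation. */
            p = altmap_alloc(page_size);
            if (!p)
                p = aligned_alloc(page_size, page_size);
            if (!p)
                return -1;

            /* Track the block first so the free path can find it later. */
            if (vmemmap_list_populate((unsigned long)p, start, node))
                return -1;

            if (vmemmap_create_mapping(start, page_size, (unsigned long)p))
                return -1;
        }
        return 0;
    }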
275 static unsigned long vmemmap_list_free(unsigned long start)
283 if (vmem_back->virt_addr == start)
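Lines 275-283 are the lookup half of the bookkeeping: vmemmap_list_free() finds the entry whose virt_addr matches the span being torn down and returns the recorded physical address, with 0 meaning nothing was tracked. A sketch using a pointer-to-pointer walk so removing the head needs no special case; the kernel version also keeps the unlinked node around for reuse, which this model omits.

    struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
    };

    static struct vmemmap_backing *vmemmap_list;

    /* Find and unlink the entry recorded for this virtual span; return its
     * physical address, or 0 if the span was never tracked. */
    static unsigned long vmemmap_list_free(unsigned long start)
    {
        struct vmemmap_backing **pp, *vmem_back;

        /* Pointer-to-pointer walk: unlinking the head is not special. */
        for (pp = &vmemmap_list; (vmem_back = *pp) != NULL; pp = &vmem_back->list)
            if (vmem_back->virt_addr == start)    /* line 283's test */
                break;

        if (!vmem_back)
            return 0;

        *pp = vmem_back->list;      /* unlink */
        return vmem_back->phys;     /* caller releases the backing block */
    }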
305 void __ref vmemmap_free(unsigned long start, unsigned long end,
313 start = ALIGN_DOWN(start, page_size);
319 pr_debug("vmemmap_free %lx...%lx\n", start, end);
321 for (; start < end; start += page_size) {
331 if (vmemmap_populated(start, page_size))
334 addr = vmemmap_list_free(start);
360 vmemmap_remove_mapping(start, page_size);
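Lines 305-360 are the inverse path, vmemmap_free(): align down, leave any span that still backs a populated subsection alone (one large mapping can cover many subsections), look up the backing block in the list, release it, and finally remove the mapping. A condensed model of that loop, with stubbed helpers and the altmap-versus-regular-memory split folded into a single free_backing():

    #define MAPPING_SIZE (16UL << 20)   /* assumed: one 16M page per mapping */

    /* Stubs for the helpers named in the listing. */
    static int  vmemmap_populated(unsigned long addr, unsigned long size) { return 0; }
    static unsigned long vmemmap_list_free(unsigned long start) { return 0; }
    static void free_backing(unsigned long pa, unsigned long size) { }
    static void vmemmap_remove_mapping(unsigned long va, unsigned long size) { }

    static void vmemmap_free_model(unsigned long start, unsigned long end)
    {
        unsigned long page_size = MAPPING_SIZE;

        start &= ~(page_size - 1);                 /* ALIGN_DOWN(start, page_size) */

        for (; start < end; start += page_size) {
            unsigned long addr;

            /* This large mapping may still back live subsections elsewhere
             * in the span; tear it down only when nothing is populated. */
            if (vmemmap_populated(start, page_size))
                continue;

            addr = vmemmap_list_free(start);       /* 0: span was never tracked */
            if (!addr)
                continue;

            free_backing(addr, page_size);         /* altmap or page allocator */
            vmemmap_remove_mapping(start, page_size);
        }
    }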