Lines Matching refs:page

48 struct page *vmem_map;
52 struct page *zero_page_memmap_ptr; /* map entry for zero page */
59 struct page *page;
61 page = pte_page(pte);
62 addr = (unsigned long) page_address(page);
64 if (test_bit(PG_arch_1, &page->flags))
67 flush_icache_range(addr, addr + page_size(page));
68 set_bit(PG_arch_1, &page->flags); /* mark page as clean */
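The lines above implement a flush-once scheme: PG_arch_1 records whether the i-cache is already coherent with the d-cache for this page, so flush_icache_range() runs at most once per page. Below is a minimal user-space sketch of that pattern only; page_clean[], fake_flush() and sync_once() are made-up stand-ins for the page flag, the flush primitive and the sync routine, not kernel APIs.

#include <stdio.h>
#include <stdbool.h>

#define NPAGES 4

static bool page_clean[NPAGES];         /* stand-in for the PG_arch_1 bit */

static void fake_flush(int pfn)         /* stand-in for flush_icache_range() */
{
        printf("flushing i-cache for page %d\n", pfn);
}

static void sync_once(int pfn)
{
        if (page_clean[pfn])            /* already coherent: nothing to do */
                return;
        fake_flush(pfn);
        page_clean[pfn] = true;         /* mark page as clean */
}

int main(void)
{
        sync_once(2);                   /* first access flushes */
        sync_once(2);                   /* second access is a no-op */
        return 0;
}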
98 * store (which grows upwards) and install the gateway page which is
129 /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
161 * Thus EFI and the kernel may have different page sizes. It is
162 * therefore possible to have the initrd share the same page as
165 * To avoid freeing/using the wrong page (kernel sized) we:
187 * In this example, we must free page 8000 ONLY. So we must align up
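The comment above explains that EFI and the kernel may use different page sizes, so the initrd can share a kernel-sized page with memory still in use; the start of the region to free is therefore rounded up to a kernel page boundary. The user-space sketch below only illustrates that align-up step with assumed values (8KB kernel pages, a 0x7000 start echoing the comment's example); it is not the kernel's free path.

#include <stdio.h>

#define KERNEL_PAGE_SIZE 0x2000UL       /* assumed 8KB kernel pages */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        /* hypothetical initrd start sharing a kernel page with the kernel image */
        unsigned long initrd_start = 0x7000;

        printf("first kernel page safe to free starts at %#lx\n",
               ALIGN_UP(initrd_start, KERNEL_PAGE_SIZE));   /* 0x8000 */
        return 0;
}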
204 * This installs a clean page in the kernel's page table.
206 static struct page * __init
207 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
232 set_pte(pte, mk_pte(page, pgprot));
236 return page;
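put_kernel_page() installs one page into the kernel's page table by walking the table levels, allocating intermediate levels as needed, and setting the final PTE. The toy user-space model below (two levels, made-up sizes, hypothetical names install() and table_t) only illustrates the walk-then-set idea, not the real pgd/pud/pmd/pte code.

#include <stdio.h>
#include <stdlib.h>

#define LVL_BITS   4
#define LVL_SIZE   (1UL << LVL_BITS)
#define PAGE_SHIFT 12

typedef struct table { void *entry[LVL_SIZE]; } table_t;

/* install "page" for "address", allocating the second level on demand */
static int install(table_t *top, unsigned long address, void *page)
{
        unsigned long top_idx = (address >> (PAGE_SHIFT + LVL_BITS)) & (LVL_SIZE - 1);
        unsigned long pte_idx = (address >> PAGE_SHIFT) & (LVL_SIZE - 1);
        table_t *leaf = top->entry[top_idx];

        if (!leaf) {
                leaf = calloc(1, sizeof(*leaf));
                if (!leaf)
                        return -1;
                top->entry[top_idx] = leaf;
        }
        leaf->entry[pte_idx] = page;    /* the "set_pte()" step of the model */
        return 0;
}

int main(void)
{
        static table_t top;
        static int dummy_page;

        if (install(&top, 0x123000, &dummy_page) == 0)
                printf("installed mapping for 0x123000\n");
        return 0;
}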
242 struct page *page;
245 * Map the gate page twice: once read-only to export the ELF
246 * headers etc. and once as an execute-only page to enable
249 page = virt_to_page(ia64_imva(__start_gate_section));
250 put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
252 page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
253 put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
255 put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
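The gate page is mapped twice, at different virtual addresses with different protections. As a rough user-space analogy only (not the kernel mechanism), the snippet below maps one memfd-backed page at two addresses, one read-write and one read-execute; the name "gate-demo" and the chosen protections are arbitrary, and memfd_create() is Linux-specific.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        int fd = memfd_create("gate-demo", 0);

        if (fd < 0 || ftruncate(fd, psz) != 0)
                return 1;

        /* one writable view of the page ... */
        char *rw = mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* ... and a second, read+execute view of the same page */
        char *rx = mmap(NULL, psz, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);

        if (rw == MAP_FAILED || rx == MAP_FAILED)
                return 1;

        strcpy(rw, "one page, two views");
        printf("via %p (rw-): %s\nvia %p (r-x): %s\n", (void *)rw, rw, (void *)rx, rx);
        return 0;
}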
317 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
319 * virtual address space are implemented but if we pick a large enough page size
330 * The virtual page table has to cover the entire implemented address space within
333 * non-speculative accesses to the virtual page table, so the address range of the
334 * virtual page table itself needs to be covered by the virtual page table.
351 panic("Cannot build a big enough virtual-linear page table"
353 " Try using a smaller page size.\n");
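The check that ends in this panic enforces the constraint described above: the virtually mapped linear page table needs one PTE per page of the space it covers, and the table itself must fit inside (and be covered by) that space. The back-of-the-envelope calculation below uses assumed example values (50 implemented address bits, 16KB pages, 8-byte PTEs), not the kernel's actual macros.

#include <stdio.h>

int main(void)
{
        unsigned long impl_va_bits = 50;        /* assumed implemented VA bits */
        unsigned long page_shift   = 14;        /* e.g. 16KB pages */
        unsigned long pte_size     = 8;         /* one 8-byte entry per page */

        unsigned long covered  = 1UL << impl_va_bits;
        unsigned long nptes    = covered >> page_shift;
        unsigned long vmlpt_sz = nptes * pte_size;

        printf("VMLPT covering 2^%lu bytes with 2^%lu-byte pages needs %lu MB\n",
               impl_va_bits, page_shift, vmlpt_sz >> 20);

        /* the table must itself be mappable within the covered space */
        if (vmlpt_sz >= covered)
                printf("page size too small: table cannot cover itself\n");
        return 0;
}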
356 /* place the VMLPT at the end of each page-table mapped region: */
360 * Set the (virtually mapped linear) page table address. Bit
427 /* Found next valid vmem_map page */
432 end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
433 hole_next_pfn = end_address / sizeof(struct page);
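The two lines above recover a pfn from a pointer into the virtually mapped vmem_map array: the byte offset from the array base divided by sizeof(struct page). A tiny user-space illustration of that arithmetic follows, with the hypothetical fake_page type standing in for struct page.

#include <stdio.h>

struct fake_page { unsigned long flags; void *lru[2]; };

int main(void)
{
        struct fake_page map[16];               /* stand-in for vmem_map */
        struct fake_page *p = &map[11];         /* "next valid vmem_map page" */

        unsigned long offset = (unsigned long)p - (unsigned long)map;
        unsigned long pfn = offset / sizeof(struct fake_page);

        printf("pfn = %lu\n", pfn);             /* prints 11 */
        return 0;
}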
440 struct page *map_start, *map_end;
490 void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
492 if (!page)
494 set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
507 struct page *start;
508 struct page *end;
517 struct page *map_start, *map_end;
529 * We have to initialize "out of bounds" struct page elements that fit completely
533 map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
535 / sizeof(struct page));
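The adjustment above widens the [map_start, map_end) range so that every struct page lying entirely within an already backed page is initialized as well: the offset of map_start inside its backing page, divided by sizeof(struct page), is the number of whole elements to step back over. A user-space sketch of that pointer arithmetic, with made-up sizes and the hypothetical fake_page type:

#include <stdio.h>

#define BACKING_PAGE_SIZE 4096UL
struct fake_page { unsigned long flags; void *data[7]; };   /* 64 bytes, for the example */

int main(void)
{
        /* pretend map_start points 640 bytes into its backing page */
        unsigned long map_start = 0x100000 + 640;

        unsigned long preceding = (map_start & (BACKING_PAGE_SIZE - 1))
                                  / sizeof(struct fake_page);
        map_start -= preceding * sizeof(struct fake_page);

        printf("start moved back by %lu elements to %#lx\n", preceding, map_start);
        return 0;
}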
552 struct page *start;
573 struct page *pg = pfn_to_page(pfn);