Lines Matching defs:page
408 * successfully (and before the addresses are expected to cause a page fault
459 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
473 struct page *page = pages[*nr];
477 if (WARN_ON(!page))
479 if (WARN_ON(!pfn_valid(page_to_pfn(page))))
482 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
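Read together, the vmap_pte_range() fragments above (source lines 459 through 482) install one kernel PTE per PAGE_SIZE slot of the requested range. A simplified sketch of that loop, omitting the page-table modification bookkeeping that follows *nr in the signatures above, since that detail varies by kernel version:

    static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                              pgprot_t prot, struct page **pages, int *nr)
    {
            pte_t *pte = pte_alloc_kernel(pmd, addr);

            if (!pte)
                    return -ENOMEM;
            do {
                    struct page *page = pages[*nr];

                    if (WARN_ON(!page))
                            return -ENOMEM;
                    if (WARN_ON(!pfn_valid(page_to_pfn(page))))
                            return -EINVAL;
                    /* Point the kernel page table (init_mm) at this page. */
                    set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                    (*nr)++;
            } while (pte++, addr += PAGE_SIZE, addr != end);
            return 0;
    }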
490 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
508 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
526 unsigned long end, pgprot_t prot, struct page **pages, int *nr,
544 pgprot_t prot, struct page **pages)
580 pgprot_t prot, struct page **pages, unsigned int page_shift)
606 pgprot_t prot, struct page **pages, unsigned int page_shift)
620 * @prot: page protection flags to use
629 pgprot_t prot, struct page **pages, unsigned int page_shift)
655 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
656 * return the tail page that corresponds to the base page address, which
659 struct page *vmalloc_to_page(const void *vmalloc_addr)
662 struct page *page = NULL;
709 page = pte_page(pte);
711 return page;
716 * Map a vmalloc()-space virtual address to the physical page frame number.
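vmalloc_to_page() (source line 659) and vmalloc_to_pfn() (source line 716) are the lookup helpers for resolving a vmalloc address back to its backing page. A minimal, hypothetical usage sketch (the buffer and offsets are illustrative only):

    /* Hypothetical helper: report the page backing one offset of a vmalloc'd buffer. */
    static int show_backing_page(void)
    {
            void *buf = vmalloc(4 * PAGE_SIZE);

            if (!buf)
                    return -ENOMEM;

            /* Resolve the struct page and PFN behind the third PAGE_SIZE chunk. */
            pr_info("page %p pfn %lx\n",
                    vmalloc_to_page(buf + 2 * PAGE_SIZE),
                    vmalloc_to_pfn(buf + 2 * PAGE_SIZE));

            vfree(buf);
            return 0;
    }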
1688 * There is a tradeoff here: a larger number will cover more kernel page tables
2018 * @gfp_mask: flags for the page level allocator
2315 * to amortize TLB flushing overheads. What this means is that any page you
2318 * still referencing that page (additional to the regular 1:1 kernel mapping).
2382 void *vm_map_ram(struct page **pages, unsigned int count, int node)
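The fragment at source lines 2315-2318 belongs to the discussion of lazy TLB flushing in the vmap layer: unmaps are batched to amortize flush cost, so stale TLB entries for a page can outlive its mapping until the deferred flush runs. vm_map_ram() (source line 2382) is the fast, transient mapping interface built on that machinery. A hypothetical caller that already owns an array of pages:

    /* Hypothetical: temporarily map caller-owned pages and zero them. */
    static int zero_pages_tmp(struct page **pages, unsigned int nr)
    {
            void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);

            if (!va)
                    return -ENOMEM;

            memset(va, 0, (size_t)nr * PAGE_SIZE);

            /* Unmap; the vmap layer may defer the actual TLB flush. */
            vm_unmap_ram(va, nr);
            return 0;
    }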
2710 int (*set_direct_map)(struct page *page))
2834 struct page *page = vm->pages[i];
2836 BUG_ON(!page);
2837 mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
2842 __free_page(page);
2856 * which was created from the page array passed to vmap().
2881 * @pages: array of page pointers
2884 * @prot: page protection for the mapping
2894 void *vmap(struct page **pages, unsigned int count,
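vmap() (source line 2894) builds one contiguous kernel virtual mapping over an arbitrary page array, and vunmap() tears that mapping down without freeing the pages; the vm_struct mentioned at source line 2856 is created and destroyed on the caller's behalf. A short, hypothetical example:

    /* Hypothetical: map 'count' caller-owned pages read/write in kernel space. */
    void *va = vmap(pages, count, VM_MAP, PAGE_KERNEL);

    if (!va)
            return -ENOMEM;

    /* ... access the pages through 'va' ... */

    vunmap(va);     /* the pages themselves remain owned by the caller */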
2963 * @prot: page protection for the mapping
2993 unsigned int order, unsigned int nr_pages, struct page **pages)
2998 struct page *page;
3003 * the page array is partly or not at all populated due
3004 * to failures, fall back to a single page allocator that is
3042 * fall back to a single page allocator.
3063 page = alloc_pages(alloc_gfp, order);
3065 page = alloc_pages_node(nid, alloc_gfp, order);
3066 if (unlikely(!page)) {
3079 * small-page vmallocs). Some drivers do their own refcounting
3080 * on vmalloc_to_page() pages, some use page->mapping,
3081 * page->lru, etc.
3084 split_page(page, order);
3087 * Careful, we allocate and map page-order pages, but
3088 * tracking is done per PAGE_SIZE page so as to keep the
3092 pages[nr_allocated + i] = page + i;
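Source lines 3063-3092 are the high-order path of the internal page-array allocator: a single order-N allocation is split into independent order-0 pages so the rest of vmalloc can keep accounting per PAGE_SIZE, and so callers doing their own refcounting via vmalloc_to_page() (the concern at source lines 3079-3081) see ordinary pages. Reduced to its core, the pattern in those fragments is:

    /* Sketch of the fragments above; error handling and accounting trimmed. */
    page = alloc_pages_node(nid, alloc_gfp, order);
    if (!page)
            break;

    /* Turn one order-N block into 1 << order independently tracked pages. */
    split_page(page, order);
    for (i = 0; i < (1U << order); i++)
            pages[nr_allocated + i] = page + i;

    nr_allocated += 1U << order;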
3115 array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
3130 "vmalloc error: size %lu, failed to allocate page array size %lu",
3160 * - insufficient huge page-order pages
3162 * Since we always retry allocations at order-0 in the huge page
3173 * page tables allocations ignore external gfp mask, enforce it
3213 * @gfp_mask: flags for the page level allocator
3219 * Allocate enough pages to cover @size from the page level
3367 * @gfp_mask: flags for the page level allocator
3371 * Allocate enough pages to cover @size from the page level allocator with
3408 * Allocate enough pages to cover @size from the page level
3411 * For tight control over page level allocator and protection flags
3426 * @gfp_mask: flags for the page level allocator
3428 * Allocate enough pages to cover @size from the page level
3447 * Allocate enough pages to cover @size from the page level
3451 * For tight control over page level allocator and protection flags
3486 * Allocate enough pages to cover @size from the page level
3489 * For tight control over page level allocator and protection flags
3506 * Allocate enough pages to cover @size from the page level
3536 * page level allocator and map them into contiguous kernel virtual space.
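The kerneldoc fragments between source lines 3213 and 3536 all describe variants of one contract: allocate enough order-0 pages to cover @size from the page level allocator and map them into contiguous kernel virtual space, with __vmalloc() and the *_node() forms offered where the caller needs tight control over the gfp mask, node, or protection flags. The common calling pattern, as a hypothetical example (struct foo and nents are illustrative):

    /* Virtually (not physically) contiguous; may sleep, so not for atomic context. */
    struct foo *tbl = vzalloc(nents * sizeof(*tbl));

    if (!tbl)
            return -ENOMEM;

    /* ... use tbl like any other kernel buffer ... */

    vfree(tbl);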
3590 * If the page is not present, fill zero.
3598 struct page *page;
3608 page = vmalloc_to_page(addr);
3617 if (page)
3618 copied = copy_page_to_iter_nofault(page, offset,
3881 struct page *page = vmalloc_to_page(kaddr);
3884 ret = vm_insert_page(vma, uaddr, page);
3902 * @pgoff: number of pages into addr before first page to map
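Source lines 3881-3884 are the core of the userspace-mapping path: each kernel virtual page is resolved with vmalloc_to_page() and inserted into the user VMA with vm_insert_page(). The exported wrapper, remap_vmalloc_range(), is what a driver's mmap handler would typically call; a hypothetical handler, assuming the buffer was allocated with vmalloc_user() so the area carries VM_USERMAP:

    /* Hypothetical driver state: a sufficiently large vmalloc_user() buffer. */
    static void *foo_buf;

    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
    }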
4305 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",