Lines Matching defs:page
48 struct page *mem_map;
75 struct page *page;
84 page = virt_to_head_page(objp);
90 if (PageSlab(page))
94 * If it's not a compound page, see if we have a matching VMA
99 if (!PageCompound(page)) {
111 return page_size(page);
191 struct page *vmalloc_to_page(const void *addr)
228 * Allocate enough pages to cover @size from the page level
231 * For tight control over page level allocator and protection flags
245 * Allocate enough pages to cover @size from the page level
249 * For tight control over page level allocator and protection flags
263 * Allocate enough pages to cover @size from the page level
266 * For tight control over page level allocator and protection flags
280 * Allocate enough pages to cover @size from the page level
284 * For tight control over page level allocator and protection flags
298 * page level allocator and map them into contiguous kernel virtual space.
326 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
339 void *vm_map_ram(struct page **pages, unsigned int count, int node)
364 struct page *page)
370 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
377 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
512 struct page *page = virt_to_page(from);
515 put_page(page);
559 * and tree and add to the address space's page tree also if not an anonymous
560 * page
1006 * - note that this may not return a page-aligned address if the object
1007 * we're allocating is smaller than a page
1013 /* we don't want to allocate a power-of-2 sized page set */
1616 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1778 /* found one - only interested if it's shared out of the page
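The matches at lines 75-111 trace how an arbitrary kernel pointer is sized from its struct page: resolve the head page with virt_to_head_page(), let the slab allocator answer for slab-backed objects, and fall back to page_size() for compound pages (the comment at line 94 notes that a matching VMA is consulted in the non-compound case). A minimal sketch of that flow, assuming a made-up name object_size_of() and a simplified non-compound fallback:

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/slab.h>

/*
 * Illustrative sketch only: mirrors the flow in the matched lines
 * (virt_to_head_page -> PageSlab -> PageCompound -> page_size).
 * object_size_of() is a hypothetical name; the real code also checks
 * for a matching VMA before falling back to a single page.
 */
static size_t object_size_of(const void *objp)
{
	struct page *page;

	if (!objp)
		return 0;

	page = virt_to_head_page(objp);

	/* Slab-backed object: the slab allocator knows the usable size. */
	if (PageSlab(page))
		return ksize(objp);

	/* Compound (higher-order) page: the allocation spans the whole page set. */
	if (PageCompound(page))
		return page_size(page);

	/* Simplified fallback for a plain zero-order page. */
	return PAGE_SIZE;
}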
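The kerneldoc fragments at lines 228-298 repeat the same contract for the vmalloc() family: allocate enough pages from the page-level allocator to cover @size and map them into contiguous kernel virtual space. A hedged usage sketch; the demo_vmalloc_buffer() wrapper is invented for illustration:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Sketch of the allocation the kerneldoc describes: page-backed, virtually contiguous. */
static int demo_vmalloc_buffer(size_t size)
{
	void *buf = vmalloc(size);

	if (!buf)
		return -ENOMEM;

	memset(buf, 0, size);	/* vzalloc(size) would combine these two steps */
	vfree(buf);
	return 0;
}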
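Lines 191, 326, and 339 name the mapping helpers: vmap() stitches an array of struct page pointers into one virtually contiguous kernel range, vm_map_ram() is the lighter-weight per-CPU variant, and vmalloc_to_page() recovers the struct page behind such an address. A sketch of the vmap() round trip under those signatures; NR_DEMO_PAGES and the function names are assumptions:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/vmalloc.h>

#define NR_DEMO_PAGES 4	/* illustrative page count */

/* Sketch: map a few discontiguous pages into one virtually contiguous range. */
static void *map_demo_pages(struct page **pages)
{
	int i;
	void *va;

	for (i = 0; i < NR_DEMO_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err;
	}

	/* VM_MAP marks the area as vmap()ed; PAGE_KERNEL gives an ordinary RW mapping. */
	va = vmap(pages, NR_DEMO_PAGES, VM_MAP, PAGE_KERNEL);
	if (!va)
		goto err;

	/* Round trip: the first virtual page resolves back to pages[0]. */
	WARN_ON(vmalloc_to_page(va) != pages[0]);
	return va;

err:
	while (i-- > 0)
		__free_page(pages[i]);
	return NULL;
}

static void unmap_demo_pages(void *va, struct page **pages)
{
	int i;

	vunmap(va);
	for (i = 0; i < NR_DEMO_PAGES; i++)
		__free_page(pages[i]);
}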
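Lines 364-377 cover the helpers a driver uses to hand kernel pages to user space from its mmap handler: vm_insert_page() for a single page, vm_map_pages()/vm_map_pages_zero() for an array. A minimal sketch of an .mmap implementation built on vm_map_pages(); the demo_* names are hypothetical driver state, not anything from the matched file:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

/* Hypothetical driver state: pages assumed to be allocated elsewhere. */
static struct page *demo_pages[16];
static const unsigned long demo_nr_pages = 16;

/*
 * Sketch of an mmap handler: vm_map_pages() inserts the whole page array
 * into the caller's VMA, checking the VMA's size and offset for us.
 */
static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, demo_pages, demo_nr_pages);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.mmap  = demo_mmap,
};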
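Finally, the matches at lines 512-515 show the pairing of virt_to_page(), which maps a directly mapped kernel virtual address to its struct page, with put_page(), which drops a reference on it. A sketch of that pattern with the balancing get_page() added so the refcounting is self-contained; touch_backing_page() is an invented name:

#include <linux/mm.h>

/* Sketch: take and release a reference on the page backing a lowmem address. */
static void touch_backing_page(void *from)
{
	struct page *page = virt_to_page(from);

	get_page(page);		/* pin the page while it is in use */
	/* ... use the page ... */
	put_page(page);		/* release the reference taken above */
}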