Lines matching defs:page in fs/exec.c

14  * "current->executable", and page faults do the actual loading. Clean.
198 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
201 struct page *page;
228 &page, NULL);
236 return page;
239 static void put_arg_page(struct page *page)
241 put_page(page);
249 struct page *page)
251 flush_cache_page(bprm->vma, pos, page_to_pfn(page));
309 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
312 struct page *page;
314 page = bprm->page[pos / PAGE_SIZE];
315 if (!page && write) {
316 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
317 if (!page)
319 bprm->page[pos / PAGE_SIZE] = page;
322 return page;
325 static void put_arg_page(struct page *page)
331 if (bprm->page[i]) {
332 __free_page(bprm->page[i]);
333 bprm->page[i] = NULL;
346 struct page *page)
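
The second group of hits is the !CONFIG_MMU counterpart. With no mm to fault pages into, argument strings are staged in the fixed bprm->page[MAX_ARG_PAGES] array: get_arg_page() allocates a slot on first write, put_arg_page() is a no-op, flush_arg_page() does nothing, and the slots are released one by one during teardown (the bprm->page[i] block above). Reassembled from those fragments:

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                /* Allocate the slot lazily the first time it is written. */
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
        /* Nothing to drop: slots stay allocated until exec tears them down. */
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}
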
522 * ensures the destination page is created and not swapped out.
527 struct page *kmapped_page = NULL;
582 struct page *page;
584 page = get_arg_page(bprm, pos, 1);
585 if (!page) {
595 kmapped_page = page;
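
These hits are from copy_strings(), which walks the user-space argv/envp pointers and copies each string into the argument area, pinning the destination with get_arg_page(..., 1) and kmapping it (kmapped_page caches that mapping across iterations). A hypothetical, stripped-down helper showing only the per-chunk pattern; the name and signature are illustrative and do not exist in fs/exec.c:

/*
 * Illustrative only: the per-chunk core of copy_strings().  The real
 * code caches the kmap across iterations, checks argument limits and
 * handles fatal signals.
 */
static int copy_user_chunk_to_argpage(struct linux_binprm *bprm,
                unsigned long pos, const char __user *str,
                unsigned int bytes_to_copy)
{
        struct page *page;
        char *kaddr;
        int ret = 0;

        page = get_arg_page(bprm, pos, 1);      /* create/pin destination */
        if (!page)
                return -E2BIG;

        flush_arg_page(bprm, pos & PAGE_MASK, page);
        kaddr = kmap_local_page(page);
        if (copy_from_user(kaddr + offset_in_page(pos), str, bytes_to_copy))
                ret = -EFAULT;
        flush_dcache_page(page);
        kunmap_local(kaddr);
        put_arg_page(page);

        return ret;
}
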
638 struct page *page;
644 page = get_arg_page(bprm, pos, 1);
645 if (!page)
647 flush_arg_page(bprm, pos & PAGE_MASK, page);
648 memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
649 put_arg_page(page);
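
copy_string_kernel() is the same operation for a string that is already in kernel memory (for example the interpreter path a binfmt handler pushes), so it can use memcpy_to_page() instead of copy_from_user(). A close paraphrase built around the fragments above, with the MAX_ARG_STRLEN/bprm->argmin bounds checks trimmed:

int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
        int len = strnlen(arg, MAX_ARG_STRLEN) + 1;     /* include the NUL */
        unsigned long pos = bprm->p;

        /* Work backwards from the current top of the argument area. */
        arg += len;
        bprm->p -= len;

        while (len > 0) {
                unsigned int bytes_to_copy = min_t(unsigned int, len,
                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
                struct page *page;

                pos -= bytes_to_copy;
                arg -= bytes_to_copy;
                len -= bytes_to_copy;

                page = get_arg_page(bprm, pos, 1);
                if (!page)
                        return -E2BIG;
                flush_arg_page(bprm, pos & PAGE_MASK, page);
                memcpy_to_page(page, offset_in_page(pos), arg, bytes_to_copy);
                put_arg_page(page);
        }

        return 0;
}
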
680 * 3) Move vma's page tables to the new range.
713 * move the page tables downwards, on failure we rely on
849 * Align this down to a page boundary as expand_stack
889 char *src = kmap_local_page(bprm->page[index]) + offset;
1686 struct page *page;
1693 page = get_arg_page(bprm, bprm->p, 0);
1694 if (!page) {
1698 kaddr = kmap_local_page(page);
1705 put_arg_page(page);
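
The final hits are from remove_arg_zero(), a read-only user of get_arg_page() (write == 0): interpreter handlers such as binfmt_script use it to skip past the original argv[0] before pushing replacement arguments. Simplified here to the case where argv[0] does not cross a page boundary; the real function loops while it does:

int remove_arg_zero(struct linux_binprm *bprm)
{
        unsigned long offset = bprm->p & ~PAGE_MASK;
        struct page *page;
        char *kaddr;

        if (!bprm->argc)
                return 0;

        page = get_arg_page(bprm, bprm->p, 0);  /* look up, don't create */
        if (!page)
                return -EFAULT;

        kaddr = kmap_local_page(page);
        /* Advance bprm->p over argv[0] up to its terminating NUL ... */
        for (; offset < PAGE_SIZE && kaddr[offset]; offset++, bprm->p++)
                ;
        kunmap_local(kaddr);
        put_arg_page(page);

        bprm->p++;              /* ... and then step past the NUL itself. */
        bprm->argc--;
        return 0;
}
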