Lines Matching defs:page
163 * page fault) and maybe some other commands.
195 * FIXME Return the number of pages we have migrated, again we need to
544 struct page *page;
558 page = hmm_pfn_to_page(range->hmm_pfns[0]);
561 * page as a compound page. Otherwise, the PTE protections might not be
562 * consistent (e.g., CPU only maps part of a compound page).
563 * Note that the underlying page might still be larger than the
564 * CPU mapping (e.g., a PUD sized compound page partially mapped with
565 * a PMD sized page table entry).
570 args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
572 args->p.size = 1UL << args->p.page;
574 page -= (addr - args->p.addr) >> PAGE_SHIFT;
576 if (is_device_private_page(page))
577 args->p.phys[0] = nouveau_dmem_page_addr(page) |
581 args->p.phys[0] = page_to_phys(page) |
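The matches at 544-581 appear to come from nouveau_hmm_convert_pfn(), which translates the first hmm_range PFN into nouveau's pfnmap encoding. Below is a minimal sketch of the surrounding function under that assumption; the HMM_PFN_VALID/HMM_PFN_WRITE checks and the NVIF_VMM_PFNMAP_V0_* flag names are not shown by the listing and are assumed from the driver's conventions, so only the quoted lines above are verbatim.

/* Hedged sketch, not verbatim source. */
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
                                    struct hmm_range *range,
                                    struct nouveau_pfnmap_args *args)
{
        struct page *page;

        if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
                args->p.phys[0] = 0;    /* no valid PFN: nothing to map */
                return;
        }

        page = hmm_pfn_to_page(range->hmm_pfns[0]);

        /* Use a large GPU mapping only when the CPU maps the compound page
         * at that order, so PTE protections stay consistent. */
        if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
                unsigned long addr = args->p.addr;

                args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
                               PAGE_SHIFT;
                args->p.size = 1UL << args->p.page;
                args->p.addr &= ~(args->p.size - 1);

                /* Step back to the head page of the aligned mapping. */
                page -= (addr - args->p.addr) >> PAGE_SHIFT;
        }

        if (is_device_private_page(page))
                args->p.phys[0] = nouveau_dmem_page_addr(page) |
                                  NVIF_VMM_PFNMAP_V0_V |
                                  NVIF_VMM_PFNMAP_V0_VRAM;
        else
                args->p.phys[0] = page_to_phys(page) |
                                  NVIF_VMM_PFNMAP_V0_V |
                                  NVIF_VMM_PFNMAP_V0_HOST;

        if (range->hmm_pfns[0] & HMM_PFN_WRITE)
                args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}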
596 struct page *page;
616 &page, drm->dev);
618 if (ret <= 0 || !page) {
630 /* Map the page on the GPU. */
631 args->p.page = 12;
634 args->p.phys[0] = page_to_phys(page) |
643 unlock_page(page);
644 put_page(page);
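The matches at 596-644 appear to be from nouveau_atomic_range_fault(), where a page made device-exclusive is mapped for the GPU at small-page granularity. A hedged sketch of the mapping step those lines belong to; the extra args fields, the NVIF_VMM_PFNMAP_V0_* flags and the nvif_object_ioctl() call are assumptions, not verbatim source.

        /* Map the page on the GPU. */
        args->p.page = 12;                  /* log2(4 KiB): one small GPU page */
        args->p.size = PAGE_SIZE;
        args->p.addr = start;
        args->p.phys[0] = page_to_phys(page) |
                          NVIF_VMM_PFNMAP_V0_V |
                          NVIF_VMM_PFNMAP_V0_W |
                          NVIF_VMM_PFNMAP_V0_A |    /* atomic access */
                          NVIF_VMM_PFNMAP_V0_HOST;

        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);

        /* Drop the exclusive-access reference once the mapping is installed. */
        unlock_page(page);
        put_page(page);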
805 args.i.p.page = PAGE_SHIFT;
851 * But if a large page is mapped, make sure subsequent
911 args->p.page = PAGE_SHIFT;
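In the last two matches (805, 911) the .page field again carries the log2 of the mapping granularity rather than a struct page pointer; a minimal illustration under that assumption:

        /* Assumed field meaning: .page is a size exponent. PAGE_SHIFT (12 on
         * most configurations) selects CPU-page granularity, matching the
         * literal 12 used at line 631. */
        args->p.page = PAGE_SHIFT;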