Lines Matching refs:page (lines referencing "page" in drivers/gpu/drm/nouveau/nouveau_dmem.c)
47 * nouveau to be more page-like (not necessarily with the system page size but a
48 * bigger page size) at the lowest level and have some shim layer on top that would
85 struct page *free_pages;
89 static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
91 return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
94 static struct nouveau_drm *page_to_drm(struct page *page)
96 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
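The two helpers above (lines 89-96) recover driver state from a bare struct page: every ZONE_DEVICE page carries a pointer to its dev_pagemap in page->pgmap, and since nouveau embeds that pagemap inside its nouveau_dmem_chunk, container_of() walks back from the embedded member to the enclosing chunk. A minimal userspace sketch of the same embed-and-walk-back pattern, with hypothetical names (my_pagemap, my_chunk) and the simplified form of container_of:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of: recover the enclosing struct from a pointer
     * to one of its members, as nouveau_page_to_chunk() does above. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct my_pagemap { int dummy; };       /* stand-in for struct dev_pagemap */

    struct my_chunk {                       /* stand-in for nouveau_dmem_chunk */
            int id;
            struct my_pagemap pagemap;      /* embedded, like chunk->pagemap */
    };

    int main(void)
    {
            struct my_chunk chunk = { .id = 42 };
            /* This is what page->pgmap would point at for a page of this chunk. */
            struct my_pagemap *pgmap = &chunk.pagemap;

            struct my_chunk *back = container_of(pgmap, struct my_chunk, pagemap);
            printf("chunk id = %d\n", back->id);    /* prints 42 */
            return 0;
    }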
101 unsigned long nouveau_dmem_page_addr(struct page *page)
103 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
104 unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
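nouveau_dmem_page_addr() (lines 101-104; the subtrahend on the following, unmatched source line is presumably the start of the chunk's pagemap range) turns a page into a byte offset inside its chunk: the page's physical address (pfn << PAGE_SHIFT) minus the physical address where the chunk's range begins. A small worked example of that arithmetic, with made-up numbers:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, the common case */

    int main(void)
    {
            /* Hypothetical chunk whose pagemap range starts at pfn 0x100000,
             * and a device page at pfn 0x100003 within it. */
            unsigned long range_start = 0x100000UL << PAGE_SHIFT;
            unsigned long pfn = 0x100003UL;

            unsigned long off = (pfn << PAGE_SHIFT) - range_start;
            printf("offset into chunk = 0x%lx\n", off);  /* 0x3000: the 4th page */
            return 0;
    }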
110 static void nouveau_dmem_page_free(struct page *page)
112 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
116 page->zone_device_data = dmem->free_pages;
117 dmem->free_pages = page;
141 static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
142 struct page *dpage, dma_addr_t *dma_addr)
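nouveau_dmem_copy_one() takes a source (device) page, a destination (system) page, and a dma_addr_t out-parameter, which points at the usual pattern: map the system page for DMA, then let the GPU's copy engine move the bytes. A hedged, kernel-style fragment of just the mapping step (not standalone; dma_map_page() and dma_mapping_error() are real DMA-API calls, while "dev" and the error value are assumptions here):

    /* Sketch only: make the destination system page addressable by the GPU.
     * dev would be the GPU's struct device. */
    *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, *dma_addr))
            return -EIO;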
163 struct nouveau_drm *drm = page_to_drm(vmf->page);
167 struct page *spage, *dpage;
178 .fault_page = vmf->page,
184 * than just one page on a CPU fault. When such a fault happens it is very
185 * likely that more of the surrounding pages will CPU-fault too.
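Lines 163-185 come from the CPU-fault path (the migrate_to_ram handler): when the CPU touches a device-private page, the driver fills a struct migrate_vma, including .fault_page = vmf->page (line 178), and migrates the data back into a freshly allocated system page; the comment is a FIXME noting that migrating a neighborhood of pages per fault would amortize the cost better. A hedged sketch of that flow built on the real migrate_vma_setup()/migrate_vma_pages()/migrate_vma_finalize() API; the handler name, the owner cookie, and the elided copy step are assumptions:

    /* Sketch, not the driver's actual handler: migrate one device-private
     * page back to system RAM on CPU fault. */
    static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
    {
            unsigned long src = 0, dst = 0;
            struct migrate_vma args = {
                    .vma         = vmf->vma,
                    .start       = vmf->address,
                    .end         = vmf->address + PAGE_SIZE,
                    .src         = &src,
                    .dst         = &dst,
                    .pgmap_owner = example_owner,   /* assumption: driver's owner cookie */
                    .flags       = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
                    .fault_page  = vmf->page,
            };

            if (migrate_vma_setup(&args) || !args.cpages)
                    return VM_FAULT_SIGBUS;

            /* Allocate a system page, copy VRAM -> RAM (device-specific),
             * then publish it as dst = migrate_pfn(page_to_pfn(dpage)). */

            migrate_vma_pages(&args);
            migrate_vma_finalize(&args);
            return 0;
    }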
227 nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
231 struct page *page;
279 page = pfn_to_page(pfn_first);
281 for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
282 page->zone_device_data = drm->dmem->free_pages;
283 drm->dmem->free_pages = page;
285 *ppage = page;
306 static struct page *
310 struct page *page = NULL;
315 page = drm->dmem->free_pages;
316 drm->dmem->free_pages = page->zone_device_data;
317 chunk = nouveau_page_to_chunk(page);
322 ret = nouveau_dmem_chunk_alloc(drm, &page);
327 zone_device_page_init(page);
328 return page;
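Lines 279-328 are the VRAM page allocator. nouveau_dmem_chunk_alloc() carves a fresh chunk into pages, threads the first DMEM_CHUNK_NPAGES - 1 of them onto a singly linked free list through page->zone_device_data, and hands the last page straight back through *ppage; the allocator starting at line 306 (its name sits on an unmatched line) pops from that list and only carves a new chunk when the list is empty; nouveau_dmem_page_free() (lines 110-117) pushes pages back. A compact, runnable userspace model of the same LIFO free-list discipline, with hypothetical names (fake_page, chunk_alloc, page_alloc, page_free) and a tiny chunk size:

    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK_NPAGES 4          /* tiny stand-in for DMEM_CHUNK_NPAGES */

    struct fake_page {              /* stand-in for struct page */
            void *zone_device_data; /* doubles as the free-list next pointer */
            int idx;
    };

    static struct fake_page *free_pages;    /* list head, as in the struct at line 85 */

    /* Carve a chunk: thread all pages but the last onto the free list and
     * return the last one directly, like nouveau_dmem_chunk_alloc(). */
    static struct fake_page *chunk_alloc(void)
    {
            struct fake_page *pages = calloc(CHUNK_NPAGES, sizeof(*pages));
            for (int i = 0; i < CHUNK_NPAGES - 1; ++i) {
                    pages[i].idx = i;
                    pages[i].zone_device_data = free_pages;
                    free_pages = &pages[i];
            }
            pages[CHUNK_NPAGES - 1].idx = CHUNK_NPAGES - 1;
            return &pages[CHUNK_NPAGES - 1];    /* chunk memory is leaked; demo only */
    }

    /* Pop from the free list; fall back to carving when it is empty. */
    static struct fake_page *page_alloc(void)
    {
            struct fake_page *page = free_pages;
            if (page) {
                    free_pages = page->zone_device_data;
                    return page;
            }
            return chunk_alloc();
    }

    /* Push a page back, as nouveau_dmem_page_free() does. */
    static void page_free(struct fake_page *page)
    {
            page->zone_device_data = free_pages;
            free_pages = page;
    }

    int main(void)
    {
            struct fake_page *a = page_alloc();     /* empty list: carves a chunk */
            struct fake_page *b = page_alloc();     /* pops from the list */
            printf("a=%d b=%d\n", a->idx, b->idx);  /* a=3 b=2 */
            page_free(a);
            printf("next=%d\n", page_alloc()->idx); /* 3 again: LIFO reuse */
            return 0;
    }

In the real driver the list manipulation is serialized (hence the _locked suffix on the free helper at line 332), and a freshly popped or carved page is additionally initialized with zone_device_page_init() (line 327) before being handed out.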
332 nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
334 unlock_page(page);
335 put_page(page);
390 struct page *dpage;
620 struct page *dpage, *spage;