Lines matching defs:chunk in drivers/gpu/drm/nouveau/nouveau_dmem.c (the leading number on each line is its line number in that file)
96 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
98 return chunk->drm;
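The two matches above (96, 98) sit in a small lookup helper that maps a device-private page back to the nouveau_drm instance that owns it. A minimal sketch of that pair of helpers, assuming the chunk is recovered with container_of() from the page's dev_pagemap; the pgmap accessor and the wrapper name outside the matched lines are assumptions:

    /* Sketch: recover the owning chunk, then the drm device, from a
     * ZONE_DEVICE page.  Assumes struct nouveau_dmem_chunk embeds the
     * struct dev_pagemap that backs the page. */
    static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
    {
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
    }

    static struct nouveau_drm *page_to_drm(struct page *page)
    {
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
    }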
103 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
105 chunk->pagemap.range.start;
107 return chunk->bo->offset + off;
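The match at 105 is the continuation of a two-line expression; together with 103 and 107 it reads as a helper that turns a device-private page into an address inside the chunk's VRAM buffer object. A sketch under that reading (the wrapper name is not in the matched lines and is an assumption):

    /* Sketch: VRAM address of a device page = BO base + byte offset of the
     * page's PFN within the chunk's pagemap range. */
    unsigned long nouveau_dmem_page_addr(struct page *page)
    {
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                    chunk->pagemap.range.start;

        return chunk->bo->offset + off;
    }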
112 struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
113 struct nouveau_dmem *dmem = chunk->drm->dmem;
119 WARN_ON(!chunk->callocated);
120 chunk->callocated--;
122 * FIXME when chunk->callocated reach 0 we should add the chunk to
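Lines 112-122 are inside the device page free callback: each chunk counts the pages it has handed out (callocated), freeing a page decrements that count under the dmem lock, and the truncated FIXME notes that a fully drained chunk is never reclaimed yet. A hedged sketch of that shape; the free-list handling and lock name are assumptions:

    static void nouveau_dmem_page_free(struct page *page)
    {
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        /* Return the page to a simple per-device free list. */
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME: when chunk->callocated reaches 0 the chunk could go on a
         * reclaim list so it can be freed under memory pressure.
         */
        spin_unlock(&dmem->lock);
    }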
229 struct nouveau_dmem_chunk *chunk;
236 chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
237 if (chunk == NULL) {
250 chunk->drm = drm;
251 chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
252 chunk->pagemap.range.start = res->start;
253 chunk->pagemap.range.end = res->end;
254 chunk->pagemap.nr_range = 1;
255 chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
256 chunk->pagemap.owner = drm->dev;
260 &chunk->bo);
264 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
268 ptr = memremap_pages(&chunk->pagemap, numa_node_id());
275 list_add(&chunk->list, &drm->dmem->chunks);
278 pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
286 chunk->callocated++;
295 nouveau_bo_unpin(chunk->bo);
297 nouveau_bo_ref(NULL, &chunk->bo);
299 release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
301 kfree(chunk);
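Lines 229-301 cover chunk allocation. The visible sequence: kzalloc the chunk (236-237), describe its VRAM as a MEMORY_DEVICE_PRIVATE dev_pagemap over a freshly reserved physical range (250-256), pin a backing buffer object (260-264), create the struct pages with memremap_pages() (268), link the chunk into the per-device list (275) and hand out its first page (278-286); lines 295-301 are the error unwind in reverse order. A condensed sketch of that flow, with the buffer-object creation elided and every name outside the matched lines (chunk size constant, resource handling, labels) an assumption:

    static int nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
    {
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        void *ptr;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL)
            return -ENOMEM;

        /* Reserve a physical address range to stand in for this slice of VRAM. */
        res = request_free_mem_region(&iomem_resource, CHUNK_SIZE, "nouveau_dmem");
        if (IS_ERR(res)) {
            ret = PTR_ERR(res);
            goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.range.start = res->start;
        chunk->pagemap.range.end = res->end;
        chunk->pagemap.nr_range = 1;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        /* ... allocate and pin chunk->bo in VRAM here (elided) ... */

        /* Create struct pages for the reserved range. */
        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
            ret = PTR_ERR(ptr);
            goto out_bo;
        }

        list_add(&chunk->list, &drm->dmem->chunks);

        /* Hand the chunk's first page to the caller and account for it. */
        *ppage = pfn_to_page(chunk->pagemap.range.start >> PAGE_SHIFT);
        chunk->callocated++;
        return 0;

    out_bo:
        nouveau_bo_unpin(chunk->bo);
        nouveau_bo_ref(NULL, &chunk->bo);
        release_mem_region(chunk->pagemap.range.start,
                   range_len(&chunk->pagemap.range));
    out_free:
        kfree(chunk);
        return ret;
    }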
309 struct nouveau_dmem_chunk *chunk;
317 chunk = nouveau_page_to_chunk(page);
318 chunk->callocated++;
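The second callocated++ (318) is the allocation-side counterpart of the decrement in the free callback above: whichever path hands a device page out charges it to its chunk. A minimal sketch of such an allocation path, assuming pages come off the free list that the free callback refills; the function name, the locking and the fallback behaviour are assumptions:

    static struct page *nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
    {
        struct nouveau_dmem_chunk *chunk;
        struct page *page;

        page = drm->dmem->free_pages;
        if (page) {
            /* Pop a recycled page and charge it to its chunk. */
            drm->dmem->free_pages = page->zone_device_data;
            chunk = nouveau_page_to_chunk(page);
            chunk->callocated++;
        }
        /* A NULL return means the caller must allocate a fresh chunk,
         * roughly as sketched in the chunk allocation path above. */
        return page;
    }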
341 struct nouveau_dmem_chunk *chunk;
348 list_for_each_entry(chunk, &drm->dmem->chunks, list) {
349 ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
359 struct nouveau_dmem_chunk *chunk;
365 list_for_each_entry(chunk, &drm->dmem->chunks, list)
366 nouveau_bo_unpin(chunk->bo);
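Lines 341-366 show the suspend/resume hooks walking the per-device chunk list: resume re-pins every chunk's backing buffer object in VRAM (349), suspend unpins them (366). A sketch of the pair, assuming the list is protected by a dmem mutex; the function names, return types and lock are assumptions:

    void nouveau_dmem_resume(struct nouveau_drm *drm)
    {
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
            return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
            /* Put the chunk's backing storage back in VRAM after resume. */
            ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
            WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
    }

    void nouveau_dmem_suspend(struct nouveau_drm *drm)
    {
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
            return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
            nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
    }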
371 * Evict all pages mapping a chunk.
374 nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
376 unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
385 migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
399 nouveau_dmem_copy_one(chunk->drm,
405 nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
412 dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
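Lines 371-412 belong to the eviction helper: before a chunk can be destroyed, every device page it still backs is migrated to system RAM. The visible steps are sizing the PFN arrays from the pagemap range (376), isolating the source pages with migrate_device_range() (385), copying each page out with the GPU (399), fencing the copies (405) and finally DMA-unmapping the bounce addresses (412). A condensed sketch of that flow; the copy helper's trailing arguments, the fence wait and the array allocation strategy are assumptions:

    static void nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
    {
        unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
        unsigned long *src_pfns, *dst_pfns;
        dma_addr_t *dma_addrs;
        struct nouveau_fence *fence = NULL;

        src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
        dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
        dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);

        /* Isolate every device page backed by this chunk's PFN range. */
        migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
                     npages);

        for (i = 0; i < npages; i++) {
            struct page *dpage;

            if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
                continue;
            /* Allocate a system page and have the GPU copy the data out. */
            dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
            dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
            nouveau_dmem_copy_one(chunk->drm, migrate_pfn_to_page(src_pfns[i]),
                          dpage, &dma_addrs[i]);
        }

        nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
        migrate_device_pages(src_pfns, dst_pfns, npages);
        /* ... wait for the fence so all copies have landed (elided) ... */
        migrate_device_finalize(src_pfns, dst_pfns, npages);

        for (i = 0; i < npages; i++)
            if (dma_addrs[i])
                dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i],
                           PAGE_SIZE, DMA_BIDIRECTIONAL);

        kvfree(src_pfns);
        kvfree(dst_pfns);
        kvfree(dma_addrs);
    }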
419 struct nouveau_dmem_chunk *chunk, *tmp;
426 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
427 nouveau_dmem_evict_chunk(chunk);
428 nouveau_bo_unpin(chunk->bo);
429 nouveau_bo_ref(NULL, &chunk->bo);
430 WARN_ON(chunk->callocated);
431 list_del(&chunk->list);
432 memunmap_pages(&chunk->pagemap);
433 release_mem_region(chunk->pagemap.range.start,
434 range_len(&chunk->pagemap.range));
435 kfree(chunk);
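This last cluster (419-435) is the teardown loop: each chunk is drained via nouveau_dmem_evict_chunk(), its backing BO is unpinned and released, a leftover callocated count triggers a warning, and then the chunk is unlinked, its pagemap destroyed, its physical range released and the structure freed. A sketch of the surrounding function, assuming the walk is serialized by a dmem mutex; the function name, entry guard and lock are assumptions:

    void nouveau_dmem_fini(struct nouveau_drm *drm)
    {
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
            return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
            nouveau_dmem_evict_chunk(chunk);   /* migrate pages back to RAM */
            nouveau_bo_unpin(chunk->bo);
            nouveau_bo_ref(NULL, &chunk->bo);  /* drop the BO reference */
            WARN_ON(chunk->callocated);        /* no pages should be left */
            list_del(&chunk->list);
            memunmap_pages(&chunk->pagemap);   /* tear down the struct pages */
            release_mem_region(chunk->pagemap.range.start,
                       range_len(&chunk->pagemap.range));
            kfree(chunk);
        }
        mutex_unlock(&drm->dmem->mutex);
    }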