Lines Matching defs:cache

14  * add an extra cache for such buffer objects.
47 struct nouveau_mman *cache;
95 mm_bucket_by_order(struct nouveau_mman *cache, int order)
99 return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
103 mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
105 return mm_bucket_by_order(cache, mm_get_order(size));
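Lines 95-105 are the helpers that map a request size onto one of the cache's power-of-two buckets (the fragments appear to come from Mesa's nouveau suballocator, nouveau_mm.c). Below is a minimal sketch of that mapping; the MM_MIN_ORDER/MM_MAX_ORDER values and the exact behaviour of mm_get_order() are assumptions, and only the MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER indexing is taken from the listing itself.

#include <assert.h>

#define MM_MIN_ORDER 7            /* assumed: smallest chunk is 128 bytes  */
#define MM_MAX_ORDER 20           /* assumed: largest supported slab order */
#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* Assumed behaviour: order of the smallest power of two >= size. */
static int
mm_get_order(unsigned size)
{
   int order = 0;
   while ((1u << order) < size)
      ++order;
   return order;
}

/* Mirrors &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER] from
 * line 99: every request below the minimum chunk size lands in bucket 0. */
static int
bucket_index_for_size(unsigned size)
{
   int idx = MAX2(mm_get_order(size), MM_MIN_ORDER) - MM_MIN_ORDER;
   assert(idx < MM_NUM_BUCKETS);
   return idx;
}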
123 mm_slab_new(struct nouveau_mman *cache, struct mm_bucket *bucket, int chunk_order)
142 ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
151 slab->cache = cache;
155 assert(bucket == mm_bucket_by_order(cache, chunk_order));
158 p_atomic_add(&cache->allocated, size);
162 cache->allocated / 1024);
169 nouveau_mm_allocate(struct nouveau_mman *cache,
177 bucket = mm_bucket_by_size(cache, size);
179 ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
183 size, cache->config.nv50.memtype, ret);
198 mm_slab_new(cache, bucket, MAX2(mm_get_order(size), MM_MIN_ORDER));
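The allocate path first looks up a bucket for the request; a size too large for any bucket appears to fall through to a direct nouveau_bo_new() (line 179), while small requests are served from a slab, creating a fresh one at the bucket's chunk order when none has room (line 198). Below is a simplified, self-contained sketch of that decision; it reuses mm_get_order(), MM_MIN_ORDER, MM_MAX_ORDER and MAX2 from the bucket-mapping sketch above, and every name not present in the listing is made up for illustration.

#include <stdio.h>

/* Stand-in for a bucket's state; purely illustrative. */
struct bucket_sketch {
   int chunk_order;     /* each chunk in this bucket is 1 << chunk_order bytes */
   int free_chunks;     /* chunks still available across the bucket's slabs    */
};

/* Mirrors the shape of nouveau_mm_allocate(): bucket lookup, direct BO
 * allocation for oversized requests, a new slab when the bucket is empty. */
static void
allocate_sketch(struct bucket_sketch *buckets, unsigned size)
{
   int order = mm_get_order(size);

   if (order > MM_MAX_ORDER) {
      /* Corresponds to the direct nouveau_bo_new() call at line 179. */
      printf("size %u too large for any bucket: allocate a dedicated BO\n", size);
      return;
   }

   struct bucket_sketch *b = &buckets[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
   if (b->free_chunks == 0) {
      /* Corresponds to mm_slab_new(cache, bucket, MAX2(order, MM_MIN_ORDER))
       * at line 198; the chunk count of the new slab is made up here. */
      b->free_chunks = 16;
   }
   --b->free_chunks;   /* hand one chunk of 1 << chunk_order bytes to the caller */
}

Keeping one bucket per order means every chunk in a slab has the same size, so a freed chunk can be reused directly without any coalescing.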
226 struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);
254 struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
257 if (!cache)
260 cache->dev = dev;
261 cache->domain = domain;
262 cache->config = *config;
263 cache->allocated = 0;
266 list_inithead(&cache->bucket[i].free);
267 list_inithead(&cache->bucket[i].used);
268 list_inithead(&cache->bucket[i].full);
269 simple_mtx_init(&cache->bucket[i].lock, mtx_plain);
272 return cache;
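The constructor fragments at lines 254-272 imply the overall layout of the cache: a device/domain/config triple, an allocated-bytes counter, and an array of buckets each holding free/used/full slab lists behind a per-bucket lock. The reconstruction below is a hedged guess at that layout, not the verbatim Mesa definition; field order and exact types are assumptions, and the bucket count reuses the assumed MM_MIN_ORDER/MM_MAX_ORDER constants from the sketch above.

#include <stdint.h>
#include <nouveau.h>             /* assumed: libdrm's nouveau_device / nouveau_bo_config     */
#include "util/list.h"           /* assumed: Mesa's list_head / list_inithead()   (line 266) */
#include "util/simple_mtx.h"     /* assumed: simple_mtx_t / simple_mtx_init()     (line 269) */

struct mm_bucket_sketch {
   struct list_head free;        /* slabs with every chunk still available */
   struct list_head used;        /* slabs with some chunks handed out      */
   struct list_head full;        /* slabs with no free chunk left          */
   simple_mtx_t lock;            /* guards the three lists above           */
};

struct nouveau_mman_sketch {
   struct nouveau_device *dev;
   uint32_t domain;                        /* VRAM or GART placement          */
   union nouveau_bo_config config;         /* e.g. nv50 memtype, see line 183 */
   int64_t allocated;                      /* bytes currently backed by slabs */
   struct mm_bucket_sketch bucket[MM_MAX_ORDER - MM_MIN_ORDER + 1];
};

The constructor then simply fills these fields and initializes each bucket's three list heads and lock before returning the cache, exactly as lines 260-272 show.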
288 nouveau_mm_destroy(struct nouveau_mman *cache)
292 if (!cache)
296 if (!list_is_empty(&cache->bucket[i].used) ||
297 !list_is_empty(&cache->bucket[i].full))
298 debug_printf("WARNING: destroying GPU memory cache "
301 nouveau_mm_free_slabs(&cache->bucket[i].free);
302 nouveau_mm_free_slabs(&cache->bucket[i].used);
303 nouveau_mm_free_slabs(&cache->bucket[i].full);
304 simple_mtx_destroy(&cache->bucket[i].lock);
307 FREE(cache);
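Taken together, the create/allocate/destroy fragments suggest the cache's external lifecycle. The sketch below illustrates it under assumed declarations: the nouveau_mm_* signatures and the nouveau_mm_allocation handle are inferred from this listing rather than quoted from nouveau_mm.h, and obtaining the struct nouveau_device, error checks, and BO reference handling are all elided.

#include <stddef.h>
#include <stdint.h>

/* Assumed declarations; consult nouveau_mm.h for the real ones. */
struct nouveau_device;
struct nouveau_bo;
union nouveau_bo_config;
struct nouveau_mman;
struct nouveau_mm_allocation;

struct nouveau_mman *nouveau_mm_create(struct nouveau_device *, uint32_t domain,
                                       union nouveau_bo_config *);
struct nouveau_mm_allocation *nouveau_mm_allocate(struct nouveau_mman *, uint32_t size,
                                                  struct nouveau_bo **, uint32_t *offset);
void nouveau_mm_free(struct nouveau_mm_allocation *);
void nouveau_mm_destroy(struct nouveau_mman *);

/* Suballocate a small scratch buffer, then tear the cache down.  Every
 * allocation must be returned with nouveau_mm_free() first, otherwise the
 * used/full lists are non-empty and the warning at line 298 fires. */
static void
lifecycle_sketch(struct nouveau_device *dev, uint32_t domain,
                 union nouveau_bo_config *config)
{
   struct nouveau_mman *mm = nouveau_mm_create(dev, domain, config);
   struct nouveau_bo *bo = NULL;
   uint32_t offset = 0;

   struct nouveau_mm_allocation *a = nouveau_mm_allocate(mm, 4096, &bo, &offset);
   /* ... use the byte range [offset, offset + 4096) of bo ... */
   nouveau_mm_free(a);

   nouveau_mm_destroy(mm);
}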