Lines Matching defs:base

71    struct pb_slab base;
168 return bo_can_reclaim(priv, &bo->base);
175 ASSERTED unsigned slab_size = slab->buffer->base.size;
177 assert(slab->base.num_entries * slab->entry_size <= slab_size);
190 //if (bo->base.usage & RADEON_FLAG_ENCRYPTED)
191 //pb_slab_free(get_slabs(screen, bo->base.size, RADEON_FLAG_ENCRYPTED), &bo->u.slab.entry);
193 pb_slab_free(get_slabs(screen, bo->base.size, 0), &bo->u.slab.entry);
294 pb_cache_init_entry(&screen->pb.bo_cache, bo->cache_entry, &bo->base, heap);
304 pipe_reference_init(&bo->base.reference, 1);
305 bo->base.alignment_log2 = util_logbase2(alignment);
306 bo->base.size = mai.allocationSize;
307 bo->base.vtbl = &bo_vtbl;
308 bo->base.placement = screen->heap_flags[heap];
309 bo->base.usage = flags;
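
File lines 304-309 fill in the pb_buffer-style "base" header of a freshly allocated real BO: the reference count starts at 1, the alignment is stored as a log2, the size comes from the VkMemoryAllocateInfo, and placement/usage record the heap flags and allocation flags. A minimal sketch of that bookkeeping, using stand-in types rather than the driver's struct zink_bo and vtable:

#include <assert.h>
#include <stdint.h>

/* Stand-in for the pb_buffer-style header embedded as "base"; the real
 * layout lives in the driver headers and is not reproduced here. */
struct base_desc {
   unsigned alignment_log2;
   uint64_t size;
   unsigned placement;   /* heap / memory-property flags */
   unsigned usage;       /* allocation flags */
};

/* Floor log2 of a power-of-two alignment, analogous to util_logbase2(). */
static unsigned
floor_log2(uint64_t v)
{
   unsigned r = 0;
   while (v >>= 1)
      r++;
   return r;
}

/* Record the allocation parameters on the base descriptor, mirroring the
 * assignments in the matched lines. */
static void
init_base_desc(struct base_desc *b, uint64_t alloc_size, uint64_t alignment,
               unsigned heap_flags, unsigned alloc_flags)
{
   b->alignment_log2 = floor_log2(alignment);
   b->size = alloc_size;
   b->placement = heap_flags;
   b->usage = alloc_flags;
   /* For a power-of-two alignment the stored log2 round-trips exactly,
    * which is what the later "alignment <= 1 << alignment_log2" assert
    * (file line 618) depends on. */
   assert(alignment <= ((uint64_t)1 << b->alignment_log2));
}
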
367 assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, ZINK_SPARSE_BUFFER_PAGE_SIZE));
369 size = MIN3(bo->base.size / 16,
371 bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * ZINK_SPARSE_BUFFER_PAGE_SIZE);
414 bo->u.sparse.num_backing_pages -= backing->bo->base.size / ZINK_SPARSE_BUFFER_PAGE_SIZE;
481 backing->chunks[0].end == backing->bo->base.size / ZINK_SPARSE_BUFFER_PAGE_SIZE)
492 assert(!bo->mem && bo->base.usage & ZINK_ALLOC_SPARSE);
528 pipe_reference_init(&bo->base.reference, 1);
529 bo->base.alignment_log2 = util_logbase2(ZINK_SPARSE_BUFFER_PAGE_SIZE);
530 bo->base.size = size;
531 bo->base.vtbl = &bo_sparse_vtbl;
532 bo->base.placement = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
534 bo->base.usage = ZINK_ALLOC_SPARSE;
544 return &bo->base;
616 pipe_reference_init(&bo->base.reference, 1);
617 bo->base.size = size;
618 assert(alignment <= 1 << bo->base.alignment_log2);
620 return &bo->base;
646 return &bo->base;
660 return &bo->base;
684 VkResult result = VKSCR(MapMemory)(screen->dev, real->mem, 0, real->base.size, 0, &cpu);
746 mem_bind.size = MIN2(res->base.b.width0 - offset, size);
766 assert(offset <= bo->base.size);
767 assert(size <= bo->base.size - offset);
768 assert(size % ZINK_SPARSE_BUFFER_PAGE_SIZE == 0 || offset + size == bo->base.size);
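
The asserts at file lines 766-768 bound a sparse commit/decommit request: the offset must lie inside the buffer, the size must fit in the remainder, and the size must be a whole number of sparse pages unless the range runs to the very end of the buffer (where the last page may be partial). A standalone sketch of the same validation, with an assumed page size rather than the driver's ZINK_SPARSE_BUFFER_PAGE_SIZE:

#include <stdbool.h>
#include <stdint.h>

#define SPARSE_PAGE_SIZE (64 * 1024)   /* illustrative granularity */

/* Returns true if [offset, offset + size) is a valid sparse bind range for
 * a buffer of bo_size bytes, following the matched asserts. */
static bool
sparse_range_valid(uint64_t bo_size, uint64_t offset, uint64_t size)
{
   if (offset > bo_size)
      return false;
   if (size > bo_size - offset)
      return false;
   return size % SPARSE_PAGE_SIZE == 0 || offset + size == bo_size;
}
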
938 if (screen->faked_e5sparse && res->base.b.format == PIPE_FORMAT_R9G9B9E5_FLOAT)
943 if (res->base.b.target == PIPE_BUFFER) {
972 unsigned mipwidth = DIV_ROUND_UP(MAX2(res->base.b.width0 >> l, 1), gwidth);
973 unsigned mipheight = DIV_ROUND_UP(MAX2(res->base.b.height0 >> l, 1), gheight);
974 unsigned mipdepth = DIV_ROUND_UP(res->base.b.array_size > 1 ? res->base.b.array_size : MAX2(res->base.b.depth0 >> l, 1), gdepth);
985 if (res->base.b.array_size > 1) {
994 ibind[i].extent.depth = (d == ndepth - 1 && res->base.b.target != PIPE_TEXTURE_CUBE) ? lastBlockExtent.depth : gdepth;
996 (d + (box->z / gdepth)) * ((MAX2(res->base.b.width0 >> level, 1) / gwidth) * (MAX2(res->base.b.height0 >> level, 1) / gheight)) +
997 (h + (box->y / gheight)) * (MAX2(res->base.b.width0 >> level, 1) / gwidth) +
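
File lines 972-997 walk the bound box of a sparse image in granule (sparse block) units and linearize each (w, h, d) step into a tile index: width-tiles vary fastest, then height-tiles, then depth or array slices. The listing shows only the depth and height terms (the trailing width term on the following source line does not mention "base"), so the sketch below assumes the conventional row-major completion; all names are illustrative.

#include <stdint.h>

#define MAX2(a, b) ((a) > (b) ? (a) : (b))

/* Linear tile index of one granule within mip level "level" of an image of
 * base dimensions width0 x height0. gwidth/gheight/gdepth are the sparse
 * block dimensions, (x, y, z) is the origin of the bound box in texels, and
 * (w, h, d) is the granule offset inside that box. */
static uint64_t
sparse_tile_index(unsigned width0, unsigned height0, unsigned level,
                  unsigned gwidth, unsigned gheight, unsigned gdepth,
                  unsigned x, unsigned y, unsigned z,
                  unsigned w, unsigned h, unsigned d)
{
   unsigned tiles_per_row   = MAX2(width0 >> level, 1) / gwidth;
   unsigned tiles_per_slice = tiles_per_row * (MAX2(height0 >> level, 1) / gheight);

   return (uint64_t)(d + z / gdepth) * tiles_per_slice +
          (uint64_t)(h + y / gheight) * tiles_per_row +
          (w + x / gwidth);   /* assumed final term, not shown in the listing */
}
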
1211 slab_size = slab->buffer->base.size;
1213 slab->base.num_entries = slab_size / entry_size;
1214 slab->base.num_free = slab->base.num_entries;
1216 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
1220 list_inithead(&slab->base.free);
1222 base_id = p_atomic_fetch_add(&screen->pb.next_bo_unique_id, slab->base.num_entries);
1223 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
1227 bo->base.alignment_log2 = util_logbase2(get_slab_entry_alignment(screen, entry_size));
1228 bo->base.size = entry_size;
1229 bo->base.vtbl = &bo_slab_vtbl;
1232 bo->u.slab.entry.slab = &slab->base;
1244 bo->base.placement = bo->u.slab.real->base.placement;
1246 list_addtail(&bo->u.slab.entry.head, &slab->base.free);
1250 assert(slab->base.num_entries * entry_size <= slab_size);
1252 return &slab->base;
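
File lines 1211-1252 size a new slab from its backing buffer and carve it into fixed-size entries: num_entries is the integer quotient of the slab size by the entry size, num_free starts equal to it, and the invariant num_entries * entry_size <= slab_size is asserted at creation (line 1250) and re-checked on the free path (line 177). A compact sketch of that sizing arithmetic with stand-in types:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Minimal stand-in for the slab bookkeeping; the real structure also owns a
 * free list, per-entry BOs, and unique IDs, as the matched lines show. */
struct slab_sketch {
   unsigned num_entries;
   unsigned num_free;
   unsigned entry_size;
};

/* Partition a backing buffer of slab_size bytes into entry_size-byte
 * entries, mirroring the sizing arithmetic of the matched lines. */
static struct slab_sketch *
slab_sketch_create(uint64_t slab_size, unsigned entry_size)
{
   struct slab_sketch *slab = calloc(1, sizeof(*slab));
   if (!slab)
      return NULL;

   slab->entry_size = entry_size;
   slab->num_entries = (unsigned)(slab_size / entry_size);
   slab->num_free = slab->num_entries;

   assert((uint64_t)slab->num_entries * entry_size <= slab_size);
   return slab;
}
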