Lines Matching defs:block

71 * block (8k) allocator, which operates out of a bo.  Allocation is done by
72 * either pulling a block from the free list or growing the used range of the
80 * so we just keep it around until garbage collection time. While the block
81 * allocator is lockless for normal operations, we block other threads trying
86 * pool of smaller, fixed size objects, which operates much like the block
88 * space it just allocates a new block from the block pool. This allocator is
98 * block and fill it up. These cases are local to a command buffer and the
100 * block when it runs out of space and chains them together so they can be
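The comment lines above sketch a two-level scheme: a block pool that hands out fixed-size (8k) blocks backed by a bo, and a state pool on top that carves smaller fixed-size states out of those blocks, pulling a fresh block only when the current one runs dry. A minimal single-threaded illustration of that layering (all of the toy_* names below are hypothetical stand-ins, not the driver's API):

    #include <stdint.h>

    #define BLOCK_SIZE 8192

    /* Hypothetical stand-in for the block pool: hands out BLOCK_SIZE
     * chunks carved from one big backing mapping. */
    struct toy_block_pool {
       char    *map;     /* CPU mapping of the backing bo */
       uint32_t size;    /* bytes available in the mapping */
       uint32_t next;    /* first unused byte */
    };

    static int32_t
    toy_block_pool_alloc(struct toy_block_pool *pool)
    {
       if (pool->next + BLOCK_SIZE > pool->size)
          return -1;                 /* the real pool would grow the bo here */
       int32_t offset = pool->next;
       pool->next += BLOCK_SIZE;
       return offset;
    }

    /* Fixed-size state allocator layered on top: carve states out of the
     * current block, pull a new block only when the block is exhausted. */
    struct toy_state_pool {
       struct toy_block_pool *blocks;
       uint32_t state_size;
       int32_t  block_offset;        /* current block, -1 if none */
       uint32_t used;                /* bytes used in the current block */
    };

    static int32_t
    toy_state_pool_alloc(struct toy_state_pool *pool)
    {
       if (pool->block_offset < 0 ||
           pool->used + pool->state_size > BLOCK_SIZE) {
          pool->block_offset = toy_block_pool_alloc(pool->blocks);
          pool->used = 0;
          if (pool->block_offset < 0)
             return -1;              /* backing bo exhausted */
       }
       int32_t offset = pool->block_offset + pool->used;
       pool->used += pool->state_size;
       return offset;
    }

The real allocators add growth, free lists, and lock-free updates, which the matches further down cover.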
207 * without races or imposing locking on the block allocate fast path. On
237 /* The block pool is always initialized to a nonzero size and this function
298 /* We allocated the first block outside the pool so we have to grow
300 * allocate now will get block indexes above the current limit and
378 /* Make sure VMA addresses are 2MiB aligned for the block pool */
400 pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
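os_create_anonymous_file() typically amounts to an anonymous memfd on Linux; the pool then maps a window of that file. A rough sketch of that setup against the raw Linux APIs (TOY_MEMFD_SIZE and the function name are illustrative, and the 2MiB VMA alignment noted above is a GPU-address concern that is not shown here):

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>

    /* Illustrative placeholder for BLOCK_POOL_MEMFD_SIZE. */
    #define TOY_MEMFD_SIZE ((off_t)256 * 1024 * 1024)

    static void *
    toy_pool_map_init(int *fd_out, size_t initial_size)
    {
       /* Roughly what os_create_anonymous_file() boils down to on Linux. */
       int fd = memfd_create("block pool", MFD_CLOEXEC);
       if (fd < 0)
          return MAP_FAILED;

       /* Size the file to its maximum up front; in this sketch only the
        * mapped window would change as the pool grows. */
       if (ftruncate(fd, TOY_MEMFD_SIZE) < 0) {
          close(fd);
          return MAP_FAILED;
       }

       void *map = mmap(NULL, initial_size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, 0);
       *fd_out = fd;
       return map;
    }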
527 * without races or imposing locking on the block allocate fast path. On
574 /** Returns current memory map of the block pool.
577 * offset. The offset parameter is relative to the "center" of the block pool
578 * rather than the start of the block pool BO map.
603 /** Grows and re-centers the block pool.
605 * We grow the block pool in one or both directions in such a way that the
619 * the block pool that only want a one-sided pool.)
621 * 5) We have enough space allocated for at least one more block in
655 /* The block pool is always initialized to a nonzero size and this function
764 /* We need to grow the block pool, but still have some leftover
779 /* We allocated the first block outside the pool so we have to grow
781 * allocate now will get block indexes above the current limit and
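The grow comments describe a two-sided pool: offsets handed to callers are measured from the pool's center, so growing only has to pick a larger size and a new center rather than fix up every existing offset. One way such a computation could look (a simplified guess at the policy, not the driver's exact rules):

    #include <stdint.h>

    /* front_used/back_used are how many bytes are already in use on the
     * positive and negative sides of the center, respectively. */
    struct toy_pool_layout {
       uint32_t size;    /* total bytes mapped */
       uint32_t center;  /* byte offset of the pool's zero point in the map */
    };

    static struct toy_pool_layout
    toy_pool_grow(struct toy_pool_layout cur,
                  uint32_t front_used, uint32_t back_used)
    {
       struct toy_pool_layout next = cur;
       uint32_t need = front_used + back_used;

       /* Double until both sides fit; this relies on the pool starting at
        * a nonzero size, as the comments above note.  The real code also
        * wants room for at least one more block. */
       do {
          next.size *= 2;
       } while (next.size < need);

       /* Give each side at least what it already uses and split the slack.
        * A one-sided pool would simply keep center at 0 instead. */
       next.center = back_used + (next.size - need) / 2;

       return next;
    }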
811 /* Allocates a block out of the back of the block pool.
813 * This will allocate a block earlier than the "start" of the block pool.
815 * be correct relative to the block pool's map pointer.
818 * gymnastics with the block pool's BO when doing relocations.
828 * number of bytes downwards from the middle to the end of the block.
830 * start of the block.
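Back allocation is the negative half of that scheme: blocks taken from the back get offsets below zero, and a center-relative offset is resolved by adding it, signed, to the centered map pointer. A small sketch with hypothetical names:

    #include <stdint.h>

    struct toy_pool {
       char    *map;        /* CPU mapping of the backing bo */
       uint32_t center;     /* byte offset of the pool's zero point */
       uint32_t back_used;  /* bytes handed out behind the center */
       uint32_t front_used; /* bytes handed out in front of it */
    };

    /* Allocate a block from the back of the pool.  The returned offset is
     * negative: a number of bytes downwards from the center. */
    static int32_t
    toy_pool_alloc_back(struct toy_pool *pool, uint32_t block_size)
    {
       pool->back_used += block_size;  /* real code grows/re-centers on demand */
       return -(int32_t)pool->back_used;
    }

    /* Resolve a signed, center-relative offset to a CPU pointer, so the same
     * map serves both front (positive) and back (negative) allocations. */
    static void *
    toy_pool_map_at(struct toy_pool *pool, int32_t offset)
    {
       return pool->map + pool->center + offset;
    }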
870 pool->buckets[i].block.next = 0;
871 pool->buckets[i].block.end = 0;
893 struct anv_block_state block, old, new;
903 /* If our state is large, we don't need any sub-allocation from a block.
910 block.u64 = __sync_fetch_and_add(&pool->block.u64, state_size);
912 if (block.next < block.end) {
913 return block.next;
914 } else if (block.next == block.end) {
918 old.u64 = __sync_lock_test_and_set(&pool->block.u64, new.u64);
919 if (old.next != block.next)
920 futex_wake(&pool->block.end, INT_MAX);
923 futex_wait(&pool->block.end, block.end, NULL);
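The anv_block_state fragments above are the lock-free fast path itself: next and end are packed into one 64-bit word, a single atomic add reserves space and simultaneously tells the caller where it landed, the one thread that hits the boundary exactly fetches a new block and wakes everyone, and threads that overshoot sleep on a futex keyed to end until it changes. A self-contained sketch of that pattern, with the futex wrappers written against the raw Linux syscall and a trivial stand-in for the block pool:

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <limits.h>
    #include <unistd.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>

    /* 'next' and 'end' share one 64-bit word so a single atomic add both
     * reserves space and reports where the block ends. */
    union toy_block_state {
       struct {
          uint32_t next;
          uint32_t end;
       };
       uint64_t u64;
    };

    static void
    toy_futex_wait(uint32_t *addr, uint32_t expected)
    {
       /* Sleeps only if *addr still equals 'expected', so a wake that lands
        * before we get here is not lost. */
       syscall(SYS_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
    }

    static void
    toy_futex_wake(uint32_t *addr)
    {
       syscall(SYS_futex, addr, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
    }

    /* Trivial stand-in for pulling a fresh block from the block pool:
     * hands out consecutive block_size ranges from a single counter. */
    static uint32_t
    toy_new_block(uint32_t block_size)
    {
       static uint32_t top;
       return __sync_fetch_and_add(&top, block_size);
    }

    /* Lock-free fixed-size sub-allocation; assumes block_size is a multiple
     * of state_size so 'next' lands exactly on 'end' when a block is spent. */
    static uint32_t
    toy_state_alloc(union toy_block_state *pool, uint32_t state_size,
                    uint32_t block_size)
    {
       union toy_block_state block, new;

       for (;;) {
          block.u64 = __sync_fetch_and_add(&pool->u64, state_size);

          if (block.next < block.end) {
             /* Fast path: our reservation fits in the current block. */
             return block.next;
          } else if (block.next == block.end) {
             /* Exactly one thread sees next == end; it fetches a new block,
              * publishes the new {next, end}, and wakes the sleepers.  (The
              * real code uses the value returned by the exchange to skip the
              * wake when nobody raced in.) */
             uint32_t offset = toy_new_block(block_size);
             new.next = offset + state_size;
             new.end  = offset + block_size;
             __sync_lock_test_and_set(&pool->u64, new.u64);
             toy_futex_wake(&pool->end);
             return offset;
          } else {
             /* We ran off the end while another thread is fetching a new
              * block: sleep until 'end' changes, then retry. */
             toy_futex_wait(&pool->end, block.end);
          }
       }
    }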
1194 struct anv_state block;
1196 /* The next block */
1200 /* A pointer to the first user-allocated thing in this block. This is
1201 * what valgrind sees as the start of the block.
1218 stream->block = ANV_STATE_NULL;
1221 * state_stream_alloc fetches a new block.
1233 util_dynarray_foreach(&stream->all_blocks, struct anv_state, block) {
1234 VG(VALGRIND_MEMPOOL_FREE(stream, block->map));
1235 VG(VALGRIND_MAKE_MEM_NOACCESS(block->map, block->alloc_size));
1236 anv_state_pool_free_no_vg(stream->state_pool, *block);
1253 if (offset + size > stream->block.alloc_size) {
1258 stream->block = anv_state_pool_alloc_no_vg(stream->state_pool,
1261 struct anv_state, stream->block);
1262 VG(VALGRIND_MAKE_MEM_NOACCESS(stream->block.map, block_size));
1266 assert(offset + size <= stream->block.alloc_size);
1270 struct anv_state state = stream->block;
1278 assert(state.map == stream->block.map);
1283 VG(VALGRIND_MEMPOOL_CHANGE(stream, stream->block.map, stream->block.map,
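The state stream lines repeat the pattern one level up, per command buffer: the stream keeps one current block, bumps an offset inside it, pulls a new block from the state pool when an allocation will not fit, and records every block it ever pulled so they can all be returned when the stream is finished (the Valgrind mempool calls just teach the tool about those lifetimes). A stripped-down sketch without the Valgrind bookkeeping, using simplified stand-in types:

    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified stand-ins for anv_state and the state pool. */
    struct toy_state {
       uint32_t offset;
       uint32_t alloc_size;
       void    *map;
    };

    static struct toy_state
    toy_state_pool_alloc(uint32_t size)
    {
       /* Trivial stand-in: the real state pool sub-allocates from the
        * block pool instead of calling malloc. */
       struct toy_state s = { .offset = 0, .alloc_size = size,
                              .map = malloc(size) };
       return s;
    }

    static void
    toy_state_pool_free(struct toy_state state)
    {
       free(state.map);
    }

    struct toy_state_stream {
       struct toy_state  block;       /* current block; alloc_size 0 if none */
       uint32_t          next;        /* bump offset inside the current block */
       struct toy_state *all_blocks;  /* every block ever pulled from the pool */
       uint32_t          num_blocks;
    };

    static struct toy_state
    toy_state_stream_alloc(struct toy_state_stream *stream,
                           uint32_t size, uint32_t align)
    {
       /* align is assumed to be a power of two. */
       uint32_t offset = (stream->next + align - 1) & ~(align - 1);

       if (offset + size > stream->block.alloc_size) {
          /* Current block is spent (or never existed): pull a new one and
           * remember it so everything can be freed at finish time. */
          uint32_t block_size = 4096;          /* illustrative default */
          if (block_size < size)
             block_size = size;
          stream->block = toy_state_pool_alloc(block_size);
          stream->all_blocks =
             realloc(stream->all_blocks,
                     ++stream->num_blocks * sizeof(*stream->all_blocks));
          stream->all_blocks[stream->num_blocks - 1] = stream->block;
          offset = 0;
       }

       /* Hand back a view into the current block, like the fragments above. */
       struct toy_state state = stream->block;
       state.offset    += offset;
       state.map        = (char *)state.map + offset;
       state.alloc_size = size;
       stream->next     = offset + size;
       return state;
    }

    static void
    toy_state_stream_finish(struct toy_state_stream *stream)
    {
       /* Return every block the stream ever allocated, mirroring the
        * util_dynarray_foreach loop in the fragments. */
       for (uint32_t i = 0; i < stream->num_blocks; i++)
          toy_state_pool_free(stream->all_blocks[i]);
       free(stream->all_blocks);
    }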