/third_party/mesa3d/src/gallium/frontends/nine/
  stateblock9.c
    65    struct nine_range_pool *pool = &This->base.device->range_pool;    in NineStateBlock9_dtor() local
    92    nine_range_pool_put_chain(pool, This->state.changed.ps_const_f, r);    in NineStateBlock9_dtor()
    96    nine_range_pool_put_chain(pool, This->state.changed.vs_const_f, r);    in NineStateBlock9_dtor()
    100   nine_range_pool_put_chain(pool, This->state.changed.vs_const_i, r);    in NineStateBlock9_dtor()
    104   nine_range_pool_put_chain(pool, This->state.changed.vs_const_b, r);    in NineStateBlock9_dtor()
    144   struct nine_range_pool *pool)    in nine_state_copy_common()
    395   struct nine_range_pool *pool,    in nine_state_copy_common_all()
    563   struct nine_range_pool *pool = &device->range_pool;    in NineStateBlock9_Apply() local
    569   nine_state_copy_common_all(device, dst, src, src, TRUE, pool, MaxStreams);    in NineStateBlock9_Apply()
    571   nine_state_copy_common(device, dst, src, src, TRUE, pool);    in NineStateBlock9_Apply()
    139   nine_state_copy_common(struct NineDevice9 *device, struct nine_state *dst, struct nine_state *src, struct nine_state *mask, const boolean apply, struct nine_range_pool *pool)    nine_state_copy_common() argument
    390   nine_state_copy_common_all(struct NineDevice9 *device, struct nine_state *dst, struct nine_state *src, struct nine_state *help, const boolean apply, struct nine_range_pool *pool, const int MaxStreams)    nine_state_copy_common_all() argument
    …

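The stateblock destructor above hands whole chains of dirty constant ranges (ps_const_f, vs_const_f, and so on) back to a shared range pool. A minimal sketch of the put-chain idea, with simplified stand-in types rather than the Mesa API: the entire chain is spliced onto the pool's free list in O(1).

    // Simplified stand-ins for nine_range / nine_range_pool.
    struct nine_range { int begin, end; nine_range *next; };
    struct nine_range_pool { nine_range *free_list = nullptr; };

    // Return the whole chain [head .. last] to the pool in one splice.
    static void range_pool_put_chain(nine_range_pool *pool,
                                     nine_range *head, nine_range *last)
    {
        last->next = pool->free_list;   // old free list hangs off the tail
        pool->free_list = head;         // the chain becomes the new list head
    }
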
/foundation/multimedia/image_framework/plugins/common/libs/image/libextplugin/src/hardware/imagecodec/
  image_codec.cpp
    408   vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in AllocateHardwareBuffers() local
    409   pool.clear();    in AllocateHardwareBuffers()
    438   pool.push_back(bufInfo);    in AllocateHardwareBuffers()
    452   vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in AllocateSurfaceBuffers() local
    453   pool.clear();    in AllocateSurfaceBuffers()
    485   pool.push_back(bufInfo);    in AllocateSurfaceBuffers()
    520   vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in FindBufferInfoByID() local
    521   for (BufferInfo &info : pool) {    in FindBufferInfoByID()
    532   const vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in FindBufferIndexByID() local
    533   for (size_t i = 0; i < pool…    in FindBufferIndexByID()
    737   vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in ReclaimBuffer() local
    752   const vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in IsAllBufferOwnedByUs() local
    780   const vector<BufferInfo>& pool = (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;    in ClearBufferPool() local
    …

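Every function in this file opens the same way: bind a reference to either the input or the output pool based on the OMX port direction, then operate on `pool` generically. A trimmed, compilable sketch of the idiom; the member names mirror the hits, the surrounding class and the stand-in enum are invented for the example.

    #include <vector>

    enum PortIndex { OMX_DirInput, OMX_DirOutput };   // stand-in for the OMX enum
    struct BufferInfo { int bufferId = 0; };

    struct ImageCodec {
        std::vector<BufferInfo> inputBufferPool_;
        std::vector<BufferInfo> outputBufferPool_;

        BufferInfo* FindBufferInfoByID(PortIndex portIndex, int id) {
            // One reference, two possible pools: the rest is written once.
            std::vector<BufferInfo>& pool =
                (portIndex == OMX_DirInput) ? inputBufferPool_ : outputBufferPool_;
            for (BufferInfo& info : pool) {
                if (info.bufferId == id)
                    return &info;
            }
            return nullptr;   // no buffer with that ID on this port
        }
    };
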
/third_party/mesa3d/src/gallium/drivers/r600/sb/
  sb_shader.cpp
    37    pool(), all_nodes(), src_stats(), opt_stats(), errors(),    in shader()
    258   node *n = new (pool.allocate(sizeof(node))) node(nt, nst, flags);    in create_node()
    264   alu_node* n = new (pool.allocate(sizeof(alu_node))) alu_node();    in create_alu()
    271   new (pool.allocate(sizeof(alu_group_node))) alu_group_node();    in create_alu_group()
    278   new (pool.allocate(sizeof(alu_packed_node))) alu_packed_node();    in create_alu_packed()
    284   cf_node* n = new (pool.allocate(sizeof(cf_node))) cf_node();    in create_cf()
    291   fetch_node* n = new (pool.allocate(sizeof(fetch_node))) fetch_node();    in create_fetch()
    297   region_node *n = new (pool.allocate(sizeof(region_node)))    in create_region()
    305   depart_node* n = new (pool.allocate(sizeof(depart_node)))    in create_depart()
    313   repeat_node* n = new (pool…    in create_repeat()
    …

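Every create_* factory above follows one pattern: grab raw storage from the shader's memory pool, then construct the IR node in place with placement new, so all nodes are released wholesale when the pool dies. A self-contained sketch with a toy bump allocator; the real sb pool is more elaborate.

    #include <cstddef>
    #include <new>

    // Toy bump allocator standing in for sb's pool; no bounds checks.
    class memory_pool {
        alignas(std::max_align_t) char buf_[1 << 16];
        std::size_t used_ = 0;
    public:
        void* allocate(std::size_t n) {
            const std::size_t a = alignof(std::max_align_t);
            n = (n + a - 1) & ~(a - 1);        // keep allocations aligned
            void* p = buf_ + used_;
            used_ += n;
            return p;
        }
    };

    struct alu_node { int op = 0; };

    alu_node* create_alu(memory_pool& pool) {
        // Placement new: construct in pool storage. There is no per-node
        // delete; destructors are skipped and the pool is dropped whole.
        return new (pool.allocate(sizeof(alu_node))) alu_node();
    }
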
/third_party/mesa3d/src/intel/vulkan/
  anv_private.h
    134   * (1) the dynamic state pool is located within the same 4 GiB as the low
    138   * (2) the binding table pool is located at lower addresses than the surface
    139   * state pool, within a 4 GiB range. This allows surface state base addresses
    634   * both the block pool and the state pools. Unfortunately, in order to
    665   #define anv_block_pool_foreach_bo(bo, pool) \
    666      for (struct anv_bo **_pp_bo = (pool)->bos, *bo; \
    667           _pp_bo != &(pool)->bos[(pool)->nbos] && (bo = *_pp_bo, true); \
    679   * around the actual BO so that we grow the pool after the wrapper BO has
    691   /* The address where the start of the pool i…
    738   anv_block_pool_size(struct anv_block_pool *pool)    anv_block_pool_size() argument
    796   struct anv_state_pool *pool;    member
    1950  struct anv_descriptor_pool *pool;    member
    4112  khr_perf_query_preamble_offset(const struct anv_query_pool *pool, uint32_t pass)    khr_perf_query_preamble_offset() argument
    …

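The anv_block_pool_foreach_bo macro at lines 665-667 walks the pool's BO pointer array while binding each element to `bo` through the comma operator, so the loop body sees a plain anv_bo*. A standalone rendition with simplified types; the increment step is cut off in the match above and is an assumption here.

    struct anv_bo { int gem_handle = 0; };
    struct anv_block_pool { anv_bo *bos[8]; int nbos; };

    // (bo = *_pp_bo, true) assigns bo, then yields true, so only the
    // bounds test controls loop termination.
    #define block_pool_foreach_bo(bo, pool)                                \
        for (anv_bo **_pp_bo = (pool)->bos, *bo;                           \
             _pp_bo != &(pool)->bos[(pool)->nbos] && (bo = *_pp_bo, true); \
             ++_pp_bo)

    int count_bos(anv_block_pool *pool) {
        int n = 0;
        block_pool_foreach_bo(bo, pool) {
            (void)bo;   // the body receives each anv_bo* in turn
            ++n;
        }
        return n;
    }
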
  anv_batch_chain.c
    352   struct anv_batch_bo *bbo = vk_zalloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),    in anv_batch_bo_create()
    362   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->vk.pool->alloc);    in anv_batch_bo_create()
    373   vk_free(&cmd_buffer->vk.pool->alloc, bbo);    in anv_batch_bo_create()
    385   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->vk.pool->alloc, sizeof(*bbo),    in anv_batch_bo_clone()
    395   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->vk.pool->alloc,    in anv_batch_bo_clone()
    409   vk_free(&cmd_buffer->vk.pool->alloc, bbo);    in anv_batch_bo_clone()
    511   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->vk.pool->alloc);    in anv_batch_bo_destroy()
    513   vk_free(&cmd_buffer->vk.pool->alloc, bbo);    in anv_batch_bo_destroy()
    562   struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);    in anv_cmd_buffer_surface_base_address() local
    565   .bo = pool…    in anv_cmd_buffer_surface_base_address()
    1398  adjust_relocations_from_state_pool(struct anv_state_pool *pool, struct anv_reloc_list *relocs, uint32_t last_pool_center_bo_offset)    adjust_relocations_from_state_pool() argument
    1416  adjust_relocations_to_state_pool(struct anv_state_pool *pool, struct anv_bo *from_bo, struct anv_reloc_list *relocs, uint32_t last_pool_center_bo_offset)    adjust_relocations_to_state_pool() argument
    1776  struct anv_block_pool *pool;    in setup_execbuf_for_cmd_buffers() local
    …

/kernel/linux/linux-5.10/drivers/usb/host/
  xhci-dbgtty.c
    48    struct list_head *pool = &port->write_pool;    variable
    50    while (!list_empty(pool)) {
    51    req = list_entry(pool->next, struct dbc_request, list_pool);
    65    list_add(&req->list_pool, pool);
    82    struct list_head *pool = &port->read_pool;    variable
    84    while (!list_empty(pool)) {
    88    req = list_entry(pool->next, struct dbc_request, list_pool);
    97    list_add(&req->list_pool, pool);

/kernel/linux/linux-6.6/drivers/usb/host/
  xhci-dbgtty.c
    48    struct list_head *pool = &port->write_pool;    variable
    50    while (!list_empty(pool)) {
    51    req = list_entry(pool->next, struct dbc_request, list_pool);
    65    list_add(&req->list_pool, pool);
    82    struct list_head *pool = &port->read_pool;    variable
    84    while (!list_empty(pool)) {
    88    req = list_entry(pool->next, struct dbc_request, list_pool);
    97    list_add(&req->list_pool, pool);

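Both kernel trees show the same free-pool idiom: idle dbc_request objects are parked on a per-port list_head, popped with list_entry() on allocation, and pushed back with list_add() on completion. A C++ sketch of the same LIFO reuse, with std::list swapped in for the kernel's intrusive list:

    #include <list>

    struct dbc_request { int id = 0; };
    struct dbc_port { std::list<dbc_request*> read_pool, write_pool; };

    // Pop an idle request, or nullptr if the pool is exhausted.
    dbc_request* get_req(std::list<dbc_request*>& pool) {
        if (pool.empty())
            return nullptr;
        dbc_request* req = pool.front();   // pool->next in the kernel version
        pool.pop_front();
        return req;
    }

    // Completion path: park the request for reuse (LIFO, like list_add()).
    void put_req(std::list<dbc_request*>& pool, dbc_request* req) {
        pool.push_front(req);
    }
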
/third_party/vk-gl-cts/framework/delibs/depool/
  dePoolHash.h
    23    * \brief Memory pool hash class.
    45    * \brief Declare a template pool hash class interface.
    60    * Hash* Hash_create (deMemPool* pool);
    81    deMemPool* pool; \
    97    TYPENAME* TYPENAME##_create (deMemPool* pool); \
    188   * \brief Implement a template pool hash class.
    202   TYPENAME* TYPENAME##_create (deMemPool* pool) \
    205   DE_PTR_TYPE(TYPENAME) hash = DE_POOL_NEW(pool, TYPENAME); \
    210   hash->pool = pool; \
    …

  dePoolMultiSet.c
    21    * \brief Memory pool multiset class.
    31    deMemPool* pool = deMemPool_createRoot(DE_NULL, 0);    in dePoolMultiSet_selfTest() local
    32    deTestMultiSet* set = deTestMultiSet_create(pool);    in dePoolMultiSet_selfTest()
    145   deMemPool_destroy(pool);    in dePoolMultiSet_selfTest()

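The self-test shows the dePool lifetime model: containers are created out of a root pool and never freed individually; a single deMemPool_destroy() at the end releases everything. C++17's polymorphic allocators express the same idea; a rough analogue, not the dePool API:

    #include <memory_resource>
    #include <vector>

    int main() {
        // Root pool: every allocation below comes from this arena.
        std::pmr::monotonic_buffer_resource pool;
        std::pmr::vector<int> set{&pool};   // container bound to the pool
        set.push_back(42);
        return 0;
    }   // pool destructs last: all container memory is released at once
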
/kernel/liteos_a/kernel/base/include/
  los_memory_pri.h
    45    extern VOID OsMemUsedNodeShow(VOID *pool);
    51    extern VOID OsMemResetEndNode(VOID *pool, UINTPTR preAddr);
    56    /* memory expand size at least 1/8 of pool size if we can */

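The comment at line 56 states the expansion policy: grow by at least one eighth of the current pool size when possible. As a plausible reading only, an assumption rather than the LiteOS source, the computed grow size would look like:

    #include <algorithm>
    #include <cstddef>

    // Grow by the caller's request, but never by less than pool/8.
    std::size_t ExpandSize(std::size_t poolSize, std::size_t requested) {
        return std::max(requested, poolSize >> 3);
    }
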
/third_party/ffmpeg/libavutil/
  buffer_internal.h
    73    * data. They will be used to free the buffer when the pool is freed.
    78    AVBufferPool *pool;    member
    90    BufferPoolEntry *pool;    member
    93    * This is used to track when the pool is to be freed.
    94    * The pointer to the pool itself held by the caller is considered to
    96    * by one, returning the buffer to the pool decreases it by one.
    98    * buffers have been released, then it's safe to free the pool and all

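The comments around lines 93-98 spell out AVBufferPool's lifetime rule: the caller's pool pointer counts as one reference and every buffer handed out counts as one more, so the pool is freed only once the caller has released it and all buffers are back. A bare sketch of that counting scheme, not the FFmpeg implementation:

    #include <atomic>

    struct BufferPool {
        std::atomic<int> refcount{1};   // the caller's own reference
    };

    void pool_ref(BufferPool* p) {      // a buffer leaves the pool
        p->refcount.fetch_add(1, std::memory_order_relaxed);
    }

    void pool_unref(BufferPool* p) {    // buffer returned, or caller done
        if (p->refcount.fetch_sub(1, std::memory_order_acq_rel) == 1)
            delete p;                   // last reference: now safe to free
    }
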
/kernel/linux/linux-5.10/drivers/s390/scsi/
  zfcp_fsf.c
    94    if (likely(req->pool)) {    in zfcp_fsf_req_free()
    96    mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);    in zfcp_fsf_req_free()
    97    mempool_free(req, req->pool);    in zfcp_fsf_req_free()
    252   mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);    in zfcp_fsf_status_read_handler()
    305   mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);    in zfcp_fsf_status_read_handler()
    784   static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)    in zfcp_fsf_alloc() argument
    788   if (likely(pool))    in zfcp_fsf_alloc()
    789   req = mempool_alloc(pool, GFP_ATOMIC);    in zfcp_fsf_alloc()
    797   req->pool = pool;    in zfcp_fsf_alloc()
    801   zfcp_fsf_qtcb_alloc(mempool_t *pool)    zfcp_fsf_qtcb_alloc() argument
    817   zfcp_fsf_req_create(struct zfcp_qdio *qdio, u32 fsf_cmd, u8 sbtype, mempool_t *pool)    zfcp_fsf_req_create() argument
    1204  zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, struct zfcp_fsf_ct_els *ct, mempool_t *pool, unsigned int timeout)    zfcp_fsf_send_ct() argument
    …

/kernel/linux/linux-6.6/drivers/s390/scsi/
  zfcp_fsf.c
    94    if (likely(req->pool)) {    in zfcp_fsf_req_free()
    96    mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);    in zfcp_fsf_req_free()
    97    mempool_free(req, req->pool);    in zfcp_fsf_req_free()
    265   mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);    in zfcp_fsf_status_read_handler()
    324   mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);    in zfcp_fsf_status_read_handler()
    803   static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)    in zfcp_fsf_alloc() argument
    807   if (likely(pool))    in zfcp_fsf_alloc()
    808   req = mempool_alloc(pool, GFP_ATOMIC);    in zfcp_fsf_alloc()
    816   req->pool = pool;    in zfcp_fsf_alloc()
    820   zfcp_fsf_qtcb_alloc(mempool_t *pool)    zfcp_fsf_qtcb_alloc() argument
    836   zfcp_fsf_req_create(struct zfcp_qdio *qdio, u32 fsf_cmd, u8 sbtype, mempool_t *pool)    zfcp_fsf_req_create() argument
    1225  zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port, struct zfcp_fsf_ct_els *ct, mempool_t *pool, unsigned int timeout)    zfcp_fsf_send_ct() argument
    …

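In both kernel versions, zfcp_fsf_alloc() records the source mempool in the request itself (req->pool = pool), and zfcp_fsf_req_free() checks that field to decide where the memory goes back. A compilable sketch of the idiom with stand-in types; pool_alloc/pool_free are assumed placeholder helpers, not the kernel mempool API:

    struct mempool;                                 // opaque, stands in for mempool_t
    struct fsf_req { mempool* pool = nullptr; };

    // Assumed helpers standing in for mempool_alloc()/mempool_free().
    fsf_req* pool_alloc(mempool* p);
    void     pool_free(fsf_req* r, mempool* p);

    fsf_req* fsf_alloc(mempool* pool) {
        fsf_req* req = pool ? pool_alloc(pool) : new fsf_req{};
        if (req)
            req->pool = pool;          // remember the owner for the free path
        return req;
    }

    void fsf_free(fsf_req* req) {
        if (req->pool)
            pool_free(req, req->pool); // came from a mempool: return it there
        else
            delete req;                // plain allocation
    }
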
/kernel/linux/linux-5.10/sound/core/seq/
  seq_clientmgr.c
    89    return snd_seq_total_cells(client->pool) > 0;    in snd_seq_write_pool_allocated()
    231   client->pool = snd_seq_pool_new(poolsize);    in seq_create_client1()
    232   if (client->pool == NULL) {    in seq_create_client1()
    263   snd_seq_pool_delete(&client->pool);    in seq_create_client1()
    281   if (client->pool)    in seq_free_client1()
    282   snd_seq_pool_delete(&client->pool);    in seq_free_client1()
    920   /* Allocate a cell from client pool and enqueue it to queue:
    921    * if pool is empty and blocking is TRUE, sleep until a new cell is
    966   err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,    in snd_seq_client_enqueue_event()
    1012  * -EAGAIN no space in output pool
    …

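The comment at lines 920-921 names the two policies visible here: a blocking writer sleeps until a cell returns to the pool, while a non-blocking or atomic caller fails with -EAGAIN. A C++ sketch of the blocking side using standard primitives instead of the kernel's wait queues:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    template <typename Cell>
    class BlockingPool {
        std::mutex m_;
        std::condition_variable cv_;
        std::deque<Cell*> free_;
    public:
        // Blocking take: sleeps until put() supplies a cell.
        Cell* take() {
            std::unique_lock<std::mutex> lk(m_);
            cv_.wait(lk, [this] { return !free_.empty(); });
            Cell* cell = free_.front();
            free_.pop_front();
            return cell;
        }
        // Returning a cell wakes one sleeping writer.
        void put(Cell* cell) {
            {
                std::lock_guard<std::mutex> lk(m_);
                free_.push_back(cell);
            }
            cv_.notify_one();
        }
    };
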
/foundation/multimedia/camera_framework/services/deferred_processing_service/src/base/task_manager/
  task_registry.cpp
    47    std::shared_ptr<ITaskGroup> taskGroup = [delayTask, &name, func = std::move(func), serial, pool = threadPool_] () {    in RegisterTaskGroup()
    49    std::shared_ptr<ITaskGroup> ret = std::make_shared<DelayedTaskGroup>(name, std::move(func), pool);    in RegisterTaskGroup()
    52    std::shared_ptr<ITaskGroup> ret = std::make_shared<TaskGroup>(name, std::move(func), serial, pool);    in RegisterTaskGroup()

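Note the init-capture `pool = threadPool_` in the hit above: the lambda copies the shared_ptr into its closure, so the thread pool stays alive as long as any task group still references it. A minimal illustration, with a hypothetical ThreadPool type:

    #include <functional>
    #include <memory>

    struct ThreadPool { /* hypothetical worker-thread pool */ };

    std::function<void()> MakeTask(std::shared_ptr<ThreadPool> threadPool) {
        // Init-capture copies the shared_ptr into the closure: the pool
        // cannot be destroyed while this task object exists.
        return [pool = std::move(threadPool)] {
            // ... run work against 'pool' ...
        };
    }
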
/third_party/mesa3d/src/panfrost/lib/
  pan_scoreboard.h
    157   panfrost_add_job(struct pan_pool *pool,    in panfrost_add_job() argument
    240   panfrost_scoreboard_initialize_tiler(struct pan_pool *pool,    in panfrost_scoreboard_initialize_tiler() argument
    253   transfer = pan_pool_alloc_desc(pool, WRITE_VALUE_JOB);    in panfrost_scoreboard_initialize_tiler()

/third_party/skia/src/core/
  SkExecutor.cpp
    69    // An SkThreadPool is an executor that runs work on a fixed pool of OS threads.
    126   auto pool = (SkThreadPool*)ctx;    in Loop() local
    128   pool->fWorkAvailable.wait();    in Loop()
    129   } while (pool->do_work());    in Loop()

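Loop() above is the whole worker protocol: block on a "work available" semaphore that is bumped once per queued task, run one unit of work, and exit when do_work() reports shutdown. A C++20 sketch of that loop; the stub do_work() just signals shutdown so the example compiles standalone:

    #include <semaphore>

    struct ThreadPool {
        std::counting_semaphore<> fWorkAvailable{0};

        bool do_work() { return false; }   // stub: pop and run one task,
                                           // return false to shut down
        static void Loop(void* ctx) {
            auto pool = static_cast<ThreadPool*>(ctx);
            do {
                pool->fWorkAvailable.acquire();  // sleep until signaled
            } while (pool->do_work());
        }
    };
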
/third_party/skia/third_party/externals/swiftshader/src/WSI/
  WaylandSurfaceKHR.cpp
    77    struct wl_shm_pool *pool = wl_shm_create_pool(shm, fd, extent.height * stride);    in attachImage() local
    78    wlImage->buffer = wl_shm_pool_create_buffer(pool, 0, extent.width, extent.height, stride, WL_SHM_FORMAT_XRGB8888);    in attachImage()
    80    wl_shm_pool_destroy(pool);    in attachImage()

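The Wayland hit shows a lifetime detail worth calling out: the wl_shm_pool exists only to mint a wl_buffer, and it is destroyed two lines later because the buffer keeps the underlying shared memory alive on the compositor side. A sketch using the stock libwayland-client calls; it assumes shm is already bound and fd maps at least height * stride bytes:

    #include <wayland-client.h>

    wl_buffer* make_buffer(wl_shm* shm, int fd, int width, int height) {
        const int stride = width * 4;   // XRGB8888: 4 bytes per pixel
        wl_shm_pool* pool = wl_shm_create_pool(shm, fd, height * stride);
        wl_buffer* buf = wl_shm_pool_create_buffer(pool, 0, width, height,
                                                   stride, WL_SHM_FORMAT_XRGB8888);
        wl_shm_pool_destroy(pool);      // the buffer holds its own reference
        return buf;
    }
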
/third_party/skia/third_party/externals/swiftshader/tests/VulkanUnitTests/
  Device.hpp
    85    // CreateStorageBufferDescriptorPool creates a new descriptor pool that can
    94    // layout from pool.
    95    VkResult AllocateDescriptorSet(VkDescriptorPool pool,
    120   // CreateCommandPool creates a new command pool.
    127   VkResult AllocateCommandBuffer(VkCommandPool pool, VkCommandBuffer *out) const;
    130   void FreeCommandBuffer(VkCommandPool pool, VkCommandBuffer buffer);

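Device.hpp pairs pool creation with per-object allocation for both descriptor sets and command buffers. A sketch of the descriptor-set path using core Vulkan calls only; the pool sizes here are arbitrary, and SwiftShader's test wrapper differs:

    #include <vulkan/vulkan.h>

    VkResult AllocateStorageBufferSet(VkDevice device, VkDescriptorSetLayout layout,
                                      VkDescriptorPool* outPool, VkDescriptorSet* outSet) {
        // Pool sized for up to 16 storage-buffer descriptors in one set.
        VkDescriptorPoolSize size{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 16};
        VkDescriptorPoolCreateInfo poolInfo{VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO};
        poolInfo.maxSets = 1;
        poolInfo.poolSizeCount = 1;
        poolInfo.pPoolSizes = &size;
        VkResult res = vkCreateDescriptorPool(device, &poolInfo, nullptr, outPool);
        if (res != VK_SUCCESS)
            return res;

        // Carve one set out of the pool; it is freed when the pool is.
        VkDescriptorSetAllocateInfo allocInfo{VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO};
        allocInfo.descriptorPool = *outPool;
        allocInfo.descriptorSetCount = 1;
        allocInfo.pSetLayouts = &layout;
        return vkAllocateDescriptorSets(device, &allocInfo, outSet);
    }
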
/third_party/protobuf/php/src/Google/Protobuf/Internal/
  RepeatedField.php
    79    $pool = DescriptorPool::getGeneratedPool();    variable
    80    $desc = $pool->getDescriptorByClassName($klass);
    83    $desc = $pool->getDescriptorByClassName($klass);

  MapField.php
    84    $pool = DescriptorPool::getGeneratedPool();    variable
    85    $desc = $pool->getDescriptorByClassName($klass);
    88    $desc = $pool->getDescriptorByClassName($klass);

/third_party/protobuf/python/google/protobuf/internal/
  keywords_test.py
    46    self.pool = descriptor_pool.Default()
    52    des = self.pool.FindMessageTypeByName('google.protobuf.internal.class')
    59    des = self.pool.FindMessageTypeByName('google.protobuf.internal.class.try')

/third_party/protobuf/ruby/src/main/java/com/google/protobuf/jruby/
  RubyBuilder.java
    67    * descriptors and atomically register them into a pool in a way that allows for
    138   * Builder.finalize_to_pool(pool)
    141   * context to the given pool. The operation occurs atomically, and all
    152   RubyDescriptorPool pool = (RubyDescriptorPool) rbPool;    in finalizeToPool()
    156   pool.addToSymtab(context, (RubyDescriptor) defRb);    in finalizeToPool()
    158   pool.addToSymtab(context, (RubyEnumDescriptor) defRb);    in finalizeToPool()

/third_party/vk-gl-cts/external/vulkancts/modules/vulkan/ray_tracing/
  vktRayTracingBuildTests.cpp
    146   const BlasPool& pool);
    149   void initBottomAccelerationStructures (BlasPool& pool,
    280   const BlasPool& pool) -> TlasPtr
    283   const std::vector<BlasPtr>& blases = pool.structures();
    359   void RayTracingBuildTestInstance::initBottomAccelerationStructures (BlasPool& pool,    in initBottomAccelerationStructures() argument
    369   for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx) pool.add();    in initBottomAccelerationStructures()
    371   const std::vector<BlasPtr>& blases = pool.structures();    in initBottomAccelerationStructures()
    379   pool.batchCreateAdjust(vkd, device, allocator, maxBuffSize);    in initBottomAccelerationStructures()
    384   BlasPool pool {};    in verifyAllocationCount()
    393   for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx) pool…    in verifyAllocationCount()
    …

/third_party/vk-gl-cts/external/vulkancts/modules_no_buildgn/vulkan/ray_tracing/
  vktRayTracingBuildTests.cpp
    146   const BlasPool& pool);
    149   void initBottomAccelerationStructures (BlasPool& pool,
    280   const BlasPool& pool) -> TlasPtr
    283   const std::vector<BlasPtr>& blases = pool.structures();
    359   void RayTracingBuildTestInstance::initBottomAccelerationStructures (BlasPool& pool,    in initBottomAccelerationStructures() argument
    369   for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx) pool.add();    in initBottomAccelerationStructures()
    371   const std::vector<BlasPtr>& blases = pool.structures();    in initBottomAccelerationStructures()
    379   pool.batchCreateAdjust(vkd, device, allocator, maxBuffSize);    in initBottomAccelerationStructures()
    384   BlasPool pool {};    in verifyAllocationCount()
    393   for (size_t instanceNdx = 0; instanceNdx < m_data.instancesGroupCount; ++instanceNdx) pool…    in verifyAllocationCount()
    …