/kernel/linux/linux-6.6/drivers/net/ipa/ |
H A D | gsi_trans.c |
87 int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, in gsi_trans_pool_init() argument
100 /* By allocating a few extra entries in our pool (one less in gsi_trans_pool_init()
103 * ever worrying about straddling the end of the pool array. in gsi_trans_pool_init()
105 * we just allocate free entries from the beginning of the pool. in gsi_trans_pool_init()
113 pool->base = virt; in gsi_trans_pool_init()
115 pool->count = alloc_size / size; in gsi_trans_pool_init()
116 pool->free = 0; in gsi_trans_pool_init()
117 pool->max_alloc = max_alloc; in gsi_trans_pool_init()
118 pool->size = size; in gsi_trans_pool_init()
119 pool in gsi_trans_pool_init()
124 gsi_trans_pool_exit(struct gsi_trans_pool *pool) gsi_trans_pool_exit() argument
135 gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, size_t size, u32 count, u32 max_alloc) gsi_trans_pool_init_dma() argument
175 gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool) gsi_trans_pool_exit_dma() argument
184 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count) gsi_trans_pool_alloc_common() argument
203 gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count) gsi_trans_pool_alloc() argument
209 gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr) gsi_trans_pool_alloc_dma() argument
[all...] |
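The comment fragments at lines 100-105 describe the trick that keeps this allocator simple: the entry array is over-allocated by max_alloc - 1 slots, so a request for up to max_alloc contiguous entries never straddles the end of the array, and when the tail is too short the cursor just wraps to the start. A self-contained C sketch of that pattern follows, assuming callers recycle entries in order (as GSI transactions do); demo_pool and its fields are illustrative names, not the kernel's.

#include <stdlib.h>

struct demo_pool {
    void *base;       /* start of the entry array */
    size_t count;     /* total entries, including the spares */
    size_t free;      /* index of the next free entry */
    size_t size;      /* size of one entry */
    size_t max_alloc; /* largest allocation ever requested */
};

int demo_pool_init(struct demo_pool *pool, size_t size, size_t count,
                   size_t max_alloc)
{
    /* One less than max_alloc extra entries guarantees that a request
     * for up to max_alloc entries always fits contiguously. */
    size_t total = count + max_alloc - 1;

    pool->base = calloc(total, size);
    if (!pool->base)
        return -1;
    pool->count = total;
    pool->free = 0;
    pool->size = size;
    pool->max_alloc = max_alloc;
    return 0;
}

void *demo_pool_alloc(struct demo_pool *pool, size_t n)
{
    void *ptr;

    /* Entries are transient and reused in order, so wrapping the
     * cursor to the start is safe once earlier entries have aged out. */
    if (n > pool->count - pool->free)
        pool->free = 0; /* not enough room before the end: wrap */

    ptr = (char *)pool->base + pool->size * pool->free;
    pool->free += n;
    return ptr;
}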
/third_party/nghttp2/src/ |
H A D | memchunk.h |
75 Pool() : pool(nullptr), freelist(nullptr), poolsize(0), freelistsize(0) {} in Pool()
87 pool = new T{pool}; in get()
89 return pool; in get()
99 for (auto p = pool; p;) { in clear()
104 pool = nullptr; in clear()
108 T *pool;
115 Memchunks(Pool<Memchunk> *pool) in Memchunks()
116 : pool(pool), in Memchunks()
418 Pool<Memchunk> *pool; global() member
656 Pool<Memchunk> *pool; global() member
[all...] |
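In the snippet above, pool = new T{pool} threads each newly constructed chunk onto an intrusive all-chunks list headed by pool, which is what lets clear() free everything in one walk; recycled chunks are kept on the separate freelist. A minimal C rendering of the same two-list scheme, with illustrative names rather than nghttp2's C++ template:

#include <stdlib.h>

struct chunk {
    struct chunk *knext; /* links every allocation, for bulk teardown */
    struct chunk *next;  /* links recycled chunks on the freelist */
    char data[4096];
};

struct pool {
    struct chunk *pool;     /* all chunks ever allocated */
    struct chunk *freelist; /* chunks returned for reuse */
};

struct chunk *pool_get(struct pool *p)
{
    if (p->freelist) {          /* fast path: recycle a returned chunk */
        struct chunk *c = p->freelist;
        p->freelist = c->next;
        return c;
    }
    struct chunk *c = malloc(sizeof(*c));
    if (c) {
        c->knext = p->pool;     /* thread onto the all-chunks list */
        p->pool = c;
    }
    return c;
}

void pool_clear(struct pool *p)
{
    for (struct chunk *c = p->pool; c;) { /* mirrors clear()'s loop */
        struct chunk *next = c->knext;
        free(c);
        c = next;
    }
    p->pool = NULL;
    p->freelist = NULL;
}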
/foundation/communication/dhcp/services/dhcp_server/include/ |
H A D | dhcp_address_pool.h |
31 typedef uint32_t (*Distribute)(DhcpAddressPool *pool, uint8_t macAddr[DHCP_HWADDR_LENGTH]);
56 int InitAddressPool(DhcpAddressPool *pool, const char *ifname, PDhcpOptionList options);
57 void FreeAddressPool(DhcpAddressPool *pool);
60 int IsReservedIp(DhcpAddressPool *pool, uint32_t ipAddress);
66 int AddLease(DhcpAddressPool *pool, AddressBinding *lease);
67 AddressBinding *GetLease(DhcpAddressPool *pool, uint32_t ipAddress);
68 int UpdateLease(DhcpAddressPool *pool, AddressBinding *lease);
69 int RemoveLease(DhcpAddressPool *pool, AddressBinding *lease);
70 int LoadBindingRecoders(DhcpAddressPool *pool);
71 int SaveBindingRecoders(const DhcpAddressPool *pool, in
[all...] |
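Together these declarations outline the lease lifecycle: look a binding up by IP, update it if found, otherwise add a new one. A hypothetical caller is sketched below; the AddressBinding field names (ipAddress, chaddr) and the zero-on-success convention are assumptions for illustration, not taken from this header.

#include <stdint.h>
#include <string.h>
#include "dhcp_address_pool.h"

/* Hypothetical helper: renew an existing lease or create a fresh one. */
static int RenewOrCreateLease(DhcpAddressPool *pool, uint32_t ip,
                              const uint8_t mac[DHCP_HWADDR_LENGTH])
{
    AddressBinding *lease = GetLease(pool, ip);

    if (lease != NULL) {
        /* Binding already exists: refresh it in place. */
        return UpdateLease(pool, lease);
    }

    AddressBinding fresh;
    memset(&fresh, 0, sizeof(fresh));
    fresh.ipAddress = ip;                          /* assumed field name */
    memcpy(fresh.chaddr, mac, DHCP_HWADDR_LENGTH); /* assumed field name */
    return AddLease(pool, &fresh);
}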
/kernel/linux/linux-6.6/drivers/net/ethernet/engleder/ |
H A D | tsnep_xdp.c |
22 struct xsk_buff_pool *pool, u16 queue_id) in tsnep_xdp_enable_pool()
40 retval = xsk_pool_dma_map(pool, adapter->dmadev, in tsnep_xdp_enable_pool()
43 netdev_err(adapter->netdev, "failed to map XSK pool\n"); in tsnep_xdp_enable_pool()
48 retval = tsnep_enable_xsk(queue, pool); in tsnep_xdp_enable_pool()
50 xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); in tsnep_xdp_enable_pool()
60 struct xsk_buff_pool *pool; in tsnep_xdp_disable_pool() local
67 pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); in tsnep_xdp_disable_pool()
68 if (!pool) in tsnep_xdp_disable_pool()
75 xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); in tsnep_xdp_disable_pool()
81 struct xsk_buff_pool *pool, u16 queue_id) in tsnep_xdp_setup_pool()
21 tsnep_xdp_enable_pool(struct tsnep_adapter *adapter, struct xsk_buff_pool *pool, u16 queue_id) tsnep_xdp_enable_pool() argument
80 tsnep_xdp_setup_pool(struct tsnep_adapter *adapter, struct xsk_buff_pool *pool, u16 queue_id) tsnep_xdp_setup_pool() argument
[all...] |
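The enable path shown here is the usual map-then-enable-with-unwind sequence: DMA-map the XSK pool first, and if enabling the queue fails, unmap again before returning so a failed setup leaves no state behind. A condensed sketch, assuming the surrounding tsnep types and an adapter->queue[] lookup (illustrative; the real function also validates queue_id and XDP state):

static int enable_pool_sketch(struct tsnep_adapter *adapter,
                              struct xsk_buff_pool *pool, u16 queue_id)
{
    struct tsnep_queue *queue = &adapter->queue[queue_id]; /* assumed lookup */
    int retval;

    retval = xsk_pool_dma_map(pool, adapter->dmadev, DMA_ATTR_SKIP_CPU_SYNC);
    if (retval) {
        netdev_err(adapter->netdev, "failed to map XSK pool\n");
        return retval;
    }

    retval = tsnep_enable_xsk(queue, pool);
    if (retval) {
        /* Unwind the mapping so a failed enable leaves nothing behind. */
        xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
        return retval;
    }

    return 0;
}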
/kernel/linux/linux-5.10/tools/testing/selftests/drivers/net/mlxsw/ |
H A D | sharedbuffer_configuration.py |
16 objects, pool, tcbind and portpool. Provide an interface to get random
22 - random pool number
30 for pool in pools:
31 self._pools.append(pool)
47 def _get_th(self, pool):
50 if pool["thtype"] == "dynamic":
58 for pool in self._pools:
59 if pool["type"] == "ingress":
60 ing_pools.append(pool)
62 egr_pools.append(pool)
[all...] |
/kernel/linux/linux-5.10/net/rds/ |
H A D | ib_frmr.c |
56 struct rds_ib_mr_pool *pool; in rds_ib_alloc_frmr() local
62 pool = rds_ibdev->mr_8k_pool; in rds_ib_alloc_frmr()
64 pool = rds_ibdev->mr_1m_pool; in rds_ib_alloc_frmr()
66 ibmr = rds_ib_try_reuse_ibmr(pool); in rds_ib_alloc_frmr()
79 pool->max_pages); in rds_ib_alloc_frmr()
86 ibmr->pool = pool; in rds_ib_alloc_frmr()
87 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_alloc_frmr()
92 if (atomic_read(&pool->item_count) > pool in rds_ib_alloc_frmr()
108 struct rds_ib_mr_pool *pool = ibmr->pool; rds_ib_free_frmr() local
187 rds_ib_map_frmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr_pool *pool, struct rds_ib_mr *ibmr, struct scatterlist *sg, unsigned int sg_len) rds_ib_map_frmr() argument
439 struct rds_ib_mr_pool *pool = ibmr->pool; rds_ib_free_frmr_list() local
[all...] |
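Lines 62-64 show size-class dispatch: small mappings draw from a dedicated 8k MR pool, everything else from the 1m pool, so small and large registrations never compete for the same free list. A hedged sketch of that selection; the cutoff value here is illustrative (rds derives it from RDS_MR_8K_MSG_SIZE):

static struct rds_ib_mr_pool *pick_pool(struct rds_ib_device *rds_ibdev,
                                        unsigned int npages)
{
    if (npages <= 8)                 /* illustrative cutoff */
        return rds_ibdev->mr_8k_pool;
    return rds_ibdev->mr_1m_pool;    /* large mappings */
}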
/kernel/linux/linux-5.10/drivers/scsi/lpfc/ |
H A D | lpfc_mem.c |
45 #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
46 #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
47 #define LPFC_DEVICE_DATA_POOL_SIZE 64 /* max elements in device data pool */
48 #define LPFC_RRQ_POOL_SIZE 256 /* max elements in non-DMA pool */
49 #define LPFC_MBX_POOL_SIZE 256 /* max elements in MBX non-DMA pool */
88 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; in lpfc_mem_alloc() local
98 pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, in lpfc_mem_alloc()
101 if (!pool->elements) in lpfc_mem_alloc()
104 pool->max_count = 0; in lpfc_mem_alloc()
105 pool in lpfc_mem_alloc()
218 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; lpfc_mem_free() local
366 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; lpfc_mbuf_alloc() local
399 struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; __lpfc_mbuf_free() local
[all...] |
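The defines and lpfc_mem_alloc() snippets show a "safety pool": a small reserve of buffers pre-allocated at init so the I/O path can fall back on it when dma_pool_alloc() fails under memory pressure. A generic sketch of the alloc side; the struct layout and reserve size are illustrative, not lpfc's (lpfc also banks virt/phys pairs and refills the reserve on free):

#include <linux/dmapool.h>

#define SAFETY_POOL_SIZE 64

struct safety_pool {
    void **elements;   /* pre-allocated reserve buffers */
    int current_count; /* how many are banked right now */
};

static void *buf_alloc_sketch(struct safety_pool *pool, struct dma_pool *dma,
                              gfp_t flags, dma_addr_t *handle)
{
    void *buf = dma_pool_alloc(dma, flags, handle);

    if (!buf && pool->current_count > 0) {
        /* Memory pressure: dip into the pre-allocated reserve. */
        buf = pool->elements[--pool->current_count];
    }
    return buf;
}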
/kernel/linux/linux-6.6/net/rds/ |
H A D | ib_frmr.c |
56 struct rds_ib_mr_pool *pool; in rds_ib_alloc_frmr() local
62 pool = rds_ibdev->mr_8k_pool; in rds_ib_alloc_frmr()
64 pool = rds_ibdev->mr_1m_pool; in rds_ib_alloc_frmr()
66 ibmr = rds_ib_try_reuse_ibmr(pool); in rds_ib_alloc_frmr()
79 pool->max_pages); in rds_ib_alloc_frmr()
86 ibmr->pool = pool; in rds_ib_alloc_frmr()
87 if (pool->pool_type == RDS_IB_MR_8K_POOL) in rds_ib_alloc_frmr()
92 if (atomic_read(&pool->item_count) > pool in rds_ib_alloc_frmr()
108 struct rds_ib_mr_pool *pool = ibmr->pool; rds_ib_free_frmr() local
187 rds_ib_map_frmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr_pool *pool, struct rds_ib_mr *ibmr, struct scatterlist *sg, unsigned int sg_len) rds_ib_map_frmr() argument
439 struct rds_ib_mr_pool *pool = ibmr->pool; rds_ib_free_frmr_list() local
[all...] |
/kernel/linux/linux-6.6/tools/testing/selftests/drivers/net/mlxsw/ |
H A D | sharedbuffer_configuration.py |
16 objects, pool, tcbind and portpool. Provide an interface to get random
22 - random pool number
30 for pool in pools:
31 self._pools.append(pool)
47 def _get_th(self, pool):
50 if pool["thtype"] == "dynamic":
58 for pool in self._pools:
59 if pool["type"] == "ingress":
60 ing_pools.append(pool)
62 egr_pools.append(pool)
[all...] |
/kernel/linux/linux-6.6/include/net/page_pool/ |
H A D | types.h |
47 * struct page_pool_params - page pool parameters
95 * @cached: recycling placed page in the page pool cache
96 * @cache_full: page pool cache was full
98 * @ring_full: page released from page pool because the ptr ring was full
110 * struct page_pool_stats - combined page pool use statistics
114 * Wrapper struct for combining page pool stats with different storage
132 void (*disconnect)(void *pool);
185 struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
186 struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
193 void page_pool_unlink_napi(struct page_pool *pool);
200 page_pool_unlink_napi(struct page_pool *pool) page_pool_unlink_napi() argument
204 page_pool_destroy(struct page_pool *pool) page_pool_destroy() argument
208 page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), struct xdp_mem_info *mem) page_pool_use_xdp_mem() argument
214 page_pool_put_page_bulk(struct page_pool *pool, void **data, int count) page_pool_put_page_bulk() argument
[all...] |
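A hedged sketch of driver-side use of this API: create a pool sized for an RX ring, allocate one page, and recycle it. Parameter values are illustrative; page_pool_create() returns an ERR_PTR() on failure.

#include <net/page_pool/helpers.h>

static int rx_pool_demo(struct device *dev)
{
    struct page_pool_params pp_params = {
        .order     = 0,            /* single pages */
        .pool_size = 256,          /* roughly the RX ring size */
        .nid       = NUMA_NO_NODE,
        .dev       = dev,
        .dma_dir   = DMA_FROM_DEVICE,
    };
    struct page_pool *pool;
    struct page *page;

    pool = page_pool_create(&pp_params);
    if (IS_ERR(pool))
        return PTR_ERR(pool);

    page = page_pool_alloc_pages(pool, GFP_KERNEL);
    if (page)
        page_pool_put_full_page(pool, page, false); /* recycle */

    page_pool_destroy(pool);
    return 0;
}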
/kernel/linux/linux-5.10/drivers/infiniband/ulp/rtrs/ |
H A D | rtrs.c |
504 struct rtrs_rdma_dev_pd *pool) in rtrs_rdma_dev_pd_init()
506 WARN_ON(pool->ops && (!pool->ops->alloc ^ !pool->ops->free)); in rtrs_rdma_dev_pd_init()
507 INIT_LIST_HEAD(&pool->list); in rtrs_rdma_dev_pd_init()
508 mutex_init(&pool->mutex); in rtrs_rdma_dev_pd_init()
509 pool->pd_flags = pd_flags; in rtrs_rdma_dev_pd_init()
513 void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool) in rtrs_rdma_dev_pd_deinit() argument
515 mutex_destroy(&pool->mutex); in rtrs_rdma_dev_pd_deinit()
516 WARN_ON(!list_empty(&pool in rtrs_rdma_dev_pd_deinit()
503 rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags, struct rtrs_rdma_dev_pd *pool) rtrs_rdma_dev_pd_init() argument
522 struct rtrs_rdma_dev_pd *pool; dev_free() local
555 rtrs_ib_dev_find_or_add(struct ib_device *ib_dev, struct rtrs_rdma_dev_pd *pool) rtrs_ib_dev_find_or_add() argument
[all...] |
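The WARN_ON at line 506 enforces an all-or-nothing rule for the optional ops: !pool->ops->alloc ^ !pool->ops->free is true exactly when one callback is set and the other is NULL, since ! normalizes each pointer to 0 or 1 before the XOR. A standalone illustration (types and names are illustrative):

#include <assert.h>
#include <stddef.h>

struct pool_ops {
    void *(*alloc)(void);
    void (*free)(void *);
};

static void dummy_free(void *p) { (void)p; }

/* Returns 1 when the ops pair is inconsistent: one callback set,
 * the other NULL. */
static int ops_mismatched(const struct pool_ops *ops)
{
    return ops && (!ops->alloc ^ !ops->free);
}

int main(void)
{
    struct pool_ops none = {0};
    struct pool_ops half = { .free = dummy_free };

    assert(!ops_mismatched(NULL));  /* no ops at all: fine */
    assert(!ops_mismatched(&none)); /* both NULL: fine */
    assert(ops_mismatched(&half));  /* only one set: caught */
    return 0;
}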
/kernel/linux/linux-6.6/net/xdp/ |
H A D | xsk.c |
39 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool) in xsk_set_rx_need_wakeup() argument
41 if (pool->cached_need_wakeup & XDP_WAKEUP_RX) in xsk_set_rx_need_wakeup()
44 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_set_rx_need_wakeup()
45 pool->cached_need_wakeup |= XDP_WAKEUP_RX; in xsk_set_rx_need_wakeup()
49 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool) in xsk_set_tx_need_wakeup() argument
53 if (pool->cached_need_wakeup & XDP_WAKEUP_TX) in xsk_set_tx_need_wakeup()
57 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { in xsk_set_tx_need_wakeup()
62 pool->cached_need_wakeup |= XDP_WAKEUP_TX; in xsk_set_tx_need_wakeup()
66 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool) in xsk_clear_rx_need_wakeup() argument
68 if (!(pool in xsk_clear_rx_need_wakeup()
76 xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool) xsk_clear_tx_need_wakeup() argument
93 xsk_uses_need_wakeup(struct xsk_buff_pool *pool) xsk_uses_need_wakeup() argument
123 xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool, u16 queue_id) xsk_reg_pool_at_qid() argument
398 xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries) xsk_tx_completed() argument
404 xsk_tx_release(struct xsk_buff_pool *pool) xsk_tx_release() argument
418 xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) xsk_tx_peek_desc() argument
449 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries) xsk_tx_peek_release_fallback() argument
461 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts) xsk_tx_peek_release_desc_batch() argument
581 struct xsk_buff_pool *pool = xs->pool; xsk_build_skb_zerocopy() local
838 struct xsk_buff_pool *pool; __xsk_sendmsg() local
921 struct xsk_buff_pool *pool; xsk_poll() local
[all...] |
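The set/clear helpers above keep a private cached_need_wakeup copy so the flags word shared with userspace is only written when a bit actually changes, avoiding cross-core cache traffic on every call. A generic, self-contained sketch of that cached-flag pattern:

#include <stdint.h>

#define FLAG_RX 0x1u

struct shared_ring { uint32_t flags; };              /* shared with the peer */
struct flag_cache { struct shared_ring *fq; uint32_t cached; }; /* local copy */

static void set_rx_wakeup_sketch(struct flag_cache *p)
{
    if (p->cached & FLAG_RX)
        return;              /* already set: skip the shared write */
    p->fq->flags |= FLAG_RX; /* one cache-line bounce, first time only */
    p->cached |= FLAG_RX;
}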
/third_party/mesa3d/src/virtio/vulkan/ |
H A D | vn_descriptor_set.c |
271 /* descriptor pool commands */
288 struct vn_descriptor_pool *pool = in vn_CreateDescriptorPool() local
289 vk_zalloc(alloc, sizeof(*pool), VN_DEFAULT_ALIGN, in vn_CreateDescriptorPool()
291 if (!pool) in vn_CreateDescriptorPool()
294 vn_object_base_init(&pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL, in vn_CreateDescriptorPool()
297 pool->allocator = *alloc; in vn_CreateDescriptorPool()
300 * allocation must not fail due to a fragmented pool per spec. In this in vn_CreateDescriptorPool()
301 * case, set allocation can be asynchronous with pool resource tracking. in vn_CreateDescriptorPool()
303 pool->async_set_allocation = in vn_CreateDescriptorPool()
308 pool in vn_CreateDescriptorPool()
344 struct vn_descriptor_pool *pool = vn_DestroyDescriptorPool() local
369 vn_descriptor_pool_alloc_descriptors(struct vn_descriptor_pool *pool, const struct vn_descriptor_set_layout *layout, uint32_t last_binding_descriptor_count) vn_descriptor_pool_alloc_descriptors() argument
422 vn_descriptor_pool_free_descriptors(struct vn_descriptor_pool *pool, const struct vn_descriptor_set_layout *layout, uint32_t last_binding_descriptor_count) vn_descriptor_pool_free_descriptors() argument
446 vn_descriptor_pool_reset_descriptors(struct vn_descriptor_pool *pool) vn_descriptor_pool_reset_descriptors() argument
461 struct vn_descriptor_pool *pool = vn_ResetDescriptorPool() local
486 struct vn_descriptor_pool *pool = vn_AllocateDescriptorSets() local
605 struct vn_descriptor_pool *pool = vn_FreeDescriptorSets() local
[all...] |
/kernel/linux/linux-6.6/drivers/mtd/ubi/ |
H A D | fastmap-wl.c |
52 * @pool: fastmap pool description object
55 struct ubi_fm_pool *pool) in return_unused_pool_pebs()
60 for (i = pool->used; i < pool->size; i++) { in return_unused_pool_pebs()
61 e = ubi->lookuptbl[pool->pebs[i]]; in return_unused_pool_pebs()
103 * @is_wl_pool: whether UBI is filling wear leveling pool
108 * For wear leveling pool, UBI should also reserve free pebs for bad pebs
134 struct ubi_fm_pool *pool = &ubi->fm_pool; in ubi_refill_pools() local
141 return_unused_pool_pebs(ubi, pool); in ubi_refill_pools()
54 return_unused_pool_pebs(struct ubi_device *ubi, struct ubi_fm_pool *pool) return_unused_pool_pebs() argument
233 struct ubi_fm_pool *pool = &ubi->fm_pool; ubi_wl_get_peb() local
288 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; next_peb_for_wl() local
338 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; get_peb_for_wl() local
[all...] |
/kernel/linux/linux-5.10/include/linux/ |
H A D | zbud.h |
10 int (*evict)(struct zbud_pool *pool, unsigned long handle);
14 void zbud_destroy_pool(struct zbud_pool *pool);
15 int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
17 void zbud_free(struct zbud_pool *pool, unsigned long handle);
18 int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
19 void *zbud_map(struct zbud_pool *pool, unsigned long handle);
20 void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
21 u64 zbud_get_pool_size(struct zbud_pool *pool);
|
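A hedged usage sketch of the zbud API above (5.10-era signatures): create a pool, allocate a slot for compressed data, map the handle to copy data in, unmap promptly, then free. Passing NULL ops here simply leaves eviction unused; sizes and error handling are illustrative.

#include <linux/zbud.h>
#include <linux/string.h>
#include <linux/errno.h>

static int zbud_demo(void *src, size_t len)
{
    struct zbud_pool *pool;
    unsigned long handle;
    void *dst;

    pool = zbud_create_pool(GFP_KERNEL, NULL); /* no eviction ops */
    if (!pool)
        return -ENOMEM;

    if (zbud_alloc(pool, len, GFP_KERNEL, &handle)) {
        zbud_destroy_pool(pool);
        return -ENOMEM;
    }

    dst = zbud_map(pool, handle); /* handle -> usable address */
    memcpy(dst, src, len);
    zbud_unmap(pool, handle);     /* unmap promptly after use */

    zbud_free(pool, handle);
    zbud_destroy_pool(pool);
    return 0;
}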
/third_party/mesa3d/src/freedreno/vulkan/ |
H A D | tu_descriptor_set.c |
32 pool_base(struct tu_descriptor_pool *pool) in pool_base() argument
34 return pool->host_bo ?: pool->bo->map; in pool_base()
500 struct tu_descriptor_pool *pool, in tu_descriptor_set_create()
509 if (pool->host_memory_base) { in tu_descriptor_set_create()
510 if (pool->host_memory_end - pool->host_memory_ptr < mem_size) in tu_descriptor_set_create()
513 set = (struct tu_descriptor_set*)pool->host_memory_ptr; in tu_descriptor_set_create()
514 pool->host_memory_ptr += mem_size; in tu_descriptor_set_create()
531 set->pool in tu_descriptor_set_create()
499 tu_descriptor_set_create(struct tu_device *device, struct tu_descriptor_pool *pool, struct tu_descriptor_set_layout *layout, const uint32_t *variable_count, struct tu_descriptor_set **out_set) tu_descriptor_set_create() argument
618 tu_descriptor_set_destroy(struct tu_device *device, struct tu_descriptor_pool *pool, struct tu_descriptor_set *set, bool free_bo) tu_descriptor_set_destroy() argument
648 struct tu_descriptor_pool *pool; tu_CreateDescriptorPool() local
[all...] |
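Lines 509-514 show a bump allocator: when sets are never freed individually (the host_memory path, used when the pool is created without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, as the surrounding code suggests), sets are carved linearly out of one host block and reclaimed only by resetting the whole pool. A generic sketch:

#include <stddef.h>

struct bump_pool {
    char *base, *ptr, *end; /* one contiguous backing block */
};

static void *bump_alloc(struct bump_pool *p, size_t size)
{
    if ((size_t)(p->end - p->ptr) < size)
        return NULL; /* out of pool: caller reports OUT_OF_POOL_MEMORY */
    void *out = p->ptr;
    p->ptr += size;  /* no per-object free; reset rewinds the cursor */
    return out;
}

static void bump_reset(struct bump_pool *p)
{
    p->ptr = p->base; /* frees every set at once */
}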
/third_party/mesa3d/src/gallium/drivers/zink/ |
H A D | zink_descriptors_lazy.c |
48 VkDescriptorPool pool; member
372 VkDescriptorPool pool; in create_pool() local
379 VkResult result = VKSCR(CreateDescriptorPool)(screen->dev, &dpci, 0, &pool); in create_pool()
384 return pool; in create_pool()
391 check_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct hash_entry *he, struct zink_program *pg, in check_pool_alloc() argument
396 if (pool->set_idx == pool->sets_alloc) { in check_pool_alloc()
397 unsigned sets_to_alloc = MIN2(MIN2(MAX2(pool->sets_alloc * 10, 10), MAX_LAZY_DESCRIPTORS) - pool->sets_alloc, 100); in check_pool_alloc()
399 /* overflowed pool in check_pool_alloc()
415 struct zink_descriptor_pool *pool = rzalloc(bdd, struct zink_descriptor_pool); create_push_pool() local
430 check_push_pool_alloc(struct zink_context *ctx, struct zink_descriptor_pool *pool, struct zink_batch_descriptor_data_lazy *bdd, bool is_compute) check_push_pool_alloc() argument
459 struct zink_descriptor_pool *pool; get_descriptor_pool_lazy() local
478 get_descriptor_set_lazy(struct zink_descriptor_pool *pool) get_descriptor_set_lazy() argument
493 struct zink_descriptor_pool *pool = get_descriptor_pool_lazy(ctx, pg, type, bdd, pg->is_compute); populate_sets() local
564 struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[0], bdd, false); zink_descriptors_alloc_lazy_push() local
608 struct zink_descriptor_pool *pool = check_push_pool_alloc(ctx, bdd->push_pool[pg->is_compute], bdd, pg->is_compute); zink_descriptors_update_lazy() local
674 struct zink_descriptor_pool *pool = (void*)entry->data; zink_batch_descriptor_deinit_lazy() local
687 pool_destroy(struct zink_screen *screen, struct zink_descriptor_pool *pool) pool_destroy() argument
702 struct zink_descriptor_pool *pool = (void*)entry->data; zink_batch_descriptor_reset_lazy() local
717 struct zink_descriptor_pool *pool = util_dynarray_pop(&bdd->overflowed_pools, struct zink_descriptor_pool*); zink_batch_descriptor_reset_lazy() local
[all...] |
/kernel/linux/linux-6.6/drivers/tee/amdtee/ |
H A D | shm_pool.c |
11 static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, in pool_op_alloc() argument
41 static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) in pool_op_free() argument
49 static void pool_op_destroy_pool(struct tee_shm_pool *pool) in pool_op_destroy_pool() argument
51 kfree(pool); in pool_op_destroy_pool()
62 struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL); in amdtee_config_shm() local
64 if (!pool) in amdtee_config_shm()
67 pool->ops = &pool_ops; in amdtee_config_shm()
69 return pool; in amdtee_config_shm()
|
/third_party/mesa3d/src/gallium/drivers/d3d12/ |
H A D | d3d12_descriptor_pool.cpp |
45 struct d3d12_descriptor_pool *pool; member
199 struct d3d12_descriptor_pool *pool = CALLOC_STRUCT(d3d12_descriptor_pool); in d3d12_descriptor_pool_new() local
200 if (!pool) in d3d12_descriptor_pool_new()
203 pool->dev = screen->dev; in d3d12_descriptor_pool_new()
204 pool->type = type; in d3d12_descriptor_pool_new()
205 pool->num_descriptors = num_descriptors; in d3d12_descriptor_pool_new()
206 list_inithead(&pool->heaps); in d3d12_descriptor_pool_new()
208 return pool; in d3d12_descriptor_pool_new()
212 d3d12_descriptor_pool_free(struct d3d12_descriptor_pool *pool) in d3d12_descriptor_pool_free() argument
214 list_for_each_entry_safe(struct d3d12_descriptor_heap, heap, &pool in d3d12_descriptor_pool_free()
222 d3d12_descriptor_pool_alloc_handle(struct d3d12_descriptor_pool *pool, struct d3d12_descriptor_handle *handle) d3d12_descriptor_pool_alloc_handle() argument
[all...] |
/third_party/mesa3d/src/amd/vulkan/ |
H A D | radv_descriptor_set.c |
612 radv_descriptor_set_create(struct radv_device *device, struct radv_descriptor_pool *pool, in radv_descriptor_set_create() argument
616 if (pool->entry_count == pool->max_entry_count) in radv_descriptor_set_create()
633 if (pool->host_memory_base) { in radv_descriptor_set_create()
634 if (pool->host_memory_end - pool->host_memory_ptr < mem_size) in radv_descriptor_set_create()
637 set = (struct radv_descriptor_set *)pool->host_memory_ptr; in radv_descriptor_set_create()
638 pool->host_memory_ptr += mem_size; in radv_descriptor_set_create()
671 * resets via the pool. */ in radv_descriptor_set_create()
672 if (pool in radv_descriptor_set_create()
733 radv_descriptor_set_destroy(struct radv_device *device, struct radv_descriptor_pool *pool, struct radv_descriptor_set *set, bool free_bo) radv_descriptor_set_destroy() argument
756 radv_destroy_descriptor_pool(struct radv_device *device, const VkAllocationCallbacks *pAllocator, struct radv_descriptor_pool *pool) radv_destroy_descriptor_pool() argument
778 struct radv_descriptor_pool *pool; radv_CreateDescriptorPool() local
[all...] |
/kernel/liteos_m/testsuites/include/ |
H A D | los_dlinkmem.h |
51 * Memory pool information structure
54 void *pPoolAddr; /**< Starting address of a memory pool */
55 UINT32 uwPoolSize; /**< Memory pool size */
82 * @param pool [IN] Starting address of memory.
93 extern UINT32 LOS_DLnkInitMem(VOID *pool, UINT32 size);
110 * @param pool [IN] Starting address of memory.
121 extern void *LOS_DLnkAllocMem(VOID *pool, UINT32 size);
137 * @param pool [IN] Starting address of memory.
148 extern UINT32 LOS_DLnkFreeMem(VOID *pool, VOID *mem);
169 * @param pool [I
[all...] |
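A hedged usage sketch of the doubly-linked memory pool API above; the buffer size and treating a zero return from LOS_DLnkInitMem() as success are assumptions for illustration.

#include "los_dlinkmem.h"

#define DEMO_POOL_SIZE 0x4000
static UINT8 g_demoPool[DEMO_POOL_SIZE]; /* static backing memory */

VOID DemoDlnkMem(VOID)
{
    void *block = NULL;

    /* Turn the raw buffer into a managed pool (0 assumed to mean success). */
    if (LOS_DLnkInitMem(g_demoPool, DEMO_POOL_SIZE) != 0) {
        return;
    }

    block = LOS_DLnkAllocMem(g_demoPool, 64); /* one 64-byte block */
    if (block == NULL) {
        return;
    }

    /* ... use block ... */

    (void)LOS_DLnkFreeMem(g_demoPool, block);
}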
/third_party/node/deps/openssl/openssl/providers/implementations/rands/seeding/ |
H A D | rand_vxworks.c |
79 int ossl_rand_pool_add_additional_data(RAND_POOL *pool) in ossl_rand_pool_add_additional_data() argument
96 return ossl_rand_pool_add(pool, (unsigned char *)&data, sizeof(data), 0); in ossl_rand_pool_add_additional_data()
99 int ossl_pool_add_nonce_data(RAND_POOL *pool) in ossl_pool_add_nonce_data() argument
118 return ossl_rand_pool_add(pool, (unsigned char *)&data, sizeof(data), 0); in ossl_pool_add_nonce_data()
121 size_t ossl_pool_acquire_entropy(RAND_POOL *pool) in ossl_pool_acquire_entropy() argument
127 bytes_needed = ossl_rand_pool_bytes_needed(pool, 1 /*entropy_factor*/); in ossl_pool_acquire_entropy()
134 buffer = ossl_rand_pool_add_begin(pool, bytes_needed); in ossl_pool_acquire_entropy()
142 ossl_rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed); in ossl_pool_acquire_entropy()
159 return ossl_rand_pool_entropy_available(pool); in ossl_pool_acquire_entropy()
165 return ossl_rand_pool_entropy_available(pool); in ossl_pool_acquire_entropy()
[all...] |
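ossl_pool_acquire_entropy() follows OpenSSL's reserve/fill/commit pattern: ask how many bytes the pool still needs for the requested entropy, reserve that space with add_begin, fill it from the platform source, then commit it with add_end along with an entropy estimate (8 bits per byte for a fully trusted source). A sketch of that shape; read_hw_rng() is a hypothetical stand-in for the platform source, not an OpenSSL function:

/* Hypothetical platform source: returns 0 on success. */
extern int read_hw_rng(unsigned char *buf, size_t len);

size_t acquire_entropy_sketch(RAND_POOL *pool)
{
    size_t bytes_needed;
    unsigned char *buffer;

    bytes_needed = ossl_rand_pool_bytes_needed(pool, 1 /* entropy_factor */);
    if (bytes_needed == 0)
        return ossl_rand_pool_entropy_available(pool); /* already full */

    buffer = ossl_rand_pool_add_begin(pool, bytes_needed);
    if (buffer != NULL) {
        if (read_hw_rng(buffer, bytes_needed) == 0)
            ossl_rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed);
        else
            ossl_rand_pool_add_end(pool, 0, 0); /* commit nothing on failure */
    }

    return ossl_rand_pool_entropy_available(pool);
}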
/third_party/openssl/providers/implementations/rands/seeding/ |
H A D | rand_vxworks.c |
79 int ossl_rand_pool_add_additional_data(RAND_POOL *pool) in ossl_rand_pool_add_additional_data() argument
96 return ossl_rand_pool_add(pool, (unsigned char *)&data, sizeof(data), 0); in ossl_rand_pool_add_additional_data()
99 int ossl_pool_add_nonce_data(RAND_POOL *pool) in ossl_pool_add_nonce_data() argument
118 return ossl_rand_pool_add(pool, (unsigned char *)&data, sizeof(data), 0); in ossl_pool_add_nonce_data()
121 size_t ossl_pool_acquire_entropy(RAND_POOL *pool) in ossl_pool_acquire_entropy() argument
127 bytes_needed = ossl_rand_pool_bytes_needed(pool, 1 /*entropy_factor*/); in ossl_pool_acquire_entropy()
134 buffer = ossl_rand_pool_add_begin(pool, bytes_needed); in ossl_pool_acquire_entropy()
142 ossl_rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed); in ossl_pool_acquire_entropy()
159 return ossl_rand_pool_entropy_available(pool); in ossl_pool_acquire_entropy()
165 return ossl_rand_pool_entropy_available(pool); in ossl_pool_acquire_entropy()
[all...] |
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/libcxgb/ |
H A D | libcxgb_ppm.c |
121 struct cxgbi_ppm_pool *pool; in ppm_get_cpu_entries() local
125 if (!ppm->pool) in ppm_get_cpu_entries()
129 pool = per_cpu_ptr(ppm->pool, cpu); in ppm_get_cpu_entries()
130 spin_lock_bh(&pool->lock); in ppm_get_cpu_entries()
133 i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max, in ppm_get_cpu_entries()
134 pool->next, count, 0); in ppm_get_cpu_entries()
136 pool->next = 0; in ppm_get_cpu_entries()
137 spin_unlock_bh(&pool->lock); in ppm_get_cpu_entries()
141 pool in ppm_get_cpu_entries()
197 struct cxgbi_ppm_pool *pool; ppm_unmark_entries() local
395 struct cxgbi_ppm_pool *pool = NULL; cxgbi_ppm_init() local
[all...] |
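ppm_get_cpu_entries() combines three classic ingredients: a per-CPU sub-pool (per_cpu_ptr) to cut cross-CPU contention, a bitmap tracking used entries, and a rotating next cursor so each search resumes where the previous one stopped and wraps to 0 on failure. A self-contained sketch using the generic bitmap helpers; struct and function names are illustrative:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

struct cpu_pool_sketch {
    spinlock_t lock;
    unsigned long *bmap; /* one bit per pool entry */
    unsigned int max;    /* number of entries */
    unsigned int next;   /* where the next search starts */
};

static int pool_get_sketch(struct cpu_pool_sketch *pool, unsigned int count)
{
    unsigned long i;
    int ret = -1;

    spin_lock_bh(&pool->lock);
    /* Find `count` contiguous clear bits, starting at the cursor so
     * consecutive calls keep moving through the bitmap. */
    i = bitmap_find_next_zero_area(pool->bmap, pool->max, pool->next,
                                   count, 0);
    if (i + count <= pool->max) {
        bitmap_set(pool->bmap, i, count);
        pool->next = i + count;
        ret = (int)i;
    } else {
        pool->next = 0; /* wrap the cursor; the caller may retry once */
    }
    spin_unlock_bh(&pool->lock);

    return ret;
}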
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/libcxgb/ |
H A D | libcxgb_ppm.c |
121 struct cxgbi_ppm_pool *pool; in ppm_get_cpu_entries() local
125 if (!ppm->pool) in ppm_get_cpu_entries()
129 pool = per_cpu_ptr(ppm->pool, cpu); in ppm_get_cpu_entries()
130 spin_lock_bh(&pool->lock); in ppm_get_cpu_entries()
133 i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max, in ppm_get_cpu_entries()
134 pool->next, count, 0); in ppm_get_cpu_entries()
136 pool->next = 0; in ppm_get_cpu_entries()
137 spin_unlock_bh(&pool->lock); in ppm_get_cpu_entries()
141 pool in ppm_get_cpu_entries()
197 struct cxgbi_ppm_pool *pool; ppm_unmark_entries() local
395 struct cxgbi_ppm_pool *pool = NULL; cxgbi_ppm_init() local
[all...] |