/kernel/linux/linux-6.6/drivers/net/ethernet/freescale/fman/fman_muram.c
    13  struct gen_pool *pool;                                   [member]
    48  muram->pool = gen_pool_create(ilog2(64), -1);            [in fman_muram_init()]
    49  if (!muram->pool) {                                      [in fman_muram_init()]
    50  pr_err("%s(): MURAM pool create failed\n", __func__);    [in fman_muram_init()]
    60  ret = gen_pool_add_virt(muram->pool, (unsigned long)vaddr,   [in fman_muram_init()]
    63  pr_err("%s(): MURAM pool add failed\n", __func__);       [in fman_muram_init()]
    75  gen_pool_destroy(muram->pool);                           [in fman_muram_init()]
   109  vaddr = gen_pool_alloc(muram->pool, size);               [in fman_muram_alloc()]
   131  gen_pool_free(muram->pool, addr, size);                  [in fman_muram_free_mem()]
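The fman_muram.c hits trace the full kernel genalloc lifecycle: create a pool with a minimum allocation order, register the backing region, allocate and free against it, then destroy. A minimal sketch of that lifecycle follows; the function name and placeholder sizes are illustrative, not taken from the driver.

    /* Sketch of the genalloc pattern seen above. Kernel-style code;
     * vaddr/paddr/size are placeholders supplied by the caller. */
    #include <linux/genalloc.h>
    #include <linux/log2.h>
    #include <linux/errno.h>

    static int muram_style_init(void __iomem *vaddr, phys_addr_t paddr, size_t size)
    {
            /* smallest allocation unit is 64 bytes, no NUMA affinity (-1) */
            struct gen_pool *pool = gen_pool_create(ilog2(64), -1);
            unsigned long chunk;
            int ret;

            if (!pool)
                    return -ENOMEM;

            /* register the virtual/physical region the pool hands out */
            ret = gen_pool_add_virt(pool, (unsigned long)vaddr, paddr, size, -1);
            if (ret) {
                    gen_pool_destroy(pool);
                    return ret;
            }

            chunk = gen_pool_alloc(pool, 256);      /* carve 256 bytes */
            if (chunk)
                    gen_pool_free(pool, chunk, 256);

            gen_pool_destroy(pool);   /* all allocations must be freed first */
            return 0;
    }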
/kernel/linux/linux-6.6/drivers/soc/fsl/qbman/bman_test_api.c
    37  static struct bman_pool *pool;                           [variable]
   105  pool = bman_new_pool();                                  [in bman_test_api()]
   106  if (!pool) {                                             [in bman_test_api()]
   119  if (bman_release(pool, bufs_in + i, num)) {              [in bman_test_api()]
   132  tmp = bman_acquire(pool, bufs_out + i - num, num);       [in bman_test_api()]
   136  i = bman_acquire(pool, NULL, 1);                         [in bman_test_api()]
   145  bman_free_pool(pool);                                    [in bman_test_api()]
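The test above exercises a full BMan round trip: create a pool, release (seed) buffers into it, acquire them back, free the pool. A hedged sketch of that shape; the signatures follow the calls shown in the test, while NUM_BUFS and the fake buffer addresses are assumptions.

    /* Sketch of the BMan buffer-pool round trip from bman_test_api.c. */
    #include <soc/fsl/bman.h>
    #include <linux/errno.h>

    #define NUM_BUFS 8

    static int bman_round_trip(void)
    {
            struct bm_buffer bufs[NUM_BUFS];
            struct bman_pool *pool;
            int i, ret = 0;

            pool = bman_new_pool();
            if (!pool)
                    return -ENOMEM;

            for (i = 0; i < NUM_BUFS; i++)
                    bm_buffer_set64(&bufs[i], 0x1000 + i * 64);  /* fake addresses */

            if (bman_release(pool, bufs, NUM_BUFS)) {            /* seed the pool */
                    ret = -EIO;
                    goto out;
            }

            if (bman_acquire(pool, bufs, NUM_BUFS) != NUM_BUFS)  /* drain it again */
                    ret = -EIO;
    out:
            bman_free_pool(pool);
            return ret;
    }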
/third_party/mesa3d/src/virtio/vulkan/vn_feedback.h
    53  VkCommandPool pool;                                      [member]
    58  struct vn_feedback_pool *pool,
    63  vn_feedback_pool_fini(struct vn_feedback_pool *pool);
    66  vn_feedback_pool_alloc(struct vn_feedback_pool *pool,
    70  vn_feedback_pool_free(struct vn_feedback_pool *pool,
   118  struct vn_feedback_cmd_pool *pool,
   124  struct vn_feedback_cmd_pool *pool,
/third_party/protobuf/python/google/protobuf/symbol_database.py
    51  # The database's underlying descriptor pool can be queried, so it's not
    53  filename = db.pool.FindFileContainingSymbol('MyMessage')
    95  self.pool._AddDescriptor(message_descriptor)
   108  self.pool._AddEnumDescriptor(enum_descriptor)
   120  self.pool._AddServiceDescriptor(service_descriptor)
   130  self.pool._InternalAddFileDescriptor(file_descriptor)
   148  return self._classes[self.pool.FindMessageTypeByName(symbol)]
   178  file_desc = self.pool.FindFileByName(file_name)
   189  _DEFAULT = SymbolDatabase(pool=descriptor_pool.Default())
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
    21  struct otx2_pool *pool;                                  [local in otx2_qos_aura_pool_free()]
    23  if (!pfvf->qset.pool)                                    [in otx2_qos_aura_pool_free()]
    26  pool = &pfvf->qset.pool[pool_id];                        [in otx2_qos_aura_pool_free()]
    27  qmem_free(pfvf->dev, pool->stack);                       [in otx2_qos_aura_pool_free()]
    28  qmem_free(pfvf->dev, pool->fc_addr);                     [in otx2_qos_aura_pool_free()]
    29  pool->stack = NULL;                                      [in otx2_qos_aura_pool_free()]
    30  pool->fc_addr = NULL;                                    [in otx2_qos_aura_pool_free()]
    39  struct otx2_pool *pool;                                  [local in otx2_qos_sq_aura_pool_init()]
    57  pool ...                                                 [in otx2_qos_sq_aura_pool_init(); truncated]
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
   523  static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,   [argument]
   530  sz = SKB_DATA_ALIGN(pool->rbsize);                       [in otx2_alloc_pool_buf()]
   533  page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);   [in otx2_alloc_pool_buf()]
   541  static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,   [argument]
   546  if (pool->page_pool)                                     [in __otx2_alloc_rbuf()]
   547  return otx2_alloc_pool_buf(pfvf, pool, dma);             [in __otx2_alloc_rbuf()]
   549  buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);   [in __otx2_alloc_rbuf()]
   553  *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,   [in __otx2_alloc_rbuf()]
   563  int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,   [argument]
   569  ret = __otx2_alloc_rbuf(pfvf, pool, dma ...              [in otx2_alloc_rbuf(); truncated]
   927  struct otx2_pool *pool;                                  [local in otx2_sq_init()]
  1211  otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, u64 iova, int size)   [argument]
  1234  struct otx2_pool *pool;                                  [local in otx2_free_aura_ptr()]
  1265  struct otx2_pool *pool;                                  [local in otx2_aura_pool_free()]
  1286  struct otx2_pool *pool;                                  [local in otx2_aura_init()]
  1364  struct otx2_pool *pool;                                  [local in otx2_pool_init()]
  1432  struct otx2_pool *pool;                                  [local in otx2_sq_aura_pool_init()]
  1502  struct otx2_pool *pool;                                  [local in otx2_rq_aura_pool_init()]
    [all...]
/third_party/wpa_supplicant/wpa_supplicant-2.9/src/crypto/random.c
    50  static u32 pool[POOL_WORDS];                             [variable]
    94  w ^= pool[pool_pos];                                     [in random_mix_pool()]
    95  w ^= pool[(pool_pos + POOL_TAP1) & POOL_WORDS_MASK];     [in random_mix_pool()]
    96  w ^= pool[(pool_pos + POOL_TAP2) & POOL_WORDS_MASK];     [in random_mix_pool()]
    97  w ^= pool[(pool_pos + POOL_TAP3) & POOL_WORDS_MASK];     [in random_mix_pool()]
    98  w ^= pool[(pool_pos + POOL_TAP4) & POOL_WORDS_MASK];     [in random_mix_pool()]
    99  w ^= pool[(pool_pos + POOL_TAP5) & POOL_WORDS_MASK];     [in random_mix_pool()]
   100  pool[pool_pos] = (w >> 3) ^ twist[w & 7];                [in random_mix_pool()]
   112  /* First, add hash back to pool to make backtracking more difficult. */   [in random_extract()]
   113  hmac_sha1(dummy_key, sizeof(dummy_key), (const u8 *) pool,   [in random_extract()]
    [all...]
/third_party/wpa_supplicant/wpa_supplicant-2.9_standard/src/crypto/random.c
    50  static u32 pool[POOL_WORDS];                             [variable]
    94  w ^= pool[pool_pos];                                     [in random_mix_pool()]
    95  w ^= pool[(pool_pos + POOL_TAP1) & POOL_WORDS_MASK];     [in random_mix_pool()]
    96  w ^= pool[(pool_pos + POOL_TAP2) & POOL_WORDS_MASK];     [in random_mix_pool()]
    97  w ^= pool[(pool_pos + POOL_TAP3) & POOL_WORDS_MASK];     [in random_mix_pool()]
    98  w ^= pool[(pool_pos + POOL_TAP4) & POOL_WORDS_MASK];     [in random_mix_pool()]
    99  w ^= pool[(pool_pos + POOL_TAP5) & POOL_WORDS_MASK];     [in random_mix_pool()]
   100  pool[pool_pos] = (w >> 3) ^ twist[w & 7];                [in random_mix_pool()]
   112  /* First, add hash back to pool to make backtracking more difficult. */   [in random_extract()]
   113  hmac_sha1(stub_key, sizeof(stub_key), (const u8 *) pool,   [in random_extract()]
    [all...]
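Both wpa_supplicant copies carry the same mixing step: each input word is XORed with five taps spread around a ring of pool words, then folded back through a small twist table. A standalone reconstruction follows; the POOL_* constants and twist values are assumptions patterned on the classic Linux /dev/random mixer this code derives from, and the direction the position advances is not shown in the hits.

    /* Reconstruction of the random_mix_pool() tap/twist step. */
    #include <stdint.h>

    #define POOL_WORDS      32          /* assumed ring size */
    #define POOL_WORDS_MASK (POOL_WORDS - 1)
    #define POOL_TAP1       26          /* tap offsets: assumed values */
    #define POOL_TAP2       20
    #define POOL_TAP3       14
    #define POOL_TAP4       7
    #define POOL_TAP5       1

    static uint32_t pool[POOL_WORDS];
    static unsigned int pool_pos;

    static const uint32_t twist[8] = {  /* CRC32-derived twist table */
            0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
            0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278,
    };

    static void mix_in_word(uint32_t w)
    {
            /* XOR the new word with five taps spread across the ring... */
            w ^= pool[pool_pos];
            w ^= pool[(pool_pos + POOL_TAP1) & POOL_WORDS_MASK];
            w ^= pool[(pool_pos + POOL_TAP2) & POOL_WORDS_MASK];
            w ^= pool[(pool_pos + POOL_TAP3) & POOL_WORDS_MASK];
            w ^= pool[(pool_pos + POOL_TAP4) & POOL_WORDS_MASK];
            w ^= pool[(pool_pos + POOL_TAP5) & POOL_WORDS_MASK];
            /* ...then fold the low bits back in through the twist table
             * and move the position (direction assumed here). */
            pool[pool_pos] = (w >> 3) ^ twist[w & 7];
            pool_pos = (pool_pos + 1) & POOL_WORDS_MASK;
    }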
/kernel/linux/linux-5.10/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
    23  ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter, struct xsk_buff_pool *pool, u16 qid)   [argument]
    38  err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);   [in ixgbe_xsk_pool_enable()]
    57  xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);             [in ixgbe_xsk_pool_enable()]
    67  struct xsk_buff_pool *pool;                              [local in ixgbe_xsk_pool_disable()]
    70  pool = xsk_get_pool_from_qid(adapter->netdev, qid);      [in ixgbe_xsk_pool_disable()]
    71  if (!pool)                                               [in ixgbe_xsk_pool_disable()]
    81  xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);             [in ixgbe_xsk_pool_disable()]
    89  ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter, struct xsk_buff_pool *pool, u16 qid)   [argument]
    93  return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) ...   [in ixgbe_xsk_pool_setup(); truncated]
   385  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;         [local in ixgbe_xmit_zc()]
   453  struct xsk_buff_pool *pool = tx_ring->xsk_pool;          [local in ixgbe_clean_xdp_tx_irq()]
   540  struct xsk_buff_pool *pool = tx_ring->xsk_pool;          [local in ixgbe_xsk_clean_tx_ring()]
    [all...]
/kernel/linux/linux-6.6/drivers/misc/sram.c
    60  part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),   [in sram_add_pool()]
    62  if (IS_ERR(part->pool))                                  [in sram_add_pool()]
    63  return PTR_ERR(part->pool);                              [in sram_add_pool()]
    65  ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,   [in sram_add_pool()]
   119  if (block->pool) {                                       [in sram_add_partition()]
   158  if (part->pool &&                                        [in sram_free_partitions()]
   159  gen_pool_avail(part->pool) < gen_pool_size(part->pool))  [in sram_free_partitions()]
   160  dev_err(sram->dev, "removed pool while SRAM allocated\n");   [in sram_free_partitions()]
   222  block->pool ...                                          [in sram_reserve_regions(); truncated]
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
    23  ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter, struct xsk_buff_pool *pool, u16 qid)   [argument]
    38  err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);   [in ixgbe_xsk_pool_enable()]
    57  xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);             [in ixgbe_xsk_pool_enable()]
    67  struct xsk_buff_pool *pool;                              [local in ixgbe_xsk_pool_disable()]
    70  pool = xsk_get_pool_from_qid(adapter->netdev, qid);      [in ixgbe_xsk_pool_disable()]
    71  if (!pool)                                               [in ixgbe_xsk_pool_disable()]
    81  xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);             [in ixgbe_xsk_pool_disable()]
    89  ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter, struct xsk_buff_pool *pool, u16 qid)   [argument]
    93  return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) ...   [in ixgbe_xsk_pool_setup(); truncated]
   398  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;         [local in ixgbe_xmit_zc()]
   466  struct xsk_buff_pool *pool = tx_ring->xsk_pool;          [local in ixgbe_clean_xdp_tx_irq()]
   553  struct xsk_buff_pool *pool = tx_ring->xsk_pool;          [local in ixgbe_xsk_clean_tx_ring()]
    [all...]
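The identical structure in both kernel branches shows the standard AF_XDP driver split: one setup entry point that either DMA-maps an incoming pool or looks up and unmaps the one being removed. A hedged sketch of that dispatch; my_dev and its fields are hypothetical, and the queue-restart steps are elided.

    /* Sketch of the enable/disable split from ixgbe_xsk.c. The xsk_*
     * calls are the real AF_XDP pool helpers. */
    #include <net/xdp_sock_drv.h>
    #include <linux/errno.h>

    struct my_dev {                      /* hypothetical driver context */
            struct net_device *netdev;
            struct device *dma_dev;
    };

    static int my_xsk_pool_setup(struct my_dev *dev, struct xsk_buff_pool *pool,
                                 u16 qid)
    {
            if (pool) {
                    /* enable: map the UMEM so the NIC can DMA user frames */
                    int err = xsk_pool_dma_map(pool, dev->dma_dev, 0);
                    if (err)
                            return err;
                    /* ... restart queue qid in zero-copy mode ... */
                    return 0;
            }

            /* disable: find the pool bound to this queue and unmap it */
            pool = xsk_get_pool_from_qid(dev->netdev, qid);
            if (!pool)
                    return -EINVAL;
            xsk_pool_dma_unmap(pool, 0);
            /* ... restart queue qid in copy mode ... */
            return 0;
    }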
/kernel/linux/linux-5.10/net/sunrpc/svc_xprt.c
    48  * svc_pool->sp_lock protects most of the fields of that pool.
   410  struct svc_pool *pool;                                   [local in svc_xprt_do_enqueue()]
   426  pool = svc_pool_for_cpu(xprt->xpt_server, cpu);          [in svc_xprt_do_enqueue()]
   428  atomic_long_inc(&pool->sp_stats.packets);                [in svc_xprt_do_enqueue()]
   430  spin_lock_bh(&pool->sp_lock);                            [in svc_xprt_do_enqueue()]
   431  list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);      [in svc_xprt_do_enqueue()]
   432  pool->sp_stats.sockets_queued++;                         [in svc_xprt_do_enqueue()]
   433  spin_unlock_bh(&pool->sp_lock);                          [in svc_xprt_do_enqueue()]
   437  list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {   [in svc_xprt_do_enqueue()]
   440  atomic_long_inc(&pool ...                                [in svc_xprt_do_enqueue(); truncated]
   470  svc_xprt_dequeue(struct svc_pool *pool)                  [argument]
   554  struct svc_pool *pool;                                   [local in svc_wake_up()]
   689  struct svc_pool *pool = rqstp->rq_pool;                  [in rqst_should_sleep()]
   712  struct svc_pool *pool = rqstp->rq_pool;                  [in svc_get_next_xprt()]
  1079  struct svc_pool *pool;                                   [local in svc_dequeue_net()]
  1376  struct svc_pool *pool = p;                               [in svc_pool_stats_next()]
  1400  struct svc_pool *pool = p;                               [in svc_pool_stats_show()]
    [all...]
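The enqueue hits condense to a clear pattern: choose the pool for the current CPU, append the ready transport under that pool's spinlock, then scan the pool's threads for one to wake. A sketch reassembled from the lines above plus the surrounding 5.10 logic; the RQ_BUSY/wake_up_process tail is recalled from that source, not shown in the hits.

    /* Condensed sketch of sunrpc's per-CPU pool enqueue path. */
    #include <linux/sunrpc/svc.h>
    #include <linux/sunrpc/svc_xprt.h>

    static void enqueue_sketch(struct svc_xprt *xprt)
    {
            struct svc_pool *pool;
            struct svc_rqst *rqstp;
            int cpu = get_cpu();

            pool = svc_pool_for_cpu(xprt->xpt_server, cpu);

            /* publish the ready transport under the pool's lock */
            spin_lock_bh(&pool->sp_lock);
            list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
            pool->sp_stats.sockets_queued++;
            spin_unlock_bh(&pool->sp_lock);

            /* wake the first idle thread in this pool, if any */
            rcu_read_lock();
            list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
                    if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                            continue;       /* already busy, try the next */
                    wake_up_process(rqstp->rq_task);
                    break;
            }
            rcu_read_unlock();
            put_cpu();
    }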
/third_party/python/Objects/obmalloc.c
   930  * Enable by default since it allows larger pool sizes. Can be disabled
   982  # error "pool size must be equal to system page size"
   988  # error "arena size not an exact multiple of pool size"
  1004  block *freeblock;               /* pool's free list head */
  1005  struct pool_header *nextpool;   /* next pool of this size class */
  1006  struct pool_header *prevpool;   /* previous pool "" */
  1024  /* Pool-aligned pointer to the next pool to be carved off. */
  1044  * with at least one available pool, both members are used in the
  1060  /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
  1063  /* Return total number of blocks in pool o...             [truncated]
  1687  address_in_range(void *p, poolp pool)                    [argument]
  1770  address_in_range(void *p, poolp pool)                    [argument]
  1790  pymalloc_pool_extend(poolp pool, uint size)              [argument]
  1851  poolp pool = usable_arenas->freepools;                   [in allocate_from_new_pool()]
  1967  poolp pool = usedpools[size + size];                     [in pymalloc_alloc()]
  2032  insert_to_usedpool(poolp pool)                           [argument]
  2048  insert_to_freepool(poolp pool)                           [argument]
  2228  poolp pool = POOL_ADDR(p);                               [in pymalloc_free()]
  2304  poolp pool;                                              [local in pymalloc_realloc()]
    [all...]
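The #error guards and the POOL_ADDR comment encode pymalloc's core invariant: pools are page-sized and page-aligned, so the pool header for any block is recovered by masking the low bits off the block's address. A standalone illustration with an assumed 4 KiB pool size:

    /* Demonstrates pool-aligned address rounding as used by POOL_ADDR. */
    #include <stdint.h>
    #include <stdio.h>

    #define POOL_SIZE      4096u            /* assumed: equals the page size */
    #define POOL_SIZE_MASK (POOL_SIZE - 1)

    /* Round pointer p down to the pool-aligned address at or below it. */
    static void *pool_addr(void *p)
    {
            return (void *)((uintptr_t)p & ~(uintptr_t)POOL_SIZE_MASK);
    }

    int main(void)
    {
            char arena[2 * POOL_SIZE];
            void *block = arena + POOL_SIZE + 123;  /* somewhere inside the arena */

            /* prints the pool-aligned address containing block */
            printf("block %p lives in pool %p\n", block, pool_addr(block));
            return 0;
    }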
/third_party/mesa3d/src/gallium/drivers/r600/compute_memory_pool.h
    45  /** Start pointer in dwords relative in the pool bo. If an item
    51  * items against it. They are listed in the pool's unallocated list */
    54  struct compute_memory_pool* pool;                        [member]
    62  int64_t size_in_dw;            /**< Size of the pool in dwords */
    64  struct r600_resource *bo;      /**< The pool buffer object resource */
    67  uint32_t *shadow;              /**< host copy of the pool, used for growing the pool */
    69  uint32_t status;               /**< Status of the pool */
    71  /** Allocated memory items in the pool, they must be ordered by "start_in_dw" */
    75  * yet in the pool */
    [all...]
/third_party/node/deps/openssl/openssl/providers/implementations/rands/seeding/rand_cpu_x86.c
    38  size_t ossl_prov_acquire_entropy_from_cpu(RAND_POOL *pool)   [argument]
    43  bytes_needed = ossl_rand_pool_bytes_needed(pool, 1 /*entropy_factor*/);   [in ossl_prov_acquire_entropy_from_cpu()]
    45  buffer = ossl_rand_pool_add_begin(pool, bytes_needed);   [in ossl_prov_acquire_entropy_from_cpu()]
    49  ossl_rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed);   [in ossl_prov_acquire_entropy_from_cpu()]
    51  ossl_rand_pool_add_end(pool, 0, 0);                      [in ossl_prov_acquire_entropy_from_cpu()]
    56  return ossl_rand_pool_entropy_available(pool);           [in ossl_prov_acquire_entropy_from_cpu()]
/third_party/openssl/providers/implementations/rands/seeding/rand_cpu_x86.c
    38  size_t ossl_prov_acquire_entropy_from_cpu(RAND_POOL *pool)   [argument]
    43  bytes_needed = ossl_rand_pool_bytes_needed(pool, 1 /*entropy_factor*/);   [in ossl_prov_acquire_entropy_from_cpu()]
    45  buffer = ossl_rand_pool_add_begin(pool, bytes_needed);   [in ossl_prov_acquire_entropy_from_cpu()]
    49  ossl_rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed);   [in ossl_prov_acquire_entropy_from_cpu()]
    51  ossl_rand_pool_add_end(pool, 0, 0);                      [in ossl_prov_acquire_entropy_from_cpu()]
    56  return ossl_rand_pool_entropy_available(pool);           [in ossl_prov_acquire_entropy_from_cpu()]
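The two vendored copies are identical and show the whole RAND_POOL reservation protocol: ask how many bytes the pool still needs, reserve a buffer inside it, fill it, then commit either the full entropy estimate (8 bits per byte for a hardware source) or nothing. A sketch of that shape; fill_from_rdseed() is a hypothetical stand-in for the CPU-specific fill loop, and the internal header path is assumed.

    /* Sketch of the RAND_POOL reservation protocol from rand_cpu_x86.c. */
    #include "crypto/rand_pool.h"    /* assumed internal OpenSSL header */

    extern int fill_from_rdseed(unsigned char *buf, size_t len);  /* hypothetical */

    size_t acquire_entropy_sketch(RAND_POOL *pool)
    {
        size_t bytes_needed = ossl_rand_pool_bytes_needed(pool, 1 /*entropy_factor*/);
        unsigned char *buffer;

        if (bytes_needed > 0) {
            buffer = ossl_rand_pool_add_begin(pool, bytes_needed);
            if (buffer != NULL) {
                if (fill_from_rdseed(buffer, bytes_needed))
                    /* committed bytes carry full entropy: 8 bits per byte */
                    ossl_rand_pool_add_end(pool, bytes_needed, 8 * bytes_needed);
                else
                    ossl_rand_pool_add_end(pool, 0, 0);  /* abort the reservation */
            }
        }
        return ossl_rand_pool_entropy_available(pool);
    }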
/third_party/libwebsockets/lib/misc/threadpool/threadpool.c
    81  pthread_mutex_t lock;          /* protects all pool lists */
   203  struct lws_pool *pool = &tp->pool_list[n];               [in lws_threadpool_dump()]
   204  struct lws_threadpool_task *task = pool->task;           [in lws_threadpool_dump()]
   332  struct lws_pool *pool = &tp->pool_list[n];               [local in lws_threadpool_tsi_context()]
   334  task = pool->task;                                       [in lws_threadpool_tsi_context()]
   392  lws_threadpool_worker_sync(struct lws_pool *pool,        [argument]
   402  pthread_mutex_lock(&pool->lock);  /* ======================= pool lock */   [in lws_threadpool_worker_sync()]
   405  pool->tp->name, task, task->name, lws_wsi_tag(task_to_wsi(task)));   [in lws_threadpool_worker_sync()]
   421  "wsi to sync to\n", __func__, pool ...                   [in lws_threadpool_worker_sync(); truncated]
   494  struct lws_pool *pool = d;                               [local in lws_threadpool_worker()]
    [all...]
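Stripped of lws specifics, the locking discipline above is the classic worker-pool shape: one mutex owns the pool's task list, and each worker holds it only while taking or syncing a task, never while running one. A self-contained pthread sketch of that shape (not the lws API):

    /* Generic worker-pool skeleton: mutex-guarded task list, condition
     * variable for wakeups, tasks executed outside the lock. */
    #include <pthread.h>
    #include <stdlib.h>

    struct task {
            struct task *next;
            void (*run)(void *arg);
            void *arg;
    };

    struct pool {
            pthread_mutex_t lock;      /* protects all pool lists */
            pthread_cond_t  wake;
            struct task    *head;
            int             shutting_down;
    };

    static void *worker(void *d)
    {
            struct pool *p = d;

            for (;;) {
                    struct task *t;

                    pthread_mutex_lock(&p->lock);        /* ===== pool lock */
                    while (!p->head && !p->shutting_down)
                            pthread_cond_wait(&p->wake, &p->lock);
                    if (p->shutting_down && !p->head) {
                            pthread_mutex_unlock(&p->lock);
                            return NULL;
                    }
                    t = p->head;
                    p->head = t->next;
                    pthread_mutex_unlock(&p->lock);      /* ===== pool unlock */

                    t->run(t->arg);                      /* run outside the lock */
                    free(t);
            }
    }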
/kernel/linux/linux-6.6/drivers/infiniband/ulp/rtrs/rtrs.c
   559  rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags, struct rtrs_rdma_dev_pd *pool)   [argument]
   562  INIT_LIST_HEAD(&pool->list);                             [in rtrs_rdma_dev_pd_init()]
   563  mutex_init(&pool->mutex);                                [in rtrs_rdma_dev_pd_init()]
   564  pool->pd_flags = pd_flags;                               [in rtrs_rdma_dev_pd_init()]
   568  void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)   [argument]
   570  mutex_destroy(&pool->mutex);                             [in rtrs_rdma_dev_pd_deinit()]
   571  WARN_ON(!list_empty(&pool->list));                       [in rtrs_rdma_dev_pd_deinit()]
   577  struct rtrs_rdma_dev_pd *pool;                           [local in dev_free()]
   581  pool = dev->pool;                                        [in dev_free()]
   603  rtrs_ib_dev_find_or_add(struct ib_device *ib_dev, struct rtrs_rdma_dev_pd *pool)   [argument]
    [all...]
/third_party/mesa3d/src/gallium/drivers/asahi/agx_uniforms.c
    28  * current batch as necessary. Note anything uploaded via the batch's pool does
    29  * not require an update to the BO list, since the entire pool will be added
    42  return agx_pool_upload_aligned(&batch->pool,             [in agx_const_buffer_ptr()]
    58  struct agx_ptr ptr = agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint64_t), 8);   [in agx_push_location_direct()]
    71  struct agx_ptr ptr = agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint64_t), 8);   [in agx_push_location_direct()]
    89  return agx_pool_upload_aligned(&batch->pool, &ctx->blend_color,   [in agx_push_location_direct()]
    96  struct agx_ptr ptr = agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint16_t), 8);   [in agx_push_location_direct()]
   121  struct agx_pool *pool = &ctx->batch->pool;               [local in agx_push_location()]
   124  return agx_pool_upload(pool,                             [in agx_push_location()]
    [all...]
/third_party/skia/tests/CachedDataTest.cpp
    36  static SkCachedData* make_data(size_t size, SkDiscardableMemoryPool* pool) {   [in make_data()]
    37  if (pool) {                                              [in make_data()]
    38  SkDiscardableMemory* dm = pool->create(size);            [in make_data()]
    39  // the pool "can" return null, but it shouldn't in these controlled conditions   [in make_data()]
    48  test_locking(skiatest::Reporter* reporter, size_t size, SkDiscardableMemoryPool* pool)   [argument]
    50  SkCachedData* data = make_data(size, pool);              [in test_locking()]
    82  sk_sp<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Make(1000));   [in DEF_TEST()]
    88  SkCachedData* data = test_locking(reporter, size, useDiscardable ? pool.get() : nullptr);   [in DEF_TEST()]
    95  data = test_locking(reporter, size, useDiscardable ? pool.get() : nullptr);   [in DEF_TEST()]
/third_party/vk-gl-cts/framework/delibs/decpp/dePoolArray.cpp
    21  * \brief Array template backed by memory pool.
    34  MemPool pool;                                            [local in intArrayTest()]
    35  PoolArray<int> arr (&pool);                              [in intArrayTest()]
    36  PoolArray<deUint16> arr16 (&pool);                       [in intArrayTest()]
    43  pool.alloc(1);                                           [in intArrayTest()]
    90  PoolArray<int> arr2(&pool);                              [in intArrayTest()]
   108  MemPool pool;                                            [local in alignedIntArrayTest()]
   109  PoolArray<int, 16> arr (&pool);                          [in alignedIntArrayTest()]
   110  PoolArray<deUint16, 8> arr16 (&pool);                    [in alignedIntArrayTest()]
   117  pool ...                                                 [in alignedIntArrayTest(); truncated]
   240  MemPool pool;                                            [local in sideEffectTest()]
   272  MemPool pool;                                            [local in iteratorTest()]
    [all...]
/kernel/linux/linux-5.10/drivers/staging/media/ipu3/ipu3-css-pool.h
    27  * imgu_css_pool - circular buffer pool definition
    45  struct imgu_css_pool *pool);
    46  int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
    48  void imgu_css_pool_get(struct imgu_css_pool *pool);
    49  void imgu_css_pool_put(struct imgu_css_pool *pool);
    50  const struct imgu_css_map *imgu_css_pool_last(struct imgu_css_pool *pool,
/kernel/linux/linux-6.6/drivers/staging/media/ipu3/ipu3-css-pool.h
    28  * struct imgu_css_pool - circular buffer pool definition
    46  struct imgu_css_pool *pool);
    47  int imgu_css_pool_init(struct imgu_device *imgu, struct imgu_css_pool *pool,
    49  void imgu_css_pool_get(struct imgu_css_pool *pool);
    50  void imgu_css_pool_put(struct imgu_css_pool *pool);
    51  const struct imgu_css_map *imgu_css_pool_last(struct imgu_css_pool *pool,
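Both kernel branches expose the same small contract: a fixed ring of buffers where _get() claims the next slot, _put() releases it, and _last(pool, n) reaches back n frames. A generic userspace model of that contract, inferred from the prototypes and the "circular buffer pool" description rather than the driver's implementation (the real pool stores imgu_css_map DMA mappings):

    /* Generic model of a circular buffer pool with look-back. */
    #define POOL_ENTRIES 4

    struct ring_pool {
            void *entry[POOL_ENTRIES];
            int   last;              /* index of most recently claimed slot */
    };

    static void pool_get(struct ring_pool *p)
    {
            p->last = (p->last + 1) % POOL_ENTRIES;   /* claim the next slot */
    }

    static void pool_put(struct ring_pool *p)
    {
            p->last = (p->last + POOL_ENTRIES - 1) % POOL_ENTRIES;  /* undo */
    }

    /* n == 0 is the latest buffer, n == 1 the one before it, ... */
    static void *pool_last(struct ring_pool *p, unsigned int n)
    {
            return p->entry[(p->last + POOL_ENTRIES - n) % POOL_ENTRIES];
    }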
/third_party/mesa3d/src/gallium/drivers/r300/compiler/memory_pool.h
    29  * Provides a pool of memory that can quickly be allocated from, at the
    31  * Instead, the entire pool can be freed at once.
    45  void memory_pool_init(struct memory_pool * pool);
    46  void memory_pool_destroy(struct memory_pool * pool);
    47  void * memory_pool_malloc(struct memory_pool * pool, unsigned int bytes);
    58  * memory_pool_array_reserve(pool, type, Array, Size, Reserved, k);
    66  #define memory_pool_array_reserve(pool, type, array, size, reserved, num) do { \
    73  newarray = memory_pool_malloc((pool), newreserve * sizeof(type)); \
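The header documents the tradeoff directly: fast allocation, no per-allocation free, the entire pool released at once, plus a helper macro for growing pool-backed arrays. A usage sketch following the calling convention documented above; the int payload and the loop are illustrative, not from the r300 compiler.

    /* Usage shape of the r300 pool allocator and its array-reserve macro. */
    #include "memory_pool.h"

    void collect_values(void)
    {
            struct memory_pool pool;
            int *values = NULL;
            unsigned int count = 0, reserved = 0;

            memory_pool_init(&pool);

            for (int i = 0; i < 100; i++) {
                    /* ensure room for one more element; on growth the old
                     * storage is simply abandoned inside the pool */
                    memory_pool_array_reserve(&pool, int, values, count,
                                              reserved, 1);
                    values[count++] = i;
            }

            /* no per-allocation frees: the whole pool goes at once */
            memory_pool_destroy(&pool);
    }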
/third_party/protobuf/php/src/Google/Protobuf/Internal/EnumBuilderContext.php
    42  private $pool;                                           [variable]
    44  public function __construct($full_name, $klass, $pool)
    49  $this->pool = $pool;
    61  $this->pool->addEnumDescriptor($this->descriptor);