Lines Matching defs:pool
686 static void kdesc_fill_pool(struct knav_pool *pool)
691 region = pool->region;
692 pool->desc_size = region->desc_size;
693 for (i = 0; i < pool->num_desc; i++) {
694 int index = pool->region_offset + i;
698 dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
699 dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
701 knav_queue_push(pool->queue, dma_addr, dma_size, 0);
706 static void kdesc_empty_pool(struct knav_pool *pool)
713 if (!pool->queue)
717 dma = knav_queue_pop(pool->queue, &size);
720 desc = knav_pool_desc_dma_to_virt(pool, dma);
722 dev_dbg(pool->kdev->dev,
727 WARN_ON(i != pool->num_desc);
728 knav_queue_close(pool->queue);
735 struct knav_pool *pool = ph;
736 return pool->region->dma_start + (virt - pool->region->virt_start);
742 struct knav_pool *pool = ph;
743 return pool->region->virt_start + (dma - pool->region->dma_start);
748 * knav_pool_create() - Create a pool of descriptors
749 * @name: - name to give the pool handle
750 * @num_desc: - numbers of descriptors in the pool
754 * Returns a pool handle on success.
761 struct knav_pool *pool, *pi = NULL, *iter;
772 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
773 if (!pool) {
774 dev_err(kdev->dev, "out of memory allocating pool\n");
791 pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
792 if (IS_ERR(pool->queue)) {
794 "failed to open queue for pool(%s), error %ld\n",
795 name, PTR_ERR(pool->queue));
796 ret = PTR_ERR(pool->queue);
800 pool->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
801 pool->kdev = kdev;
802 pool->dev = kdev->dev;
807 dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
829 pool->region = region;
830 pool->num_desc = num_desc;
831 pool->region_offset = last_offset;
833 list_add_tail(&pool->list, &kdev->pools);
834 list_add_tail(&pool->region_inst, node);
836 dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
843 kdesc_fill_pool(pool);
844 return pool;
849 kfree(pool->name);
850 devm_kfree(kdev->dev, pool);
856 * knav_pool_destroy() - Free a pool of descriptors
857 * @ph: - pool handle
861 struct knav_pool *pool = ph;
863 if (!pool)
866 if (!pool->region)
869 kdesc_empty_pool(pool);
872 pool->region->used_desc -= pool->num_desc;
873 list_del(&pool->region_inst);
874 list_del(&pool->list);
877 kfree(pool->name);
878 devm_kfree(kdev->dev, pool);
884 * knav_pool_desc_get() - Get a descriptor from the pool
885 * @ph: - pool handle
887 * Returns descriptor from the pool.
891 struct knav_pool *pool = ph;
896 dma = knav_queue_pop(pool->queue, &size);
899 data = knav_pool_desc_dma_to_virt(pool, dma);
905 * knav_pool_desc_put() - return a descriptor to the pool
906 * @ph: - pool handle
911 struct knav_pool *pool = ph;
913 dma = knav_pool_desc_virt_to_dma(pool, desc);
914 knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
920 * @ph: - pool handle
931 struct knav_pool *pool = ph;
932 *dma = knav_pool_desc_virt_to_dma(pool, desc);
933 size = min(size, pool->region->desc_size);
936 dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);
947 * @ph: - pool handle
956 struct knav_pool *pool = ph;
960 desc_sz = min(dma_sz, pool->region->desc_size);
961 desc = knav_pool_desc_dma_to_virt(pool, dma);
962 dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
969 * knav_pool_count() - Get the number of descriptors in pool.
970 * @ph: - pool handle
971 * Returns number of elements in the pool.
975 struct knav_pool *pool = ph;
976 return knav_queue_get_count(pool->queue);
986 struct knav_pool *pool;
1028 pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
1029 if (!pool) {
1030 dev_err(kdev->dev, "out of memory allocating dummy pool\n");
1033 pool->num_desc = 0;
1034 pool->region_offset = region->num_desc;
1035 list_add(&pool->region_inst, &region->pools);
1350 struct knav_pool *pool, *tmp;
1357 list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
1358 knav_pool_destroy(pool);