/kernel/linux/linux-6.6/drivers/crypto/intel/keembay/

ocs-hcu.c
    436  * @dma_list: The OCS DMA list mapping the data to hash.
    443  ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev, const struct ocs_hcu_dma_list *dma_list, bool finalize)  [argument]
    444  const struct ocs_hcu_dma_list *dma_list,  in ocs_hcu_ll_dma_start()
    450  if (!dma_list)  in ocs_hcu_ll_dma_start()
    471  writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);  in ocs_hcu_ll_dma_start()
    492  struct ocs_hcu_dma_list *dma_list;  in ocs_hcu_dma_list_alloc()  [local]
    494  dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);  in ocs_hcu_dma_list_alloc()
    495  if (!dma_list)  in ocs_hcu_dma_list_alloc()
    499  dma_list->head = dma_alloc_coherent(hcu_dev->dev,  in ocs_hcu_dma_list_alloc()
    500  sizeof(*dma_list ...  in ocs_hcu_dma_list_alloc()
    512  ocs_hcu_dma_list_free(struct ocs_hcu_dev *hcu_dev, struct ocs_hcu_dma_list *dma_list)  [argument]
    526  ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev, struct ocs_hcu_dma_list *dma_list, dma_addr_t addr, u32 len)  [argument]
    607  ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev, struct ocs_hcu_hash_ctx *ctx, const struct ocs_hcu_dma_list *dma_list)  [argument]
    644  ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev, const struct ocs_hcu_hash_ctx *ctx, const struct ocs_hcu_dma_list *dma_list, u8 *dgst, size_t dgst_len)  [argument]
    777  ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo, const u8 *key, size_t key_len, const struct ocs_hcu_dma_list *dma_list, u8 *dgst, size_t dgst_len)  [argument]
    [more matches elided]

ocs-hcu.h
     77  struct ocs_hcu_dma_list *dma_list);
     80  struct ocs_hcu_dma_list *dma_list,
     87  const struct ocs_hcu_dma_list *dma_list);
     91  const struct ocs_hcu_dma_list *dma_list,
    103  const struct ocs_hcu_dma_list *dma_list,

keembay-ocs-hcu-core.c
     60  * @dma_list: OCS DMA linked list.
     70  * @sg_dma_nents: Number of sg entries mapped in dma_list.
     78  struct ocs_hcu_dma_list *dma_list;  [member]
    206  /* Free dma_list (if allocated). */  in kmb_ocs_hcu_dma_cleanup()
    207  if (rctx->dma_list) {  in kmb_ocs_hcu_dma_cleanup()
    208  ocs_hcu_dma_list_free(hcu_dev, rctx->dma_list);  in kmb_ocs_hcu_dma_cleanup()
    209  rctx->dma_list = NULL;  in kmb_ocs_hcu_dma_cleanup()
    289  rctx->dma_list = ocs_hcu_dma_list_alloc(rctx->hcu_dev, nents);  in kmb_ocs_dma_prepare()
    290  if (!rctx->dma_list) {  in kmb_ocs_dma_prepare()
    297  rc = ocs_hcu_dma_list_add_tail(rctx->hcu_dev, rctx->dma_list,  in kmb_ocs_dma_prepare()
    [more matches elided]
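The ocs_hcu_dma_list_alloc() snippets above show the two-level pattern this driver uses: a small kmalloc'd host-side handle wrapping a dma_alloc_coherent() descriptor array, whose bus address is later programmed into the engine (the writel() to OCS_HCU_DMA_NEXT_SRC_DESCR). A minimal sketch of that pattern, with hypothetical my_dma_* names and an assumed descriptor layout (the real OCS descriptor format is hardware-specific):

    #include <linux/slab.h>
    #include <linux/dma-mapping.h>

    struct my_dma_desc {                /* assumed layout, not the OCS one */
            u32 src_addr;
            u32 src_len;
            u32 nxt_desc;
    };

    struct my_dma_list {
            struct my_dma_desc *head;   /* CPU view of the descriptor array */
            dma_addr_t dma_addr;        /* device view, programmed via writel() */
            unsigned int max_nents;
    };

    static struct my_dma_list *my_dma_list_alloc(struct device *dev,
                                                 unsigned int max_nents)
    {
            struct my_dma_list *l;

            /* Host-side handle: ordinary kmalloc. */
            l = kmalloc(sizeof(*l), GFP_KERNEL);
            if (!l)
                    return NULL;

            /*
             * Descriptor array: coherent memory, so CPU writes become
             * visible to the DMA engine without explicit sync calls.
             */
            l->head = dma_alloc_coherent(dev, sizeof(*l->head) * max_nents,
                                         &l->dma_addr, GFP_KERNEL);
            if (!l->head) {
                    kfree(l);
                    return NULL;
            }
            l->max_nents = max_nents;
            return l;
    }

    static void my_dma_list_free(struct device *dev, struct my_dma_list *l)
    {
            if (!l)
                    return;
            dma_free_coherent(dev, sizeof(*l->head) * l->max_nents,
                              l->head, l->dma_addr);
            kfree(l);
    }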
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/

mthca_allocator.c
    201  u64 *dma_list = NULL;  in mthca_buf_alloc()  [local]
    222  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
    224  if (!dma_list)  in mthca_buf_alloc()
    228  dma_list[i] = t + i * (1 << shift);  in mthca_buf_alloc()
    234  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
    236  if (!dma_list)  in mthca_buf_alloc()
    255  dma_list[i] = t;  in mthca_buf_alloc()
    263  dma_list, shift ...  in mthca_buf_alloc()
    [more matches elided]

mthca_eq.c
    471  u64 *dma_list = NULL;  in mthca_create_eq()  [local]
    490  dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);  in mthca_create_eq()
    491  if (!dma_list)  in mthca_create_eq()
    505  dma_list[i] = t;  in mthca_create_eq()
    519  dma_list, PAGE_SHIFT, npages,  in mthca_create_eq()
    551  kfree(dma_list);  in mthca_create_eq()
    582  kfree(dma_list);  in mthca_create_eq()
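mthca_create_eq() fills a temporary u64 table with one bus address per page of coherent memory and hands it on (line 519 passes dma_list, PAGE_SHIFT and npages down), then kfree()s the table on both the success and error paths. A condensed sketch of that allocate-and-record loop, with a hypothetical helper name and simplified unwinding:

    #include <linux/slab.h>
    #include <linux/mm.h>
    #include <linux/dma-mapping.h>

    static u64 *alloc_page_table(struct device *dev, int npages,
                                 void **bufs, dma_addr_t *handles)
    {
            u64 *dma_list;
            int i;

            dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
            if (!dma_list)
                    return NULL;

            for (i = 0; i < npages; ++i) {
                    dma_addr_t t;

                    bufs[i] = dma_alloc_coherent(dev, PAGE_SIZE, &t,
                                                 GFP_KERNEL);
                    if (!bufs[i])
                            goto err_free;
                    handles[i] = t;
                    dma_list[i] = t;    /* bus address the device will use */
            }
            return dma_list;

    err_free:
            while (--i >= 0)
                    dma_free_coherent(dev, PAGE_SIZE, bufs[i], handles[i]);
            kfree(dma_list);
            return NULL;
    }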
/kernel/linux/linux-6.6/drivers/infiniband/hw/mthca/

mthca_allocator.c
    196  u64 *dma_list = NULL;  in mthca_buf_alloc()  [local]
    217  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
    219  if (!dma_list)  in mthca_buf_alloc()
    223  dma_list[i] = t + i * (1 << shift);  in mthca_buf_alloc()
    229  dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
    231  if (!dma_list)  in mthca_buf_alloc()
    250  dma_list[i] = t;  in mthca_buf_alloc()
    258  dma_list, shift ...  in mthca_buf_alloc()
    [more matches elided]

mthca_eq.c
    471  u64 *dma_list = NULL;  in mthca_create_eq()  [local]
    490  dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);  in mthca_create_eq()
    491  if (!dma_list)  in mthca_create_eq()
    505  dma_list[i] = t;  in mthca_create_eq()
    519  dma_list, PAGE_SHIFT, npages,  in mthca_create_eq()
    551  kfree(dma_list);  in mthca_create_eq()
    582  kfree(dma_list);  in mthca_create_eq()
/kernel/linux/linux-5.10/arch/powerpc/platforms/cell/spufs/

spu_utils.h
     38  struct dma_list_elem dma_list[15] __attribute__ ((aligned(8)));  [variable]
     92  dma_list[i].size = 16384;  in build_dma_list()
     93  dma_list[i].ea_low = ea_low;  in build_dma_list()

spu_save.c
     50  unsigned int list = (unsigned int)&dma_list[0];  in save_upper_240kb()
     51  unsigned int size = sizeof(dma_list);  in save_upper_240kb()

spu_restore.c
     50  unsigned int list = (unsigned int)&dma_list[0];  in restore_upper_240kb()
     51  unsigned int size = sizeof(dma_list);  in restore_upper_240kb()
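build_dma_list() programs 15 elements of 16384 bytes each, i.e. 240 KiB in total: exactly the upper portion of the SPU's 256 KiB local store that save_upper_240kb()/restore_upper_240kb() move using the list's address and sizeof(dma_list). (The (unsigned int) casts work because SPU local store addresses fit in 32 bits.) A sketch of that loop, with the two-field element layout assumed from the snippets above:

    /* Assumed from the matches: one size field, one low effective address. */
    struct dma_list_elem {
            unsigned int size;
            unsigned int ea_low;
    };

    static struct dma_list_elem dma_list[15] __attribute__((aligned(8)));

    static void build_dma_list(unsigned int ea_low_start)
    {
            unsigned int ea_low = ea_low_start;
            int i;

            for (i = 0; i < 15; i++) {
                    dma_list[i].size = 16384;       /* one 16 KiB transfer */
                    dma_list[i].ea_low = ea_low;    /* destination, low word */
                    ea_low += 16384;                /* next 16 KiB chunk */
            }
    }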
/kernel/linux/linux-6.6/arch/powerpc/platforms/cell/spufs/

spu_utils.h
     38  struct dma_list_elem dma_list[15] __attribute__ ((aligned(8)));  [variable]
     92  dma_list[i].size = 16384;  in build_dma_list()
     93  dma_list[i].ea_low = ea_low;  in build_dma_list()

spu_save.c
     50  unsigned int list = (unsigned int)&dma_list[0];  in save_upper_240kb()
     51  unsigned int size = sizeof(dma_list);  in save_upper_240kb()

spu_restore.c
     50  unsigned int list = (unsigned int)&dma_list[0];  in restore_upper_240kb()
     51  unsigned int size = sizeof(dma_list);  in restore_upper_240kb()
/kernel/linux/linux-5.10/drivers/misc/genwqe/

card_utils.c
    229  static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,  in genwqe_unmap_pages()  [argument]
    235  for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {  in genwqe_unmap_pages()
    236  pci_unmap_page(pci_dev, dma_list[i],  in genwqe_unmap_pages()
    238  dma_list[i] = 0x0;  in genwqe_unmap_pages()
    242  genwqe_map_pages(struct genwqe_dev *cd, struct page **page_list, int num_pages, dma_addr_t *dma_list)  [argument]
    244  dma_addr_t *dma_list)  in genwqe_map_pages()
    253  dma_list[i] = 0x0;  in genwqe_map_pages()
    266  dma_list[i] = daddr;  in genwqe_map_pages()
    271  genwqe_unmap_pages(cd, dma_list, num_pages);  in genwqe_map_pages()
    374  genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, dma_addr_t *dma_list)  [argument]
    375  dma_addr_t *dma_list)  in genwqe_setup_sgl()
    410  daddr = dma_list[ ...  in genwqe_setup_sgl()
    [more matches elided]

card_base.h
    171  dma_addr_t *dma_list; /* list of dma addresses per page */  [member]
    372  dma_addr_t *dma_list);
/kernel/linux/linux-6.6/drivers/misc/genwqe/

card_utils.c
    229  static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,  in genwqe_unmap_pages()  [argument]
    235  for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {  in genwqe_unmap_pages()
    236  dma_unmap_page(&pci_dev->dev, dma_list[i], PAGE_SIZE,  in genwqe_unmap_pages()
    238  dma_list[i] = 0x0;  in genwqe_unmap_pages()
    242  genwqe_map_pages(struct genwqe_dev *cd, struct page **page_list, int num_pages, dma_addr_t *dma_list)  [argument]
    244  dma_addr_t *dma_list)  in genwqe_map_pages()
    253  dma_list[i] = 0x0;  in genwqe_map_pages()
    266  dma_list[i] = daddr;  in genwqe_map_pages()
    271  genwqe_unmap_pages(cd, dma_list, num_pages);  in genwqe_map_pages()
    374  genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl, dma_addr_t *dma_list)  [argument]
    375  dma_addr_t *dma_list)  in genwqe_setup_sgl()
    410  daddr = dma_list[ ...  in genwqe_setup_sgl()
    [more matches elided]
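Both trees use the same map/unmap discipline: every slot is pre-zeroed, genwqe_unmap_pages() stops at the first zero entry, and a mapping failure midway unwinds by running the unmap helper over the partially filled table. Only the API differs: pci_unmap_page() in 5.10 versus the generic dma_unmap_page() in 6.6. A sketch with hypothetical helper names, using the generic DMA API:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    static void my_unmap_pages(struct device *dev, dma_addr_t *dma_list,
                               int num_pages)
    {
            int i;

            /* Stop at the first zero slot: everything past it was never mapped. */
            for (i = 0; i < num_pages && dma_list[i] != 0x0; i++) {
                    dma_unmap_page(dev, dma_list[i], PAGE_SIZE,
                                   DMA_BIDIRECTIONAL);
                    dma_list[i] = 0x0;      /* re-arm the sentinel */
            }
    }

    static int my_map_pages(struct device *dev, struct page **page_list,
                            int num_pages, dma_addr_t *dma_list)
    {
            int i;

            for (i = 0; i < num_pages; i++)
                    dma_list[i] = 0x0;      /* sentinel: "not mapped" */

            for (i = 0; i < num_pages; i++) {
                    dma_addr_t daddr = dma_map_page(dev, page_list[i], 0,
                                                    PAGE_SIZE,
                                                    DMA_BIDIRECTIONAL);
                    if (dma_mapping_error(dev, daddr))
                            goto err;
                    dma_list[i] = daddr;
            }
            return 0;

    err:
            /* Unwind the slots mapped so far; zero slots are skipped. */
            my_unmap_pages(dev, dma_list, num_pages);
            return -EIO;
    }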
/kernel/linux/linux-5.10/drivers/infiniband/core/

umem_odp.c
     85  umem_odp->dma_list = kvcalloc(  in ib_init_umem_odp()
     86  ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);  in ib_init_umem_odp()
     87  if (!umem_odp->dma_list) {  in ib_init_umem_odp()
    102  kvfree(umem_odp->dma_list);  in ib_init_umem_odp()
    280  kvfree(umem_odp->dma_list);  in ib_umem_odp_release()
    309  dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];  in ib_umem_odp_map_dma_single_page()
    336  * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
    433  WARN_ON(umem_odp->dma_list[dma_index]);
    492  dma = umem_odp->dma_list[idx];  in ib_umem_odp_unmap_dma_pages()
    516  umem_odp->dma_list[idx ...  in ib_umem_odp_unmap_dma_pages()
    [more matches elided]
/kernel/linux/linux-6.6/drivers/infiniband/core/

umem_odp.c
     83  umem_odp->dma_list = kvcalloc(  in ib_init_umem_odp()
     84  ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);  in ib_init_umem_odp()
     85  if (!umem_odp->dma_list) {  in ib_init_umem_odp()
    100  kvfree(umem_odp->dma_list);  in ib_init_umem_odp()
    277  kvfree(umem_odp->dma_list);  in ib_umem_odp_release()
    303  dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];  in ib_umem_odp_map_dma_single_page()
    330  * The DMA addresses of the mapped pages is updated in umem_odp->dma_list.
    427  WARN_ON(umem_odp->dma_list[dma_index]);
    486  dma = umem_odp->dma_list[idx];  in ib_umem_odp_unmap_dma_pages()
    510  umem_odp->dma_list[idx ...  in ib_umem_odp_unmap_dma_pages()
    [more matches elided]
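ib_init_umem_odp() sizes dma_list at one dma_addr_t per page in the ODP range and allocates it with kvcalloc(), so large registrations can fall back to vmalloc; the zero-fill is also what lets WARN_ON(umem_odp->dma_list[dma_index]) flag a double mapping. A sketch of the allocate/release pair around a hypothetical struct:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_odp {
            dma_addr_t *dma_list;   /* one entry per page, 0 = unmapped */
            size_t ndmas;
    };

    static int my_odp_init(struct my_odp *odp, unsigned long start,
                           unsigned long end, unsigned int page_shift)
    {
            odp->ndmas = (end - start) >> page_shift;

            /* kvcalloc zero-fills, so a nonzero slot always means "mapped". */
            odp->dma_list = kvcalloc(odp->ndmas, sizeof(*odp->dma_list),
                                     GFP_KERNEL);
            if (!odp->dma_list)
                    return -ENOMEM;
            return 0;
    }

    static void my_odp_release(struct my_odp *odp)
    {
            kvfree(odp->dma_list);
            odp->dma_list = NULL;
    }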
/kernel/linux/linux-5.10/drivers/vme/

vme.c
    944  struct vme_dma_list *dma_list;  in vme_new_dma_list()  [local]
    951  dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);  in vme_new_dma_list()
    952  if (!dma_list)  in vme_new_dma_list()
    955  INIT_LIST_HEAD(&dma_list->entries);  in vme_new_dma_list()
    956  dma_list->parent = list_entry(resource->entry,  in vme_new_dma_list()
    959  mutex_init(&dma_list->mtx);  in vme_new_dma_list()
    961  return dma_list;  in vme_new_dma_list()
/kernel/linux/linux-6.6/drivers/staging/vme_user/

vme.c
    931  struct vme_dma_list *dma_list;  in vme_new_dma_list()  [local]
    938  dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);  in vme_new_dma_list()
    939  if (!dma_list)  in vme_new_dma_list()
    942  INIT_LIST_HEAD(&dma_list->entries);  in vme_new_dma_list()
    943  dma_list->parent = list_entry(resource->entry,  in vme_new_dma_list()
    946  mutex_init(&dma_list->mtx);  in vme_new_dma_list()
    948  return dma_list;  in vme_new_dma_list()
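vme_new_dma_list() is a plain constructor: kmalloc the container, initialize its list head and mutex, record the parent bridge resource, and hand it back. A sketch with hypothetical names (the parent back-pointer is elided here):

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct my_dma_list {
            struct list_head entries;   /* DMA operations queued on this list */
            struct mutex mtx;           /* serializes list build/execute */
    };

    static struct my_dma_list *my_new_dma_list(void)
    {
            struct my_dma_list *dma_list;

            dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
            if (!dma_list)
                    return NULL;

            INIT_LIST_HEAD(&dma_list->entries); /* empty, ready for list_add_tail() */
            mutex_init(&dma_list->mtx);

            return dma_list;
    }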
/kernel/linux/linux-6.6/drivers/vfio/

vfio_iommu_type1.c
      67  struct rb_root dma_list;  [member]
     168  struct rb_node *node = iommu->dma_list.rb_node;  in vfio_find_dma()
     188  struct rb_node *node = iommu->dma_list.rb_node;  in vfio_find_dma_first_node()
     211  struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;  in vfio_link_dma()
     225  rb_insert_color(&new->node, &iommu->dma_list);  in vfio_link_dma()
     230  rb_erase(&old->node, &iommu->dma_list);  in vfio_unlink_dma()
     277  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iommu_populate_bitmap_full()
     288  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_alloc_all()
     313  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_free_all()
    1217  for (n = rb_first(&iommu->dma_list);  in vfio_iova_dirty_bitmap()
    [more matches elided]
/kernel/linux/linux-5.10/drivers/vfio/

vfio_iommu_type1.c
      70  struct rb_root dma_list;  [member]
     168  struct rb_node *node = iommu->dma_list.rb_node;  in vfio_find_dma()
     186  struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;  in vfio_link_dma()
     200  rb_insert_color(&new->node, &iommu->dma_list);  in vfio_link_dma()
     205  rb_erase(&old->node, &iommu->dma_list);  in vfio_unlink_dma()
     252  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iommu_populate_bitmap_full()
     263  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_alloc_all()
     288  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_dma_bitmap_free_all()
    1123  for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {  in vfio_iova_dirty_bitmap()
    1517  n = rb_first(&iommu->dma_list);  in vfio_iommu_replay()
    [more matches elided]
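In both trees, dma_list here is not a linked list but an rb-tree root keyed by IOVA range: vfio_link_dma()/vfio_unlink_dma() insert and erase nodes, and vfio_find_dma() descends the tree comparing the query range [start, start + size) against each node's range. A sketch of that overlap lookup, with a hypothetical node type standing in for struct vfio_dma:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct my_dma {
            struct rb_node node;
            dma_addr_t iova;
            size_t size;
    };

    static struct my_dma *my_find_dma(struct rb_root *dma_list,
                                      dma_addr_t start, size_t size)
    {
            struct rb_node *node = dma_list->rb_node;

            while (node) {
                    struct my_dma *dma = rb_entry(node, struct my_dma, node);

                    if (start + size <= dma->iova)
                            node = node->rb_left;   /* query ends before node */
                    else if (start >= dma->iova + dma->size)
                            node = node->rb_right;  /* query starts after node */
                    else
                            return dma;             /* ranges overlap */
            }
            return NULL;
    }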
/kernel/linux/linux-5.10/drivers/block/rsxx/

dma.c
    672  struct list_head dma_list[RSXX_MAX_TARGETS];  in rsxx_dma_queue_bio()  [local]
    690  INIT_LIST_HEAD(&dma_list[i]);  in rsxx_dma_queue_bio()
    701  st = rsxx_queue_discard(card, &dma_list[tgt], laddr,  in rsxx_dma_queue_bio()
    723  st = rsxx_queue_dma(card, &dma_list[tgt],  in rsxx_dma_queue_bio()
    741  if (!list_empty(&dma_list[i])) {  in rsxx_dma_queue_bio()
    744  list_splice_tail(&dma_list[i], &card->ctrl[i].queue);  in rsxx_dma_queue_bio()
    756  rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],  in rsxx_dma_queue_bio()
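rsxx_dma_queue_bio() stages DMA descriptors on on-stack per-target lists and then splices each non-empty list onto its controller's queue in a single constant-time operation, so shared queues are touched only once per target. A sketch of the staging pattern with hypothetical types (MY_MAX_TARGETS stands in for RSXX_MAX_TARGETS, and the per-chunk queuing is elided):

    #include <linux/list.h>

    #define MY_MAX_TARGETS 8            /* assumed bound, like RSXX_MAX_TARGETS */

    struct my_ctrl {
            struct list_head queue;     /* per-controller work queue */
    };

    static void my_queue_work(struct my_ctrl *ctrl, int ntargets)
    {
            struct list_head dma_list[MY_MAX_TARGETS];
            int i;

            for (i = 0; i < ntargets; i++)
                    INIT_LIST_HEAD(&dma_list[i]);

            /* ... per chunk: list_add_tail(&dma->list, &dma_list[tgt]); ... */

            for (i = 0; i < ntargets; i++) {
                    if (!list_empty(&dma_list[i]))
                            /* O(1) hand-off of the whole staged list */
                            list_splice_tail(&dma_list[i], &ctrl[i].queue);
            }
    }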
/kernel/linux/linux-5.10/include/rdma/

ib_umem_odp.h
     25  dma_addr_t *dma_list;  [member]
     27  * The umem_mutex protects the page_list and dma_list fields of an ODP ...
/kernel/linux/linux-6.6/include/rdma/

ib_umem_odp.h
     25  dma_addr_t *dma_list;  [member]
     27  * The umem_mutex protects the page_list and dma_list fields of an ODP ...
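The header comment records the locking rule behind all the umem_odp.c matches above: page_list and dma_list may only be touched while umem_mutex is held. A trivial sketch of a reader honoring that rule, with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct my_odp_locked {
            struct mutex umem_mutex;    /* protects page_list and dma_list */
            dma_addr_t *dma_list;
    };

    static dma_addr_t my_odp_get_dma(struct my_odp_locked *odp, size_t idx)
    {
            dma_addr_t dma;

            mutex_lock(&odp->umem_mutex);
            dma = odp->dma_list[idx];   /* holder excludes concurrent map/unmap */
            mutex_unlock(&odp->umem_mutex);

            return dma;
    }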