Lines Matching defs:dma
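The fragments below appear to come from the Linux kernel's SLI-4 library (libefc_sli, sli4.c); the number leading each match is its line in that source file. Despite the defs: header, most matches are uses of a dma parameter or an efc_dma field rather than definitions. Short, hedged sketches of the recurring patterns follow each group of matches.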
40 * @dma: DMA buffer for non-embedded commands.
45 struct efc_dma *dma)
50 if (length > sizeof(config->payload.embed) && !dma) {
61 if (!dma) {
72 config->payload.mem.addr.low = cpu_to_le32(lower_32_bits(dma->phys));
73 config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
75 cpu_to_le32(dma->size & SLI4_SLICONF_PMD_LEN);
76 config->payload_len = cpu_to_le32(dma->size);
78 sli4->bmbx_non_emb_pmd = dma;
79 return dma->virt;
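Lines 40-79 are sli_config_cmd_init() choosing between the mailbox's embedded payload area and an external, non-embedded DMA buffer. A minimal sketch of that decision, assuming invented example_* names and sizes (lower_32_bits(), upper_32_bits(), and cpu_to_le32() are the same kernel helpers the listing uses):

    #include <linux/kernel.h>    /* lower_32_bits(), upper_32_bits() */
    #include <linux/types.h>     /* u8, __le32, dma_addr_t */
    #include <asm/byteorder.h>   /* cpu_to_le32() */

    #define EXAMPLE_PMD_LEN_MASK 0x00ffffff /* stands in for SLI4_SLICONF_PMD_LEN */

    struct example_dma {
            void *virt;        /* CPU mapping of the buffer */
            dma_addr_t phys;   /* bus address handed to the device */
            size_t size;
    };

    struct example_config {
            __le32 payload_len;
            union {
                    u8 embed[60];   /* illustrative embedded-area size */
                    struct {
                            struct { __le32 low, high; } addr;
                            __le32 length;
                    } mem;
            } payload;
    };

    static void *example_cmd_init(struct example_config *config,
                                  size_t length, struct example_dma *dma)
    {
            /* Anything larger than the embedded area needs a buffer. */
            if (length > sizeof(config->payload.embed) && !dma)
                    return NULL;

            if (!dma)
                    return config->payload.embed;   /* small: embed it */

            /* Large: describe the external buffer to the device. */
            config->payload.mem.addr.low  = cpu_to_le32(lower_32_bits(dma->phys));
            config->payload.mem.addr.high = cpu_to_le32(upper_32_bits(dma->phys));
            config->payload.mem.length    = cpu_to_le32(dma->size & EXAMPLE_PMD_LEN_MASK);
            config->payload_len = cpu_to_le32(dma->size);
            return dma->virt;   /* caller builds the request here */
    }

Line 78 (sli4->bmbx_non_emb_pmd = dma) records the buffer in the SLI context, presumably so the completion path can locate the non-embedded payload again; line 79 then returns dma->virt for the caller to fill.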
430 u32 payload_buffer_size, struct efc_dma *dma)
439 page_count = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE) * num_rqs;
446 dma->size = payload_size;
447 dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
448 &dma->phys, GFP_KERNEL);
449 if (!dma->virt)
452 memset(dma->virt, 0, payload_size);
454 req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
463 num_pages = sli_page_count(qs[0]->dma.size, SLI_PAGE_SIZE);
465 req->rqe_count = cpu_to_le16(qs[0]->dma.size / SLI4_RQE_SIZE);
474 for (p = 0, addr = qs[i]->dma.phys; p < num_pages;
490 if (!q->dma.size)
493 dma_free_coherent(&sli4->pci->dev, q->dma.size,
494 q->dma.virt, q->dma.phys);
495 memset(&q->dma, 0, sizeof(struct efc_dma));
502 if (q->dma.virt) {
509 q->dma.size = size * n_entries;
510 q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
511 &q->dma.phys, GFP_KERNEL);
512 if (!q->dma.virt) {
513 memset(&q->dma, 0, sizeof(struct efc_dma));
518 memset(q->dma.virt, 0, size * n_entries);
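Lines 490-518 pair each queue's coherent allocation with its teardown. A sketch of the lifecycle, reusing struct example_dma from the sketch above (dma_alloc_coherent() and dma_free_coherent() are the real DMA-API calls; everything else is illustrative):

    #include <linux/dma-mapping.h>  /* dma_alloc_coherent(), dma_free_coherent() */
    #include <linux/errno.h>        /* -ENOMEM */
    #include <linux/gfp.h>          /* GFP_KERNEL */
    #include <linux/string.h>       /* memset() */

    static int example_queue_init(struct device *dev, struct example_dma *q,
                                  size_t entry_size, u32 n_entries)
    {
            q->size = entry_size * n_entries;
            q->virt = dma_alloc_coherent(dev, q->size, &q->phys, GFP_KERNEL);
            if (!q->virt) {
                    memset(q, 0, sizeof(*q));   /* keep the descriptor inert */
                    return -ENOMEM;
            }
            return 0;
    }

    static void example_queue_destroy(struct device *dev, struct example_dma *q)
    {
            if (!q->size)   /* never allocated, or already torn down */
                    return;
            dma_free_coherent(dev, q->size, q->virt, q->phys);
            memset(q, 0, sizeof(*q));   /* size == 0 makes destroy idempotent */
    }

Zeroing the descriptor both on allocation failure (line 513) and after the free (line 495) is what makes the !q->dma.size guard on line 490 safe to hit twice.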
553 if (sli_cmd_rq_create_v1(sli4, sli4->bmbx.virt, &q->dma, cq->id,
587 struct efc_dma dma = {0};
602 &dma)) {
616 rsp = dma.virt;
633 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
641 if (dma.virt)
642 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
643 dma.phys);
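Lines 587-643 keep the efc_dma descriptor on the stack, zero-initialized, so the error path can test dma.virt before freeing. A compact sketch of that shape, with the same headers and struct example_dma as above; example_build_cmd() is an invented stand-in for sli_cmd_rq_create_v2(), which allocates the payload buffer when it succeeds:

    /* Stand-in: allocates into *dma and returns nonzero on failure. */
    static int example_build_cmd(struct device *dev, struct example_dma *dma);

    static int example_rq_set_create(struct device *dev)
    {
            struct example_dma dma = {0};   /* .virt stays NULL until alloc */
            int rc = -EIO;

            if (example_build_cmd(dev, &dma))
                    goto error;

            /* ... issue the mailbox command, parse the response at dma.virt ... */

            dma_free_coherent(dev, dma.size, dma.virt, dma.phys);
            return 0;
    error:
            if (dma.virt)   /* free only if the build step got that far */
                    dma_free_coherent(dev, dma.size, dma.virt, dma.phys);
            return rc;
    }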
781 if (!sli_cmd_common_create_eq(sli4, sli4->bmbx.virt, &q->dma) &&
787 if (!sli_cmd_common_create_cq(sli4, sli4->bmbx.virt, &q->dma,
796 &q->dma, assoc->id) &&
802 if (!sli_cmd_wq_create(sli4, sli4->bmbx.virt, &q->dma,
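Lines 781-802 are the common dispatch: each queue type hands its q->dma to the matching CREATE command (EQ, CQ, MQ, WQ), so the buffer allocated by the init path above is what each command describes to the firmware.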
819 struct efc_dma *dma)
829 n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES;
845 num_pages_cq = sli_page_count(qs[0]->dma.size, page_bytes);
850 dma->size = payload_size;
851 dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
852 &dma->phys, GFP_KERNEL);
853 if (!dma->virt)
856 memset(dma->virt, 0, payload_size);
858 req = sli_config_cmd_init(sli4, sli4->bmbx.virt, payload_size, dma);
903 for (p = 0, addr = qs[i]->dma.phys; p < num_pages_cq;
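Lines 819-903 size the CQ-set payload from page counts and then walk each queue's buffer one page at a time. A sketch of both calculations (DIV_ROUND_UP() is the real kernel macro; sli_page_count() in the listing plays the same role, and EXAMPLE_PAGE_SIZE stands in for SLI_PAGE_SIZE):

    #define EXAMPLE_PAGE_SIZE 4096

    static u32 example_page_count(size_t bytes, u32 page_size)
    {
            return DIV_ROUND_UP(bytes, page_size);   /* round up to whole pages */
    }

    /* Program one queue's pages as little-endian low/high word pairs. */
    static void example_program_pages(__le32 *low, __le32 *high,
                                      dma_addr_t addr, u32 num_pages)
    {
            u32 p;

            for (p = 0; p < num_pages; p++, addr += EXAMPLE_PAGE_SIZE) {
                    low[p]  = cpu_to_le32(lower_32_bits(addr));
                    high[p] = cpu_to_le32(upper_32_bits(addr));
            }
    }

Line 829 (n_cqe = qs[0]->dma.size / SLI4_CQE_BYTES) is the inverse calculation: entries per buffer rather than pages per buffer.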
921 struct efc_dma dma = {0};
932 if (sli_cmd_cq_set_create(sli4, qs, num_cqs, eqs, &dma))
943 res = dma.virt;
961 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt, dma.phys);
969 if (dma.virt)
970 dma_free_coherent(&sli4->pci->dev, dma.size, dma.virt,
971 dma.phys);
1127 u8 *qe = q->dma.virt;
1149 u8 *qe = q->dma.virt;
1170 u8 *qe = q->dma.virt;
1199 u8 *qe = q->dma.virt;
1241 u8 *qe = q->dma.virt;
1285 u8 *qe = q->dma.virt;
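Lines 1127-1285 all start the same way, u8 *qe = q->dma.virt: every queue consumer walks the coherent ring through its CPU mapping. A sketch of the indexing, with an invented example_queue:

    struct example_queue {
            struct example_dma dma;   /* coherent ring buffer */
            u32 entry_size;           /* bytes per EQE/CQE/RQE/WQE */
            u32 index;                /* current host position in the ring */
    };

    static void *example_current_entry(struct example_queue *q)
    {
            u8 *qe = q->dma.virt;

            return qe + (size_t)q->index * q->entry_size;
    }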
2976 struct efc_dma *dma)
2981 if (!dma)
2984 psize = dma->size;
2986 req = sli_config_cmd_init(sli4, buf, psize, dma);
3248 sli_cmd_read_sparm64(struct sli4 *sli4, void *buf, struct efc_dma *dma, u16 vpi)
3257 if (!dma || !dma->phys) {
3268 (dma->size & SLI4_BDE_LEN_MASK));
3270 cpu_to_le32(lower_32_bits(dma->phys));
3272 cpu_to_le32(upper_32_bits(dma->phys));
3280 sli_cmd_read_topology(struct sli4 *sli4, void *buf, struct efc_dma *dma)
3284 if (!dma || !dma->size)
3287 if (dma->size < SLI4_MIN_LOOP_MAP_BYTES) {
3288 efc_log_err(sli4, "loop map buffer too small %zx\n", dma->size);
3296 memset(dma->virt, 0, dma->size);
3300 (dma->size & SLI4_BDE_LEN_MASK));
3302 cpu_to_le32(lower_32_bits(dma->phys));
3304 cpu_to_le32(upper_32_bits(dma->phys));
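Lines 3248-3304 (READ_SPARM64 and READ_TOPOLOGY) fill a buffer descriptor entry: a masked byte count plus the buffer address split into two little-endian words. A sketch with illustrative field names; SLI4_BDE_LEN_MASK is the mask the listing itself applies:

    struct example_bde {
            __le32 bde_type_buflen;   /* BDE type bits | masked byte count */
            __le32 low;               /* buffer address, bits 31:0  */
            __le32 high;              /* buffer address, bits 63:32 */
    };

    static void example_fill_bde(struct example_bde *bde, u32 bde_type,
                                 struct example_dma *dma, u32 len_mask)
    {
            bde->bde_type_buflen = cpu_to_le32(bde_type | (dma->size & len_mask));
            bde->low  = cpu_to_le32(lower_32_bits(dma->phys));
            bde->high = cpu_to_le32(upper_32_bits(dma->phys));
    }

READ_TOPOLOGY additionally rejects buffers smaller than SLI4_MIN_LOOP_MAP_BYTES (line 3287) before zeroing and describing the loop-map buffer.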
3397 struct efc_dma *dma, u8 update, u8 enable_t10_pi)
3426 cpu_to_le32(lower_32_bits(dma->phys));
3428 cpu_to_le32(upper_32_bits(dma->phys));
3437 u16 vfi, u16 fcfi, struct efc_dma dma,
3454 cpu_to_le32(lower_32_bits(dma.phys));
3456 cpu_to_le32(upper_32_bits(dma.phys));
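Note that sli_cmd_reg_vfi (line 3437) is the one signature here that takes struct efc_dma by value rather than by pointer; in the matched lines only dma.phys is read from it.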
3673 struct efc_dma *dma)
3677 hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
3683 hdr->request_length = cpu_to_le32(dma->size);
3690 struct efc_dma *dma)
3694 hdr = sli_config_cmd_init(sli4, buf, SLI4_RQST_CMDSZ(hdr), dma);
3700 hdr->request_length = cpu_to_le32(dma->size);
3783 struct efc_dma *dma)
3816 bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
3817 bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
3842 u32 offset, char *obj_name, struct efc_dma *dma)
3868 if (dma) {
3869 bde->u.data.low = cpu_to_le32(lower_32_bits(dma->phys));
3870 bde->u.data.high = cpu_to_le32(upper_32_bits(dma->phys));
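For the object read/write commands (lines 3842-3870) the external buffer is optional: the BDE is filled in only when a dma descriptor is supplied, using the same low/high address split sketched above.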
5006 struct efc_dma *page1[], struct efc_dma *dma)
5013 SLI4_CFG_PYLD_LENGTH(post_sgl_pages), dma);
5048 sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
5056 page_count = sli_page_count(dma->size, SLI_PAGE_SIZE);
5092 phys = dma->phys;
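The header-template post (lines 5048-5092) is one more instance of the page-walk pattern: page_count is derived from dma->size, and phys = dma->phys presumably seeds a per-page address advance like the loop sketched earlier.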