Lines matching refs: iu
225 struct srp_iu *iu;
227 iu = kmalloc(sizeof *iu, gfp_mask);
228 if (!iu)
231 iu->buf = kzalloc(size, gfp_mask);
232 if (!iu->buf)
235 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
237 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
240 iu->size = size;
241 iu->direction = direction;
243 return iu;
246 kfree(iu->buf);
248 kfree(iu);
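The fragments above (lines 225-248) are the allocation path for an SRP information unit (IU): kmalloc the control block, kzalloc the payload buffer, DMA-map the buffer, and unwind on failure in reverse order. Below is a minimal sketch of how these lines plausibly fit together; the function signature, the goto labels and the NULL return are assumptions, since the listing shows only the matching lines.

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);	/* control block */
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);	/* zeroed payload buffer */
	if (!iu->buf)
		goto out_free_iu;

	/* Map the buffer for DMA; failure is detected with ib_dma_mapping_error(). */
	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}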
253 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
255 if (!iu)
258 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
259 iu->direction);
260 kfree(iu->buf);
261 kfree(iu);
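srp_free_iu() (lines 253-261) tears the IU down in the opposite order: unmap the DMA buffer, free the payload, free the control block. Sketch, assuming the early return at line 255 simply bails out on a NULL pointer:

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}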
1809 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1815 list_add(&iu->list, &ch->free_tx);
1839 struct srp_iu *iu;
1858 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1859 list_del(&iu->list);
1860 return iu;
1870 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1880 list_add(&iu->list, &ch->free_tx);
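Lines 1809-1880 show that transmit IUs are recycled through a per-channel free list: srp_put_tx_iu() and the send-completion handler both push the IU back onto ch->free_tx, and __srp_get_tx_iu() pops the first entry. A condensed sketch of that pattern follows; the ch->lock spinlock, the request-limit (credit) accounting and the wc->status check that the real driver performs around these list operations are deliberately left out, the enum srp_iu_type parameter name is inferred, and obtaining the channel from cq->cq_context is an assumption.

/* Return a transmit IU to the channel's free list (caller holds ch->lock). */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	list_add(&iu->list, &ch->free_tx);
}

/* Take the first free transmit IU, or NULL if none is available. */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_iu *iu;

	if (list_empty(&ch->free_tx))
		return NULL;

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

/* Send completion: recover the IU from the completion cookie and recycle it. */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);

	list_add(&iu->list, &ch->free_tx);
}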
1886 * @iu: Information unit to send.
1889 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1894 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1897 iu->sge[0].addr = iu->dma;
1898 iu->sge[0].length = len;
1899 iu->sge[0].lkey = target->lkey;
1901 iu->cqe.done = srp_send_done;
1904 wr.wr_cqe = &iu->cqe;
1905 wr.sg_list = &iu->sge[0];
1906 wr.num_sge = iu->num_sge;
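srp_post_send() (lines 1889-1906) wires the IU into a verbs send: the IU's DMA address becomes a single scatter/gather entry, the embedded ib_cqe becomes the completion cookie (so srp_send_done() can recover the IU with container_of()), and the request is posted as an IB_WR_SEND. Sketch; the signaled-completion flag and the ch->qp field name are assumptions:

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_send_wr wr;

	if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
		return -EINVAL;

	iu->sge[0].addr   = iu->dma;		/* DMA address of the IU buffer */
	iu->sge[0].length = len;		/* only 'len' bytes are valid   */
	iu->sge[0].lkey   = target->lkey;	/* local protection key         */

	iu->cqe.done = srp_send_done;		/* completion routes back to the IU */

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &iu->sge[0];
	wr.num_sge    = iu->num_sge;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, NULL);
}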
1913 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1919 list.addr = iu->dma;
1920 list.length = iu->size;
1923 iu->cqe.done = srp_recv_done;
1926 wr.wr_cqe = &iu->cqe;
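srp_post_recv() (lines 1919-1926) is the receive-side counterpart: the full IU buffer (iu->size) is offered as one SGE and the completion cookie again points at the IU's ib_cqe so srp_recv_done() can find the buffer. Sketch, with the same caveat about assumed field names (lkey, qp):

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;		/* post the whole buffer for receives */
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next    = NULL;
	wr.wr_cqe  = &iu->cqe;
	wr.sg_list = &list;
	wr.num_sge = 1;

	return ib_post_recv(ch->qp, &wr, NULL);
}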
1998 struct srp_iu *iu;
2003 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2006 if (!iu) {
2012 iu->num_sge = 1;
2013 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2014 memcpy(iu->buf, rsp, len);
2015 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2017 err = srp_post_send(ch, iu, len);
2021 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
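Lines 1998-2021 show the pattern for sending a response IU: grab a SRP_IU_RSP transmit IU, bracket the CPU's memcpy into iu->buf with ib_dma_sync_single_for_cpu()/ib_dma_sync_single_for_device() so CPU writes to the streaming mapping are visible to the HCA, post it, and recycle the IU if posting fails. A reduced sketch of just that bracket, written as a hypothetical helper; the helper name, the 'dev' lookup path and the error codes are illustrative, and the locking/credit handling of the real function is omitted.

static int srp_send_rsp_iu(struct srp_rdma_ch *ch, void *rsp, int len)
{
	struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	int err;

	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	if (!iu)
		return -ENOMEM;

	iu->num_sge = 1;

	/* CPU ownership, copy, device ownership: required for streaming DMA. */
	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err)
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);	/* recycle on failure */

	return err;
}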
2061 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2073 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2076 opcode = *(u8 *) iu->buf;
2082 iu->buf, wc->byte_len, true);
2087 srp_process_rsp(ch, iu->buf);
2091 srp_process_cred_req(ch, iu->buf);
2095 srp_process_aer_req(ch, iu->buf);
2110 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2113 res = srp_post_recv(ch, iu);
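The receive completion handler (lines 2061-2113) recovers the IU from the work completion, syncs the buffer for the CPU, dispatches on the first byte of the buffer (the SRP opcode), syncs the buffer back to the device, and reposts the same IU so the receive queue stays full. Compressed sketch; the wc->status check, the hex dump of unknown opcodes (line 2082) and the driver's shost_printk-style reporting are omitted, and pulling 'ch' out of cq->cq_context plus the 'dev' lookup are assumptions.

static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
	u8 opcode;
	int res;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;	/* first byte of every SRP IU is the opcode */

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;
	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;
	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;
	default:
		/* the real driver dumps the unrecognized IU here */
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	/* Give the buffer straight back to the receive queue. */
	res = srp_post_recv(ch, iu);
	if (res)
		pr_err("srp_post_recv() failed: %d\n", res);
}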
2157 struct srp_iu *iu;
2173 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2176 if (!iu)
2180 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2183 cmd = iu->buf;
2198 req->cmd = iu;
2215 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2218 if (srp_post_send(ch, iu, len)) {
2230 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
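The fast-path command code (lines 2157-2230) follows the same IU discipline: take a SRP_IU_CMD IU, sync for the CPU, build the SRP_CMD request in iu->buf, record the IU in the per-request state (req->cmd) so the response and error paths can return it, sync for the device, and post; a failed post puts the IU straight back. The sketch below isolates that discipline in a hypothetical helper; srp_queuecommand() itself also maps the scatter/gather list, fills in LUN/tag/CDB, computes the real IU length and handles SCSI midlayer backpressure.

static int srp_queue_cmd_iu(struct srp_rdma_ch *ch, struct srp_request *req,
			    void *srp_cmd, int len)
{
	struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
	struct srp_iu *iu;

	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	if (!iu)
		return -EBUSY;

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
				   DMA_TO_DEVICE);
	memcpy(iu->buf, srp_cmd, len);	/* the real code builds SRP_CMD in place */
	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
				      DMA_TO_DEVICE);

	req->cmd = iu;			/* completion path recycles the IU via req */

	if (srp_post_send(ch, iu, len)) {
		req->cmd = NULL;
		srp_put_tx_iu(ch, iu, SRP_IU_CMD);
		return -EIO;
	}
	return 0;
}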
2379 struct srp_iu *iu = ch->rx_ring[i];
2381 ret = srp_post_recv(ch, iu);
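Lines 2379-2381 are the loop that (re)posts every IU of the channel's preallocated receive ring, e.g. when a channel is (re)established. Excerpt-style sketch; the loop bound (target->queue_size) and the error handling are assumptions:

	/* Refill the receive queue from the preallocated rx ring. */
	for (i = 0; i < target->queue_size; ++i) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			break;		/* caller tears the connection down */
	}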
2720 struct srp_iu *iu;
2733 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2736 if (!iu) {
2742 iu->num_sge = 1;
2744 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2746 tsk_mgmt = iu->buf;
2761 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2763 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2764 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
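Task management (lines 2720-2764) goes through the same machinery with an SRP_IU_TSK_MGMT IU: a struct srp_tsk_mgmt is built in iu->buf between the two DMA syncs, posted with srp_post_send(), and the IU is returned to the free list if the post fails. Sketch as a hypothetical helper; the helper name, the 'dev' lookup and the error codes are illustrative, and the real function also fills in the LUN and tag, serializes task-management requests and waits for the response IU.

static int srp_post_tsk_mgmt_iu(struct srp_rdma_ch *ch, u8 func, u64 task_tag)
{
	struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
	struct srp_tsk_mgmt *tsk_mgmt;
	struct srp_iu *iu;

	iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
	if (!iu)
		return -ENOMEM;

	iu->num_sge = 1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
	tsk_mgmt->opcode        = SRP_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;		/* e.g. SRP_TSK_ABORT_TASK */
	tsk_mgmt->task_tag      = task_tag;	/* tag of the task being managed */
	/* ... the real function also sets lun and the request tag ... */
	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		return -EIO;
	}
	return 0;
}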