Lines Matching refs:iu

230 struct srp_iu *iu;
232 iu = kmalloc(sizeof *iu, gfp_mask);
233 if (!iu)
236 iu->buf = kzalloc(size, gfp_mask);
237 if (!iu->buf)
240 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
242 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
245 iu->size = size;
246 iu->direction = direction;
248 return iu;
251 kfree(iu->buf);
253 kfree(iu);
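
The matches at 230-253 come from the SRP initiator's IU constructor (srp_alloc_iu(), the counterpart of srp_free_iu() below); the listing drops the non-matching lines, i.e. the function signature and the goto-based unwind. A minimal sketch of the full path, assuming the struct srp_iu and struct srp_host definitions from ib_srp.h:

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
                                   gfp_t gfp_mask,
                                   enum dma_data_direction direction)
{
        struct srp_iu *iu;

        iu = kmalloc(sizeof *iu, gfp_mask);
        if (!iu)
                goto out;

        /* Separate data buffer; the descriptor itself is never DMA-mapped. */
        iu->buf = kzalloc(size, gfp_mask);
        if (!iu->buf)
                goto out_free_iu;

        /* Streaming mapping; later CPU accesses must be bracketed by
         * ib_dma_sync_single_for_cpu()/_for_device(). */
        iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
                                    direction);
        if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
                goto out_free_buf;

        iu->size = size;
        iu->direction = direction;

        return iu;

out_free_buf:
        kfree(iu->buf);
out_free_iu:
        kfree(iu);
out:
        return NULL;
}
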
258 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
260 if (!iu)
263 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
264 iu->direction);
265 kfree(iu->buf);
266 kfree(iu);
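
srp_free_iu() undoes the constructor: it tolerates a NULL pointer, unmaps the buffer using the size and direction recorded in the IU, then frees both allocations. A hedged usage sketch showing how the two helpers pair up; the helper name, ring pointer and length below are illustrative, not taken from the driver:

/* Illustrative only: build a small array of receive IUs and unwind on
 * failure using the two helpers above. */
static int example_alloc_rx_ius(struct srp_host *host, struct srp_iu **ring,
                                int n, size_t len)
{
        int i;

        for (i = 0; i < n; i++) {
                ring[i] = srp_alloc_iu(host, len, GFP_KERNEL,
                                       DMA_FROM_DEVICE);
                if (!ring[i])
                        goto err;
        }
        return 0;

err:
        while (--i >= 0) {
                srp_free_iu(host, ring[i]);
                ring[i] = NULL;
        }
        return -ENOMEM;
}
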
1808 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1814 list_add(&iu->list, &ch->free_tx);
1838 struct srp_iu *iu;
1857 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1858 list_del(&iu->list);
1859 return iu;
1869 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
1879 list_add(&iu->list, &ch->free_tx);
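
The matches at 1808-1879 show the TX free-list discipline: __srp_get_tx_iu() takes an IU off ch->free_tx before a send is built, srp_put_tx_iu() puts it back on an error path, and the send-completion handler (which recovers the IU from the work completion via container_of() on wr_cqe) returns it once the hardware is done with it. In the driver these paths run under ch->lock and also account for SRP request credits; the sketch below takes the lock itself, skips the credit handling, and uses hypothetical example_* names:

static struct srp_iu *example_get_tx_iu(struct srp_rdma_ch *ch)
{
        struct srp_iu *iu = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        if (!list_empty(&ch->free_tx)) {
                iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
                list_del(&iu->list);        /* caller now owns the IU */
        }
        spin_unlock_irqrestore(&ch->lock, flags);
        return iu;
}

static void example_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
        unsigned long flags;

        spin_lock_irqsave(&ch->lock, flags);
        list_add(&iu->list, &ch->free_tx);  /* back to the pool */
        spin_unlock_irqrestore(&ch->lock, flags);
}
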
1885 * @iu: Information unit to send.
1888 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1893 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
1896 iu->sge[0].addr = iu->dma;
1897 iu->sge[0].length = len;
1898 iu->sge[0].lkey = target->lkey;
1900 iu->cqe.done = srp_send_done;
1903 wr.wr_cqe = &iu->cqe;
1904 wr.sg_list = &iu->sge[0];
1905 wr.num_sge = iu->num_sge;
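
srp_post_send() turns an IU into a single-SGE send work request. The listing omits the remaining work-request fields and the actual post; a minimal sketch of the complete function, assuming an IB_WR_SEND opcode and a signaled completion (which the send-completion handler above implies):

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
        struct srp_target_port *target = ch->target;
        struct ib_send_wr wr;

        if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
                return -EINVAL;

        /* The IU's first SGE covers 'len' bytes of the mapped buffer. */
        iu->sge[0].addr   = iu->dma;
        iu->sge[0].length = len;
        iu->sge[0].lkey   = target->lkey;

        iu->cqe.done = srp_send_done;

        wr.next       = NULL;
        wr.wr_cqe     = &iu->cqe;
        wr.sg_list    = &iu->sge[0];
        wr.num_sge    = iu->num_sge;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(ch->qp, &wr, NULL);
}
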
1912 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1918 list.addr = iu->dma;
1919 list.length = iu->size;
1922 iu->cqe.done = srp_recv_done;
1925 wr.wr_cqe = &iu->cqe;
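
srp_post_recv() is the receive-side counterpart: one SGE covering the whole IU buffer, with the completion routed to srp_recv_done() through the embedded ib_cqe. A sketch, with the lkey assumed to come from target->lkey as on the send side:

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
        struct srp_target_port *target = ch->target;
        struct ib_recv_wr wr;
        struct ib_sge list;

        list.addr   = iu->dma;
        list.length = iu->size;   /* the full buffer is available to the HCA */
        list.lkey   = target->lkey;

        iu->cqe.done = srp_recv_done;

        wr.next    = NULL;
        wr.wr_cqe  = &iu->cqe;
        wr.sg_list = &list;
        wr.num_sge = 1;

        return ib_post_recv(ch->qp, &wr, NULL);
}
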
1997 struct srp_iu *iu;
2002 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
2005 if (!iu) {
2011 iu->num_sge = 1;
2012 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
2013 memcpy(iu->buf, rsp, len);
2014 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
2016 err = srp_post_send(ch, iu, len);
2020 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
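
The matches at 1997-2020, from a path that replies with an SRP_IU_RSP IU, show the pattern used whenever the CPU writes into a streaming-mapped IU buffer: sync the range for CPU access, copy the payload, sync it back for the device, then post the send, returning the IU to the free list if the post fails. A condensed sketch; the helper name is illustrative:

static int example_send_copied_rsp(struct srp_rdma_ch *ch,
                                   struct ib_device *dev, struct srp_iu *iu,
                                   const void *rsp, int len)
{
        int err;

        iu->num_sge = 1;

        /* Give the CPU ownership of the mapped range before writing it. */
        ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
        memcpy(iu->buf, rsp, len);
        /* Hand the range back to the device before posting the send. */
        ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

        err = srp_post_send(ch, iu, len);
        if (err)
                srp_put_tx_iu(ch, iu, SRP_IU_RSP);  /* recycle on failure */
        return err;
}
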
2060 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
2072 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
2075 opcode = *(u8 *) iu->buf;
2081 iu->buf, wc->byte_len, true);
2086 srp_process_rsp(ch, iu->buf);
2090 srp_process_cred_req(ch, iu->buf);
2094 srp_process_aer_req(ch, iu->buf);
2109 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
2112 res = srp_post_recv(ch, iu);
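
srp_recv_done() handles receive completions: it syncs the IU buffer for CPU access, dispatches on the SRP opcode in the first byte, syncs the buffer back, and reposts the same IU so the receive queue never runs dry. A simplified sketch of that backbone, assuming the srp_process_*() handlers above and the SRP opcode constants from include/scsi/srp.h; status checking and error logging are omitted and the name is illustrative:

static void example_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
        struct srp_rdma_ch *ch = cq->cq_context;
        struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
        u8 opcode;

        ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
                                   DMA_FROM_DEVICE);

        opcode = *(u8 *)iu->buf;   /* first byte of every SRP IU */

        switch (opcode) {
        case SRP_RSP:
                srp_process_rsp(ch, iu->buf);
                break;
        case SRP_CRED_REQ:
                srp_process_cred_req(ch, iu->buf);
                break;
        case SRP_AER_REQ:
                srp_process_aer_req(ch, iu->buf);
                break;
        }

        ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
                                      DMA_FROM_DEVICE);

        srp_post_recv(ch, iu);     /* recycle the IU into the receive queue */
}
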
2155 struct srp_iu *iu;
2171 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2174 if (!iu)
2178 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
2181 cmd = iu->buf;
2196 req->cmd = iu;
2213 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
2216 if (srp_post_send(ch, iu, len)) {
2228 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
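
The matches at 2155-2228 are from the SCSI command-issue path: grab an SRP_IU_CMD IU, build the SRP_CMD request in its buffer under the usual sync-for-CPU/sync-for-device bracket, remember the IU in the per-command request so it can be reclaimed later, and post it, recycling the IU if the post fails. A heavily condensed sketch; locking around the free list, data mapping, task attributes and error reporting are left out, and the helper name is illustrative:

static int example_queue_cmd(struct srp_rdma_ch *ch, struct srp_request *req,
                             struct scsi_cmnd *scmnd, u64 tag, int len)
{
        struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
        struct srp_cmd *cmd;
        struct srp_iu *iu;

        iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
        if (!iu)
                return -ENOMEM;

        ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
                                   DMA_TO_DEVICE);

        cmd = iu->buf;
        memset(cmd, 0, sizeof(*cmd));
        cmd->opcode = SRP_CMD;
        cmd->tag = tag;
        memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

        req->cmd = iu;   /* needed to reclaim the IU when the command ends */

        ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
                                      DMA_TO_DEVICE);

        if (srp_post_send(ch, iu, len)) {
                srp_put_tx_iu(ch, iu, SRP_IU_CMD);
                return -EIO;
        }
        return 0;
}
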
2377 struct srp_iu *iu = ch->rx_ring[i];
2379 ret = srp_post_recv(ch, iu);
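
The two matches at 2377-2379 come from a loop that (re)posts every IU in the channel's receive ring, for example after a reconnect. A minimal sketch, with the ring length passed in explicitly since it lives in the driver's queue configuration; the helper name is illustrative:

/* Post every receive IU in ch->rx_ring; report the first failure. */
static int example_post_rx_ring(struct srp_rdma_ch *ch, int ring_size)
{
        int i, ret = 0;

        for (i = 0; i < ring_size; ++i) {
                ret = srp_post_recv(ch, ch->rx_ring[i]);
                if (ret)
                        break;
        }
        return ret;
}
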
2718 struct srp_iu *iu;
2731 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2734 if (!iu) {
2740 iu->num_sge = 1;
2742 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2744 tsk_mgmt = iu->buf;
2759 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2761 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2762 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
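
The final matches (2718-2762) are from the task-management path: it uses an SRP_IU_TSK_MGMT IU and the same get/sync/fill/sync/post/put sequence as the command path, with a struct srp_tsk_mgmt built in the IU buffer. A condensed sketch; the srp_tsk_mgmt field names are assumed from include/scsi/srp.h, LUN setup, locking and the wait for the response are omitted, and the helper name is illustrative:

static int example_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u8 func)
{
        struct ib_device *dev = ch->target->srp_host->srp_dev->dev;
        struct srp_tsk_mgmt *tsk_mgmt;
        struct srp_iu *iu;

        iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
        if (!iu)
                return -ENOMEM;

        iu->num_sge = 1;

        ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof(*tsk_mgmt),
                                   DMA_TO_DEVICE);
        tsk_mgmt = iu->buf;
        memset(tsk_mgmt, 0, sizeof(*tsk_mgmt));
        tsk_mgmt->opcode = SRP_TSK_MGMT;
        tsk_mgmt->tsk_mgmt_func = func;   /* e.g. abort task, LUN reset */
        tsk_mgmt->task_tag = req_tag;
        ib_dma_sync_single_for_device(dev, iu->dma, sizeof(*tsk_mgmt),
                                      DMA_TO_DEVICE);

        if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
                srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
                return -EIO;
        }
        return 0;
}
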