Lines matching refs:lsop

767 struct nvmefc_ls_req_op *lsop;
773 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
774 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
775 lsop->flags |= FCOP_FLAGS_TERMIO;
779 &lsop->ls_req);
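
The matches at lines 767-779 show the teardown loop: walk the remote port's list of outstanding LS request ops and, for each one not already flagged as terminating, set the terminate flag exactly once and hand the op to the transport for abort. Below is a minimal userspace sketch of that mark-and-abort pattern; the names (FLAG_TERMIO, ls_abort, struct ls_req_op, abort_pending_lsops) are illustrative stand-ins, not the driver's API.

/* mark-and-abort sketch of the loop matched at lines 767-779 */
#include <stdio.h>

#define FLAG_TERMIO 0x1

struct ls_req_op {
    unsigned int flags;
    struct ls_req_op *next;     /* simple singly linked pending list */
};

static void ls_abort(struct ls_req_op *op)
{
    printf("aborting op %p\n", (void *)op);
}

/* abort every pending op that has not already been terminated */
static void abort_pending_lsops(struct ls_req_op *head)
{
    for (struct ls_req_op *op = head; op; op = op->next) {
        if (op->flags & FLAG_TERMIO)
            continue;           /* already being torn down, skip it */
        op->flags |= FLAG_TERMIO;
        ls_abort(op);
    }
}

int main(void)
{
    struct ls_req_op b = { .flags = FLAG_TERMIO, .next = NULL };
    struct ls_req_op a = { .flags = 0, .next = &b };

    abort_pending_lsops(&a);    /* only "a" is aborted; "b" is skipped */
    return 0;
}
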
1037 __nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
1039 struct nvme_fc_rport *rport = lsop->rport;
1040 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1045 if (!lsop->req_queued) {
1050 list_del(&lsop->lsreq_list);
1052 lsop->req_queued = false;
1065 struct nvmefc_ls_req_op *lsop,
1068 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1079 lsop->rport = rport;
1080 lsop->req_queued = false;
1081 INIT_LIST_HEAD(&lsop->lsreq_list);
1082 init_completion(&lsop->ls_done);
1095 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);
1097 lsop->req_queued = true;
1109 lsop->ls_error = ret;
1111 lsop->req_queued = false;
1112 list_del(&lsop->lsreq_list);
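
Lines 1037-1052 and 1095-1112 together show the req_queued bookkeeping: an op is linked onto the rport's pending list when it is issued, and both the finish path and the send-error path unlink it only if req_queued says it is actually on the list, so teardown is safe to run more than once. The following userspace sketch models that guarded unlink; the list and lock handling here is a toy stand-in, not the kernel's list_head/spinlock machinery.

/* req_queued guard sketch for the pattern at lines 1037-1052 / 1095-1112 */
#include <stdbool.h>
#include <stdio.h>

struct ls_req_op {
    bool req_queued;
    struct ls_req_op *prev, *next;  /* toy doubly linked list node */
};

struct rport {
    struct ls_req_op head;          /* circular list head */
};

static void rport_init(struct rport *rp)
{
    rp->head.prev = rp->head.next = &rp->head;
}

/* queue the op before handing it to the transport */
static void queue_lsop(struct rport *rp, struct ls_req_op *op)
{
    op->prev = rp->head.prev;
    op->next = &rp->head;
    rp->head.prev->next = op;
    rp->head.prev = op;
    op->req_queued = true;
}

/* finish path: only unlink if the op is still on the list */
static void finish_lsop(struct ls_req_op *op)
{
    if (!op->req_queued)
        return;                     /* already unlinked, nothing to do */
    op->prev->next = op->next;
    op->next->prev = op->prev;
    op->req_queued = false;
}

int main(void)
{
    struct rport rp;
    struct ls_req_op op = { 0 };

    rport_init(&rp);
    queue_lsop(&rp, &op);
    finish_lsop(&op);
    finish_lsop(&op);               /* second call is a harmless no-op */
    printf("list empty: %d\n", rp.head.next == &rp.head);
    return 0;
}
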
1126 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1128 lsop->ls_error = status;
1129 complete(&lsop->ls_done);
1133 nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
1135 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
1139 ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
1148 wait_for_completion(&lsop->ls_done);
1150 __nvme_fc_finish_ls_req(lsop);
1152 ret = lsop->ls_error;
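
Lines 1126-1152 show the synchronous-over-asynchronous pattern: nvme_fc_send_ls_req() issues the request through the async primitive with a done callback, blocks on the op's completion, then reads the status the callback recorded in ls_error. Below is a minimal userspace model of that shape using pthreads (build with cc -pthread); the names ls_op, fake_transport, and send_ls_req_sync are illustrative only.

/* sync-over-async sketch of the pattern at lines 1126-1152 */
#include <pthread.h>
#include <stdio.h>

struct ls_op {
    int ls_error;               /* status filled in by the done callback */
    int done;                   /* completion flag */
    pthread_mutex_t lock;
    pthread_cond_t cond;
};

/* done callback: record status and wake the waiter */
static void ls_req_done(struct ls_op *op, int status)
{
    pthread_mutex_lock(&op->lock);
    op->ls_error = status;
    op->done = 1;
    pthread_cond_signal(&op->cond);
    pthread_mutex_unlock(&op->lock);
}

/* stand-in for the transport completing the request from another context */
static void *fake_transport(void *arg)
{
    ls_req_done(arg, 0);        /* complete with success */
    return NULL;
}

/* synchronous wrapper: send, then wait for the callback to fire */
static int send_ls_req_sync(struct ls_op *op)
{
    pthread_t t;

    pthread_mutex_init(&op->lock, NULL);
    pthread_cond_init(&op->cond, NULL);
    op->done = 0;

    pthread_create(&t, NULL, fake_transport, op);

    pthread_mutex_lock(&op->lock);
    while (!op->done)
        pthread_cond_wait(&op->cond, &op->lock);
    pthread_mutex_unlock(&op->lock);

    pthread_join(t, NULL);
    return op->ls_error;
}

int main(void)
{
    struct ls_op op;

    printf("LS request completed with status %d\n", send_ls_req_sync(&op));
    return 0;
}
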
1167 struct nvmefc_ls_req_op *lsop,
1172 return __nvme_fc_send_ls_req(rport, lsop, done);
1179 struct nvmefc_ls_req_op *lsop;
1186 lsop = kzalloc((sizeof(*lsop) +
1189 if (!lsop) {
1197 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1];
1199 lsreq = &lsop->ls_req;
1225 lsop->queue = queue;
1232 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1283 kfree(lsop);
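
The matches at lines 1186-1199 and 1283 show the single-allocation layout used when building an LS request: the tracking op and its request/response payload buffers come from one kzalloc()'d region, with the payload starting at &lsop[1], so a single kfree() releases everything. Here is a hypothetical userspace sketch of that layout; the payload types and field names are stand-ins, not the fcnvme structures.

/* single-allocation layout sketch for the pattern at lines 1186-1199 */
#include <stdlib.h>
#include <stdio.h>

struct cr_assoc_rqst { unsigned char bytes[64]; };   /* stand-in request payload */
struct cr_assoc_acc  { unsigned char bytes[32]; };   /* stand-in response payload */

struct ls_req_op {
    void *rqstaddr;
    void *rspaddr;
    size_t rqstlen;
    size_t rsplen;
};

int main(void)
{
    struct ls_req_op *lsop;
    struct cr_assoc_rqst *rqst;
    struct cr_assoc_acc *acc;

    /* one zeroed allocation holds the op plus both payload buffers */
    lsop = calloc(1, sizeof(*lsop) + sizeof(*rqst) + sizeof(*acc));
    if (!lsop)
        return 1;

    rqst = (struct cr_assoc_rqst *)&lsop[1];   /* request payload follows the op */
    acc  = (struct cr_assoc_acc *)&rqst[1];    /* response payload follows the request */

    lsop->rqstaddr = rqst;
    lsop->rqstlen  = sizeof(*rqst);
    lsop->rspaddr  = acc;
    lsop->rsplen   = sizeof(*acc);

    printf("op=%p rqst=%p acc=%p\n", (void *)lsop, (void *)rqst, (void *)acc);

    free(lsop);     /* a single free()/kfree() releases the whole region */
    return 0;
}
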
1296 struct nvmefc_ls_req_op *lsop;
1302 lsop = kzalloc((sizeof(*lsop) +
1305 if (!lsop) {
1313 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1];
1315 lsreq = &lsop->ls_req;
1340 lsop->queue = queue;
1347 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1385 kfree(lsop);
1397 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1399 __nvme_fc_finish_ls_req(lsop);
1403 kfree(lsop);
1428 struct nvmefc_ls_req_op *lsop;
1432 lsop = kzalloc((sizeof(*lsop) +
1435 if (!lsop) {
1443 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
1445 lsreq = &lsop->ls_req;
1454 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1457 kfree(lsop);
1463 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
1464 struct nvme_fc_rport *rport = lsop->rport;
1469 list_del(&lsop->lsrcv_list);
1472 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
1473 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1474 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1475 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1477 kfree(lsop);
1483 nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
1485 struct nvme_fc_rport *rport = lsop->rport;
1487 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
1490 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
1491 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1494 lsop->lsrsp);
1499 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp);
1506 struct nvmefc_ls_rcv_op *lsop)
1509 &lsop->rqstbuf->rq_dis_assoc;
1523 ctrl->rcv_disconn = lsop;
1558 nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
1560 struct nvme_fc_rport *rport = lsop->rport;
1562 &lsop->rqstbuf->rq_dis_assoc;
1564 &lsop->rspbuf->rsp_dis_assoc;
1570 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst);
1573 ctrl = nvme_fc_match_disconn_ls(rport, lsop);
1582 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1593 lsop->lsrsp->rsplen = sizeof(*acc);
1621 nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
1623 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0;
1626 lsop->lsrsp->nvme_fc_private = lsop;
1627 lsop->lsrsp->rspbuf = lsop->rspbuf;
1628 lsop->lsrsp->rspdma = lsop->rspdma;
1629 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done;
1631 lsop->lsrsp->rsplen = 0;
1640 ret = nvme_fc_ls_disconnect_assoc(lsop);
1643 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1644 sizeof(*lsop->rspbuf), w0->ls_cmd,
1649 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1650 sizeof(*lsop->rspbuf), w0->ls_cmd,
1654 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf,
1655 sizeof(*lsop->rspbuf), w0->ls_cmd,
1669 struct nvmefc_ls_rcv_op *lsop;
1676 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) {
1677 if (lsop->handled)
1680 lsop->handled = true;
1683 sendrsp = nvme_fc_handle_ls_rqst(lsop);
1686 w0 = &lsop->rqstbuf->w0;
1687 lsop->lsrsp->rsplen = nvme_fc_format_rjt(
1688 lsop->rspbuf,
1689 sizeof(*lsop->rspbuf),
1695 nvme_fc_xmt_ls_rsp(lsop);
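
Lines 1621-1655 and 1669-1695 show the receive-side dispatch: each received LS op is processed at most once (guarded by the handled flag), a recognized command is dispatched, and anything else gets a reject response formatted into the op's response buffer before transmission. The sketch below models that dispatch-or-reject flow; the command code, format_reject() helper, and sizes are illustrative stand-ins.

/* dispatch-or-reject sketch of the pattern at lines 1621-1655 / 1669-1695 */
#include <stdbool.h>
#include <stdio.h>

enum { LS_DISCONNECT_ASSOC = 0x81 };    /* illustrative command code */

struct ls_rcv_op {
    bool handled;
    unsigned char ls_cmd;               /* command word of the request */
    int rsplen;                         /* response length to transmit */
};

static int format_reject(struct ls_rcv_op *op)
{
    printf("rejecting unsupported LS cmd 0x%x\n", op->ls_cmd);
    return 16;                          /* pretend size of a reject payload */
}

static void handle_ls_rqst(struct ls_rcv_op *op)
{
    if (op->handled)
        return;                         /* never process the same op twice */
    op->handled = true;

    switch (op->ls_cmd) {
    case LS_DISCONNECT_ASSOC:
        printf("handling disconnect-association\n");
        op->rsplen = 8;                 /* pretend size of the accept payload */
        break;
    default:
        op->rsplen = format_reject(op); /* unknown command: build a reject */
        break;
    }
    /* a real driver would now transmit op->rsplen bytes of response */
}

int main(void)
{
    struct ls_rcv_op good = { .ls_cmd = LS_DISCONNECT_ASSOC };
    struct ls_rcv_op bad  = { .ls_cmd = 0x7f };

    handle_ls_rqst(&good);
    handle_ls_rqst(&bad);
    return 0;
}
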
1728 struct nvmefc_ls_rcv_op *lsop;
1753 lsop = kzalloc(sizeof(*lsop) +
1757 if (!lsop) {
1765 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1];
1766 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1];
1768 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
1769 sizeof(*lsop->rspbuf),
1771 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
1780 lsop->rport = rport;
1781 lsop->lsrsp = lsrsp;
1783 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
1784 lsop->rqstdatalen = lsreqbuf_len;
1792 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list);
1800 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1801 sizeof(*lsop->rspbuf), DMA_TO_DEVICE);
1803 kfree(lsop);
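
The final group, lines 1728-1803, shows the receive-path allocation: one zeroed allocation carries the tracking op plus a request buffer and a response buffer laid out back to back, the received payload is copied into the request buffer, and the response buffer is DMA-mapped for the eventual reply. The userspace sketch below models the layout and copy; buffer sizes and names are stand-ins, and the DMA mapping has no userspace equivalent, so it is only noted in a comment.

/* receive-path allocation sketch for the pattern at lines 1728-1803 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define RQST_BUF_SZ 128     /* stand-in for sizeof(union nvmefc_ls_requests) */
#define RSP_BUF_SZ   64     /* stand-in for sizeof(union nvmefc_ls_responses) */

struct ls_rcv_op {
    void *rqstbuf;
    void *rspbuf;
    size_t rqstdatalen;
};

static struct ls_rcv_op *alloc_rcv_op(const void *lsreqbuf, size_t lsreqbuf_len)
{
    struct ls_rcv_op *lsop;

    if (lsreqbuf_len > RQST_BUF_SZ)
        return NULL;                            /* oversized request */

    /* op + request buffer + response buffer in one allocation */
    lsop = calloc(1, sizeof(*lsop) + RQST_BUF_SZ + RSP_BUF_SZ);
    if (!lsop)
        return NULL;

    lsop->rqstbuf = &lsop[1];                   /* request buffer follows the op */
    lsop->rspbuf  = (char *)lsop->rqstbuf + RQST_BUF_SZ;
    /* in the driver, lsop->rspbuf would now be DMA-mapped for the response */

    memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len);
    lsop->rqstdatalen = lsreqbuf_len;
    return lsop;
}

int main(void)
{
    unsigned char payload[32] = { 0x81 };       /* fake received LS payload */
    struct ls_rcv_op *lsop = alloc_rcv_op(payload, sizeof(payload));

    if (!lsop)
        return 1;
    printf("queued rcv op with %zu request bytes\n", lsop->rqstdatalen);
    free(lsop);                                 /* single free releases op and buffers */
    return 0;
}
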