Lines matching defs:tgtport (definitions and uses of tgtport in the NVMe Fibre Channel target code)
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
34 struct nvmet_fc_tgtport *tgtport;
51 struct nvmet_fc_tgtport *tgtport;
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
90 struct nvmet_fc_tgtport *tgtport;
93 struct list_head fcp_list; /* tgtport->fcp_list */
119 struct nvmet_fc_tgtport *tgtport;
155 struct nvmet_fc_tgtport *tgtport;
166 struct nvmet_fc_tgtport *tgtport;
180 return (iodptr - iodptr->tgtport->iod);
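The index computation above (return (iodptr - iodptr->tgtport->iod)) works because the LS iods are allocated as one contiguous array whose base pointer is kept in tgtport->iod, so plain pointer subtraction yields the element's index. A minimal userspace sketch of that pattern, with hypothetical names, assuming only that the array is contiguous:

/* Sketch: recover an array index by subtracting the base pointer
 * stored in the parent object. Names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct parent;

struct iod {
	struct parent *parent;	/* back-pointer to the owning object */
	int payload;
};

struct parent {
	struct iod *iod;	/* base of a contiguous iod array */
};

static int iod_index(struct iod *iodptr)
{
	/* Element minus array base gives the index, mirroring
	 * "return (iodptr - iodptr->tgtport->iod);" above. */
	return (int)(iodptr - iodptr->parent->iod);
}

int main(void)
{
	struct parent p;
	struct iod *arr = calloc(8, sizeof(*arr));

	if (!arr)
		return 1;
	p.iod = arr;
	for (int i = 0; i < 8; i++)
		arr[i].parent = &p;

	printf("index of arr[5] = %d\n", iod_index(&arr[5]));	/* prints 5 */
	free(arr);
	return 0;
}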
252 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
255 struct nvmet_fc_tgtport *tgtport =
258 nvmet_fc_tgtport_put(tgtport);
260 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
261 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
264 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
363 struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
367 spin_lock_irqsave(&tgtport->lock, flags);
370 spin_unlock_irqrestore(&tgtport->lock, flags);
378 spin_unlock_irqrestore(&tgtport->lock, flags);
380 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
385 queue_work(nvmet_wq, &tgtport->put_work);
389 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
397 if (!tgtport->ops->ls_req)
400 if (!nvmet_fc_tgtport_get(tgtport))
407 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
410 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
416 spin_lock_irqsave(&tgtport->lock, flags);
418 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
422 spin_unlock_irqrestore(&tgtport->lock, flags);
424 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
433 spin_lock_irqsave(&tgtport->lock, flags);
436 spin_unlock_irqrestore(&tgtport->lock, flags);
437 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
441 nvmet_fc_tgtport_put(tgtport);
447 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
453 return __nvmet_fc_send_ls_req(tgtport, lsop, done);
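The __nvmet_fc_send_ls_req() lines above follow a common submit/unwind shape: DMA-map the request buffer, link the request onto the port's in-flight list under the lock, hand it to the LLDD, and on failure undo each step in reverse before dropping the port reference. A hedged sketch of that shape using generic DMA and list APIs; all names are illustrative, and demo_lldd_submit() merely stands in for the ops->ls_req callback:

/* Sketch of a map / link / submit sequence with reverse-order unwind. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head entry;
	void *buf;
	size_t len;
	dma_addr_t dma;
};

struct demo_port {
	struct device *dev;
	spinlock_t lock;
	struct list_head req_list;
};

int demo_lldd_submit(struct demo_req *req);	/* assumed provided by the LLDD */

static int demo_send_req(struct demo_port *port, struct demo_req *req)
{
	unsigned long flags;
	int ret;

	req->dma = dma_map_single(port->dev, req->buf, req->len,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(port->dev, req->dma))
		return -EFAULT;

	spin_lock_irqsave(&port->lock, flags);
	list_add_tail(&req->entry, &port->req_list);
	spin_unlock_irqrestore(&port->lock, flags);

	ret = demo_lldd_submit(req);
	if (ret) {
		/* Unwind in reverse order: unlink, then unmap. */
		spin_lock_irqsave(&port->lock, flags);
		list_del(&req->entry);
		spin_unlock_irqrestore(&port->lock, flags);
		dma_unmap_single(port->dev, req->dma, req->len,
				 DMA_BIDIRECTIONAL);
	}
	return ret;
}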
489 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
501 if (!tgtport->ops->ls_req || !assoc->hostport ||
507 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
509 dev_info(tgtport->dev,
511 tgtport->fc_target_port.port_num, assoc->a_id);
518 if (tgtport->ops->lsrqst_priv_sz)
523 lsop->tgtport = tgtport;
529 ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
532 dev_info(tgtport->dev,
534 tgtport->fc_target_port.port_num, assoc->a_id, ret);
544 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
554 tgtport->iod = iod;
558 iod->tgtport = tgtport;
559 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
569 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
572 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
582 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
594 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
596 struct nvmet_fc_ls_iod *iod = tgtport->iod;
600 fc_dma_unmap_single(tgtport->dev,
606 kfree(tgtport->iod);
610 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
615 spin_lock_irqsave(&tgtport->lock, flags);
616 iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
619 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
620 spin_unlock_irqrestore(&tgtport->lock, flags);
626 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
631 spin_lock_irqsave(&tgtport->lock, flags);
632 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
633 spin_unlock_irqrestore(&tgtport->lock, flags);
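nvmet_fc_alloc_ls_iod() and nvmet_fc_free_ls_iod() above implement a small pool: a free ("rcv") list and a busy list, with entries moved between them under tgtport->lock. A hedged sketch of the same pattern with illustrative names:

/* Sketch: spinlock-protected free-list / busy-list pool. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_iod {
	struct list_head entry;
};

struct demo_port {
	spinlock_t lock;
	struct list_head free_list;
	struct list_head busy_list;
};

static struct demo_iod *demo_alloc_iod(struct demo_port *port)
{
	struct demo_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	iod = list_first_entry_or_null(&port->free_list,
				       struct demo_iod, entry);
	if (iod)
		list_move_tail(&iod->entry, &port->busy_list);
	spin_unlock_irqrestore(&port->lock, flags);
	return iod;		/* NULL means the pool is exhausted */
}

static void demo_free_iod(struct demo_port *port, struct demo_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	list_move(&iod->entry, &port->free_list);
	spin_unlock_irqrestore(&port->lock, flags);
}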
637 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
645 fod->tgtport = tgtport;
654 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
656 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
659 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
672 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
680 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
708 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
719 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
721 nvmet_fc_handle_fcp_rqst(tgtport, fod);
731 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
740 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
744 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
755 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
790 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
815 assoc->tgtport->fc_target_port.port_num,
833 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
845 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
859 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
882 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
909 tgtport->ops->fcp_abort(
910 &tgtport->fc_target_port, fod->fcpreq);
932 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
935 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
938 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
958 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
970 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
990 struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
993 spin_lock_irqsave(&tgtport->lock, flags);
995 spin_unlock_irqrestore(&tgtport->lock, flags);
996 if (tgtport->ops->host_release && hostport->invalid)
997 tgtport->ops->host_release(hostport->hosthandle);
999 nvmet_fc_tgtport_put(tgtport);
1025 nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1029 lockdep_assert_held(&tgtport->lock);
1031 list_for_each_entry(host, &tgtport->host_list, host_list) {
1042 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1055 if (!nvmet_fc_tgtport_get(tgtport))
1058 spin_lock_irqsave(&tgtport->lock, flags);
1059 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1060 spin_unlock_irqrestore(&tgtport->lock, flags);
1064 nvmet_fc_tgtport_put(tgtport);
1071 nvmet_fc_tgtport_put(tgtport);
1075 spin_lock_irqsave(&tgtport->lock, flags);
1076 match = nvmet_fc_match_hostport(tgtport, hosthandle);
1082 newhost->tgtport = tgtport;
1087 list_add_tail(&newhost->host_list, &tgtport->host_list);
1089 spin_unlock_irqrestore(&tgtport->lock, flags);
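The hostport lines above suggest a lookup-then-allocate pattern: search host_list under the lock, drop the lock to allocate (which may sleep), then re-check under the lock before inserting so that a concurrently inserted entry wins and the fresh allocation is discarded. A hedged sketch of that pattern, illustrative names only:

/* Sketch: double-checked lookup/insert for a per-handle host entry. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_host {
	struct list_head entry;
	void *handle;
};

struct demo_port {
	spinlock_t lock;
	struct list_head host_list;
};

static struct demo_host *demo_match_host(struct demo_port *port, void *handle)
{
	struct demo_host *host;

	lockdep_assert_held(&port->lock);
	list_for_each_entry(host, &port->host_list, entry)
		if (host->handle == handle)
			return host;
	return NULL;
}

static struct demo_host *demo_get_host(struct demo_port *port, void *handle)
{
	struct demo_host *host, *newhost;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	host = demo_match_host(port, handle);
	spin_unlock_irqrestore(&port->lock, flags);
	if (host)
		return host;

	/* Allocation may sleep, so it runs with the lock dropped. */
	newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
	if (!newhost)
		return NULL;
	newhost->handle = handle;

	spin_lock_irqsave(&port->lock, flags);
	host = demo_match_host(port, handle);
	if (host) {
		/* A racing caller inserted the same handle first. */
		kfree(newhost);
	} else {
		list_add_tail(&newhost->entry, &port->host_list);
		host = newhost;
	}
	spin_unlock_irqrestore(&port->lock, flags);
	return host;
}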
1106 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1109 nvmet_fc_tgtport_put(tgtport);
1115 nvmet_fc_tgtport_get(assoc->tgtport);
1120 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1128 if (!tgtport->pe)
1135 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL);
1139 if (!nvmet_fc_tgtport_get(tgtport))
1142 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1146 assoc->tgtport = tgtport;
1157 spin_lock_irqsave(&tgtport->lock, flags);
1159 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1167 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list);
1169 spin_unlock_irqrestore(&tgtport->lock, flags);
1175 nvmet_fc_tgtport_put(tgtport);
1177 ida_free(&tgtport->assoc_cnt, idx);
1188 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1202 spin_lock_irqsave(&tgtport->lock, flags);
1204 spin_unlock_irqrestore(&tgtport->lock, flags);
1207 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1208 ida_free(&tgtport->assoc_cnt, assoc->a_id);
1209 dev_info(tgtport->dev,
1211 tgtport->fc_target_port.port_num, assoc->a_id);
1213 nvmet_fc_tgtport_put(tgtport);
1231 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1241 spin_lock_irqsave(&tgtport->lock, flags);
1243 spin_unlock_irqrestore(&tgtport->lock, flags);
1253 dev_info(tgtport->dev,
1255 tgtport->fc_target_port.port_num, assoc->a_id);
1259 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1266 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
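nvmet_fc_find_target_assoc() and nvmet_fc_find_target_queue() above walk tgtport->assoc_list with list_for_each_entry_rcu(). The usual shape of such a lookup, sketched here with illustrative names, is to traverse under rcu_read_lock() and return an entry only if its reference count can still be taken:

/* Sketch: RCU list lookup guarded by a refcount. */
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/types.h>

struct demo_assoc {
	struct list_head a_list;	/* added with list_add_tail_rcu() */
	u64 association_id;
	struct kref ref;
};

struct demo_port {
	struct list_head assoc_list;
};

static struct demo_assoc *demo_find_assoc(struct demo_port *port, u64 id)
{
	struct demo_assoc *assoc, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(assoc, &port->assoc_list, a_list) {
		if (assoc->association_id == id) {
			/* Skip entries already on their way out. */
			if (kref_get_unless_zero(&assoc->ref))
				ret = assoc;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}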
1280 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1286 pe->tgtport = tgtport;
1287 tgtport->pe = pe;
1292 pe->node_name = tgtport->fc_target_port.node_name;
1293 pe->port_name = tgtport->fc_target_port.port_name;
1305 if (pe->tgtport)
1306 pe->tgtport->pe = NULL;
1317 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1323 pe = tgtport->pe;
1325 pe->tgtport = NULL;
1326 tgtport->pe = NULL;
1334 * nvmet configured, the lldd unregistered the tgtport, and is now
1339 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1346 if (tgtport->fc_target_port.node_name == pe->node_name &&
1347 tgtport->fc_target_port.port_name == pe->port_name) {
1348 WARN_ON(pe->tgtport);
1349 tgtport->pe = pe;
1350 pe->tgtport = tgtport;
1464 struct nvmet_fc_tgtport *tgtport =
1466 struct device *dev = tgtport->dev;
1470 list_del(&tgtport->tgt_list);
1473 nvmet_fc_free_ls_iodlist(tgtport);
1476 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1479 tgtport->fc_target_port.port_num);
1481 ida_destroy(&tgtport->assoc_cnt);
1483 kfree(tgtport);
1489 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1491 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1495 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1497 return kref_get_unless_zero(&tgtport->ref);
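nvmet_fc_tgtport_get()/nvmet_fc_tgtport_put() above are a kref pair: get fails once the count has already reached zero, and the final put runs the release function (here nvmet_fc_free_tgtport). Combined with the put_work queued earlier in the listing, the last put can also be deferred to a workqueue so teardown never runs inline in the caller's context. A hedged, illustrative sketch:

/* Sketch: kref get/put plus a workqueue-deferred put. Names are
 * illustrative only. */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_port {
	struct kref ref;
	struct work_struct put_work;
};

static void demo_port_free(struct kref *ref)
{
	/* Runs only when the last reference is dropped. */
	kfree(container_of(ref, struct demo_port, ref));
}

static void demo_port_put(struct demo_port *port)
{
	kref_put(&port->ref, demo_port_free);
}

static bool demo_port_get(struct demo_port *port)
{
	/* Fails once the count hit zero, so a lookup cannot resurrect
	 * an object that is being torn down. */
	return kref_get_unless_zero(&port->ref);
}

static void demo_port_put_work(struct work_struct *work)
{
	demo_port_put(container_of(work, struct demo_port, put_work));
}

/* Setup would do INIT_WORK(&port->put_work, demo_port_put_work); paths
 * that must not run teardown inline then call
 *	queue_work(system_wq, &port->put_work);
 * mirroring "queue_work(nvmet_wq, &tgtport->put_work)" in the listing. */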
1501 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1506 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1548 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1553 spin_lock_irqsave(&tgtport->lock, flags);
1555 &tgtport->assoc_list, a_list) {
1566 spin_unlock_irqrestore(&tgtport->lock, flags);
1569 if (noassoc && tgtport->ops->host_release)
1570 tgtport->ops->host_release(hosthandle);
1580 struct nvmet_fc_tgtport *tgtport, *next;
1588 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1590 if (!nvmet_fc_tgtport_get(tgtport))
1595 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) {
1605 nvmet_fc_tgtport_put(tgtport);
1632 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1634 nvmet_fc_portentry_unbind_tgt(tgtport);
1637 __nvmet_fc_free_assocs(tgtport);
1648 nvmet_fc_tgtport_put(tgtport);
1659 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1696 tgtport, iod->hosthandle);
1710 dev_err(tgtport->dev,
1724 dev_info(tgtport->dev,
1726 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1750 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1786 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1803 dev_err(tgtport->dev,
1840 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1857 assoc = nvmet_fc_find_target_assoc(tgtport,
1865 dev_err(tgtport->dev,
1895 spin_lock_irqsave(&tgtport->lock, flags);
1898 spin_unlock_irqrestore(&tgtport->lock, flags);
1901 dev_info(tgtport->dev,
1904 tgtport->fc_target_port.port_num, assoc->a_id);
1912 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1933 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1935 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1937 nvmet_fc_free_ls_iod(tgtport, iod);
1938 nvmet_fc_tgtport_put(tgtport);
1942 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1947 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1950 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1959 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1982 nvmet_fc_ls_create_association(tgtport, iod);
1986 nvmet_fc_ls_create_connection(tgtport, iod);
1990 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1999 nvmet_fc_xmt_ls_rsp(tgtport, iod);
2010 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
2012 nvmet_fc_handle_ls_rqst(tgtport, iod);
2040 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2045 dev_info(tgtport->dev,
2053 if (!nvmet_fc_tgtport_get(tgtport)) {
2054 dev_info(tgtport->dev,
2061 iod = nvmet_fc_alloc_ls_iod(tgtport);
2063 dev_info(tgtport->dev,
2067 nvmet_fc_tgtport_put(tgtport);
2102 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2120 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2146 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2204 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2211 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2225 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2231 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2239 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2241 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2243 nvmet_fc_abort_op(tgtport, fod);
2247 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2268 * a new sg list to use for the tgtport api.
2273 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2300 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2302 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2305 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2340 nvmet_fc_abort_op(tgtport, fod);
2354 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2385 nvmet_fc_transfer_fcp_data(tgtport, fod,
2400 nvmet_fc_abort_op(tgtport, fod);
2416 nvmet_fc_transfer_fcp_data(tgtport, fod,
2426 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2453 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2470 nvmet_fc_abort_op(tgtport, fod);
2491 nvmet_fc_transfer_fcp_data(tgtport, fod,
2502 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2510 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2512 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2520 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2554 if (!tgtport->pe)
2556 fod->req.port = tgtport->pe->port;
2592 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2606 nvmet_fc_abort_op(tgtport, fod);
2661 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2675 queue = nvmet_fc_find_target_queue(tgtport,
2698 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2703 if (!tgtport->ops->defer_rcv) {
2866 struct nvmet_fc_tgtport *tgtport;
2890 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2891 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2892 (tgtport->fc_target_port.port_name == traddr.pn)) {
2894 if (!tgtport->pe) {
2895 nvmet_fc_portentry_bind(tgtport, pe, port);
2918 __nvmet_fc_free_assocs(pe->tgtport);
2927 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2929 if (tgtport && tgtport->ops->discovery_event)
2930 tgtport->ops->discovery_event(&tgtport->fc_target_port);