Lines matching defs:tgtport (drivers/nvme/target/fc.c, the NVMe Fibre Channel target driver)
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
34 struct nvmet_fc_tgtport *tgtport;
51 struct nvmet_fc_tgtport *tgtport;
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
90 struct nvmet_fc_tgtport *tgtport;
93 struct list_head fcp_list; /* tgtport->fcp_list */
117 struct nvmet_fc_tgtport *tgtport;
152 struct nvmet_fc_tgtport *tgtport;
163 struct nvmet_fc_tgtport *tgtport;
176 return (iodptr - iodptr->tgtport->iod);
248 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
249 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
250 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
253 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
352 struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
356 spin_lock_irqsave(&tgtport->lock, flags);
359 spin_unlock_irqrestore(&tgtport->lock, flags);
367 spin_unlock_irqrestore(&tgtport->lock, flags);
369 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
373 nvmet_fc_tgtport_put(tgtport);
377 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
385 if (!tgtport->ops->ls_req)
388 if (!nvmet_fc_tgtport_get(tgtport))
395 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
398 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
404 spin_lock_irqsave(&tgtport->lock, flags);
406 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
410 spin_unlock_irqrestore(&tgtport->lock, flags);
412 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
421 spin_lock_irqsave(&tgtport->lock, flags);
424 spin_unlock_irqrestore(&tgtport->lock, flags);
425 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
429 nvmet_fc_tgtport_put(tgtport);
435 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
441 return __nvmet_fc_send_ls_req(tgtport, lsop, done);
477 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
489 if (!tgtport->ops->ls_req || !assoc->hostport ||
495 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
497 dev_info(tgtport->dev,
499 tgtport->fc_target_port.port_num, assoc->a_id);
506 if (tgtport->ops->lsrqst_priv_sz)
511 lsop->tgtport = tgtport;
517 ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
520 dev_info(tgtport->dev,
522 tgtport->fc_target_port.port_num, assoc->a_id, ret);
532 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
542 tgtport->iod = iod;
546 iod->tgtport = tgtport;
547 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
557 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
560 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
570 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
582 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
584 struct nvmet_fc_ls_iod *iod = tgtport->iod;
588 fc_dma_unmap_single(tgtport->dev,
594 kfree(tgtport->iod);
598 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
603 spin_lock_irqsave(&tgtport->lock, flags);
604 iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
607 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
608 spin_unlock_irqrestore(&tgtport->lock, flags);
614 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
619 spin_lock_irqsave(&tgtport->lock, flags);
620 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
621 spin_unlock_irqrestore(&tgtport->lock, flags);
625 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
633 fod->tgtport = tgtport;
642 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
644 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
647 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
660 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
668 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
696 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
707 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
709 nvmet_fc_handle_fcp_rqst(tgtport, fod);
719 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
728 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
732 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
743 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
778 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
807 assoc->tgtport->fc_target_port.port_num,
825 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
832 spin_lock_irqsave(&assoc->tgtport->lock, flags);
834 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
839 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
856 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
858 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
860 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
885 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
912 tgtport->ops->fcp_abort(
913 &tgtport->fc_target_port, fod->fcpreq);
935 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
938 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
941 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
961 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
973 spin_lock_irqsave(&tgtport->lock, flags);
974 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
981 spin_unlock_irqrestore(&tgtport->lock, flags);
985 spin_unlock_irqrestore(&tgtport->lock, flags);
994 struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
997 spin_lock_irqsave(&tgtport->lock, flags);
999 spin_unlock_irqrestore(&tgtport->lock, flags);
1000 if (tgtport->ops->host_release && hostport->invalid)
1001 tgtport->ops->host_release(hostport->hosthandle);
1003 nvmet_fc_tgtport_put(tgtport);
1029 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1039 if (!nvmet_fc_tgtport_get(tgtport))
1044 spin_lock_irqsave(&tgtport->lock, flags);
1045 list_for_each_entry(host, &tgtport->host_list, host_list) {
1053 spin_unlock_irqrestore(&tgtport->lock, flags);
1055 nvmet_fc_tgtport_put(tgtport);
1059 newhost->tgtport = tgtport;
1064 spin_lock_irqsave(&tgtport->lock, flags);
1065 list_for_each_entry(host, &tgtport->host_list, host_list) {
1077 nvmet_fc_tgtport_put(tgtport);
1079 list_add_tail(&newhost->host_list, &tgtport->host_list);
1080 spin_unlock_irqrestore(&tgtport->lock, flags);
1096 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1108 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
1112 if (!nvmet_fc_tgtport_get(tgtport))
1115 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1119 assoc->tgtport = tgtport;
1130 spin_lock_irqsave(&tgtport->lock, flags);
1132 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1140 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
1142 spin_unlock_irqrestore(&tgtport->lock, flags);
1148 nvmet_fc_tgtport_put(tgtport);
1150 ida_simple_remove(&tgtport->assoc_cnt, idx);
1161 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1169 spin_lock_irqsave(&tgtport->lock, flags);
1172 spin_unlock_irqrestore(&tgtport->lock, flags);
1175 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1176 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
1177 dev_info(tgtport->dev,
1179 tgtport->fc_target_port.port_num, assoc->a_id);
1181 nvmet_fc_tgtport_put(tgtport);
1199 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1210 spin_lock_irqsave(&tgtport->lock, flags);
1216 spin_unlock_irqrestore(&tgtport->lock, flags);
1219 spin_lock_irqsave(&tgtport->lock, flags);
1222 spin_unlock_irqrestore(&tgtport->lock, flags);
1224 dev_info(tgtport->dev,
1226 tgtport->fc_target_port.port_num, assoc->a_id);
1232 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1239 spin_lock_irqsave(&tgtport->lock, flags);
1240 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1248 spin_unlock_irqrestore(&tgtport->lock, flags);
1254 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1260 pe->tgtport = tgtport;
1261 tgtport->pe = pe;
1266 pe->node_name = tgtport->fc_target_port.node_name;
1267 pe->port_name = tgtport->fc_target_port.port_name;
1279 if (pe->tgtport)
1280 pe->tgtport->pe = NULL;
1291 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1297 pe = tgtport->pe;
1299 pe->tgtport = NULL;
1300 tgtport->pe = NULL;
1308 * nvmet configured, the lldd unregistered the tgtport, and is now
1313 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1320 if (tgtport->fc_target_port.node_name == pe->node_name &&
1321 tgtport->fc_target_port.port_name == pe->port_name) {
1322 WARN_ON(pe->tgtport);
1323 tgtport->pe = pe;
1324 pe->tgtport = tgtport;
1437 struct nvmet_fc_tgtport *tgtport =
1439 struct device *dev = tgtport->dev;
1443 list_del(&tgtport->tgt_list);
1446 nvmet_fc_free_ls_iodlist(tgtport);
1449 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1452 tgtport->fc_target_port.port_num);
1454 ida_destroy(&tgtport->assoc_cnt);
1456 kfree(tgtport);
1462 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1464 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1468 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1470 return kref_get_unless_zero(&tgtport->ref);
1474 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1479 spin_lock_irqsave(&tgtport->lock, flags);
1481 &tgtport->assoc_list, a_list) {
1488 spin_unlock_irqrestore(&tgtport->lock, flags);
1524 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1529 spin_lock_irqsave(&tgtport->lock, flags);
1531 &tgtport->assoc_list, a_list) {
1543 spin_unlock_irqrestore(&tgtport->lock, flags);
1546 if (noassoc && tgtport->ops->host_release)
1547 tgtport->ops->host_release(hosthandle);
1557 struct nvmet_fc_tgtport *tgtport, *next;
1565 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1567 if (!nvmet_fc_tgtport_get(tgtport))
1571 spin_lock_irqsave(&tgtport->lock, flags);
1572 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1580 spin_unlock_irqrestore(&tgtport->lock, flags);
1582 nvmet_fc_tgtport_put(tgtport);
1610 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1612 nvmet_fc_portentry_unbind_tgt(tgtport);
1615 __nvmet_fc_free_assocs(tgtport);
1624 nvmet_fc_tgtport_put(tgtport);
1635 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1672 tgtport, iod->hosthandle);
1686 dev_err(tgtport->dev,
1700 dev_info(tgtport->dev,
1702 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1726 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1762 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1779 dev_err(tgtport->dev,
1816 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1833 assoc = nvmet_fc_find_target_assoc(tgtport,
1841 dev_err(tgtport->dev,
1874 spin_lock_irqsave(&tgtport->lock, flags);
1877 spin_unlock_irqrestore(&tgtport->lock, flags);
1882 dev_info(tgtport->dev,
1885 tgtport->fc_target_port.port_num, assoc->a_id);
1893 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1911 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1913 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1915 nvmet_fc_free_ls_iod(tgtport, iod);
1916 nvmet_fc_tgtport_put(tgtport);
1920 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1925 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1928 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1937 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1960 nvmet_fc_ls_create_association(tgtport, iod);
1964 nvmet_fc_ls_create_connection(tgtport, iod);
1968 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1977 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1988 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1990 nvmet_fc_handle_ls_rqst(tgtport, iod);
2017 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2022 dev_info(tgtport->dev,
2030 if (!nvmet_fc_tgtport_get(tgtport)) {
2031 dev_info(tgtport->dev,
2038 iod = nvmet_fc_alloc_ls_iod(tgtport);
2040 dev_info(tgtport->dev,
2044 nvmet_fc_tgtport_put(tgtport);
2079 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2097 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2123 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2181 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2188 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2202 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2208 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2216 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2218 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2220 nvmet_fc_abort_op(tgtport, fod);
2224 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2245 * a new sg list to use for the tgtport api.
2250 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2277 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2279 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2282 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2308 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2317 nvmet_fc_abort_op(tgtport, fod);
2331 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2362 nvmet_fc_transfer_fcp_data(tgtport, fod,
2377 nvmet_fc_abort_op(tgtport, fod);
2393 nvmet_fc_transfer_fcp_data(tgtport, fod,
2403 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2430 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2447 nvmet_fc_abort_op(tgtport, fod);
2468 nvmet_fc_transfer_fcp_data(tgtport, fod,
2479 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2487 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2489 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2497 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2531 if (tgtport->pe)
2532 fod->req.port = tgtport->pe->port;
2568 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2582 nvmet_fc_abort_op(tgtport, fod);
2637 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2651 queue = nvmet_fc_find_target_queue(tgtport,
2674 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2679 if (!tgtport->ops->defer_rcv) {
2842 struct nvmet_fc_tgtport *tgtport;
2866 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2867 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2868 (tgtport->fc_target_port.port_name == traddr.pn)) {
2870 if (!tgtport->pe) {
2871 nvmet_fc_portentry_bind(tgtport, pe, port);
2900 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2902 if (tgtport && tgtport->ops->discovery_event)
2903 tgtport->ops->discovery_event(&tgtport->fc_target_port);
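
The matches above cluster around two idioms: code paths pin the target port with nvmet_fc_tgtport_get() (kref_get_unless_zero on tgtport->ref) before using it and release it with nvmet_fc_tgtport_put() (kref_put), and the per-port lists (ls_rcv_list, ls_busylist, ls_req_list, assoc_list, host_list) are only walked or modified under tgtport->lock with spin_lock_irqsave(). What follows is a minimal kernel-style sketch of those two idioms only, not the driver's actual code; every demo_* name is hypothetical and invented for this illustration.

/* Minimal sketch of the kref + spinlock-protected free-list pattern seen
 * in the tgtport code above.  Names prefixed "demo_" exist only here.
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_iod {
	struct list_head list;		/* on port->free_list or port->busy_list */
};

struct demo_port {
	struct kref ref;		/* lifetime of the port object */
	spinlock_t lock;		/* protects the two lists below */
	struct list_head free_list;
	struct list_head busy_list;
};

static struct demo_port *demo_port_alloc(void)
{
	struct demo_port *port = kzalloc(sizeof(*port), GFP_KERNEL);

	if (!port)
		return NULL;

	kref_init(&port->ref);		/* caller holds the initial reference */
	spin_lock_init(&port->lock);
	INIT_LIST_HEAD(&port->free_list);
	INIT_LIST_HEAD(&port->busy_list);
	return port;
}

static void demo_port_free(struct kref *ref)
{
	struct demo_port *port = container_of(ref, struct demo_port, ref);

	kfree(port);
}

/* Analogous to nvmet_fc_tgtport_put(): drop a reference, free on last put. */
static void demo_port_put(struct demo_port *port)
{
	kref_put(&port->ref, demo_port_free);
}

/* Analogous to nvmet_fc_tgtport_get(): only succeeds while the port lives. */
static int demo_port_get(struct demo_port *port)
{
	return kref_get_unless_zero(&port->ref);
}

/* Analogous to nvmet_fc_alloc_ls_iod(): pop an iod off the free list and
 * park it on the busy list, all under the port lock.
 */
static struct demo_iod *demo_get_iod(struct demo_port *port)
{
	struct demo_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	iod = list_first_entry_or_null(&port->free_list, struct demo_iod, list);
	if (iod)
		list_move_tail(&iod->list, &port->busy_list);
	spin_unlock_irqrestore(&port->lock, flags);

	return iod;
}

/* Analogous to nvmet_fc_free_ls_iod(): return the iod to the free list. */
static void demo_put_iod(struct demo_port *port, struct demo_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	list_move(&iod->list, &port->free_list);
	spin_unlock_irqrestore(&port->lock, flags);
}

kref_get_unless_zero() is what lets a lookup that races with teardown fail cleanly instead of resurrecting a dying port, which is why nearly every entry point in the listing begins with nvmet_fc_tgtport_get() and ends with nvmet_fc_tgtport_put().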