// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>

/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */
#define NVME_FC_DEFAULT_RECONNECT_TMO	2	/* delay between reconnects
						 * when connected and a
						 * connection failure.
						 */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

struct nvmefc_ls_rcv_op {
	struct nvme_fc_rport	*rport;
	struct nvmefc_ls_rsp	*lsrsp;
	union nvmefc_ls_requests *rqstbuf;
	union nvmefc_ls_responses *rspbuf;
	u16			rqstdatalen;
	bool			handled;
	dma_addr_t		rspdma;
	struct list_head	lsrcv_list;	/* rport->ls_rcv_list */
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fcp_op_w_sgl {
	struct nvme_fc_fcp_op	op;
	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
	uint8_t			priv[];
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_port_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list; /* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		ls_rcv_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
	struct work_struct		lsrcv_work;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE		0
#define ASSOC_FAILED		1
#define FCCTRL_TERMIO		2

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	u64			association_id;
	struct nvmefc_ls_rcv_op	*rcv_disconn;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct work_struct	ioerr_work;
	struct delayed_work	connect_work;

	struct kref		ref;
	unsigned long		flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}



/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;

static bool nvme_fc_waiting_to_unload;
static DECLARE_COMPLETION(nvme_fc_unload_proceed);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
	struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);


static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list))
		complete(&nvme_fc_unload_proceed);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}


static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed.  If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	if (template->local_priv_sz)
		newrec->localport.private = &newrec[1];
	else
		newrec->localport.private = NULL;
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
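
/*
 * Example (illustrative sketch only, not used by the transport): a minimal
 * LLDD-side local port registration.  The example_* callbacks, the WWNs and
 * the numeric limits below are hypothetical placeholders; a real LLDD fills
 * these in from its own hardware state.
 *
 *	static struct nvme_fc_port_template example_fc_nvme_template = {
 *		.localport_delete	= example_localport_delete,
 *		.remoteport_delete	= example_remoteport_delete,
 *		.create_queue		= example_create_queue,
 *		.delete_queue		= example_delete_queue,
 *		.ls_req			= example_ls_req,
 *		.fcp_io			= example_fcp_io,
 *		.ls_abort		= example_ls_abort,
 *		.fcp_abort		= example_fcp_abort,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.local_priv_sz		= sizeof(struct example_lport_priv),
 *	};
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name	= 0x200000109b346d50ULL,
 *		.port_name	= 0x100000109b346d50ULL,
 *		.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
 *		.port_id	= 0x010200,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &example_fc_nvme_template,
 *					 &pdev->dev, &localport);
 *	if (ret)
 *		return ret;
 */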

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *    "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *    19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}
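
/*
 * For reference, the change uevent raised above carries an environment of
 * the following shape (the WWN values are illustrative only):
 *
 *	FC_EVENT=nvmediscovery
 *	NVMEFC_HOST_TRADDR=nn-0x200000109b346d50:pn-0x100000109b346d50
 *	NVMEFC_TRADDR=nn-0x204700a098cbcac6:pn-0x204d00a098cbcac6
 *
 * Userspace (typically an nvme-cli autoconnect udev rule) is expected to
 * react by issuing a discovery connect against the reported traddr /
 * host_traddr pair.
 */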

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	INIT_LIST_HEAD(&newrec->ls_rcv_list);
	newrec->dev = lport->dev;
	newrec->lport = lport;
	if (lport->ops->remote_priv_sz)
		newrec->remoteport.private = &newrec[1];
	else
		newrec->remoteport.private = NULL;
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
	INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
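
/*
 * Example (illustrative sketch only): once the LLDD discovers an
 * NVMe-capable target port on the fabric, it registers that port against
 * its already-registered local port.  The WWNs, port_id and timeout below
 * are hypothetical placeholders.
 *
 *	struct nvme_fc_port_info rinfo = {
 *		.node_name	= 0x204700a098cbcac6ULL,
 *		.port_name	= 0x204d00a098cbcac6ULL,
 *		.port_role	= FC_PORT_ROLE_NVME_TARGET |
 *				  FC_PORT_ROLE_NVME_DISCOVERY,
 *		.port_id	= 0x020300,
 *		.dev_loss_tmo	= 60,
 *	};
 *	struct nvme_fc_remote_port *remoteport;
 *	int ret;
 *
 *	ret = nvme_fc_register_remoteport(localport, &rinfo, &remoteport);
 *	if (ret)
 *		return ret;
 */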

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer.  Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects.  No need to do
		 * anything further.  Reconnects will be attempted until
		 * either the ctlr_loss_tmo (max_retries * connect_delay)
		 * expires or the remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association.  No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * Release the reference, which allows the rport to be torn down
	 * once all controllers go away (which should only occur after
	 * dev_loss_tmo expires).
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request an nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
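
/*
 * Example (illustrative): an LLDD that mirrors a dev_loss_tmo change made
 * on its SCSI fc_rport (e.g. via sysfs) into the FC-NVME transport would
 * simply call:
 *
 *	nvme_fc_set_remoteport_devloss(remoteport, new_dev_loss_tmo);
 *
 * where "remoteport" is the pointer returned by
 * nvme_fc_register_remoteport() and "new_dev_loss_tmo" is in seconds.
 */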

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
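
/*
 * Behaviour relied upon by fcloop (illustrative): with a NULL dev,
 *
 *	dma_addr_t dma = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *
 * yields dma == 0, and fc_dma_mapping_error(NULL, dma) returns 0, so the
 * callers below proceed as if the mapping succeeded without ever touching
 * the real DMA API.
 */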
dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents); 10198c2ecf20Sopenharmony_ci} 10208c2ecf20Sopenharmony_ci 10218c2ecf20Sopenharmony_cistatic inline void 10228c2ecf20Sopenharmony_cifc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 10238c2ecf20Sopenharmony_ci enum dma_data_direction dir) 10248c2ecf20Sopenharmony_ci{ 10258c2ecf20Sopenharmony_ci if (dev) 10268c2ecf20Sopenharmony_ci dma_unmap_sg(dev, sg, nents, dir); 10278c2ecf20Sopenharmony_ci} 10288c2ecf20Sopenharmony_ci 10298c2ecf20Sopenharmony_ci/* *********************** FC-NVME LS Handling **************************** */ 10308c2ecf20Sopenharmony_ci 10318c2ecf20Sopenharmony_cistatic void nvme_fc_ctrl_put(struct nvme_fc_ctrl *); 10328c2ecf20Sopenharmony_cistatic int nvme_fc_ctrl_get(struct nvme_fc_ctrl *); 10338c2ecf20Sopenharmony_ci 10348c2ecf20Sopenharmony_cistatic void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); 10358c2ecf20Sopenharmony_ci 10368c2ecf20Sopenharmony_cistatic void 10378c2ecf20Sopenharmony_ci__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop) 10388c2ecf20Sopenharmony_ci{ 10398c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = lsop->rport; 10408c2ecf20Sopenharmony_ci struct nvmefc_ls_req *lsreq = &lsop->ls_req; 10418c2ecf20Sopenharmony_ci unsigned long flags; 10428c2ecf20Sopenharmony_ci 10438c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 10448c2ecf20Sopenharmony_ci 10458c2ecf20Sopenharmony_ci if (!lsop->req_queued) { 10468c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 10478c2ecf20Sopenharmony_ci return; 10488c2ecf20Sopenharmony_ci } 10498c2ecf20Sopenharmony_ci 10508c2ecf20Sopenharmony_ci list_del(&lsop->lsreq_list); 10518c2ecf20Sopenharmony_ci 10528c2ecf20Sopenharmony_ci lsop->req_queued = false; 10538c2ecf20Sopenharmony_ci 10548c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 10558c2ecf20Sopenharmony_ci 10568c2ecf20Sopenharmony_ci fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 10578c2ecf20Sopenharmony_ci (lsreq->rqstlen + lsreq->rsplen), 10588c2ecf20Sopenharmony_ci DMA_BIDIRECTIONAL); 10598c2ecf20Sopenharmony_ci 10608c2ecf20Sopenharmony_ci nvme_fc_rport_put(rport); 10618c2ecf20Sopenharmony_ci} 10628c2ecf20Sopenharmony_ci 10638c2ecf20Sopenharmony_cistatic int 10648c2ecf20Sopenharmony_ci__nvme_fc_send_ls_req(struct nvme_fc_rport *rport, 10658c2ecf20Sopenharmony_ci struct nvmefc_ls_req_op *lsop, 10668c2ecf20Sopenharmony_ci void (*done)(struct nvmefc_ls_req *req, int status)) 10678c2ecf20Sopenharmony_ci{ 10688c2ecf20Sopenharmony_ci struct nvmefc_ls_req *lsreq = &lsop->ls_req; 10698c2ecf20Sopenharmony_ci unsigned long flags; 10708c2ecf20Sopenharmony_ci int ret = 0; 10718c2ecf20Sopenharmony_ci 10728c2ecf20Sopenharmony_ci if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 10738c2ecf20Sopenharmony_ci return -ECONNREFUSED; 10748c2ecf20Sopenharmony_ci 10758c2ecf20Sopenharmony_ci if (!nvme_fc_rport_get(rport)) 10768c2ecf20Sopenharmony_ci return -ESHUTDOWN; 10778c2ecf20Sopenharmony_ci 10788c2ecf20Sopenharmony_ci lsreq->done = done; 10798c2ecf20Sopenharmony_ci lsop->rport = rport; 10808c2ecf20Sopenharmony_ci lsop->req_queued = false; 10818c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&lsop->lsreq_list); 10828c2ecf20Sopenharmony_ci init_completion(&lsop->ls_done); 10838c2ecf20Sopenharmony_ci 10848c2ecf20Sopenharmony_ci lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, 10858c2ecf20Sopenharmony_ci lsreq->rqstlen + lsreq->rsplen, 10868c2ecf20Sopenharmony_ci DMA_BIDIRECTIONAL); 10878c2ecf20Sopenharmony_ci if 
(fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { 10888c2ecf20Sopenharmony_ci ret = -EFAULT; 10898c2ecf20Sopenharmony_ci goto out_putrport; 10908c2ecf20Sopenharmony_ci } 10918c2ecf20Sopenharmony_ci lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; 10928c2ecf20Sopenharmony_ci 10938c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 10948c2ecf20Sopenharmony_ci 10958c2ecf20Sopenharmony_ci list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); 10968c2ecf20Sopenharmony_ci 10978c2ecf20Sopenharmony_ci lsop->req_queued = true; 10988c2ecf20Sopenharmony_ci 10998c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 11008c2ecf20Sopenharmony_ci 11018c2ecf20Sopenharmony_ci ret = rport->lport->ops->ls_req(&rport->lport->localport, 11028c2ecf20Sopenharmony_ci &rport->remoteport, lsreq); 11038c2ecf20Sopenharmony_ci if (ret) 11048c2ecf20Sopenharmony_ci goto out_unlink; 11058c2ecf20Sopenharmony_ci 11068c2ecf20Sopenharmony_ci return 0; 11078c2ecf20Sopenharmony_ci 11088c2ecf20Sopenharmony_ciout_unlink: 11098c2ecf20Sopenharmony_ci lsop->ls_error = ret; 11108c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 11118c2ecf20Sopenharmony_ci lsop->req_queued = false; 11128c2ecf20Sopenharmony_ci list_del(&lsop->lsreq_list); 11138c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 11148c2ecf20Sopenharmony_ci fc_dma_unmap_single(rport->dev, lsreq->rqstdma, 11158c2ecf20Sopenharmony_ci (lsreq->rqstlen + lsreq->rsplen), 11168c2ecf20Sopenharmony_ci DMA_BIDIRECTIONAL); 11178c2ecf20Sopenharmony_ciout_putrport: 11188c2ecf20Sopenharmony_ci nvme_fc_rport_put(rport); 11198c2ecf20Sopenharmony_ci 11208c2ecf20Sopenharmony_ci return ret; 11218c2ecf20Sopenharmony_ci} 11228c2ecf20Sopenharmony_ci 11238c2ecf20Sopenharmony_cistatic void 11248c2ecf20Sopenharmony_cinvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status) 11258c2ecf20Sopenharmony_ci{ 11268c2ecf20Sopenharmony_ci struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 11278c2ecf20Sopenharmony_ci 11288c2ecf20Sopenharmony_ci lsop->ls_error = status; 11298c2ecf20Sopenharmony_ci complete(&lsop->ls_done); 11308c2ecf20Sopenharmony_ci} 11318c2ecf20Sopenharmony_ci 11328c2ecf20Sopenharmony_cistatic int 11338c2ecf20Sopenharmony_cinvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop) 11348c2ecf20Sopenharmony_ci{ 11358c2ecf20Sopenharmony_ci struct nvmefc_ls_req *lsreq = &lsop->ls_req; 11368c2ecf20Sopenharmony_ci struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; 11378c2ecf20Sopenharmony_ci int ret; 11388c2ecf20Sopenharmony_ci 11398c2ecf20Sopenharmony_ci ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done); 11408c2ecf20Sopenharmony_ci 11418c2ecf20Sopenharmony_ci if (!ret) { 11428c2ecf20Sopenharmony_ci /* 11438c2ecf20Sopenharmony_ci * No timeout/not interruptible as we need the struct 11448c2ecf20Sopenharmony_ci * to exist until the lldd calls us back. Thus mandate 11458c2ecf20Sopenharmony_ci * wait until driver calls back. lldd responsible for 11468c2ecf20Sopenharmony_ci * the timeout action 11478c2ecf20Sopenharmony_ci */ 11488c2ecf20Sopenharmony_ci wait_for_completion(&lsop->ls_done); 11498c2ecf20Sopenharmony_ci 11508c2ecf20Sopenharmony_ci __nvme_fc_finish_ls_req(lsop); 11518c2ecf20Sopenharmony_ci 11528c2ecf20Sopenharmony_ci ret = lsop->ls_error; 11538c2ecf20Sopenharmony_ci } 11548c2ecf20Sopenharmony_ci 11558c2ecf20Sopenharmony_ci if (ret) 11568c2ecf20Sopenharmony_ci return ret; 11578c2ecf20Sopenharmony_ci 11588c2ecf20Sopenharmony_ci /* ACC or RJT payload ? 
*/ 11598c2ecf20Sopenharmony_ci if (rjt->w0.ls_cmd == FCNVME_LS_RJT) 11608c2ecf20Sopenharmony_ci return -ENXIO; 11618c2ecf20Sopenharmony_ci 11628c2ecf20Sopenharmony_ci return 0; 11638c2ecf20Sopenharmony_ci} 11648c2ecf20Sopenharmony_ci 11658c2ecf20Sopenharmony_cistatic int 11668c2ecf20Sopenharmony_cinvme_fc_send_ls_req_async(struct nvme_fc_rport *rport, 11678c2ecf20Sopenharmony_ci struct nvmefc_ls_req_op *lsop, 11688c2ecf20Sopenharmony_ci void (*done)(struct nvmefc_ls_req *req, int status)) 11698c2ecf20Sopenharmony_ci{ 11708c2ecf20Sopenharmony_ci /* don't wait for completion */ 11718c2ecf20Sopenharmony_ci 11728c2ecf20Sopenharmony_ci return __nvme_fc_send_ls_req(rport, lsop, done); 11738c2ecf20Sopenharmony_ci} 11748c2ecf20Sopenharmony_ci 11758c2ecf20Sopenharmony_cistatic int 11768c2ecf20Sopenharmony_cinvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, 11778c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio) 11788c2ecf20Sopenharmony_ci{ 11798c2ecf20Sopenharmony_ci struct nvmefc_ls_req_op *lsop; 11808c2ecf20Sopenharmony_ci struct nvmefc_ls_req *lsreq; 11818c2ecf20Sopenharmony_ci struct fcnvme_ls_cr_assoc_rqst *assoc_rqst; 11828c2ecf20Sopenharmony_ci struct fcnvme_ls_cr_assoc_acc *assoc_acc; 11838c2ecf20Sopenharmony_ci unsigned long flags; 11848c2ecf20Sopenharmony_ci int ret, fcret = 0; 11858c2ecf20Sopenharmony_ci 11868c2ecf20Sopenharmony_ci lsop = kzalloc((sizeof(*lsop) + 11878c2ecf20Sopenharmony_ci sizeof(*assoc_rqst) + sizeof(*assoc_acc) + 11888c2ecf20Sopenharmony_ci ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 11898c2ecf20Sopenharmony_ci if (!lsop) { 11908c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 11918c2ecf20Sopenharmony_ci "NVME-FC{%d}: send Create Association failed: ENOMEM\n", 11928c2ecf20Sopenharmony_ci ctrl->cnum); 11938c2ecf20Sopenharmony_ci ret = -ENOMEM; 11948c2ecf20Sopenharmony_ci goto out_no_memory; 11958c2ecf20Sopenharmony_ci } 11968c2ecf20Sopenharmony_ci 11978c2ecf20Sopenharmony_ci assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)&lsop[1]; 11988c2ecf20Sopenharmony_ci assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1]; 11998c2ecf20Sopenharmony_ci lsreq = &lsop->ls_req; 12008c2ecf20Sopenharmony_ci if (ctrl->lport->ops->lsrqst_priv_sz) 12018c2ecf20Sopenharmony_ci lsreq->private = &assoc_acc[1]; 12028c2ecf20Sopenharmony_ci else 12038c2ecf20Sopenharmony_ci lsreq->private = NULL; 12048c2ecf20Sopenharmony_ci 12058c2ecf20Sopenharmony_ci assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; 12068c2ecf20Sopenharmony_ci assoc_rqst->desc_list_len = 12078c2ecf20Sopenharmony_ci cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 12088c2ecf20Sopenharmony_ci 12098c2ecf20Sopenharmony_ci assoc_rqst->assoc_cmd.desc_tag = 12108c2ecf20Sopenharmony_ci cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD); 12118c2ecf20Sopenharmony_ci assoc_rqst->assoc_cmd.desc_len = 12128c2ecf20Sopenharmony_ci fcnvme_lsdesc_len( 12138c2ecf20Sopenharmony_ci sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)); 12148c2ecf20Sopenharmony_ci 12158c2ecf20Sopenharmony_ci assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 12168c2ecf20Sopenharmony_ci assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); 12178c2ecf20Sopenharmony_ci /* Linux supports only Dynamic controllers */ 12188c2ecf20Sopenharmony_ci assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); 12198c2ecf20Sopenharmony_ci uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); 12208c2ecf20Sopenharmony_ci strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, 12218c2ecf20Sopenharmony_ci 
min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE)); 12228c2ecf20Sopenharmony_ci strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, 12238c2ecf20Sopenharmony_ci min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE)); 12248c2ecf20Sopenharmony_ci 12258c2ecf20Sopenharmony_ci lsop->queue = queue; 12268c2ecf20Sopenharmony_ci lsreq->rqstaddr = assoc_rqst; 12278c2ecf20Sopenharmony_ci lsreq->rqstlen = sizeof(*assoc_rqst); 12288c2ecf20Sopenharmony_ci lsreq->rspaddr = assoc_acc; 12298c2ecf20Sopenharmony_ci lsreq->rsplen = sizeof(*assoc_acc); 12308c2ecf20Sopenharmony_ci lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 12318c2ecf20Sopenharmony_ci 12328c2ecf20Sopenharmony_ci ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 12338c2ecf20Sopenharmony_ci if (ret) 12348c2ecf20Sopenharmony_ci goto out_free_buffer; 12358c2ecf20Sopenharmony_ci 12368c2ecf20Sopenharmony_ci /* process connect LS completion */ 12378c2ecf20Sopenharmony_ci 12388c2ecf20Sopenharmony_ci /* validate the ACC response */ 12398c2ecf20Sopenharmony_ci if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 12408c2ecf20Sopenharmony_ci fcret = VERR_LSACC; 12418c2ecf20Sopenharmony_ci else if (assoc_acc->hdr.desc_list_len != 12428c2ecf20Sopenharmony_ci fcnvme_lsdesc_len( 12438c2ecf20Sopenharmony_ci sizeof(struct fcnvme_ls_cr_assoc_acc))) 12448c2ecf20Sopenharmony_ci fcret = VERR_CR_ASSOC_ACC_LEN; 12458c2ecf20Sopenharmony_ci else if (assoc_acc->hdr.rqst.desc_tag != 12468c2ecf20Sopenharmony_ci cpu_to_be32(FCNVME_LSDESC_RQST)) 12478c2ecf20Sopenharmony_ci fcret = VERR_LSDESC_RQST; 12488c2ecf20Sopenharmony_ci else if (assoc_acc->hdr.rqst.desc_len != 12498c2ecf20Sopenharmony_ci fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 12508c2ecf20Sopenharmony_ci fcret = VERR_LSDESC_RQST_LEN; 12518c2ecf20Sopenharmony_ci else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) 12528c2ecf20Sopenharmony_ci fcret = VERR_CR_ASSOC; 12538c2ecf20Sopenharmony_ci else if (assoc_acc->associd.desc_tag != 12548c2ecf20Sopenharmony_ci cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) 12558c2ecf20Sopenharmony_ci fcret = VERR_ASSOC_ID; 12568c2ecf20Sopenharmony_ci else if (assoc_acc->associd.desc_len != 12578c2ecf20Sopenharmony_ci fcnvme_lsdesc_len( 12588c2ecf20Sopenharmony_ci sizeof(struct fcnvme_lsdesc_assoc_id))) 12598c2ecf20Sopenharmony_ci fcret = VERR_ASSOC_ID_LEN; 12608c2ecf20Sopenharmony_ci else if (assoc_acc->connectid.desc_tag != 12618c2ecf20Sopenharmony_ci cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 12628c2ecf20Sopenharmony_ci fcret = VERR_CONN_ID; 12638c2ecf20Sopenharmony_ci else if (assoc_acc->connectid.desc_len != 12648c2ecf20Sopenharmony_ci fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 12658c2ecf20Sopenharmony_ci fcret = VERR_CONN_ID_LEN; 12668c2ecf20Sopenharmony_ci 12678c2ecf20Sopenharmony_ci if (fcret) { 12688c2ecf20Sopenharmony_ci ret = -EBADF; 12698c2ecf20Sopenharmony_ci dev_err(ctrl->dev, 12708c2ecf20Sopenharmony_ci "q %d Create Association LS failed: %s\n", 12718c2ecf20Sopenharmony_ci queue->qnum, validation_errors[fcret]); 12728c2ecf20Sopenharmony_ci } else { 12738c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->lock, flags); 12748c2ecf20Sopenharmony_ci ctrl->association_id = 12758c2ecf20Sopenharmony_ci be64_to_cpu(assoc_acc->associd.association_id); 12768c2ecf20Sopenharmony_ci queue->connection_id = 12778c2ecf20Sopenharmony_ci be64_to_cpu(assoc_acc->connectid.connection_id); 12788c2ecf20Sopenharmony_ci set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 12798c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->lock, flags); 12808c2ecf20Sopenharmony_ci } 
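	/*
	 * Whether the Create Association LS succeeded or failed, the
	 * request/response buffer is no longer needed; fall through to
	 * free it. If ret is set, the connect failure is logged below.
	 */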
12818c2ecf20Sopenharmony_ci 12828c2ecf20Sopenharmony_ciout_free_buffer: 12838c2ecf20Sopenharmony_ci kfree(lsop); 12848c2ecf20Sopenharmony_ciout_no_memory: 12858c2ecf20Sopenharmony_ci if (ret) 12868c2ecf20Sopenharmony_ci dev_err(ctrl->dev, 12878c2ecf20Sopenharmony_ci "queue %d connect admin queue failed (%d).\n", 12888c2ecf20Sopenharmony_ci queue->qnum, ret); 12898c2ecf20Sopenharmony_ci return ret; 12908c2ecf20Sopenharmony_ci} 12918c2ecf20Sopenharmony_ci 12928c2ecf20Sopenharmony_cistatic int 12938c2ecf20Sopenharmony_cinvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 12948c2ecf20Sopenharmony_ci u16 qsize, u16 ersp_ratio) 12958c2ecf20Sopenharmony_ci{ 12968c2ecf20Sopenharmony_ci struct nvmefc_ls_req_op *lsop; 12978c2ecf20Sopenharmony_ci struct nvmefc_ls_req *lsreq; 12988c2ecf20Sopenharmony_ci struct fcnvme_ls_cr_conn_rqst *conn_rqst; 12998c2ecf20Sopenharmony_ci struct fcnvme_ls_cr_conn_acc *conn_acc; 13008c2ecf20Sopenharmony_ci int ret, fcret = 0; 13018c2ecf20Sopenharmony_ci 13028c2ecf20Sopenharmony_ci lsop = kzalloc((sizeof(*lsop) + 13038c2ecf20Sopenharmony_ci sizeof(*conn_rqst) + sizeof(*conn_acc) + 13048c2ecf20Sopenharmony_ci ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); 13058c2ecf20Sopenharmony_ci if (!lsop) { 13068c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 13078c2ecf20Sopenharmony_ci "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", 13088c2ecf20Sopenharmony_ci ctrl->cnum); 13098c2ecf20Sopenharmony_ci ret = -ENOMEM; 13108c2ecf20Sopenharmony_ci goto out_no_memory; 13118c2ecf20Sopenharmony_ci } 13128c2ecf20Sopenharmony_ci 13138c2ecf20Sopenharmony_ci conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)&lsop[1]; 13148c2ecf20Sopenharmony_ci conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1]; 13158c2ecf20Sopenharmony_ci lsreq = &lsop->ls_req; 13168c2ecf20Sopenharmony_ci if (ctrl->lport->ops->lsrqst_priv_sz) 13178c2ecf20Sopenharmony_ci lsreq->private = (void *)&conn_acc[1]; 13188c2ecf20Sopenharmony_ci else 13198c2ecf20Sopenharmony_ci lsreq->private = NULL; 13208c2ecf20Sopenharmony_ci 13218c2ecf20Sopenharmony_ci conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; 13228c2ecf20Sopenharmony_ci conn_rqst->desc_list_len = cpu_to_be32( 13238c2ecf20Sopenharmony_ci sizeof(struct fcnvme_lsdesc_assoc_id) + 13248c2ecf20Sopenharmony_ci sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 13258c2ecf20Sopenharmony_ci 13268c2ecf20Sopenharmony_ci conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); 13278c2ecf20Sopenharmony_ci conn_rqst->associd.desc_len = 13288c2ecf20Sopenharmony_ci fcnvme_lsdesc_len( 13298c2ecf20Sopenharmony_ci sizeof(struct fcnvme_lsdesc_assoc_id)); 13308c2ecf20Sopenharmony_ci conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); 13318c2ecf20Sopenharmony_ci conn_rqst->connect_cmd.desc_tag = 13328c2ecf20Sopenharmony_ci cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD); 13338c2ecf20Sopenharmony_ci conn_rqst->connect_cmd.desc_len = 13348c2ecf20Sopenharmony_ci fcnvme_lsdesc_len( 13358c2ecf20Sopenharmony_ci sizeof(struct fcnvme_lsdesc_cr_conn_cmd)); 13368c2ecf20Sopenharmony_ci conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); 13378c2ecf20Sopenharmony_ci conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); 13388c2ecf20Sopenharmony_ci conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); 13398c2ecf20Sopenharmony_ci 13408c2ecf20Sopenharmony_ci lsop->queue = queue; 13418c2ecf20Sopenharmony_ci lsreq->rqstaddr = conn_rqst; 13428c2ecf20Sopenharmony_ci lsreq->rqstlen = sizeof(*conn_rqst); 13438c2ecf20Sopenharmony_ci lsreq->rspaddr = 
conn_acc; 13448c2ecf20Sopenharmony_ci lsreq->rsplen = sizeof(*conn_acc); 13458c2ecf20Sopenharmony_ci lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; 13468c2ecf20Sopenharmony_ci 13478c2ecf20Sopenharmony_ci ret = nvme_fc_send_ls_req(ctrl->rport, lsop); 13488c2ecf20Sopenharmony_ci if (ret) 13498c2ecf20Sopenharmony_ci goto out_free_buffer; 13508c2ecf20Sopenharmony_ci 13518c2ecf20Sopenharmony_ci /* process connect LS completion */ 13528c2ecf20Sopenharmony_ci 13538c2ecf20Sopenharmony_ci /* validate the ACC response */ 13548c2ecf20Sopenharmony_ci if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) 13558c2ecf20Sopenharmony_ci fcret = VERR_LSACC; 13568c2ecf20Sopenharmony_ci else if (conn_acc->hdr.desc_list_len != 13578c2ecf20Sopenharmony_ci fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc))) 13588c2ecf20Sopenharmony_ci fcret = VERR_CR_CONN_ACC_LEN; 13598c2ecf20Sopenharmony_ci else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) 13608c2ecf20Sopenharmony_ci fcret = VERR_LSDESC_RQST; 13618c2ecf20Sopenharmony_ci else if (conn_acc->hdr.rqst.desc_len != 13628c2ecf20Sopenharmony_ci fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst))) 13638c2ecf20Sopenharmony_ci fcret = VERR_LSDESC_RQST_LEN; 13648c2ecf20Sopenharmony_ci else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) 13658c2ecf20Sopenharmony_ci fcret = VERR_CR_CONN; 13668c2ecf20Sopenharmony_ci else if (conn_acc->connectid.desc_tag != 13678c2ecf20Sopenharmony_ci cpu_to_be32(FCNVME_LSDESC_CONN_ID)) 13688c2ecf20Sopenharmony_ci fcret = VERR_CONN_ID; 13698c2ecf20Sopenharmony_ci else if (conn_acc->connectid.desc_len != 13708c2ecf20Sopenharmony_ci fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id))) 13718c2ecf20Sopenharmony_ci fcret = VERR_CONN_ID_LEN; 13728c2ecf20Sopenharmony_ci 13738c2ecf20Sopenharmony_ci if (fcret) { 13748c2ecf20Sopenharmony_ci ret = -EBADF; 13758c2ecf20Sopenharmony_ci dev_err(ctrl->dev, 13768c2ecf20Sopenharmony_ci "q %d Create I/O Connection LS failed: %s\n", 13778c2ecf20Sopenharmony_ci queue->qnum, validation_errors[fcret]); 13788c2ecf20Sopenharmony_ci } else { 13798c2ecf20Sopenharmony_ci queue->connection_id = 13808c2ecf20Sopenharmony_ci be64_to_cpu(conn_acc->connectid.connection_id); 13818c2ecf20Sopenharmony_ci set_bit(NVME_FC_Q_CONNECTED, &queue->flags); 13828c2ecf20Sopenharmony_ci } 13838c2ecf20Sopenharmony_ci 13848c2ecf20Sopenharmony_ciout_free_buffer: 13858c2ecf20Sopenharmony_ci kfree(lsop); 13868c2ecf20Sopenharmony_ciout_no_memory: 13878c2ecf20Sopenharmony_ci if (ret) 13888c2ecf20Sopenharmony_ci dev_err(ctrl->dev, 13898c2ecf20Sopenharmony_ci "queue %d connect I/O queue failed (%d).\n", 13908c2ecf20Sopenharmony_ci queue->qnum, ret); 13918c2ecf20Sopenharmony_ci return ret; 13928c2ecf20Sopenharmony_ci} 13938c2ecf20Sopenharmony_ci 13948c2ecf20Sopenharmony_cistatic void 13958c2ecf20Sopenharmony_cinvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) 13968c2ecf20Sopenharmony_ci{ 13978c2ecf20Sopenharmony_ci struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq); 13988c2ecf20Sopenharmony_ci 13998c2ecf20Sopenharmony_ci __nvme_fc_finish_ls_req(lsop); 14008c2ecf20Sopenharmony_ci 14018c2ecf20Sopenharmony_ci /* fc-nvme initiator doesn't care about success or failure of cmd */ 14028c2ecf20Sopenharmony_ci 14038c2ecf20Sopenharmony_ci kfree(lsop); 14048c2ecf20Sopenharmony_ci} 14058c2ecf20Sopenharmony_ci 14068c2ecf20Sopenharmony_ci/* 14078c2ecf20Sopenharmony_ci * This routine sends a FC-NVME LS to disconnect (aka terminate) 14088c2ecf20Sopenharmony_ci * the FC-NVME Association. 
Terminating the association also
14098c2ecf20Sopenharmony_ci * terminates the FC-NVME connections (per queue, both admin and io
14108c2ecf20Sopenharmony_ci * queues) that are part of the association. I.e. things are torn
14118c2ecf20Sopenharmony_ci * down, and the related FC-NVME Association ID and Connection IDs
14128c2ecf20Sopenharmony_ci * become invalid.
14138c2ecf20Sopenharmony_ci *
14148c2ecf20Sopenharmony_ci * The behavior of the fc-nvme initiator is such that its
14158c2ecf20Sopenharmony_ci * understanding of the association and connections will implicitly
14168c2ecf20Sopenharmony_ci * be torn down. The action is implicit as it may be due to a loss of
14178c2ecf20Sopenharmony_ci * connectivity with the fc-nvme target, so you may never get a
14188c2ecf20Sopenharmony_ci * response even if you tried. As such, the action of this routine
14198c2ecf20Sopenharmony_ci * is to asynchronously send the LS, ignore any results of the LS, and
14208c2ecf20Sopenharmony_ci * continue on with terminating the association. If the fc-nvme target
14218c2ecf20Sopenharmony_ci * is present and receives the LS, it too can tear down.
14228c2ecf20Sopenharmony_ci */
14238c2ecf20Sopenharmony_cistatic void
14248c2ecf20Sopenharmony_cinvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
14258c2ecf20Sopenharmony_ci{
14268c2ecf20Sopenharmony_ci	struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
14278c2ecf20Sopenharmony_ci	struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
14288c2ecf20Sopenharmony_ci	struct nvmefc_ls_req_op *lsop;
14298c2ecf20Sopenharmony_ci	struct nvmefc_ls_req *lsreq;
14308c2ecf20Sopenharmony_ci	int ret;
14318c2ecf20Sopenharmony_ci
14328c2ecf20Sopenharmony_ci	lsop = kzalloc((sizeof(*lsop) +
14338c2ecf20Sopenharmony_ci			 sizeof(*discon_rqst) + sizeof(*discon_acc) +
14348c2ecf20Sopenharmony_ci			 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
14358c2ecf20Sopenharmony_ci	if (!lsop) {
14368c2ecf20Sopenharmony_ci		dev_info(ctrl->ctrl.device,
14378c2ecf20Sopenharmony_ci			"NVME-FC{%d}: send Disconnect Association "
14388c2ecf20Sopenharmony_ci			"failed: ENOMEM\n",
14398c2ecf20Sopenharmony_ci			ctrl->cnum);
14408c2ecf20Sopenharmony_ci		return;
14418c2ecf20Sopenharmony_ci	}
14428c2ecf20Sopenharmony_ci
14438c2ecf20Sopenharmony_ci	discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
14448c2ecf20Sopenharmony_ci	discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
14458c2ecf20Sopenharmony_ci	lsreq = &lsop->ls_req;
14468c2ecf20Sopenharmony_ci	if (ctrl->lport->ops->lsrqst_priv_sz)
14478c2ecf20Sopenharmony_ci		lsreq->private = (void *)&discon_acc[1];
14488c2ecf20Sopenharmony_ci	else
14498c2ecf20Sopenharmony_ci		lsreq->private = NULL;
14508c2ecf20Sopenharmony_ci
14518c2ecf20Sopenharmony_ci	nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
14528c2ecf20Sopenharmony_ci				ctrl->association_id);
14538c2ecf20Sopenharmony_ci
14548c2ecf20Sopenharmony_ci	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
14558c2ecf20Sopenharmony_ci			nvme_fc_disconnect_assoc_done);
14568c2ecf20Sopenharmony_ci	if (ret)
14578c2ecf20Sopenharmony_ci		kfree(lsop);
14588c2ecf20Sopenharmony_ci}
14598c2ecf20Sopenharmony_ci
14608c2ecf20Sopenharmony_cistatic void
14618c2ecf20Sopenharmony_cinvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
14628c2ecf20Sopenharmony_ci{
14638c2ecf20Sopenharmony_ci	struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private;
14648c2ecf20Sopenharmony_ci	struct nvme_fc_rport *rport = lsop->rport;
14658c2ecf20Sopenharmony_ci	struct nvme_fc_lport *lport = rport->lport;
14668c2ecf20Sopenharmony_ci
unsigned long flags; 14678c2ecf20Sopenharmony_ci 14688c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 14698c2ecf20Sopenharmony_ci list_del(&lsop->lsrcv_list); 14708c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 14718c2ecf20Sopenharmony_ci 14728c2ecf20Sopenharmony_ci fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, 14738c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 14748c2ecf20Sopenharmony_ci fc_dma_unmap_single(lport->dev, lsop->rspdma, 14758c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 14768c2ecf20Sopenharmony_ci 14778c2ecf20Sopenharmony_ci kfree(lsop); 14788c2ecf20Sopenharmony_ci 14798c2ecf20Sopenharmony_ci nvme_fc_rport_put(rport); 14808c2ecf20Sopenharmony_ci} 14818c2ecf20Sopenharmony_ci 14828c2ecf20Sopenharmony_cistatic void 14838c2ecf20Sopenharmony_cinvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) 14848c2ecf20Sopenharmony_ci{ 14858c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = lsop->rport; 14868c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport = rport->lport; 14878c2ecf20Sopenharmony_ci struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 14888c2ecf20Sopenharmony_ci int ret; 14898c2ecf20Sopenharmony_ci 14908c2ecf20Sopenharmony_ci fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, 14918c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 14928c2ecf20Sopenharmony_ci 14938c2ecf20Sopenharmony_ci ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, 14948c2ecf20Sopenharmony_ci lsop->lsrsp); 14958c2ecf20Sopenharmony_ci if (ret) { 14968c2ecf20Sopenharmony_ci dev_warn(lport->dev, 14978c2ecf20Sopenharmony_ci "LLDD rejected LS RSP xmt: LS %d status %d\n", 14988c2ecf20Sopenharmony_ci w0->ls_cmd, ret); 14998c2ecf20Sopenharmony_ci nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); 15008c2ecf20Sopenharmony_ci return; 15018c2ecf20Sopenharmony_ci } 15028c2ecf20Sopenharmony_ci} 15038c2ecf20Sopenharmony_ci 15048c2ecf20Sopenharmony_cistatic struct nvme_fc_ctrl * 15058c2ecf20Sopenharmony_cinvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, 15068c2ecf20Sopenharmony_ci struct nvmefc_ls_rcv_op *lsop) 15078c2ecf20Sopenharmony_ci{ 15088c2ecf20Sopenharmony_ci struct fcnvme_ls_disconnect_assoc_rqst *rqst = 15098c2ecf20Sopenharmony_ci &lsop->rqstbuf->rq_dis_assoc; 15108c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl, *ret = NULL; 15118c2ecf20Sopenharmony_ci struct nvmefc_ls_rcv_op *oldls = NULL; 15128c2ecf20Sopenharmony_ci u64 association_id = be64_to_cpu(rqst->associd.association_id); 15138c2ecf20Sopenharmony_ci unsigned long flags; 15148c2ecf20Sopenharmony_ci 15158c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 15168c2ecf20Sopenharmony_ci 15178c2ecf20Sopenharmony_ci list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 15188c2ecf20Sopenharmony_ci if (!nvme_fc_ctrl_get(ctrl)) 15198c2ecf20Sopenharmony_ci continue; 15208c2ecf20Sopenharmony_ci spin_lock(&ctrl->lock); 15218c2ecf20Sopenharmony_ci if (association_id == ctrl->association_id) { 15228c2ecf20Sopenharmony_ci oldls = ctrl->rcv_disconn; 15238c2ecf20Sopenharmony_ci ctrl->rcv_disconn = lsop; 15248c2ecf20Sopenharmony_ci ret = ctrl; 15258c2ecf20Sopenharmony_ci } 15268c2ecf20Sopenharmony_ci spin_unlock(&ctrl->lock); 15278c2ecf20Sopenharmony_ci if (ret) 15288c2ecf20Sopenharmony_ci /* leave the ctrl get reference */ 15298c2ecf20Sopenharmony_ci break; 15308c2ecf20Sopenharmony_ci nvme_fc_ctrl_put(ctrl); 15318c2ecf20Sopenharmony_ci } 15328c2ecf20Sopenharmony_ci 15338c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 
15348c2ecf20Sopenharmony_ci 15358c2ecf20Sopenharmony_ci /* transmit a response for anything that was pending */ 15368c2ecf20Sopenharmony_ci if (oldls) { 15378c2ecf20Sopenharmony_ci dev_info(rport->lport->dev, 15388c2ecf20Sopenharmony_ci "NVME-FC{%d}: Multiple Disconnect Association " 15398c2ecf20Sopenharmony_ci "LS's received\n", ctrl->cnum); 15408c2ecf20Sopenharmony_ci /* overwrite good response with bogus failure */ 15418c2ecf20Sopenharmony_ci oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, 15428c2ecf20Sopenharmony_ci sizeof(*oldls->rspbuf), 15438c2ecf20Sopenharmony_ci rqst->w0.ls_cmd, 15448c2ecf20Sopenharmony_ci FCNVME_RJT_RC_UNAB, 15458c2ecf20Sopenharmony_ci FCNVME_RJT_EXP_NONE, 0); 15468c2ecf20Sopenharmony_ci nvme_fc_xmt_ls_rsp(oldls); 15478c2ecf20Sopenharmony_ci } 15488c2ecf20Sopenharmony_ci 15498c2ecf20Sopenharmony_ci return ret; 15508c2ecf20Sopenharmony_ci} 15518c2ecf20Sopenharmony_ci 15528c2ecf20Sopenharmony_ci/* 15538c2ecf20Sopenharmony_ci * returns true to mean LS handled and ls_rsp can be sent 15548c2ecf20Sopenharmony_ci * returns false to defer ls_rsp xmt (will be done as part of 15558c2ecf20Sopenharmony_ci * association termination) 15568c2ecf20Sopenharmony_ci */ 15578c2ecf20Sopenharmony_cistatic bool 15588c2ecf20Sopenharmony_cinvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop) 15598c2ecf20Sopenharmony_ci{ 15608c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = lsop->rport; 15618c2ecf20Sopenharmony_ci struct fcnvme_ls_disconnect_assoc_rqst *rqst = 15628c2ecf20Sopenharmony_ci &lsop->rqstbuf->rq_dis_assoc; 15638c2ecf20Sopenharmony_ci struct fcnvme_ls_disconnect_assoc_acc *acc = 15648c2ecf20Sopenharmony_ci &lsop->rspbuf->rsp_dis_assoc; 15658c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = NULL; 15668c2ecf20Sopenharmony_ci int ret = 0; 15678c2ecf20Sopenharmony_ci 15688c2ecf20Sopenharmony_ci memset(acc, 0, sizeof(*acc)); 15698c2ecf20Sopenharmony_ci 15708c2ecf20Sopenharmony_ci ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); 15718c2ecf20Sopenharmony_ci if (!ret) { 15728c2ecf20Sopenharmony_ci /* match an active association */ 15738c2ecf20Sopenharmony_ci ctrl = nvme_fc_match_disconn_ls(rport, lsop); 15748c2ecf20Sopenharmony_ci if (!ctrl) 15758c2ecf20Sopenharmony_ci ret = VERR_NO_ASSOC; 15768c2ecf20Sopenharmony_ci } 15778c2ecf20Sopenharmony_ci 15788c2ecf20Sopenharmony_ci if (ret) { 15798c2ecf20Sopenharmony_ci dev_info(rport->lport->dev, 15808c2ecf20Sopenharmony_ci "Disconnect LS failed: %s\n", 15818c2ecf20Sopenharmony_ci validation_errors[ret]); 15828c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, 15838c2ecf20Sopenharmony_ci sizeof(*acc), rqst->w0.ls_cmd, 15848c2ecf20Sopenharmony_ci (ret == VERR_NO_ASSOC) ? 
15858c2ecf20Sopenharmony_ci FCNVME_RJT_RC_INV_ASSOC : 15868c2ecf20Sopenharmony_ci FCNVME_RJT_RC_LOGIC, 15878c2ecf20Sopenharmony_ci FCNVME_RJT_EXP_NONE, 0); 15888c2ecf20Sopenharmony_ci return true; 15898c2ecf20Sopenharmony_ci } 15908c2ecf20Sopenharmony_ci 15918c2ecf20Sopenharmony_ci /* format an ACCept response */ 15928c2ecf20Sopenharmony_ci 15938c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = sizeof(*acc); 15948c2ecf20Sopenharmony_ci 15958c2ecf20Sopenharmony_ci nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC, 15968c2ecf20Sopenharmony_ci fcnvme_lsdesc_len( 15978c2ecf20Sopenharmony_ci sizeof(struct fcnvme_ls_disconnect_assoc_acc)), 15988c2ecf20Sopenharmony_ci FCNVME_LS_DISCONNECT_ASSOC); 15998c2ecf20Sopenharmony_ci 16008c2ecf20Sopenharmony_ci /* 16018c2ecf20Sopenharmony_ci * the transmit of the response will occur after the exchanges 16028c2ecf20Sopenharmony_ci * for the association have been ABTS'd by 16038c2ecf20Sopenharmony_ci * nvme_fc_delete_association(). 16048c2ecf20Sopenharmony_ci */ 16058c2ecf20Sopenharmony_ci 16068c2ecf20Sopenharmony_ci /* fail the association */ 16078c2ecf20Sopenharmony_ci nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); 16088c2ecf20Sopenharmony_ci 16098c2ecf20Sopenharmony_ci /* release the reference taken by nvme_fc_match_disconn_ls() */ 16108c2ecf20Sopenharmony_ci nvme_fc_ctrl_put(ctrl); 16118c2ecf20Sopenharmony_ci 16128c2ecf20Sopenharmony_ci return false; 16138c2ecf20Sopenharmony_ci} 16148c2ecf20Sopenharmony_ci 16158c2ecf20Sopenharmony_ci/* 16168c2ecf20Sopenharmony_ci * Actual Processing routine for received FC-NVME LS Requests from the LLD 16178c2ecf20Sopenharmony_ci * returns true if a response should be sent afterward, false if rsp will 16188c2ecf20Sopenharmony_ci * be sent asynchronously. 16198c2ecf20Sopenharmony_ci */ 16208c2ecf20Sopenharmony_cistatic bool 16218c2ecf20Sopenharmony_cinvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop) 16228c2ecf20Sopenharmony_ci{ 16238c2ecf20Sopenharmony_ci struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; 16248c2ecf20Sopenharmony_ci bool ret = true; 16258c2ecf20Sopenharmony_ci 16268c2ecf20Sopenharmony_ci lsop->lsrsp->nvme_fc_private = lsop; 16278c2ecf20Sopenharmony_ci lsop->lsrsp->rspbuf = lsop->rspbuf; 16288c2ecf20Sopenharmony_ci lsop->lsrsp->rspdma = lsop->rspdma; 16298c2ecf20Sopenharmony_ci lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; 16308c2ecf20Sopenharmony_ci /* Be preventative. 
handlers will later set to valid length */ 16318c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = 0; 16328c2ecf20Sopenharmony_ci 16338c2ecf20Sopenharmony_ci /* 16348c2ecf20Sopenharmony_ci * handlers: 16358c2ecf20Sopenharmony_ci * parse request input, execute the request, and format the 16368c2ecf20Sopenharmony_ci * LS response 16378c2ecf20Sopenharmony_ci */ 16388c2ecf20Sopenharmony_ci switch (w0->ls_cmd) { 16398c2ecf20Sopenharmony_ci case FCNVME_LS_DISCONNECT_ASSOC: 16408c2ecf20Sopenharmony_ci ret = nvme_fc_ls_disconnect_assoc(lsop); 16418c2ecf20Sopenharmony_ci break; 16428c2ecf20Sopenharmony_ci case FCNVME_LS_DISCONNECT_CONN: 16438c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 16448c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), w0->ls_cmd, 16458c2ecf20Sopenharmony_ci FCNVME_RJT_RC_UNSUP, FCNVME_RJT_EXP_NONE, 0); 16468c2ecf20Sopenharmony_ci break; 16478c2ecf20Sopenharmony_ci case FCNVME_LS_CREATE_ASSOCIATION: 16488c2ecf20Sopenharmony_ci case FCNVME_LS_CREATE_CONNECTION: 16498c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 16508c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), w0->ls_cmd, 16518c2ecf20Sopenharmony_ci FCNVME_RJT_RC_LOGIC, FCNVME_RJT_EXP_NONE, 0); 16528c2ecf20Sopenharmony_ci break; 16538c2ecf20Sopenharmony_ci default: 16548c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, 16558c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), w0->ls_cmd, 16568c2ecf20Sopenharmony_ci FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0); 16578c2ecf20Sopenharmony_ci break; 16588c2ecf20Sopenharmony_ci } 16598c2ecf20Sopenharmony_ci 16608c2ecf20Sopenharmony_ci return(ret); 16618c2ecf20Sopenharmony_ci} 16628c2ecf20Sopenharmony_ci 16638c2ecf20Sopenharmony_cistatic void 16648c2ecf20Sopenharmony_cinvme_fc_handle_ls_rqst_work(struct work_struct *work) 16658c2ecf20Sopenharmony_ci{ 16668c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = 16678c2ecf20Sopenharmony_ci container_of(work, struct nvme_fc_rport, lsrcv_work); 16688c2ecf20Sopenharmony_ci struct fcnvme_ls_rqst_w0 *w0; 16698c2ecf20Sopenharmony_ci struct nvmefc_ls_rcv_op *lsop; 16708c2ecf20Sopenharmony_ci unsigned long flags; 16718c2ecf20Sopenharmony_ci bool sendrsp; 16728c2ecf20Sopenharmony_ci 16738c2ecf20Sopenharmony_cirestart: 16748c2ecf20Sopenharmony_ci sendrsp = true; 16758c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 16768c2ecf20Sopenharmony_ci list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { 16778c2ecf20Sopenharmony_ci if (lsop->handled) 16788c2ecf20Sopenharmony_ci continue; 16798c2ecf20Sopenharmony_ci 16808c2ecf20Sopenharmony_ci lsop->handled = true; 16818c2ecf20Sopenharmony_ci if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 16828c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 16838c2ecf20Sopenharmony_ci sendrsp = nvme_fc_handle_ls_rqst(lsop); 16848c2ecf20Sopenharmony_ci } else { 16858c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 16868c2ecf20Sopenharmony_ci w0 = &lsop->rqstbuf->w0; 16878c2ecf20Sopenharmony_ci lsop->lsrsp->rsplen = nvme_fc_format_rjt( 16888c2ecf20Sopenharmony_ci lsop->rspbuf, 16898c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), 16908c2ecf20Sopenharmony_ci w0->ls_cmd, 16918c2ecf20Sopenharmony_ci FCNVME_RJT_RC_UNAB, 16928c2ecf20Sopenharmony_ci FCNVME_RJT_EXP_NONE, 0); 16938c2ecf20Sopenharmony_ci } 16948c2ecf20Sopenharmony_ci if (sendrsp) 16958c2ecf20Sopenharmony_ci nvme_fc_xmt_ls_rsp(lsop); 16968c2ecf20Sopenharmony_ci goto restart; 16978c2ecf20Sopenharmony_ci } 
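	/*
	 * Handling an LS above required dropping rport->lock, so the
	 * scan restarts from the top of ls_rcv_list (it may have changed
	 * while unlocked). Falling out of the loop means every queued LS
	 * has been handled and the lock is still held; release it here.
	 */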
16988c2ecf20Sopenharmony_ci	spin_unlock_irqrestore(&rport->lock, flags);
16998c2ecf20Sopenharmony_ci}
17008c2ecf20Sopenharmony_ci
17018c2ecf20Sopenharmony_ci/**
17028c2ecf20Sopenharmony_ci * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
17038c2ecf20Sopenharmony_ci *                      upon the reception of an NVME LS request.
17048c2ecf20Sopenharmony_ci *
17058c2ecf20Sopenharmony_ci * The nvme-fc layer will copy payload to an internal structure for
17068c2ecf20Sopenharmony_ci * processing. As such, upon completion of the routine, the LLDD may
17078c2ecf20Sopenharmony_ci * immediately free/reuse the LS request buffer passed in the call.
17088c2ecf20Sopenharmony_ci *
17098c2ecf20Sopenharmony_ci * If this routine returns error, the LLDD should abort the exchange.
17108c2ecf20Sopenharmony_ci *
17118c2ecf20Sopenharmony_ci * @portptr:    pointer to the (registered) remote port that the LS
17128c2ecf20Sopenharmony_ci *              was received from. The remoteport is associated with
17138c2ecf20Sopenharmony_ci *              a specific localport.
17148c2ecf20Sopenharmony_ci * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
17158c2ecf20Sopenharmony_ci *              used to reference the exchange corresponding to the LS
17168c2ecf20Sopenharmony_ci *              when issuing an ls response.
17178c2ecf20Sopenharmony_ci * @lsreqbuf:   pointer to the buffer containing the LS Request
17188c2ecf20Sopenharmony_ci * @lsreqbuf_len: length, in bytes, of the received LS request
17198c2ecf20Sopenharmony_ci */
17208c2ecf20Sopenharmony_ciint
17218c2ecf20Sopenharmony_cinvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
17228c2ecf20Sopenharmony_ci			struct nvmefc_ls_rsp *lsrsp,
17238c2ecf20Sopenharmony_ci			void *lsreqbuf, u32 lsreqbuf_len)
17248c2ecf20Sopenharmony_ci{
17258c2ecf20Sopenharmony_ci	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
17268c2ecf20Sopenharmony_ci	struct nvme_fc_lport *lport = rport->lport;
17278c2ecf20Sopenharmony_ci	struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
17288c2ecf20Sopenharmony_ci	struct nvmefc_ls_rcv_op *lsop;
17298c2ecf20Sopenharmony_ci	unsigned long flags;
17308c2ecf20Sopenharmony_ci	int ret;
17318c2ecf20Sopenharmony_ci
17328c2ecf20Sopenharmony_ci	nvme_fc_rport_get(rport);
17338c2ecf20Sopenharmony_ci
17348c2ecf20Sopenharmony_ci	/* validate there's a routine to transmit a response */
17358c2ecf20Sopenharmony_ci	if (!lport->ops->xmt_ls_rsp) {
17368c2ecf20Sopenharmony_ci		dev_info(lport->dev,
17378c2ecf20Sopenharmony_ci			"RCV %s LS failed: no LLDD xmt_ls_rsp\n",
17388c2ecf20Sopenharmony_ci			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
17398c2ecf20Sopenharmony_ci				nvmefc_ls_names[w0->ls_cmd] : "");
17408c2ecf20Sopenharmony_ci		ret = -EINVAL;
17418c2ecf20Sopenharmony_ci		goto out_put;
17428c2ecf20Sopenharmony_ci	}
17438c2ecf20Sopenharmony_ci
17448c2ecf20Sopenharmony_ci	if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
17458c2ecf20Sopenharmony_ci		dev_info(lport->dev,
17468c2ecf20Sopenharmony_ci			"RCV %s LS failed: payload too large\n",
17478c2ecf20Sopenharmony_ci			(w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
17488c2ecf20Sopenharmony_ci nvmefc_ls_names[w0->ls_cmd] : ""); 17498c2ecf20Sopenharmony_ci ret = -E2BIG; 17508c2ecf20Sopenharmony_ci goto out_put; 17518c2ecf20Sopenharmony_ci } 17528c2ecf20Sopenharmony_ci 17538c2ecf20Sopenharmony_ci lsop = kzalloc(sizeof(*lsop) + 17548c2ecf20Sopenharmony_ci sizeof(union nvmefc_ls_requests) + 17558c2ecf20Sopenharmony_ci sizeof(union nvmefc_ls_responses), 17568c2ecf20Sopenharmony_ci GFP_KERNEL); 17578c2ecf20Sopenharmony_ci if (!lsop) { 17588c2ecf20Sopenharmony_ci dev_info(lport->dev, 17598c2ecf20Sopenharmony_ci "RCV %s LS failed: No memory\n", 17608c2ecf20Sopenharmony_ci (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 17618c2ecf20Sopenharmony_ci nvmefc_ls_names[w0->ls_cmd] : ""); 17628c2ecf20Sopenharmony_ci ret = -ENOMEM; 17638c2ecf20Sopenharmony_ci goto out_put; 17648c2ecf20Sopenharmony_ci } 17658c2ecf20Sopenharmony_ci lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; 17668c2ecf20Sopenharmony_ci lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; 17678c2ecf20Sopenharmony_ci 17688c2ecf20Sopenharmony_ci lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, 17698c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), 17708c2ecf20Sopenharmony_ci DMA_TO_DEVICE); 17718c2ecf20Sopenharmony_ci if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { 17728c2ecf20Sopenharmony_ci dev_info(lport->dev, 17738c2ecf20Sopenharmony_ci "RCV %s LS failed: DMA mapping failure\n", 17748c2ecf20Sopenharmony_ci (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? 17758c2ecf20Sopenharmony_ci nvmefc_ls_names[w0->ls_cmd] : ""); 17768c2ecf20Sopenharmony_ci ret = -EFAULT; 17778c2ecf20Sopenharmony_ci goto out_free; 17788c2ecf20Sopenharmony_ci } 17798c2ecf20Sopenharmony_ci 17808c2ecf20Sopenharmony_ci lsop->rport = rport; 17818c2ecf20Sopenharmony_ci lsop->lsrsp = lsrsp; 17828c2ecf20Sopenharmony_ci 17838c2ecf20Sopenharmony_ci memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); 17848c2ecf20Sopenharmony_ci lsop->rqstdatalen = lsreqbuf_len; 17858c2ecf20Sopenharmony_ci 17868c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 17878c2ecf20Sopenharmony_ci if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { 17888c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 17898c2ecf20Sopenharmony_ci ret = -ENOTCONN; 17908c2ecf20Sopenharmony_ci goto out_unmap; 17918c2ecf20Sopenharmony_ci } 17928c2ecf20Sopenharmony_ci list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); 17938c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 17948c2ecf20Sopenharmony_ci 17958c2ecf20Sopenharmony_ci schedule_work(&rport->lsrcv_work); 17968c2ecf20Sopenharmony_ci 17978c2ecf20Sopenharmony_ci return 0; 17988c2ecf20Sopenharmony_ci 17998c2ecf20Sopenharmony_ciout_unmap: 18008c2ecf20Sopenharmony_ci fc_dma_unmap_single(lport->dev, lsop->rspdma, 18018c2ecf20Sopenharmony_ci sizeof(*lsop->rspbuf), DMA_TO_DEVICE); 18028c2ecf20Sopenharmony_ciout_free: 18038c2ecf20Sopenharmony_ci kfree(lsop); 18048c2ecf20Sopenharmony_ciout_put: 18058c2ecf20Sopenharmony_ci nvme_fc_rport_put(rport); 18068c2ecf20Sopenharmony_ci return ret; 18078c2ecf20Sopenharmony_ci} 18088c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(nvme_fc_rcv_ls_req); 18098c2ecf20Sopenharmony_ci 18108c2ecf20Sopenharmony_ci 18118c2ecf20Sopenharmony_ci/* *********************** NVME Ctrl Routines **************************** */ 18128c2ecf20Sopenharmony_ci 18138c2ecf20Sopenharmony_cistatic void 18148c2ecf20Sopenharmony_ci__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, 18158c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op) 18168c2ecf20Sopenharmony_ci{ 
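	/*
	 * Release the DMA mappings created for this op's response and
	 * command IUs in __nvme_fc_init_request() and return the op to
	 * the uninitialized state.
	 */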
18178c2ecf20Sopenharmony_ci fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, 18188c2ecf20Sopenharmony_ci sizeof(op->rsp_iu), DMA_FROM_DEVICE); 18198c2ecf20Sopenharmony_ci fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, 18208c2ecf20Sopenharmony_ci sizeof(op->cmd_iu), DMA_TO_DEVICE); 18218c2ecf20Sopenharmony_ci 18228c2ecf20Sopenharmony_ci atomic_set(&op->state, FCPOP_STATE_UNINIT); 18238c2ecf20Sopenharmony_ci} 18248c2ecf20Sopenharmony_ci 18258c2ecf20Sopenharmony_cistatic void 18268c2ecf20Sopenharmony_cinvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq, 18278c2ecf20Sopenharmony_ci unsigned int hctx_idx) 18288c2ecf20Sopenharmony_ci{ 18298c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 18308c2ecf20Sopenharmony_ci 18318c2ecf20Sopenharmony_ci return __nvme_fc_exit_request(set->driver_data, op); 18328c2ecf20Sopenharmony_ci} 18338c2ecf20Sopenharmony_ci 18348c2ecf20Sopenharmony_cistatic int 18358c2ecf20Sopenharmony_ci__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) 18368c2ecf20Sopenharmony_ci{ 18378c2ecf20Sopenharmony_ci unsigned long flags; 18388c2ecf20Sopenharmony_ci int opstate; 18398c2ecf20Sopenharmony_ci 18408c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->lock, flags); 18418c2ecf20Sopenharmony_ci opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); 18428c2ecf20Sopenharmony_ci if (opstate != FCPOP_STATE_ACTIVE) 18438c2ecf20Sopenharmony_ci atomic_set(&op->state, opstate); 18448c2ecf20Sopenharmony_ci else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { 18458c2ecf20Sopenharmony_ci op->flags |= FCOP_FLAGS_TERMIO; 18468c2ecf20Sopenharmony_ci ctrl->iocnt++; 18478c2ecf20Sopenharmony_ci } 18488c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->lock, flags); 18498c2ecf20Sopenharmony_ci 18508c2ecf20Sopenharmony_ci if (opstate != FCPOP_STATE_ACTIVE) 18518c2ecf20Sopenharmony_ci return -ECANCELED; 18528c2ecf20Sopenharmony_ci 18538c2ecf20Sopenharmony_ci ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, 18548c2ecf20Sopenharmony_ci &ctrl->rport->remoteport, 18558c2ecf20Sopenharmony_ci op->queue->lldd_handle, 18568c2ecf20Sopenharmony_ci &op->fcp_req); 18578c2ecf20Sopenharmony_ci 18588c2ecf20Sopenharmony_ci return 0; 18598c2ecf20Sopenharmony_ci} 18608c2ecf20Sopenharmony_ci 18618c2ecf20Sopenharmony_cistatic void 18628c2ecf20Sopenharmony_cinvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) 18638c2ecf20Sopenharmony_ci{ 18648c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; 18658c2ecf20Sopenharmony_ci int i; 18668c2ecf20Sopenharmony_ci 18678c2ecf20Sopenharmony_ci /* ensure we've initialized the ops once */ 18688c2ecf20Sopenharmony_ci if (!(aen_op->flags & FCOP_FLAGS_AEN)) 18698c2ecf20Sopenharmony_ci return; 18708c2ecf20Sopenharmony_ci 18718c2ecf20Sopenharmony_ci for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) 18728c2ecf20Sopenharmony_ci __nvme_fc_abort_op(ctrl, aen_op); 18738c2ecf20Sopenharmony_ci} 18748c2ecf20Sopenharmony_ci 18758c2ecf20Sopenharmony_cistatic inline void 18768c2ecf20Sopenharmony_ci__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, 18778c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op, int opstate) 18788c2ecf20Sopenharmony_ci{ 18798c2ecf20Sopenharmony_ci unsigned long flags; 18808c2ecf20Sopenharmony_ci 18818c2ecf20Sopenharmony_ci if (opstate == FCPOP_STATE_ABORTED) { 18828c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->lock, flags); 18838c2ecf20Sopenharmony_ci if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && 18848c2ecf20Sopenharmony_ci op->flags & FCOP_FLAGS_TERMIO) { 
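			/*
			 * This op was counted in ctrl->iocnt when it was
			 * marked FCOP_FLAGS_TERMIO in __nvme_fc_abort_op();
			 * drop that count and wake any waiter once all
			 * terminated I/Os have completed.
			 */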
18858c2ecf20Sopenharmony_ci if (!--ctrl->iocnt) 18868c2ecf20Sopenharmony_ci wake_up(&ctrl->ioabort_wait); 18878c2ecf20Sopenharmony_ci } 18888c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->lock, flags); 18898c2ecf20Sopenharmony_ci } 18908c2ecf20Sopenharmony_ci} 18918c2ecf20Sopenharmony_ci 18928c2ecf20Sopenharmony_cistatic void 18938c2ecf20Sopenharmony_cinvme_fc_ctrl_ioerr_work(struct work_struct *work) 18948c2ecf20Sopenharmony_ci{ 18958c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = 18968c2ecf20Sopenharmony_ci container_of(work, struct nvme_fc_ctrl, ioerr_work); 18978c2ecf20Sopenharmony_ci 18988c2ecf20Sopenharmony_ci nvme_fc_error_recovery(ctrl, "transport detected io error"); 18998c2ecf20Sopenharmony_ci} 19008c2ecf20Sopenharmony_ci 19018c2ecf20Sopenharmony_cistatic void 19028c2ecf20Sopenharmony_cinvme_fc_fcpio_done(struct nvmefc_fcp_req *req) 19038c2ecf20Sopenharmony_ci{ 19048c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); 19058c2ecf20Sopenharmony_ci struct request *rq = op->rq; 19068c2ecf20Sopenharmony_ci struct nvmefc_fcp_req *freq = &op->fcp_req; 19078c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = op->ctrl; 19088c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue = op->queue; 19098c2ecf20Sopenharmony_ci struct nvme_completion *cqe = &op->rsp_iu.cqe; 19108c2ecf20Sopenharmony_ci struct nvme_command *sqe = &op->cmd_iu.sqe; 19118c2ecf20Sopenharmony_ci __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 19128c2ecf20Sopenharmony_ci union nvme_result result; 19138c2ecf20Sopenharmony_ci bool terminate_assoc = true; 19148c2ecf20Sopenharmony_ci int opstate; 19158c2ecf20Sopenharmony_ci 19168c2ecf20Sopenharmony_ci /* 19178c2ecf20Sopenharmony_ci * WARNING: 19188c2ecf20Sopenharmony_ci * The current linux implementation of a nvme controller 19198c2ecf20Sopenharmony_ci * allocates a single tag set for all io queues and sizes 19208c2ecf20Sopenharmony_ci * the io queues to fully hold all possible tags. Thus, the 19218c2ecf20Sopenharmony_ci * implementation does not reference or care about the sqhd 19228c2ecf20Sopenharmony_ci * value as it never needs to use the sqhd/sqtail pointers 19238c2ecf20Sopenharmony_ci * for submission pacing. 19248c2ecf20Sopenharmony_ci * 19258c2ecf20Sopenharmony_ci * This affects the FC-NVME implementation in two ways: 19268c2ecf20Sopenharmony_ci * 1) As the value doesn't matter, we don't need to waste 19278c2ecf20Sopenharmony_ci * cycles extracting it from ERSPs and stamping it in the 19288c2ecf20Sopenharmony_ci * cases where the transport fabricates CQEs on successful 19298c2ecf20Sopenharmony_ci * completions. 19308c2ecf20Sopenharmony_ci * 2) The FC-NVME implementation requires that delivery of 19318c2ecf20Sopenharmony_ci * ERSP completions are to go back to the nvme layer in order 19328c2ecf20Sopenharmony_ci * relative to the rsn, such that the sqhd value will always 19338c2ecf20Sopenharmony_ci * be "in order" for the nvme layer. As the nvme layer in 19348c2ecf20Sopenharmony_ci * linux doesn't care about sqhd, there's no need to return 19358c2ecf20Sopenharmony_ci * them in order. 
19368c2ecf20Sopenharmony_ci	 *
19378c2ecf20Sopenharmony_ci	 * Additionally:
19388c2ecf20Sopenharmony_ci	 * As the core nvme layer in linux currently does not look at
19398c2ecf20Sopenharmony_ci	 * every field in the cqe - in cases where the FC transport must
19408c2ecf20Sopenharmony_ci	 * fabricate a CQE, the following fields will not be set as they
19418c2ecf20Sopenharmony_ci	 * are not referenced:
19428c2ecf20Sopenharmony_ci	 *	cqe.sqid, cqe.sqhd, cqe.command_id
19438c2ecf20Sopenharmony_ci	 *
19448c2ecf20Sopenharmony_ci	 * Failure or error of an individual i/o, in a transport
19458c2ecf20Sopenharmony_ci	 * detected fashion unrelated to the nvme completion status,
19468c2ecf20Sopenharmony_ci	 * can potentially cause the initiator and target sides to get out
19478c2ecf20Sopenharmony_ci	 * of sync on SQ head/tail (aka outstanding io count allowed).
19488c2ecf20Sopenharmony_ci	 * Per FC-NVME spec, failure of an individual command requires
19498c2ecf20Sopenharmony_ci	 * the connection to be terminated, which in turn requires the
19508c2ecf20Sopenharmony_ci	 * association to be terminated.
19518c2ecf20Sopenharmony_ci	 */
19528c2ecf20Sopenharmony_ci
19538c2ecf20Sopenharmony_ci	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
19548c2ecf20Sopenharmony_ci
19558c2ecf20Sopenharmony_ci	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
19568c2ecf20Sopenharmony_ci				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
19578c2ecf20Sopenharmony_ci
19588c2ecf20Sopenharmony_ci	if (opstate == FCPOP_STATE_ABORTED)
19598c2ecf20Sopenharmony_ci		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
19608c2ecf20Sopenharmony_ci	else if (freq->status) {
19618c2ecf20Sopenharmony_ci		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
19628c2ecf20Sopenharmony_ci		dev_info(ctrl->ctrl.device,
19638c2ecf20Sopenharmony_ci			"NVME-FC{%d}: io failed due to lldd error %d\n",
19648c2ecf20Sopenharmony_ci			ctrl->cnum, freq->status);
19658c2ecf20Sopenharmony_ci	}
19668c2ecf20Sopenharmony_ci
19678c2ecf20Sopenharmony_ci	/*
19688c2ecf20Sopenharmony_ci	 * For the linux implementation, if we have an unsuccessful
19698c2ecf20Sopenharmony_ci	 * status, the blk-mq layer can typically be called with the
19708c2ecf20Sopenharmony_ci	 * non-zero status and the content of the cqe isn't important.
19718c2ecf20Sopenharmony_ci	 */
19728c2ecf20Sopenharmony_ci	if (status)
19738c2ecf20Sopenharmony_ci		goto done;
19748c2ecf20Sopenharmony_ci
19758c2ecf20Sopenharmony_ci	/*
19768c2ecf20Sopenharmony_ci	 * command completed successfully relative to the wire
19778c2ecf20Sopenharmony_ci	 * protocol. However, validate anything received and
19788c2ecf20Sopenharmony_ci	 * extract the status and result from the cqe (create it
19798c2ecf20Sopenharmony_ci	 * where necessary).
19808c2ecf20Sopenharmony_ci	 */
19818c2ecf20Sopenharmony_ci
19828c2ecf20Sopenharmony_ci	switch (freq->rcv_rsplen) {
19838c2ecf20Sopenharmony_ci
19848c2ecf20Sopenharmony_ci	case 0:
19858c2ecf20Sopenharmony_ci	case NVME_FC_SIZEOF_ZEROS_RSP:
19868c2ecf20Sopenharmony_ci		/*
19878c2ecf20Sopenharmony_ci		 * No response payload or 12 bytes of payload (which
19888c2ecf20Sopenharmony_ci		 * should all be zeros) are considered successful, with
19898c2ecf20Sopenharmony_ci		 * no payload placed in the CQE by the transport.
19908c2ecf20Sopenharmony_ci */ 19918c2ecf20Sopenharmony_ci if (freq->transferred_length != 19928c2ecf20Sopenharmony_ci be32_to_cpu(op->cmd_iu.data_len)) { 19938c2ecf20Sopenharmony_ci status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 19948c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 19958c2ecf20Sopenharmony_ci "NVME-FC{%d}: io failed due to bad transfer " 19968c2ecf20Sopenharmony_ci "length: %d vs expected %d\n", 19978c2ecf20Sopenharmony_ci ctrl->cnum, freq->transferred_length, 19988c2ecf20Sopenharmony_ci be32_to_cpu(op->cmd_iu.data_len)); 19998c2ecf20Sopenharmony_ci goto done; 20008c2ecf20Sopenharmony_ci } 20018c2ecf20Sopenharmony_ci result.u64 = 0; 20028c2ecf20Sopenharmony_ci break; 20038c2ecf20Sopenharmony_ci 20048c2ecf20Sopenharmony_ci case sizeof(struct nvme_fc_ersp_iu): 20058c2ecf20Sopenharmony_ci /* 20068c2ecf20Sopenharmony_ci * The ERSP IU contains a full completion with CQE. 20078c2ecf20Sopenharmony_ci * Validate ERSP IU and look at cqe. 20088c2ecf20Sopenharmony_ci */ 20098c2ecf20Sopenharmony_ci if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != 20108c2ecf20Sopenharmony_ci (freq->rcv_rsplen / 4) || 20118c2ecf20Sopenharmony_ci be32_to_cpu(op->rsp_iu.xfrd_len) != 20128c2ecf20Sopenharmony_ci freq->transferred_length || 20138c2ecf20Sopenharmony_ci op->rsp_iu.ersp_result || 20148c2ecf20Sopenharmony_ci sqe->common.command_id != cqe->command_id)) { 20158c2ecf20Sopenharmony_ci status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 20168c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 20178c2ecf20Sopenharmony_ci "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " 20188c2ecf20Sopenharmony_ci "iu len %d, xfr len %d vs %d, status code " 20198c2ecf20Sopenharmony_ci "%d, cmdid %d vs %d\n", 20208c2ecf20Sopenharmony_ci ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), 20218c2ecf20Sopenharmony_ci be32_to_cpu(op->rsp_iu.xfrd_len), 20228c2ecf20Sopenharmony_ci freq->transferred_length, 20238c2ecf20Sopenharmony_ci op->rsp_iu.ersp_result, 20248c2ecf20Sopenharmony_ci sqe->common.command_id, 20258c2ecf20Sopenharmony_ci cqe->command_id); 20268c2ecf20Sopenharmony_ci goto done; 20278c2ecf20Sopenharmony_ci } 20288c2ecf20Sopenharmony_ci result = cqe->result; 20298c2ecf20Sopenharmony_ci status = cqe->status; 20308c2ecf20Sopenharmony_ci break; 20318c2ecf20Sopenharmony_ci 20328c2ecf20Sopenharmony_ci default: 20338c2ecf20Sopenharmony_ci status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1); 20348c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 20358c2ecf20Sopenharmony_ci "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " 20368c2ecf20Sopenharmony_ci "len %d\n", 20378c2ecf20Sopenharmony_ci ctrl->cnum, freq->rcv_rsplen); 20388c2ecf20Sopenharmony_ci goto done; 20398c2ecf20Sopenharmony_ci } 20408c2ecf20Sopenharmony_ci 20418c2ecf20Sopenharmony_ci terminate_assoc = false; 20428c2ecf20Sopenharmony_ci 20438c2ecf20Sopenharmony_cidone: 20448c2ecf20Sopenharmony_ci if (op->flags & FCOP_FLAGS_AEN) { 20458c2ecf20Sopenharmony_ci nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); 20468c2ecf20Sopenharmony_ci __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 20478c2ecf20Sopenharmony_ci atomic_set(&op->state, FCPOP_STATE_IDLE); 20488c2ecf20Sopenharmony_ci op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 20498c2ecf20Sopenharmony_ci nvme_fc_ctrl_put(ctrl); 20508c2ecf20Sopenharmony_ci goto check_error; 20518c2ecf20Sopenharmony_ci } 20528c2ecf20Sopenharmony_ci 20538c2ecf20Sopenharmony_ci __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 20548c2ecf20Sopenharmony_ci if (!nvme_try_complete_req(rq, status, result)) 
20558c2ecf20Sopenharmony_ci nvme_fc_complete_rq(rq); 20568c2ecf20Sopenharmony_ci 20578c2ecf20Sopenharmony_cicheck_error: 20588c2ecf20Sopenharmony_ci if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) 20598c2ecf20Sopenharmony_ci queue_work(nvme_reset_wq, &ctrl->ioerr_work); 20608c2ecf20Sopenharmony_ci} 20618c2ecf20Sopenharmony_ci 20628c2ecf20Sopenharmony_cistatic int 20638c2ecf20Sopenharmony_ci__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, 20648c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op, 20658c2ecf20Sopenharmony_ci struct request *rq, u32 rqno) 20668c2ecf20Sopenharmony_ci{ 20678c2ecf20Sopenharmony_ci struct nvme_fcp_op_w_sgl *op_w_sgl = 20688c2ecf20Sopenharmony_ci container_of(op, typeof(*op_w_sgl), op); 20698c2ecf20Sopenharmony_ci struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 20708c2ecf20Sopenharmony_ci int ret = 0; 20718c2ecf20Sopenharmony_ci 20728c2ecf20Sopenharmony_ci memset(op, 0, sizeof(*op)); 20738c2ecf20Sopenharmony_ci op->fcp_req.cmdaddr = &op->cmd_iu; 20748c2ecf20Sopenharmony_ci op->fcp_req.cmdlen = sizeof(op->cmd_iu); 20758c2ecf20Sopenharmony_ci op->fcp_req.rspaddr = &op->rsp_iu; 20768c2ecf20Sopenharmony_ci op->fcp_req.rsplen = sizeof(op->rsp_iu); 20778c2ecf20Sopenharmony_ci op->fcp_req.done = nvme_fc_fcpio_done; 20788c2ecf20Sopenharmony_ci op->ctrl = ctrl; 20798c2ecf20Sopenharmony_ci op->queue = queue; 20808c2ecf20Sopenharmony_ci op->rq = rq; 20818c2ecf20Sopenharmony_ci op->rqno = rqno; 20828c2ecf20Sopenharmony_ci 20838c2ecf20Sopenharmony_ci cmdiu->format_id = NVME_CMD_FORMAT_ID; 20848c2ecf20Sopenharmony_ci cmdiu->fc_id = NVME_CMD_FC_ID; 20858c2ecf20Sopenharmony_ci cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); 20868c2ecf20Sopenharmony_ci if (queue->qnum) 20878c2ecf20Sopenharmony_ci cmdiu->rsv_cat = fccmnd_set_cat_css(0, 20888c2ecf20Sopenharmony_ci (NVME_CC_CSS_NVM >> NVME_CC_CSS_SHIFT)); 20898c2ecf20Sopenharmony_ci else 20908c2ecf20Sopenharmony_ci cmdiu->rsv_cat = fccmnd_set_cat_admin(0); 20918c2ecf20Sopenharmony_ci 20928c2ecf20Sopenharmony_ci op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, 20938c2ecf20Sopenharmony_ci &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); 20948c2ecf20Sopenharmony_ci if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { 20958c2ecf20Sopenharmony_ci dev_err(ctrl->dev, 20968c2ecf20Sopenharmony_ci "FCP Op failed - cmdiu dma mapping failed.\n"); 20978c2ecf20Sopenharmony_ci ret = -EFAULT; 20988c2ecf20Sopenharmony_ci goto out_on_error; 20998c2ecf20Sopenharmony_ci } 21008c2ecf20Sopenharmony_ci 21018c2ecf20Sopenharmony_ci op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, 21028c2ecf20Sopenharmony_ci &op->rsp_iu, sizeof(op->rsp_iu), 21038c2ecf20Sopenharmony_ci DMA_FROM_DEVICE); 21048c2ecf20Sopenharmony_ci if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { 21058c2ecf20Sopenharmony_ci dev_err(ctrl->dev, 21068c2ecf20Sopenharmony_ci "FCP Op failed - rspiu dma mapping failed.\n"); 21078c2ecf20Sopenharmony_ci ret = -EFAULT; 21088c2ecf20Sopenharmony_ci } 21098c2ecf20Sopenharmony_ci 21108c2ecf20Sopenharmony_ci atomic_set(&op->state, FCPOP_STATE_IDLE); 21118c2ecf20Sopenharmony_ciout_on_error: 21128c2ecf20Sopenharmony_ci return ret; 21138c2ecf20Sopenharmony_ci} 21148c2ecf20Sopenharmony_ci 21158c2ecf20Sopenharmony_cistatic int 21168c2ecf20Sopenharmony_cinvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, 21178c2ecf20Sopenharmony_ci unsigned int hctx_idx, unsigned int numa_node) 21188c2ecf20Sopenharmony_ci{ 21198c2ecf20Sopenharmony_ci struct nvme_fc_ctrl 
*ctrl = set->driver_data; 21208c2ecf20Sopenharmony_ci struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq); 21218c2ecf20Sopenharmony_ci int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 21228c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; 21238c2ecf20Sopenharmony_ci int res; 21248c2ecf20Sopenharmony_ci 21258c2ecf20Sopenharmony_ci res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); 21268c2ecf20Sopenharmony_ci if (res) 21278c2ecf20Sopenharmony_ci return res; 21288c2ecf20Sopenharmony_ci op->op.fcp_req.first_sgl = op->sgl; 21298c2ecf20Sopenharmony_ci op->op.fcp_req.private = &op->priv[0]; 21308c2ecf20Sopenharmony_ci nvme_req(rq)->ctrl = &ctrl->ctrl; 21318c2ecf20Sopenharmony_ci return res; 21328c2ecf20Sopenharmony_ci} 21338c2ecf20Sopenharmony_ci 21348c2ecf20Sopenharmony_cistatic int 21358c2ecf20Sopenharmony_cinvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) 21368c2ecf20Sopenharmony_ci{ 21378c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *aen_op; 21388c2ecf20Sopenharmony_ci struct nvme_fc_cmd_iu *cmdiu; 21398c2ecf20Sopenharmony_ci struct nvme_command *sqe; 21408c2ecf20Sopenharmony_ci void *private = NULL; 21418c2ecf20Sopenharmony_ci int i, ret; 21428c2ecf20Sopenharmony_ci 21438c2ecf20Sopenharmony_ci aen_op = ctrl->aen_ops; 21448c2ecf20Sopenharmony_ci for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 21458c2ecf20Sopenharmony_ci if (ctrl->lport->ops->fcprqst_priv_sz) { 21468c2ecf20Sopenharmony_ci private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, 21478c2ecf20Sopenharmony_ci GFP_KERNEL); 21488c2ecf20Sopenharmony_ci if (!private) 21498c2ecf20Sopenharmony_ci return -ENOMEM; 21508c2ecf20Sopenharmony_ci } 21518c2ecf20Sopenharmony_ci 21528c2ecf20Sopenharmony_ci cmdiu = &aen_op->cmd_iu; 21538c2ecf20Sopenharmony_ci sqe = &cmdiu->sqe; 21548c2ecf20Sopenharmony_ci ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], 21558c2ecf20Sopenharmony_ci aen_op, (struct request *)NULL, 21568c2ecf20Sopenharmony_ci (NVME_AQ_BLK_MQ_DEPTH + i)); 21578c2ecf20Sopenharmony_ci if (ret) { 21588c2ecf20Sopenharmony_ci kfree(private); 21598c2ecf20Sopenharmony_ci return ret; 21608c2ecf20Sopenharmony_ci } 21618c2ecf20Sopenharmony_ci 21628c2ecf20Sopenharmony_ci aen_op->flags = FCOP_FLAGS_AEN; 21638c2ecf20Sopenharmony_ci aen_op->fcp_req.private = private; 21648c2ecf20Sopenharmony_ci 21658c2ecf20Sopenharmony_ci memset(sqe, 0, sizeof(*sqe)); 21668c2ecf20Sopenharmony_ci sqe->common.opcode = nvme_admin_async_event; 21678c2ecf20Sopenharmony_ci /* Note: core layer may overwrite the sqe.command_id value */ 21688c2ecf20Sopenharmony_ci sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; 21698c2ecf20Sopenharmony_ci } 21708c2ecf20Sopenharmony_ci return 0; 21718c2ecf20Sopenharmony_ci} 21728c2ecf20Sopenharmony_ci 21738c2ecf20Sopenharmony_cistatic void 21748c2ecf20Sopenharmony_cinvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) 21758c2ecf20Sopenharmony_ci{ 21768c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *aen_op; 21778c2ecf20Sopenharmony_ci int i; 21788c2ecf20Sopenharmony_ci 21798c2ecf20Sopenharmony_ci cancel_work_sync(&ctrl->ctrl.async_event_work); 21808c2ecf20Sopenharmony_ci aen_op = ctrl->aen_ops; 21818c2ecf20Sopenharmony_ci for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) { 21828c2ecf20Sopenharmony_ci __nvme_fc_exit_request(ctrl, aen_op); 21838c2ecf20Sopenharmony_ci 21848c2ecf20Sopenharmony_ci kfree(aen_op->fcp_req.private); 21858c2ecf20Sopenharmony_ci aen_op->fcp_req.private = NULL; 21868c2ecf20Sopenharmony_ci } 21878c2ecf20Sopenharmony_ci} 21888c2ecf20Sopenharmony_ci 
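/*
 * The request and hardware-context init/exit helpers in this file are
 * written as blk-mq callbacks. As a rough, illustrative sketch only (the
 * actual tag set wiring lives elsewhere in this file and is not shown
 * here; "example_fc_mq_ops" is a hypothetical name), they would be
 * referenced along the lines of:
 *
 *	static const struct blk_mq_ops example_fc_mq_ops = {
 *		.init_hctx	= nvme_fc_init_hctx,
 *		.init_request	= nvme_fc_init_request,
 *		.exit_request	= nvme_fc_exit_request,
 *	};
 */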
21898c2ecf20Sopenharmony_cistatic inline void 21908c2ecf20Sopenharmony_ci__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, 21918c2ecf20Sopenharmony_ci unsigned int qidx) 21928c2ecf20Sopenharmony_ci{ 21938c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue = &ctrl->queues[qidx]; 21948c2ecf20Sopenharmony_ci 21958c2ecf20Sopenharmony_ci hctx->driver_data = queue; 21968c2ecf20Sopenharmony_ci queue->hctx = hctx; 21978c2ecf20Sopenharmony_ci} 21988c2ecf20Sopenharmony_ci 21998c2ecf20Sopenharmony_cistatic int 22008c2ecf20Sopenharmony_cinvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 22018c2ecf20Sopenharmony_ci unsigned int hctx_idx) 22028c2ecf20Sopenharmony_ci{ 22038c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = data; 22048c2ecf20Sopenharmony_ci 22058c2ecf20Sopenharmony_ci __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); 22068c2ecf20Sopenharmony_ci 22078c2ecf20Sopenharmony_ci return 0; 22088c2ecf20Sopenharmony_ci} 22098c2ecf20Sopenharmony_ci 22108c2ecf20Sopenharmony_cistatic int 22118c2ecf20Sopenharmony_cinvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 22128c2ecf20Sopenharmony_ci unsigned int hctx_idx) 22138c2ecf20Sopenharmony_ci{ 22148c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = data; 22158c2ecf20Sopenharmony_ci 22168c2ecf20Sopenharmony_ci __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); 22178c2ecf20Sopenharmony_ci 22188c2ecf20Sopenharmony_ci return 0; 22198c2ecf20Sopenharmony_ci} 22208c2ecf20Sopenharmony_ci 22218c2ecf20Sopenharmony_cistatic void 22228c2ecf20Sopenharmony_cinvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) 22238c2ecf20Sopenharmony_ci{ 22248c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue; 22258c2ecf20Sopenharmony_ci 22268c2ecf20Sopenharmony_ci queue = &ctrl->queues[idx]; 22278c2ecf20Sopenharmony_ci memset(queue, 0, sizeof(*queue)); 22288c2ecf20Sopenharmony_ci queue->ctrl = ctrl; 22298c2ecf20Sopenharmony_ci queue->qnum = idx; 22308c2ecf20Sopenharmony_ci atomic_set(&queue->csn, 0); 22318c2ecf20Sopenharmony_ci queue->dev = ctrl->dev; 22328c2ecf20Sopenharmony_ci 22338c2ecf20Sopenharmony_ci if (idx > 0) 22348c2ecf20Sopenharmony_ci queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; 22358c2ecf20Sopenharmony_ci else 22368c2ecf20Sopenharmony_ci queue->cmnd_capsule_len = sizeof(struct nvme_command); 22378c2ecf20Sopenharmony_ci 22388c2ecf20Sopenharmony_ci /* 22398c2ecf20Sopenharmony_ci * Considered whether we should allocate buffers for all SQEs 22408c2ecf20Sopenharmony_ci * and CQEs and dma map them - mapping their respective entries 22418c2ecf20Sopenharmony_ci * into the request structures (kernel vm addr and dma address) 22428c2ecf20Sopenharmony_ci * thus the driver could use the buffers/mappings directly. 22438c2ecf20Sopenharmony_ci * It only makes sense if the LLDD would use them for its 22448c2ecf20Sopenharmony_ci * messaging api. It's very unlikely most adapter api's would use 22458c2ecf20Sopenharmony_ci * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload 22468c2ecf20Sopenharmony_ci * structures were used instead. 22478c2ecf20Sopenharmony_ci */ 22488c2ecf20Sopenharmony_ci} 22498c2ecf20Sopenharmony_ci 22508c2ecf20Sopenharmony_ci/* 22518c2ecf20Sopenharmony_ci * This routine terminates a queue at the transport level. 22528c2ecf20Sopenharmony_ci * The transport has already ensured that all outstanding ios on 22538c2ecf20Sopenharmony_ci * the queue have been terminated. 22548c2ecf20Sopenharmony_ci * The transport will send a Disconnect LS request to terminate 22558c2ecf20Sopenharmony_ci * the queue's connection. 
Termination of the admin queue will also 22568c2ecf20Sopenharmony_ci * terminate the association at the target. 22578c2ecf20Sopenharmony_ci */ 22588c2ecf20Sopenharmony_cistatic void 22598c2ecf20Sopenharmony_cinvme_fc_free_queue(struct nvme_fc_queue *queue) 22608c2ecf20Sopenharmony_ci{ 22618c2ecf20Sopenharmony_ci if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) 22628c2ecf20Sopenharmony_ci return; 22638c2ecf20Sopenharmony_ci 22648c2ecf20Sopenharmony_ci clear_bit(NVME_FC_Q_LIVE, &queue->flags); 22658c2ecf20Sopenharmony_ci /* 22668c2ecf20Sopenharmony_ci * Current implementation never disconnects a single queue. 22678c2ecf20Sopenharmony_ci * It always terminates a whole association. So there is never 22688c2ecf20Sopenharmony_ci * a disconnect(queue) LS sent to the target. 22698c2ecf20Sopenharmony_ci */ 22708c2ecf20Sopenharmony_ci 22718c2ecf20Sopenharmony_ci queue->connection_id = 0; 22728c2ecf20Sopenharmony_ci atomic_set(&queue->csn, 0); 22738c2ecf20Sopenharmony_ci} 22748c2ecf20Sopenharmony_ci 22758c2ecf20Sopenharmony_cistatic void 22768c2ecf20Sopenharmony_ci__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, 22778c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue, unsigned int qidx) 22788c2ecf20Sopenharmony_ci{ 22798c2ecf20Sopenharmony_ci if (ctrl->lport->ops->delete_queue) 22808c2ecf20Sopenharmony_ci ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, 22818c2ecf20Sopenharmony_ci queue->lldd_handle); 22828c2ecf20Sopenharmony_ci queue->lldd_handle = NULL; 22838c2ecf20Sopenharmony_ci} 22848c2ecf20Sopenharmony_ci 22858c2ecf20Sopenharmony_cistatic void 22868c2ecf20Sopenharmony_cinvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) 22878c2ecf20Sopenharmony_ci{ 22888c2ecf20Sopenharmony_ci int i; 22898c2ecf20Sopenharmony_ci 22908c2ecf20Sopenharmony_ci for (i = 1; i < ctrl->ctrl.queue_count; i++) 22918c2ecf20Sopenharmony_ci nvme_fc_free_queue(&ctrl->queues[i]); 22928c2ecf20Sopenharmony_ci} 22938c2ecf20Sopenharmony_ci 22948c2ecf20Sopenharmony_cistatic int 22958c2ecf20Sopenharmony_ci__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, 22968c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize) 22978c2ecf20Sopenharmony_ci{ 22988c2ecf20Sopenharmony_ci int ret = 0; 22998c2ecf20Sopenharmony_ci 23008c2ecf20Sopenharmony_ci queue->lldd_handle = NULL; 23018c2ecf20Sopenharmony_ci if (ctrl->lport->ops->create_queue) 23028c2ecf20Sopenharmony_ci ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, 23038c2ecf20Sopenharmony_ci qidx, qsize, &queue->lldd_handle); 23048c2ecf20Sopenharmony_ci 23058c2ecf20Sopenharmony_ci return ret; 23068c2ecf20Sopenharmony_ci} 23078c2ecf20Sopenharmony_ci 23088c2ecf20Sopenharmony_cistatic void 23098c2ecf20Sopenharmony_cinvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) 23108c2ecf20Sopenharmony_ci{ 23118c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; 23128c2ecf20Sopenharmony_ci int i; 23138c2ecf20Sopenharmony_ci 23148c2ecf20Sopenharmony_ci for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) 23158c2ecf20Sopenharmony_ci __nvme_fc_delete_hw_queue(ctrl, queue, i); 23168c2ecf20Sopenharmony_ci} 23178c2ecf20Sopenharmony_ci 23188c2ecf20Sopenharmony_cistatic int 23198c2ecf20Sopenharmony_cinvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 23208c2ecf20Sopenharmony_ci{ 23218c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue = &ctrl->queues[1]; 23228c2ecf20Sopenharmony_ci int i, ret; 23238c2ecf20Sopenharmony_ci 23248c2ecf20Sopenharmony_ci for (i = 1; i < 
ctrl->ctrl.queue_count; i++, queue++) { 23258c2ecf20Sopenharmony_ci ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); 23268c2ecf20Sopenharmony_ci if (ret) 23278c2ecf20Sopenharmony_ci goto delete_queues; 23288c2ecf20Sopenharmony_ci } 23298c2ecf20Sopenharmony_ci 23308c2ecf20Sopenharmony_ci return 0; 23318c2ecf20Sopenharmony_ci 23328c2ecf20Sopenharmony_cidelete_queues: 23338c2ecf20Sopenharmony_ci for (; i > 0; i--) 23348c2ecf20Sopenharmony_ci __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); 23358c2ecf20Sopenharmony_ci return ret; 23368c2ecf20Sopenharmony_ci} 23378c2ecf20Sopenharmony_ci 23388c2ecf20Sopenharmony_cistatic int 23398c2ecf20Sopenharmony_cinvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) 23408c2ecf20Sopenharmony_ci{ 23418c2ecf20Sopenharmony_ci int i, ret = 0; 23428c2ecf20Sopenharmony_ci 23438c2ecf20Sopenharmony_ci for (i = 1; i < ctrl->ctrl.queue_count; i++) { 23448c2ecf20Sopenharmony_ci ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, 23458c2ecf20Sopenharmony_ci (qsize / 5)); 23468c2ecf20Sopenharmony_ci if (ret) 23478c2ecf20Sopenharmony_ci break; 23488c2ecf20Sopenharmony_ci ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); 23498c2ecf20Sopenharmony_ci if (ret) 23508c2ecf20Sopenharmony_ci break; 23518c2ecf20Sopenharmony_ci 23528c2ecf20Sopenharmony_ci set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); 23538c2ecf20Sopenharmony_ci } 23548c2ecf20Sopenharmony_ci 23558c2ecf20Sopenharmony_ci return ret; 23568c2ecf20Sopenharmony_ci} 23578c2ecf20Sopenharmony_ci 23588c2ecf20Sopenharmony_cistatic void 23598c2ecf20Sopenharmony_cinvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) 23608c2ecf20Sopenharmony_ci{ 23618c2ecf20Sopenharmony_ci int i; 23628c2ecf20Sopenharmony_ci 23638c2ecf20Sopenharmony_ci for (i = 1; i < ctrl->ctrl.queue_count; i++) 23648c2ecf20Sopenharmony_ci nvme_fc_init_queue(ctrl, i); 23658c2ecf20Sopenharmony_ci} 23668c2ecf20Sopenharmony_ci 23678c2ecf20Sopenharmony_cistatic void 23688c2ecf20Sopenharmony_cinvme_fc_ctrl_free(struct kref *ref) 23698c2ecf20Sopenharmony_ci{ 23708c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = 23718c2ecf20Sopenharmony_ci container_of(ref, struct nvme_fc_ctrl, ref); 23728c2ecf20Sopenharmony_ci unsigned long flags; 23738c2ecf20Sopenharmony_ci 23748c2ecf20Sopenharmony_ci if (ctrl->ctrl.tagset) { 23758c2ecf20Sopenharmony_ci blk_cleanup_queue(ctrl->ctrl.connect_q); 23768c2ecf20Sopenharmony_ci blk_mq_free_tag_set(&ctrl->tag_set); 23778c2ecf20Sopenharmony_ci } 23788c2ecf20Sopenharmony_ci 23798c2ecf20Sopenharmony_ci /* remove from rport list */ 23808c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->rport->lock, flags); 23818c2ecf20Sopenharmony_ci list_del(&ctrl->ctrl_list); 23828c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->rport->lock, flags); 23838c2ecf20Sopenharmony_ci 23848c2ecf20Sopenharmony_ci nvme_start_admin_queue(&ctrl->ctrl); 23858c2ecf20Sopenharmony_ci blk_cleanup_queue(ctrl->ctrl.admin_q); 23868c2ecf20Sopenharmony_ci blk_cleanup_queue(ctrl->ctrl.fabrics_q); 23878c2ecf20Sopenharmony_ci blk_mq_free_tag_set(&ctrl->admin_tag_set); 23888c2ecf20Sopenharmony_ci 23898c2ecf20Sopenharmony_ci kfree(ctrl->queues); 23908c2ecf20Sopenharmony_ci 23918c2ecf20Sopenharmony_ci put_device(ctrl->dev); 23928c2ecf20Sopenharmony_ci nvme_fc_rport_put(ctrl->rport); 23938c2ecf20Sopenharmony_ci 23948c2ecf20Sopenharmony_ci ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 23958c2ecf20Sopenharmony_ci if (ctrl->ctrl.opts) 23968c2ecf20Sopenharmony_ci nvmf_free_options(ctrl->ctrl.opts); 23978c2ecf20Sopenharmony_ci kfree(ctrl); 
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them one by one.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static bool
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	op->nreq.flags |= NVME_REQ_CANCELLED;
	__nvme_fc_abort_op(ctrl, op);
	return true;
}

/*
 * This routine runs through all outstanding commands on the association
 * and aborts them. This routine is typically called by the
 * delete_association routine. It is also called due to an error during
 * reconnect. In that scenario, it is most likely a command that initializes
 * the controller, including fabric Connect commands on io queues, that
 * may have timed out or failed, thus the io must be killed for the connect
 * thread to see the error.
 */
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
	int q;

	/*
	 * if aborting io, the queues are no longer good, mark them
	 * all as not live.
	 */
	if (ctrl->ctrl.queue_count > 1) {
		for (q = 1; q < ctrl->ctrl.queue_count; q++)
			clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
	}
	clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what ios are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		nvme_sync_io_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		if (start_queues)
			nvme_start_queues(&ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
25078c2ecf20Sopenharmony_ci */ 25088c2ecf20Sopenharmony_ci 25098c2ecf20Sopenharmony_ci /* 25108c2ecf20Sopenharmony_ci * clean up the admin queue. Same thing as above. 25118c2ecf20Sopenharmony_ci */ 25128c2ecf20Sopenharmony_ci nvme_stop_admin_queue(&ctrl->ctrl); 25138c2ecf20Sopenharmony_ci blk_sync_queue(ctrl->ctrl.admin_q); 25148c2ecf20Sopenharmony_ci blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 25158c2ecf20Sopenharmony_ci nvme_fc_terminate_exchange, &ctrl->ctrl); 25168c2ecf20Sopenharmony_ci blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); 25178c2ecf20Sopenharmony_ci} 25188c2ecf20Sopenharmony_ci 25198c2ecf20Sopenharmony_cistatic void 25208c2ecf20Sopenharmony_cinvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) 25218c2ecf20Sopenharmony_ci{ 25228c2ecf20Sopenharmony_ci /* 25238c2ecf20Sopenharmony_ci * if an error (io timeout, etc) while (re)connecting, the remote 25248c2ecf20Sopenharmony_ci * port requested terminating of the association (disconnect_ls) 25258c2ecf20Sopenharmony_ci * or an error (timeout or abort) occurred on an io while creating 25268c2ecf20Sopenharmony_ci * the controller. Abort any ios on the association and let the 25278c2ecf20Sopenharmony_ci * create_association error path resolve things. 25288c2ecf20Sopenharmony_ci */ 25298c2ecf20Sopenharmony_ci if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { 25308c2ecf20Sopenharmony_ci __nvme_fc_abort_outstanding_ios(ctrl, true); 25318c2ecf20Sopenharmony_ci set_bit(ASSOC_FAILED, &ctrl->flags); 25328c2ecf20Sopenharmony_ci return; 25338c2ecf20Sopenharmony_ci } 25348c2ecf20Sopenharmony_ci 25358c2ecf20Sopenharmony_ci /* Otherwise, only proceed if in LIVE state - e.g. on first error */ 25368c2ecf20Sopenharmony_ci if (ctrl->ctrl.state != NVME_CTRL_LIVE) 25378c2ecf20Sopenharmony_ci return; 25388c2ecf20Sopenharmony_ci 25398c2ecf20Sopenharmony_ci dev_warn(ctrl->ctrl.device, 25408c2ecf20Sopenharmony_ci "NVME-FC{%d}: transport association event: %s\n", 25418c2ecf20Sopenharmony_ci ctrl->cnum, errmsg); 25428c2ecf20Sopenharmony_ci dev_warn(ctrl->ctrl.device, 25438c2ecf20Sopenharmony_ci "NVME-FC{%d}: resetting controller\n", ctrl->cnum); 25448c2ecf20Sopenharmony_ci 25458c2ecf20Sopenharmony_ci nvme_reset_ctrl(&ctrl->ctrl); 25468c2ecf20Sopenharmony_ci} 25478c2ecf20Sopenharmony_ci 25488c2ecf20Sopenharmony_cistatic enum blk_eh_timer_return 25498c2ecf20Sopenharmony_cinvme_fc_timeout(struct request *rq, bool reserved) 25508c2ecf20Sopenharmony_ci{ 25518c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 25528c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = op->ctrl; 25538c2ecf20Sopenharmony_ci struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 25548c2ecf20Sopenharmony_ci struct nvme_command *sqe = &cmdiu->sqe; 25558c2ecf20Sopenharmony_ci 25568c2ecf20Sopenharmony_ci /* 25578c2ecf20Sopenharmony_ci * Attempt to abort the offending command. Command completion 25588c2ecf20Sopenharmony_ci * will detect the aborted io and will fail the connection. 
25598c2ecf20Sopenharmony_ci */ 25608c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 25618c2ecf20Sopenharmony_ci "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " 25628c2ecf20Sopenharmony_ci "x%08x/x%08x\n", 25638c2ecf20Sopenharmony_ci ctrl->cnum, op->queue->qnum, sqe->common.opcode, 25648c2ecf20Sopenharmony_ci sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); 25658c2ecf20Sopenharmony_ci if (__nvme_fc_abort_op(ctrl, op)) 25668c2ecf20Sopenharmony_ci nvme_fc_error_recovery(ctrl, "io timeout abort failed"); 25678c2ecf20Sopenharmony_ci 25688c2ecf20Sopenharmony_ci /* 25698c2ecf20Sopenharmony_ci * the io abort has been initiated. Have the reset timer 25708c2ecf20Sopenharmony_ci * restarted and the abort completion will complete the io 25718c2ecf20Sopenharmony_ci * shortly. Avoids a synchronous wait while the abort finishes. 25728c2ecf20Sopenharmony_ci */ 25738c2ecf20Sopenharmony_ci return BLK_EH_RESET_TIMER; 25748c2ecf20Sopenharmony_ci} 25758c2ecf20Sopenharmony_ci 25768c2ecf20Sopenharmony_cistatic int 25778c2ecf20Sopenharmony_cinvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 25788c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op) 25798c2ecf20Sopenharmony_ci{ 25808c2ecf20Sopenharmony_ci struct nvmefc_fcp_req *freq = &op->fcp_req; 25818c2ecf20Sopenharmony_ci int ret; 25828c2ecf20Sopenharmony_ci 25838c2ecf20Sopenharmony_ci freq->sg_cnt = 0; 25848c2ecf20Sopenharmony_ci 25858c2ecf20Sopenharmony_ci if (!blk_rq_nr_phys_segments(rq)) 25868c2ecf20Sopenharmony_ci return 0; 25878c2ecf20Sopenharmony_ci 25888c2ecf20Sopenharmony_ci freq->sg_table.sgl = freq->first_sgl; 25898c2ecf20Sopenharmony_ci ret = sg_alloc_table_chained(&freq->sg_table, 25908c2ecf20Sopenharmony_ci blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, 25918c2ecf20Sopenharmony_ci NVME_INLINE_SG_CNT); 25928c2ecf20Sopenharmony_ci if (ret) 25938c2ecf20Sopenharmony_ci return -ENOMEM; 25948c2ecf20Sopenharmony_ci 25958c2ecf20Sopenharmony_ci op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); 25968c2ecf20Sopenharmony_ci WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); 25978c2ecf20Sopenharmony_ci freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, 25988c2ecf20Sopenharmony_ci op->nents, rq_dma_dir(rq)); 25998c2ecf20Sopenharmony_ci if (unlikely(freq->sg_cnt <= 0)) { 26008c2ecf20Sopenharmony_ci sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 26018c2ecf20Sopenharmony_ci freq->sg_cnt = 0; 26028c2ecf20Sopenharmony_ci return -EFAULT; 26038c2ecf20Sopenharmony_ci } 26048c2ecf20Sopenharmony_ci 26058c2ecf20Sopenharmony_ci /* 26068c2ecf20Sopenharmony_ci * TODO: blk_integrity_rq(rq) for DIF 26078c2ecf20Sopenharmony_ci */ 26088c2ecf20Sopenharmony_ci return 0; 26098c2ecf20Sopenharmony_ci} 26108c2ecf20Sopenharmony_ci 26118c2ecf20Sopenharmony_cistatic void 26128c2ecf20Sopenharmony_cinvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, 26138c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op) 26148c2ecf20Sopenharmony_ci{ 26158c2ecf20Sopenharmony_ci struct nvmefc_fcp_req *freq = &op->fcp_req; 26168c2ecf20Sopenharmony_ci 26178c2ecf20Sopenharmony_ci if (!freq->sg_cnt) 26188c2ecf20Sopenharmony_ci return; 26198c2ecf20Sopenharmony_ci 26208c2ecf20Sopenharmony_ci fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, 26218c2ecf20Sopenharmony_ci rq_dma_dir(rq)); 26228c2ecf20Sopenharmony_ci 26238c2ecf20Sopenharmony_ci sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); 26248c2ecf20Sopenharmony_ci 26258c2ecf20Sopenharmony_ci freq->sg_cnt = 0; 26268c2ecf20Sopenharmony_ci} 
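/*
 * A minimal illustrative sketch (not built, kept under #if 0). It makes
 * explicit what nvme_fc_map_data() above relies on: the request PDU
 * already embeds NVME_INLINE_SG_CNT scatterlist entries (freq->first_sgl),
 * so sg_alloc_table_chained() only chains additional scatterlist memory
 * when a request has more physical segments than that. The helper name
 * is hypothetical and exists only for illustration.
 */
#if 0
static bool nvme_fc_example_uses_inline_sgl(struct request *rq)
{
	/* small requests fit entirely in the inline first_sgl[] entries */
	return blk_rq_nr_phys_segments(rq) <= NVME_INLINE_SG_CNT;
}
#endif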

/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever the host posts something to the corresponding SQ. When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange. The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
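 *
 * As a concrete example: a single read on an io queue turns into one
 * SQE carried in the FC-NVME CMD IU, one newly allocated FC exchange,
 * the read data moved as transfers on that exchange, and the CQE
 * carried (explicitly or implicitly) by the RSP IU that ends the
 * exchange.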
26508c2ecf20Sopenharmony_ci */ 26518c2ecf20Sopenharmony_cistatic blk_status_t 26528c2ecf20Sopenharmony_cinvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, 26538c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op, u32 data_len, 26548c2ecf20Sopenharmony_ci enum nvmefc_fcp_datadir io_dir) 26558c2ecf20Sopenharmony_ci{ 26568c2ecf20Sopenharmony_ci struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 26578c2ecf20Sopenharmony_ci struct nvme_command *sqe = &cmdiu->sqe; 26588c2ecf20Sopenharmony_ci int ret, opstate; 26598c2ecf20Sopenharmony_ci 26608c2ecf20Sopenharmony_ci /* 26618c2ecf20Sopenharmony_ci * before attempting to send the io, check to see if we believe 26628c2ecf20Sopenharmony_ci * the target device is present 26638c2ecf20Sopenharmony_ci */ 26648c2ecf20Sopenharmony_ci if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 26658c2ecf20Sopenharmony_ci return BLK_STS_RESOURCE; 26668c2ecf20Sopenharmony_ci 26678c2ecf20Sopenharmony_ci if (!nvme_fc_ctrl_get(ctrl)) 26688c2ecf20Sopenharmony_ci return BLK_STS_IOERR; 26698c2ecf20Sopenharmony_ci 26708c2ecf20Sopenharmony_ci /* format the FC-NVME CMD IU and fcp_req */ 26718c2ecf20Sopenharmony_ci cmdiu->connection_id = cpu_to_be64(queue->connection_id); 26728c2ecf20Sopenharmony_ci cmdiu->data_len = cpu_to_be32(data_len); 26738c2ecf20Sopenharmony_ci switch (io_dir) { 26748c2ecf20Sopenharmony_ci case NVMEFC_FCP_WRITE: 26758c2ecf20Sopenharmony_ci cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; 26768c2ecf20Sopenharmony_ci break; 26778c2ecf20Sopenharmony_ci case NVMEFC_FCP_READ: 26788c2ecf20Sopenharmony_ci cmdiu->flags = FCNVME_CMD_FLAGS_READ; 26798c2ecf20Sopenharmony_ci break; 26808c2ecf20Sopenharmony_ci case NVMEFC_FCP_NODATA: 26818c2ecf20Sopenharmony_ci cmdiu->flags = 0; 26828c2ecf20Sopenharmony_ci break; 26838c2ecf20Sopenharmony_ci } 26848c2ecf20Sopenharmony_ci op->fcp_req.payload_length = data_len; 26858c2ecf20Sopenharmony_ci op->fcp_req.io_dir = io_dir; 26868c2ecf20Sopenharmony_ci op->fcp_req.transferred_length = 0; 26878c2ecf20Sopenharmony_ci op->fcp_req.rcv_rsplen = 0; 26888c2ecf20Sopenharmony_ci op->fcp_req.status = NVME_SC_SUCCESS; 26898c2ecf20Sopenharmony_ci op->fcp_req.sqid = cpu_to_le16(queue->qnum); 26908c2ecf20Sopenharmony_ci 26918c2ecf20Sopenharmony_ci /* 26928c2ecf20Sopenharmony_ci * validate per fabric rules, set fields mandated by fabric spec 26938c2ecf20Sopenharmony_ci * as well as those by FC-NVME spec. 
26948c2ecf20Sopenharmony_ci */ 26958c2ecf20Sopenharmony_ci WARN_ON_ONCE(sqe->common.metadata); 26968c2ecf20Sopenharmony_ci sqe->common.flags |= NVME_CMD_SGL_METABUF; 26978c2ecf20Sopenharmony_ci 26988c2ecf20Sopenharmony_ci /* 26998c2ecf20Sopenharmony_ci * format SQE DPTR field per FC-NVME rules: 27008c2ecf20Sopenharmony_ci * type=0x5 Transport SGL Data Block Descriptor 27018c2ecf20Sopenharmony_ci * subtype=0xA Transport-specific value 27028c2ecf20Sopenharmony_ci * address=0 27038c2ecf20Sopenharmony_ci * length=length of the data series 27048c2ecf20Sopenharmony_ci */ 27058c2ecf20Sopenharmony_ci sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | 27068c2ecf20Sopenharmony_ci NVME_SGL_FMT_TRANSPORT_A; 27078c2ecf20Sopenharmony_ci sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 27088c2ecf20Sopenharmony_ci sqe->rw.dptr.sgl.addr = 0; 27098c2ecf20Sopenharmony_ci 27108c2ecf20Sopenharmony_ci if (!(op->flags & FCOP_FLAGS_AEN)) { 27118c2ecf20Sopenharmony_ci ret = nvme_fc_map_data(ctrl, op->rq, op); 27128c2ecf20Sopenharmony_ci if (ret < 0) { 27138c2ecf20Sopenharmony_ci nvme_cleanup_cmd(op->rq); 27148c2ecf20Sopenharmony_ci nvme_fc_ctrl_put(ctrl); 27158c2ecf20Sopenharmony_ci if (ret == -ENOMEM || ret == -EAGAIN) 27168c2ecf20Sopenharmony_ci return BLK_STS_RESOURCE; 27178c2ecf20Sopenharmony_ci return BLK_STS_IOERR; 27188c2ecf20Sopenharmony_ci } 27198c2ecf20Sopenharmony_ci } 27208c2ecf20Sopenharmony_ci 27218c2ecf20Sopenharmony_ci fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, 27228c2ecf20Sopenharmony_ci sizeof(op->cmd_iu), DMA_TO_DEVICE); 27238c2ecf20Sopenharmony_ci 27248c2ecf20Sopenharmony_ci atomic_set(&op->state, FCPOP_STATE_ACTIVE); 27258c2ecf20Sopenharmony_ci 27268c2ecf20Sopenharmony_ci if (!(op->flags & FCOP_FLAGS_AEN)) 27278c2ecf20Sopenharmony_ci blk_mq_start_request(op->rq); 27288c2ecf20Sopenharmony_ci 27298c2ecf20Sopenharmony_ci cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); 27308c2ecf20Sopenharmony_ci ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, 27318c2ecf20Sopenharmony_ci &ctrl->rport->remoteport, 27328c2ecf20Sopenharmony_ci queue->lldd_handle, &op->fcp_req); 27338c2ecf20Sopenharmony_ci 27348c2ecf20Sopenharmony_ci if (ret) { 27358c2ecf20Sopenharmony_ci /* 27368c2ecf20Sopenharmony_ci * If the lld fails to send the command is there an issue with 27378c2ecf20Sopenharmony_ci * the csn value? If the command that fails is the Connect, 27388c2ecf20Sopenharmony_ci * no - as the connection won't be live. If it is a command 27398c2ecf20Sopenharmony_ci * post-connect, it's possible a gap in csn may be created. 27408c2ecf20Sopenharmony_ci * Does this matter? As Linux initiators don't send fused 27418c2ecf20Sopenharmony_ci * commands, no. The gap would exist, but as there's nothing 27428c2ecf20Sopenharmony_ci * that depends on csn order to be delivered on the target 27438c2ecf20Sopenharmony_ci * side, it shouldn't hurt. It would be difficult for a 27448c2ecf20Sopenharmony_ci * target to even detect the csn gap as it has no idea when the 27458c2ecf20Sopenharmony_ci * cmd with the csn was supposed to arrive. 
27468c2ecf20Sopenharmony_ci */ 27478c2ecf20Sopenharmony_ci opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); 27488c2ecf20Sopenharmony_ci __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); 27498c2ecf20Sopenharmony_ci 27508c2ecf20Sopenharmony_ci if (!(op->flags & FCOP_FLAGS_AEN)) { 27518c2ecf20Sopenharmony_ci nvme_fc_unmap_data(ctrl, op->rq, op); 27528c2ecf20Sopenharmony_ci nvme_cleanup_cmd(op->rq); 27538c2ecf20Sopenharmony_ci } 27548c2ecf20Sopenharmony_ci 27558c2ecf20Sopenharmony_ci nvme_fc_ctrl_put(ctrl); 27568c2ecf20Sopenharmony_ci 27578c2ecf20Sopenharmony_ci if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && 27588c2ecf20Sopenharmony_ci ret != -EBUSY) 27598c2ecf20Sopenharmony_ci return BLK_STS_IOERR; 27608c2ecf20Sopenharmony_ci 27618c2ecf20Sopenharmony_ci return BLK_STS_RESOURCE; 27628c2ecf20Sopenharmony_ci } 27638c2ecf20Sopenharmony_ci 27648c2ecf20Sopenharmony_ci return BLK_STS_OK; 27658c2ecf20Sopenharmony_ci} 27668c2ecf20Sopenharmony_ci 27678c2ecf20Sopenharmony_cistatic blk_status_t 27688c2ecf20Sopenharmony_cinvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, 27698c2ecf20Sopenharmony_ci const struct blk_mq_queue_data *bd) 27708c2ecf20Sopenharmony_ci{ 27718c2ecf20Sopenharmony_ci struct nvme_ns *ns = hctx->queue->queuedata; 27728c2ecf20Sopenharmony_ci struct nvme_fc_queue *queue = hctx->driver_data; 27738c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = queue->ctrl; 27748c2ecf20Sopenharmony_ci struct request *rq = bd->rq; 27758c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 27768c2ecf20Sopenharmony_ci struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; 27778c2ecf20Sopenharmony_ci struct nvme_command *sqe = &cmdiu->sqe; 27788c2ecf20Sopenharmony_ci enum nvmefc_fcp_datadir io_dir; 27798c2ecf20Sopenharmony_ci bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); 27808c2ecf20Sopenharmony_ci u32 data_len; 27818c2ecf20Sopenharmony_ci blk_status_t ret; 27828c2ecf20Sopenharmony_ci 27838c2ecf20Sopenharmony_ci if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || 27848c2ecf20Sopenharmony_ci !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) 27858c2ecf20Sopenharmony_ci return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); 27868c2ecf20Sopenharmony_ci 27878c2ecf20Sopenharmony_ci ret = nvme_setup_cmd(ns, rq, sqe); 27888c2ecf20Sopenharmony_ci if (ret) 27898c2ecf20Sopenharmony_ci return ret; 27908c2ecf20Sopenharmony_ci 27918c2ecf20Sopenharmony_ci /* 27928c2ecf20Sopenharmony_ci * nvme core doesn't quite treat the rq opaquely. Commands such 27938c2ecf20Sopenharmony_ci * as WRITE ZEROES will return a non-zero rq payload_bytes yet 27948c2ecf20Sopenharmony_ci * there is no actual payload to be transferred. 27958c2ecf20Sopenharmony_ci * To get it right, key data transmission on there being 1 or 27968c2ecf20Sopenharmony_ci * more physical segments in the sg list. If there is no 27978c2ecf20Sopenharmony_ci * physical segments, there is no payload. 27988c2ecf20Sopenharmony_ci */ 27998c2ecf20Sopenharmony_ci if (blk_rq_nr_phys_segments(rq)) { 28008c2ecf20Sopenharmony_ci data_len = blk_rq_payload_bytes(rq); 28018c2ecf20Sopenharmony_ci io_dir = ((rq_data_dir(rq) == WRITE) ? 
28028c2ecf20Sopenharmony_ci NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 28038c2ecf20Sopenharmony_ci } else { 28048c2ecf20Sopenharmony_ci data_len = 0; 28058c2ecf20Sopenharmony_ci io_dir = NVMEFC_FCP_NODATA; 28068c2ecf20Sopenharmony_ci } 28078c2ecf20Sopenharmony_ci 28088c2ecf20Sopenharmony_ci 28098c2ecf20Sopenharmony_ci return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); 28108c2ecf20Sopenharmony_ci} 28118c2ecf20Sopenharmony_ci 28128c2ecf20Sopenharmony_cistatic void 28138c2ecf20Sopenharmony_cinvme_fc_submit_async_event(struct nvme_ctrl *arg) 28148c2ecf20Sopenharmony_ci{ 28158c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); 28168c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *aen_op; 28178c2ecf20Sopenharmony_ci blk_status_t ret; 28188c2ecf20Sopenharmony_ci 28198c2ecf20Sopenharmony_ci if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) 28208c2ecf20Sopenharmony_ci return; 28218c2ecf20Sopenharmony_ci 28228c2ecf20Sopenharmony_ci aen_op = &ctrl->aen_ops[0]; 28238c2ecf20Sopenharmony_ci 28248c2ecf20Sopenharmony_ci ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, 28258c2ecf20Sopenharmony_ci NVMEFC_FCP_NODATA); 28268c2ecf20Sopenharmony_ci if (ret) 28278c2ecf20Sopenharmony_ci dev_err(ctrl->ctrl.device, 28288c2ecf20Sopenharmony_ci "failed async event work\n"); 28298c2ecf20Sopenharmony_ci} 28308c2ecf20Sopenharmony_ci 28318c2ecf20Sopenharmony_cistatic void 28328c2ecf20Sopenharmony_cinvme_fc_complete_rq(struct request *rq) 28338c2ecf20Sopenharmony_ci{ 28348c2ecf20Sopenharmony_ci struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq); 28358c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = op->ctrl; 28368c2ecf20Sopenharmony_ci 28378c2ecf20Sopenharmony_ci atomic_set(&op->state, FCPOP_STATE_IDLE); 28388c2ecf20Sopenharmony_ci op->flags &= ~FCOP_FLAGS_TERMIO; 28398c2ecf20Sopenharmony_ci 28408c2ecf20Sopenharmony_ci nvme_fc_unmap_data(ctrl, rq, op); 28418c2ecf20Sopenharmony_ci nvme_complete_rq(rq); 28428c2ecf20Sopenharmony_ci nvme_fc_ctrl_put(ctrl); 28438c2ecf20Sopenharmony_ci} 28448c2ecf20Sopenharmony_ci 28458c2ecf20Sopenharmony_ci 28468c2ecf20Sopenharmony_cistatic const struct blk_mq_ops nvme_fc_mq_ops = { 28478c2ecf20Sopenharmony_ci .queue_rq = nvme_fc_queue_rq, 28488c2ecf20Sopenharmony_ci .complete = nvme_fc_complete_rq, 28498c2ecf20Sopenharmony_ci .init_request = nvme_fc_init_request, 28508c2ecf20Sopenharmony_ci .exit_request = nvme_fc_exit_request, 28518c2ecf20Sopenharmony_ci .init_hctx = nvme_fc_init_hctx, 28528c2ecf20Sopenharmony_ci .timeout = nvme_fc_timeout, 28538c2ecf20Sopenharmony_ci}; 28548c2ecf20Sopenharmony_ci 28558c2ecf20Sopenharmony_cistatic int 28568c2ecf20Sopenharmony_cinvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) 28578c2ecf20Sopenharmony_ci{ 28588c2ecf20Sopenharmony_ci struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 28598c2ecf20Sopenharmony_ci unsigned int nr_io_queues; 28608c2ecf20Sopenharmony_ci int ret; 28618c2ecf20Sopenharmony_ci 28628c2ecf20Sopenharmony_ci nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 28638c2ecf20Sopenharmony_ci ctrl->lport->ops->max_hw_queues); 28648c2ecf20Sopenharmony_ci ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 28658c2ecf20Sopenharmony_ci if (ret) { 28668c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 28678c2ecf20Sopenharmony_ci "set_queue_count failed: %d\n", ret); 28688c2ecf20Sopenharmony_ci return ret; 28698c2ecf20Sopenharmony_ci } 28708c2ecf20Sopenharmony_ci 28718c2ecf20Sopenharmony_ci ctrl->ctrl.queue_count = nr_io_queues + 1; 28728c2ecf20Sopenharmony_ci if (!nr_io_queues) 28738c2ecf20Sopenharmony_ci 
return 0; 28748c2ecf20Sopenharmony_ci 28758c2ecf20Sopenharmony_ci nvme_fc_init_io_queues(ctrl); 28768c2ecf20Sopenharmony_ci 28778c2ecf20Sopenharmony_ci memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); 28788c2ecf20Sopenharmony_ci ctrl->tag_set.ops = &nvme_fc_mq_ops; 28798c2ecf20Sopenharmony_ci ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; 28808c2ecf20Sopenharmony_ci ctrl->tag_set.reserved_tags = 1; /* fabric connect */ 28818c2ecf20Sopenharmony_ci ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; 28828c2ecf20Sopenharmony_ci ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 28838c2ecf20Sopenharmony_ci ctrl->tag_set.cmd_size = 28848c2ecf20Sopenharmony_ci struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 28858c2ecf20Sopenharmony_ci ctrl->lport->ops->fcprqst_priv_sz); 28868c2ecf20Sopenharmony_ci ctrl->tag_set.driver_data = ctrl; 28878c2ecf20Sopenharmony_ci ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; 28888c2ecf20Sopenharmony_ci ctrl->tag_set.timeout = NVME_IO_TIMEOUT; 28898c2ecf20Sopenharmony_ci 28908c2ecf20Sopenharmony_ci ret = blk_mq_alloc_tag_set(&ctrl->tag_set); 28918c2ecf20Sopenharmony_ci if (ret) 28928c2ecf20Sopenharmony_ci return ret; 28938c2ecf20Sopenharmony_ci 28948c2ecf20Sopenharmony_ci ctrl->ctrl.tagset = &ctrl->tag_set; 28958c2ecf20Sopenharmony_ci 28968c2ecf20Sopenharmony_ci ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); 28978c2ecf20Sopenharmony_ci if (IS_ERR(ctrl->ctrl.connect_q)) { 28988c2ecf20Sopenharmony_ci ret = PTR_ERR(ctrl->ctrl.connect_q); 28998c2ecf20Sopenharmony_ci goto out_free_tag_set; 29008c2ecf20Sopenharmony_ci } 29018c2ecf20Sopenharmony_ci 29028c2ecf20Sopenharmony_ci ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 29038c2ecf20Sopenharmony_ci if (ret) 29048c2ecf20Sopenharmony_ci goto out_cleanup_blk_queue; 29058c2ecf20Sopenharmony_ci 29068c2ecf20Sopenharmony_ci ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 29078c2ecf20Sopenharmony_ci if (ret) 29088c2ecf20Sopenharmony_ci goto out_delete_hw_queues; 29098c2ecf20Sopenharmony_ci 29108c2ecf20Sopenharmony_ci ctrl->ioq_live = true; 29118c2ecf20Sopenharmony_ci 29128c2ecf20Sopenharmony_ci return 0; 29138c2ecf20Sopenharmony_ci 29148c2ecf20Sopenharmony_ciout_delete_hw_queues: 29158c2ecf20Sopenharmony_ci nvme_fc_delete_hw_io_queues(ctrl); 29168c2ecf20Sopenharmony_ciout_cleanup_blk_queue: 29178c2ecf20Sopenharmony_ci blk_cleanup_queue(ctrl->ctrl.connect_q); 29188c2ecf20Sopenharmony_ciout_free_tag_set: 29198c2ecf20Sopenharmony_ci blk_mq_free_tag_set(&ctrl->tag_set); 29208c2ecf20Sopenharmony_ci nvme_fc_free_io_queues(ctrl); 29218c2ecf20Sopenharmony_ci 29228c2ecf20Sopenharmony_ci /* force put free routine to ignore io queues */ 29238c2ecf20Sopenharmony_ci ctrl->ctrl.tagset = NULL; 29248c2ecf20Sopenharmony_ci 29258c2ecf20Sopenharmony_ci return ret; 29268c2ecf20Sopenharmony_ci} 29278c2ecf20Sopenharmony_ci 29288c2ecf20Sopenharmony_cistatic int 29298c2ecf20Sopenharmony_cinvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) 29308c2ecf20Sopenharmony_ci{ 29318c2ecf20Sopenharmony_ci struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 29328c2ecf20Sopenharmony_ci u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; 29338c2ecf20Sopenharmony_ci unsigned int nr_io_queues; 29348c2ecf20Sopenharmony_ci int ret; 29358c2ecf20Sopenharmony_ci 29368c2ecf20Sopenharmony_ci nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), 29378c2ecf20Sopenharmony_ci ctrl->lport->ops->max_hw_queues); 29388c2ecf20Sopenharmony_ci ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); 29398c2ecf20Sopenharmony_ci 
if (ret) { 29408c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 29418c2ecf20Sopenharmony_ci "set_queue_count failed: %d\n", ret); 29428c2ecf20Sopenharmony_ci return ret; 29438c2ecf20Sopenharmony_ci } 29448c2ecf20Sopenharmony_ci 29458c2ecf20Sopenharmony_ci if (!nr_io_queues && prior_ioq_cnt) { 29468c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 29478c2ecf20Sopenharmony_ci "Fail Reconnect: At least 1 io queue " 29488c2ecf20Sopenharmony_ci "required (was %d)\n", prior_ioq_cnt); 29498c2ecf20Sopenharmony_ci return -ENOSPC; 29508c2ecf20Sopenharmony_ci } 29518c2ecf20Sopenharmony_ci 29528c2ecf20Sopenharmony_ci ctrl->ctrl.queue_count = nr_io_queues + 1; 29538c2ecf20Sopenharmony_ci /* check for io queues existing */ 29548c2ecf20Sopenharmony_ci if (ctrl->ctrl.queue_count == 1) 29558c2ecf20Sopenharmony_ci return 0; 29568c2ecf20Sopenharmony_ci 29578c2ecf20Sopenharmony_ci if (prior_ioq_cnt != nr_io_queues) { 29588c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 29598c2ecf20Sopenharmony_ci "reconnect: revising io queue count from %d to %d\n", 29608c2ecf20Sopenharmony_ci prior_ioq_cnt, nr_io_queues); 29618c2ecf20Sopenharmony_ci nvme_wait_freeze(&ctrl->ctrl); 29628c2ecf20Sopenharmony_ci blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 29638c2ecf20Sopenharmony_ci nvme_unfreeze(&ctrl->ctrl); 29648c2ecf20Sopenharmony_ci } 29658c2ecf20Sopenharmony_ci 29668c2ecf20Sopenharmony_ci ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 29678c2ecf20Sopenharmony_ci if (ret) 29688c2ecf20Sopenharmony_ci goto out_free_io_queues; 29698c2ecf20Sopenharmony_ci 29708c2ecf20Sopenharmony_ci ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 29718c2ecf20Sopenharmony_ci if (ret) 29728c2ecf20Sopenharmony_ci goto out_delete_hw_queues; 29738c2ecf20Sopenharmony_ci 29748c2ecf20Sopenharmony_ci return 0; 29758c2ecf20Sopenharmony_ci 29768c2ecf20Sopenharmony_ciout_delete_hw_queues: 29778c2ecf20Sopenharmony_ci nvme_fc_delete_hw_io_queues(ctrl); 29788c2ecf20Sopenharmony_ciout_free_io_queues: 29798c2ecf20Sopenharmony_ci nvme_fc_free_io_queues(ctrl); 29808c2ecf20Sopenharmony_ci return ret; 29818c2ecf20Sopenharmony_ci} 29828c2ecf20Sopenharmony_ci 29838c2ecf20Sopenharmony_cistatic void 29848c2ecf20Sopenharmony_cinvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport) 29858c2ecf20Sopenharmony_ci{ 29868c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport = rport->lport; 29878c2ecf20Sopenharmony_ci 29888c2ecf20Sopenharmony_ci atomic_inc(&lport->act_rport_cnt); 29898c2ecf20Sopenharmony_ci} 29908c2ecf20Sopenharmony_ci 29918c2ecf20Sopenharmony_cistatic void 29928c2ecf20Sopenharmony_cinvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport) 29938c2ecf20Sopenharmony_ci{ 29948c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport = rport->lport; 29958c2ecf20Sopenharmony_ci u32 cnt; 29968c2ecf20Sopenharmony_ci 29978c2ecf20Sopenharmony_ci cnt = atomic_dec_return(&lport->act_rport_cnt); 29988c2ecf20Sopenharmony_ci if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED) 29998c2ecf20Sopenharmony_ci lport->ops->localport_delete(&lport->localport); 30008c2ecf20Sopenharmony_ci} 30018c2ecf20Sopenharmony_ci 30028c2ecf20Sopenharmony_cistatic int 30038c2ecf20Sopenharmony_cinvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) 30048c2ecf20Sopenharmony_ci{ 30058c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = ctrl->rport; 30068c2ecf20Sopenharmony_ci u32 cnt; 30078c2ecf20Sopenharmony_ci 30088c2ecf20Sopenharmony_ci if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) 30098c2ecf20Sopenharmony_ci return 1; 
30108c2ecf20Sopenharmony_ci 30118c2ecf20Sopenharmony_ci cnt = atomic_inc_return(&rport->act_ctrl_cnt); 30128c2ecf20Sopenharmony_ci if (cnt == 1) 30138c2ecf20Sopenharmony_ci nvme_fc_rport_active_on_lport(rport); 30148c2ecf20Sopenharmony_ci 30158c2ecf20Sopenharmony_ci return 0; 30168c2ecf20Sopenharmony_ci} 30178c2ecf20Sopenharmony_ci 30188c2ecf20Sopenharmony_cistatic int 30198c2ecf20Sopenharmony_cinvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) 30208c2ecf20Sopenharmony_ci{ 30218c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = ctrl->rport; 30228c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport = rport->lport; 30238c2ecf20Sopenharmony_ci u32 cnt; 30248c2ecf20Sopenharmony_ci 30258c2ecf20Sopenharmony_ci /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */ 30268c2ecf20Sopenharmony_ci 30278c2ecf20Sopenharmony_ci cnt = atomic_dec_return(&rport->act_ctrl_cnt); 30288c2ecf20Sopenharmony_ci if (cnt == 0) { 30298c2ecf20Sopenharmony_ci if (rport->remoteport.port_state == FC_OBJSTATE_DELETED) 30308c2ecf20Sopenharmony_ci lport->ops->remoteport_delete(&rport->remoteport); 30318c2ecf20Sopenharmony_ci nvme_fc_rport_inactive_on_lport(rport); 30328c2ecf20Sopenharmony_ci } 30338c2ecf20Sopenharmony_ci 30348c2ecf20Sopenharmony_ci return 0; 30358c2ecf20Sopenharmony_ci} 30368c2ecf20Sopenharmony_ci 30378c2ecf20Sopenharmony_ci/* 30388c2ecf20Sopenharmony_ci * This routine restarts the controller on the host side, and 30398c2ecf20Sopenharmony_ci * on the link side, recreates the controller association. 30408c2ecf20Sopenharmony_ci */ 30418c2ecf20Sopenharmony_cistatic int 30428c2ecf20Sopenharmony_cinvme_fc_create_association(struct nvme_fc_ctrl *ctrl) 30438c2ecf20Sopenharmony_ci{ 30448c2ecf20Sopenharmony_ci struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 30458c2ecf20Sopenharmony_ci struct nvmefc_ls_rcv_op *disls = NULL; 30468c2ecf20Sopenharmony_ci unsigned long flags; 30478c2ecf20Sopenharmony_ci int ret; 30488c2ecf20Sopenharmony_ci bool changed; 30498c2ecf20Sopenharmony_ci 30508c2ecf20Sopenharmony_ci ++ctrl->ctrl.nr_reconnects; 30518c2ecf20Sopenharmony_ci 30528c2ecf20Sopenharmony_ci if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 30538c2ecf20Sopenharmony_ci return -ENODEV; 30548c2ecf20Sopenharmony_ci 30558c2ecf20Sopenharmony_ci if (nvme_fc_ctlr_active_on_rport(ctrl)) 30568c2ecf20Sopenharmony_ci return -ENOTUNIQ; 30578c2ecf20Sopenharmony_ci 30588c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 30598c2ecf20Sopenharmony_ci "NVME-FC{%d}: create association : host wwpn 0x%016llx " 30608c2ecf20Sopenharmony_ci " rport wwpn 0x%016llx: NQN \"%s\"\n", 30618c2ecf20Sopenharmony_ci ctrl->cnum, ctrl->lport->localport.port_name, 30628c2ecf20Sopenharmony_ci ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); 30638c2ecf20Sopenharmony_ci 30648c2ecf20Sopenharmony_ci clear_bit(ASSOC_FAILED, &ctrl->flags); 30658c2ecf20Sopenharmony_ci 30668c2ecf20Sopenharmony_ci /* 30678c2ecf20Sopenharmony_ci * Create the admin queue 30688c2ecf20Sopenharmony_ci */ 30698c2ecf20Sopenharmony_ci 30708c2ecf20Sopenharmony_ci ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, 30718c2ecf20Sopenharmony_ci NVME_AQ_DEPTH); 30728c2ecf20Sopenharmony_ci if (ret) 30738c2ecf20Sopenharmony_ci goto out_free_queue; 30748c2ecf20Sopenharmony_ci 30758c2ecf20Sopenharmony_ci ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], 30768c2ecf20Sopenharmony_ci NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4)); 30778c2ecf20Sopenharmony_ci if (ret) 30788c2ecf20Sopenharmony_ci goto out_delete_hw_queue; 30798c2ecf20Sopenharmony_ci 
30808c2ecf20Sopenharmony_ci ret = nvmf_connect_admin_queue(&ctrl->ctrl); 30818c2ecf20Sopenharmony_ci if (ret) 30828c2ecf20Sopenharmony_ci goto out_disconnect_admin_queue; 30838c2ecf20Sopenharmony_ci 30848c2ecf20Sopenharmony_ci set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); 30858c2ecf20Sopenharmony_ci 30868c2ecf20Sopenharmony_ci /* 30878c2ecf20Sopenharmony_ci * Check controller capabilities 30888c2ecf20Sopenharmony_ci * 30898c2ecf20Sopenharmony_ci * todo:- add code to check if ctrl attributes changed from 30908c2ecf20Sopenharmony_ci * prior connection values 30918c2ecf20Sopenharmony_ci */ 30928c2ecf20Sopenharmony_ci 30938c2ecf20Sopenharmony_ci ret = nvme_enable_ctrl(&ctrl->ctrl); 30948c2ecf20Sopenharmony_ci if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 30958c2ecf20Sopenharmony_ci goto out_disconnect_admin_queue; 30968c2ecf20Sopenharmony_ci 30978c2ecf20Sopenharmony_ci ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; 30988c2ecf20Sopenharmony_ci ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << 30998c2ecf20Sopenharmony_ci (ilog2(SZ_4K) - 9); 31008c2ecf20Sopenharmony_ci 31018c2ecf20Sopenharmony_ci nvme_start_admin_queue(&ctrl->ctrl); 31028c2ecf20Sopenharmony_ci 31038c2ecf20Sopenharmony_ci ret = nvme_init_identify(&ctrl->ctrl); 31048c2ecf20Sopenharmony_ci if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 31058c2ecf20Sopenharmony_ci goto out_disconnect_admin_queue; 31068c2ecf20Sopenharmony_ci 31078c2ecf20Sopenharmony_ci /* sanity checks */ 31088c2ecf20Sopenharmony_ci 31098c2ecf20Sopenharmony_ci /* FC-NVME does not have other data in the capsule */ 31108c2ecf20Sopenharmony_ci if (ctrl->ctrl.icdoff) { 31118c2ecf20Sopenharmony_ci dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", 31128c2ecf20Sopenharmony_ci ctrl->ctrl.icdoff); 31138c2ecf20Sopenharmony_ci goto out_disconnect_admin_queue; 31148c2ecf20Sopenharmony_ci } 31158c2ecf20Sopenharmony_ci 31168c2ecf20Sopenharmony_ci /* FC-NVME supports normal SGL Data Block Descriptors */ 31178c2ecf20Sopenharmony_ci 31188c2ecf20Sopenharmony_ci if (opts->queue_size > ctrl->ctrl.maxcmd) { 31198c2ecf20Sopenharmony_ci /* warn if maxcmd is lower than queue_size */ 31208c2ecf20Sopenharmony_ci dev_warn(ctrl->ctrl.device, 31218c2ecf20Sopenharmony_ci "queue_size %zu > ctrl maxcmd %u, reducing " 31228c2ecf20Sopenharmony_ci "to maxcmd\n", 31238c2ecf20Sopenharmony_ci opts->queue_size, ctrl->ctrl.maxcmd); 31248c2ecf20Sopenharmony_ci opts->queue_size = ctrl->ctrl.maxcmd; 31258c2ecf20Sopenharmony_ci } 31268c2ecf20Sopenharmony_ci 31278c2ecf20Sopenharmony_ci if (opts->queue_size > ctrl->ctrl.sqsize + 1) { 31288c2ecf20Sopenharmony_ci /* warn if sqsize is lower than queue_size */ 31298c2ecf20Sopenharmony_ci dev_warn(ctrl->ctrl.device, 31308c2ecf20Sopenharmony_ci "queue_size %zu > ctrl sqsize %u, reducing " 31318c2ecf20Sopenharmony_ci "to sqsize\n", 31328c2ecf20Sopenharmony_ci opts->queue_size, ctrl->ctrl.sqsize + 1); 31338c2ecf20Sopenharmony_ci opts->queue_size = ctrl->ctrl.sqsize + 1; 31348c2ecf20Sopenharmony_ci } 31358c2ecf20Sopenharmony_ci 31368c2ecf20Sopenharmony_ci ret = nvme_fc_init_aen_ops(ctrl); 31378c2ecf20Sopenharmony_ci if (ret) 31388c2ecf20Sopenharmony_ci goto out_term_aen_ops; 31398c2ecf20Sopenharmony_ci 31408c2ecf20Sopenharmony_ci /* 31418c2ecf20Sopenharmony_ci * Create the io queues 31428c2ecf20Sopenharmony_ci */ 31438c2ecf20Sopenharmony_ci 31448c2ecf20Sopenharmony_ci if (ctrl->ctrl.queue_count > 1) { 31458c2ecf20Sopenharmony_ci if (!ctrl->ioq_live) 31468c2ecf20Sopenharmony_ci ret = nvme_fc_create_io_queues(ctrl); 
31478c2ecf20Sopenharmony_ci else 31488c2ecf20Sopenharmony_ci ret = nvme_fc_recreate_io_queues(ctrl); 31498c2ecf20Sopenharmony_ci } 31508c2ecf20Sopenharmony_ci if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) 31518c2ecf20Sopenharmony_ci goto out_term_aen_ops; 31528c2ecf20Sopenharmony_ci 31538c2ecf20Sopenharmony_ci changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 31548c2ecf20Sopenharmony_ci 31558c2ecf20Sopenharmony_ci ctrl->ctrl.nr_reconnects = 0; 31568c2ecf20Sopenharmony_ci 31578c2ecf20Sopenharmony_ci if (changed) 31588c2ecf20Sopenharmony_ci nvme_start_ctrl(&ctrl->ctrl); 31598c2ecf20Sopenharmony_ci 31608c2ecf20Sopenharmony_ci return 0; /* Success */ 31618c2ecf20Sopenharmony_ci 31628c2ecf20Sopenharmony_ciout_term_aen_ops: 31638c2ecf20Sopenharmony_ci nvme_fc_term_aen_ops(ctrl); 31648c2ecf20Sopenharmony_ciout_disconnect_admin_queue: 31658c2ecf20Sopenharmony_ci /* send a Disconnect(association) LS to fc-nvme target */ 31668c2ecf20Sopenharmony_ci nvme_fc_xmt_disconnect_assoc(ctrl); 31678c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->lock, flags); 31688c2ecf20Sopenharmony_ci ctrl->association_id = 0; 31698c2ecf20Sopenharmony_ci disls = ctrl->rcv_disconn; 31708c2ecf20Sopenharmony_ci ctrl->rcv_disconn = NULL; 31718c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->lock, flags); 31728c2ecf20Sopenharmony_ci if (disls) 31738c2ecf20Sopenharmony_ci nvme_fc_xmt_ls_rsp(disls); 31748c2ecf20Sopenharmony_ciout_delete_hw_queue: 31758c2ecf20Sopenharmony_ci __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 31768c2ecf20Sopenharmony_ciout_free_queue: 31778c2ecf20Sopenharmony_ci nvme_fc_free_queue(&ctrl->queues[0]); 31788c2ecf20Sopenharmony_ci clear_bit(ASSOC_ACTIVE, &ctrl->flags); 31798c2ecf20Sopenharmony_ci nvme_fc_ctlr_inactive_on_rport(ctrl); 31808c2ecf20Sopenharmony_ci 31818c2ecf20Sopenharmony_ci return ret; 31828c2ecf20Sopenharmony_ci} 31838c2ecf20Sopenharmony_ci 31848c2ecf20Sopenharmony_ci 31858c2ecf20Sopenharmony_ci/* 31868c2ecf20Sopenharmony_ci * This routine stops operation of the controller on the host side. 31878c2ecf20Sopenharmony_ci * On the host os stack side: Admin and IO queues are stopped, 31888c2ecf20Sopenharmony_ci * outstanding ios on them terminated via FC ABTS. 31898c2ecf20Sopenharmony_ci * On the link side: the association is terminated. 
31908c2ecf20Sopenharmony_ci */ 31918c2ecf20Sopenharmony_cistatic void 31928c2ecf20Sopenharmony_cinvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) 31938c2ecf20Sopenharmony_ci{ 31948c2ecf20Sopenharmony_ci struct nvmefc_ls_rcv_op *disls = NULL; 31958c2ecf20Sopenharmony_ci unsigned long flags; 31968c2ecf20Sopenharmony_ci 31978c2ecf20Sopenharmony_ci if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) 31988c2ecf20Sopenharmony_ci return; 31998c2ecf20Sopenharmony_ci 32008c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->lock, flags); 32018c2ecf20Sopenharmony_ci set_bit(FCCTRL_TERMIO, &ctrl->flags); 32028c2ecf20Sopenharmony_ci ctrl->iocnt = 0; 32038c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->lock, flags); 32048c2ecf20Sopenharmony_ci 32058c2ecf20Sopenharmony_ci __nvme_fc_abort_outstanding_ios(ctrl, false); 32068c2ecf20Sopenharmony_ci 32078c2ecf20Sopenharmony_ci /* kill the aens as they are a separate path */ 32088c2ecf20Sopenharmony_ci nvme_fc_abort_aen_ops(ctrl); 32098c2ecf20Sopenharmony_ci 32108c2ecf20Sopenharmony_ci /* wait for all io that had to be aborted */ 32118c2ecf20Sopenharmony_ci spin_lock_irq(&ctrl->lock); 32128c2ecf20Sopenharmony_ci wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); 32138c2ecf20Sopenharmony_ci clear_bit(FCCTRL_TERMIO, &ctrl->flags); 32148c2ecf20Sopenharmony_ci spin_unlock_irq(&ctrl->lock); 32158c2ecf20Sopenharmony_ci 32168c2ecf20Sopenharmony_ci nvme_fc_term_aen_ops(ctrl); 32178c2ecf20Sopenharmony_ci 32188c2ecf20Sopenharmony_ci /* 32198c2ecf20Sopenharmony_ci * send a Disconnect(association) LS to fc-nvme target 32208c2ecf20Sopenharmony_ci * Note: could have been sent at top of process, but 32218c2ecf20Sopenharmony_ci * cleaner on link traffic if after the aborts complete. 32228c2ecf20Sopenharmony_ci * Note: if association doesn't exist, association_id will be 0 32238c2ecf20Sopenharmony_ci */ 32248c2ecf20Sopenharmony_ci if (ctrl->association_id) 32258c2ecf20Sopenharmony_ci nvme_fc_xmt_disconnect_assoc(ctrl); 32268c2ecf20Sopenharmony_ci 32278c2ecf20Sopenharmony_ci spin_lock_irqsave(&ctrl->lock, flags); 32288c2ecf20Sopenharmony_ci ctrl->association_id = 0; 32298c2ecf20Sopenharmony_ci disls = ctrl->rcv_disconn; 32308c2ecf20Sopenharmony_ci ctrl->rcv_disconn = NULL; 32318c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&ctrl->lock, flags); 32328c2ecf20Sopenharmony_ci if (disls) 32338c2ecf20Sopenharmony_ci /* 32348c2ecf20Sopenharmony_ci * if a Disconnect Request was waiting for a response, send 32358c2ecf20Sopenharmony_ci * now that all ABTS's have been issued (and are complete). 
32368c2ecf20Sopenharmony_ci */ 32378c2ecf20Sopenharmony_ci nvme_fc_xmt_ls_rsp(disls); 32388c2ecf20Sopenharmony_ci 32398c2ecf20Sopenharmony_ci if (ctrl->ctrl.tagset) { 32408c2ecf20Sopenharmony_ci nvme_fc_delete_hw_io_queues(ctrl); 32418c2ecf20Sopenharmony_ci nvme_fc_free_io_queues(ctrl); 32428c2ecf20Sopenharmony_ci } 32438c2ecf20Sopenharmony_ci 32448c2ecf20Sopenharmony_ci __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); 32458c2ecf20Sopenharmony_ci nvme_fc_free_queue(&ctrl->queues[0]); 32468c2ecf20Sopenharmony_ci 32478c2ecf20Sopenharmony_ci /* re-enable the admin_q so anything new can fast fail */ 32488c2ecf20Sopenharmony_ci nvme_start_admin_queue(&ctrl->ctrl); 32498c2ecf20Sopenharmony_ci 32508c2ecf20Sopenharmony_ci /* resume the io queues so that things will fast fail */ 32518c2ecf20Sopenharmony_ci nvme_start_queues(&ctrl->ctrl); 32528c2ecf20Sopenharmony_ci 32538c2ecf20Sopenharmony_ci nvme_fc_ctlr_inactive_on_rport(ctrl); 32548c2ecf20Sopenharmony_ci} 32558c2ecf20Sopenharmony_ci 32568c2ecf20Sopenharmony_cistatic void 32578c2ecf20Sopenharmony_cinvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) 32588c2ecf20Sopenharmony_ci{ 32598c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); 32608c2ecf20Sopenharmony_ci 32618c2ecf20Sopenharmony_ci cancel_work_sync(&ctrl->ioerr_work); 32628c2ecf20Sopenharmony_ci cancel_delayed_work_sync(&ctrl->connect_work); 32638c2ecf20Sopenharmony_ci /* 32648c2ecf20Sopenharmony_ci * kill the association on the link side. this will block 32658c2ecf20Sopenharmony_ci * waiting for io to terminate 32668c2ecf20Sopenharmony_ci */ 32678c2ecf20Sopenharmony_ci nvme_fc_delete_association(ctrl); 32688c2ecf20Sopenharmony_ci} 32698c2ecf20Sopenharmony_ci 32708c2ecf20Sopenharmony_cistatic void 32718c2ecf20Sopenharmony_cinvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) 32728c2ecf20Sopenharmony_ci{ 32738c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport = ctrl->rport; 32748c2ecf20Sopenharmony_ci struct nvme_fc_remote_port *portptr = &rport->remoteport; 32758c2ecf20Sopenharmony_ci unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; 32768c2ecf20Sopenharmony_ci bool recon = true; 32778c2ecf20Sopenharmony_ci 32788c2ecf20Sopenharmony_ci if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) 32798c2ecf20Sopenharmony_ci return; 32808c2ecf20Sopenharmony_ci 32818c2ecf20Sopenharmony_ci if (portptr->port_state == FC_OBJSTATE_ONLINE) 32828c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 32838c2ecf20Sopenharmony_ci "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", 32848c2ecf20Sopenharmony_ci ctrl->cnum, status); 32858c2ecf20Sopenharmony_ci else if (time_after_eq(jiffies, rport->dev_loss_end)) 32868c2ecf20Sopenharmony_ci recon = false; 32878c2ecf20Sopenharmony_ci 32888c2ecf20Sopenharmony_ci if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { 32898c2ecf20Sopenharmony_ci if (portptr->port_state == FC_OBJSTATE_ONLINE) 32908c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 32918c2ecf20Sopenharmony_ci "NVME-FC{%d}: Reconnect attempt in %ld " 32928c2ecf20Sopenharmony_ci "seconds\n", 32938c2ecf20Sopenharmony_ci ctrl->cnum, recon_delay / HZ); 32948c2ecf20Sopenharmony_ci else if (time_after(jiffies + recon_delay, rport->dev_loss_end)) 32958c2ecf20Sopenharmony_ci recon_delay = rport->dev_loss_end - jiffies; 32968c2ecf20Sopenharmony_ci 32978c2ecf20Sopenharmony_ci queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); 32988c2ecf20Sopenharmony_ci } else { 32998c2ecf20Sopenharmony_ci if (portptr->port_state == FC_OBJSTATE_ONLINE) 33008c2ecf20Sopenharmony_ci 
dev_warn(ctrl->ctrl.device, 33018c2ecf20Sopenharmony_ci "NVME-FC{%d}: Max reconnect attempts (%d) " 33028c2ecf20Sopenharmony_ci "reached.\n", 33038c2ecf20Sopenharmony_ci ctrl->cnum, ctrl->ctrl.nr_reconnects); 33048c2ecf20Sopenharmony_ci else 33058c2ecf20Sopenharmony_ci dev_warn(ctrl->ctrl.device, 33068c2ecf20Sopenharmony_ci "NVME-FC{%d}: dev_loss_tmo (%d) expired " 33078c2ecf20Sopenharmony_ci "while waiting for remoteport connectivity.\n", 33088c2ecf20Sopenharmony_ci ctrl->cnum, min_t(int, portptr->dev_loss_tmo, 33098c2ecf20Sopenharmony_ci (ctrl->ctrl.opts->max_reconnects * 33108c2ecf20Sopenharmony_ci ctrl->ctrl.opts->reconnect_delay))); 33118c2ecf20Sopenharmony_ci WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); 33128c2ecf20Sopenharmony_ci } 33138c2ecf20Sopenharmony_ci} 33148c2ecf20Sopenharmony_ci 33158c2ecf20Sopenharmony_cistatic void 33168c2ecf20Sopenharmony_cinvme_fc_reset_ctrl_work(struct work_struct *work) 33178c2ecf20Sopenharmony_ci{ 33188c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = 33198c2ecf20Sopenharmony_ci container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); 33208c2ecf20Sopenharmony_ci 33218c2ecf20Sopenharmony_ci nvme_stop_ctrl(&ctrl->ctrl); 33228c2ecf20Sopenharmony_ci 33238c2ecf20Sopenharmony_ci /* will block while waiting for io to terminate */ 33248c2ecf20Sopenharmony_ci nvme_fc_delete_association(ctrl); 33258c2ecf20Sopenharmony_ci 33268c2ecf20Sopenharmony_ci if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) 33278c2ecf20Sopenharmony_ci dev_err(ctrl->ctrl.device, 33288c2ecf20Sopenharmony_ci "NVME-FC{%d}: error_recovery: Couldn't change state " 33298c2ecf20Sopenharmony_ci "to CONNECTING\n", ctrl->cnum); 33308c2ecf20Sopenharmony_ci 33318c2ecf20Sopenharmony_ci if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { 33328c2ecf20Sopenharmony_ci if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 33338c2ecf20Sopenharmony_ci dev_err(ctrl->ctrl.device, 33348c2ecf20Sopenharmony_ci "NVME-FC{%d}: failed to schedule connect " 33358c2ecf20Sopenharmony_ci "after reset\n", ctrl->cnum); 33368c2ecf20Sopenharmony_ci } else { 33378c2ecf20Sopenharmony_ci flush_delayed_work(&ctrl->connect_work); 33388c2ecf20Sopenharmony_ci } 33398c2ecf20Sopenharmony_ci } else { 33408c2ecf20Sopenharmony_ci nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); 33418c2ecf20Sopenharmony_ci } 33428c2ecf20Sopenharmony_ci} 33438c2ecf20Sopenharmony_ci 33448c2ecf20Sopenharmony_ci 33458c2ecf20Sopenharmony_cistatic const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { 33468c2ecf20Sopenharmony_ci .name = "fc", 33478c2ecf20Sopenharmony_ci .module = THIS_MODULE, 33488c2ecf20Sopenharmony_ci .flags = NVME_F_FABRICS, 33498c2ecf20Sopenharmony_ci .reg_read32 = nvmf_reg_read32, 33508c2ecf20Sopenharmony_ci .reg_read64 = nvmf_reg_read64, 33518c2ecf20Sopenharmony_ci .reg_write32 = nvmf_reg_write32, 33528c2ecf20Sopenharmony_ci .free_ctrl = nvme_fc_nvme_ctrl_freed, 33538c2ecf20Sopenharmony_ci .submit_async_event = nvme_fc_submit_async_event, 33548c2ecf20Sopenharmony_ci .delete_ctrl = nvme_fc_delete_ctrl, 33558c2ecf20Sopenharmony_ci .get_address = nvmf_get_address, 33568c2ecf20Sopenharmony_ci}; 33578c2ecf20Sopenharmony_ci 33588c2ecf20Sopenharmony_cistatic void 33598c2ecf20Sopenharmony_cinvme_fc_connect_ctrl_work(struct work_struct *work) 33608c2ecf20Sopenharmony_ci{ 33618c2ecf20Sopenharmony_ci int ret; 33628c2ecf20Sopenharmony_ci 33638c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl = 33648c2ecf20Sopenharmony_ci container_of(to_delayed_work(work), 33658c2ecf20Sopenharmony_ci struct nvme_fc_ctrl, connect_work);
33668c2ecf20Sopenharmony_ci 33678c2ecf20Sopenharmony_ci ret = nvme_fc_create_association(ctrl); 33688c2ecf20Sopenharmony_ci if (ret) 33698c2ecf20Sopenharmony_ci nvme_fc_reconnect_or_delete(ctrl, ret); 33708c2ecf20Sopenharmony_ci else 33718c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 33728c2ecf20Sopenharmony_ci "NVME-FC{%d}: controller connect complete\n", 33738c2ecf20Sopenharmony_ci ctrl->cnum); 33748c2ecf20Sopenharmony_ci} 33758c2ecf20Sopenharmony_ci 33768c2ecf20Sopenharmony_ci 33778c2ecf20Sopenharmony_cistatic const struct blk_mq_ops nvme_fc_admin_mq_ops = { 33788c2ecf20Sopenharmony_ci .queue_rq = nvme_fc_queue_rq, 33798c2ecf20Sopenharmony_ci .complete = nvme_fc_complete_rq, 33808c2ecf20Sopenharmony_ci .init_request = nvme_fc_init_request, 33818c2ecf20Sopenharmony_ci .exit_request = nvme_fc_exit_request, 33828c2ecf20Sopenharmony_ci .init_hctx = nvme_fc_init_admin_hctx, 33838c2ecf20Sopenharmony_ci .timeout = nvme_fc_timeout, 33848c2ecf20Sopenharmony_ci}; 33858c2ecf20Sopenharmony_ci 33868c2ecf20Sopenharmony_ci 33878c2ecf20Sopenharmony_ci/* 33888c2ecf20Sopenharmony_ci * Fails a controller request if it matches an existing controller 33898c2ecf20Sopenharmony_ci * (association) with the same tuple: 33908c2ecf20Sopenharmony_ci * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN> 33918c2ecf20Sopenharmony_ci * 33928c2ecf20Sopenharmony_ci * The ports don't need to be compared as they are intrinsically 33938c2ecf20Sopenharmony_ci * already matched by the port pointers supplied. 33948c2ecf20Sopenharmony_ci */ 33958c2ecf20Sopenharmony_cistatic bool 33968c2ecf20Sopenharmony_cinvme_fc_existing_controller(struct nvme_fc_rport *rport, 33978c2ecf20Sopenharmony_ci struct nvmf_ctrl_options *opts) 33988c2ecf20Sopenharmony_ci{ 33998c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl; 34008c2ecf20Sopenharmony_ci unsigned long flags; 34018c2ecf20Sopenharmony_ci bool found = false; 34028c2ecf20Sopenharmony_ci 34038c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 34048c2ecf20Sopenharmony_ci list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 34058c2ecf20Sopenharmony_ci found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); 34068c2ecf20Sopenharmony_ci if (found) 34078c2ecf20Sopenharmony_ci break; 34088c2ecf20Sopenharmony_ci } 34098c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 34108c2ecf20Sopenharmony_ci 34118c2ecf20Sopenharmony_ci return found; 34128c2ecf20Sopenharmony_ci} 34138c2ecf20Sopenharmony_ci 34148c2ecf20Sopenharmony_cistatic struct nvme_ctrl * 34158c2ecf20Sopenharmony_cinvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, 34168c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) 34178c2ecf20Sopenharmony_ci{ 34188c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl; 34198c2ecf20Sopenharmony_ci unsigned long flags; 34208c2ecf20Sopenharmony_ci int ret, idx, ctrl_loss_tmo; 34218c2ecf20Sopenharmony_ci 34228c2ecf20Sopenharmony_ci if (!(rport->remoteport.port_role & 34238c2ecf20Sopenharmony_ci (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { 34248c2ecf20Sopenharmony_ci ret = -EBADR; 34258c2ecf20Sopenharmony_ci goto out_fail; 34268c2ecf20Sopenharmony_ci } 34278c2ecf20Sopenharmony_ci 34288c2ecf20Sopenharmony_ci if (!opts->duplicate_connect && 34298c2ecf20Sopenharmony_ci nvme_fc_existing_controller(rport, opts)) { 34308c2ecf20Sopenharmony_ci ret = -EALREADY; 34318c2ecf20Sopenharmony_ci goto out_fail; 34328c2ecf20Sopenharmony_ci } 34338c2ecf20Sopenharmony_ci 34348c2ecf20Sopenharmony_ci ctrl = 
kzalloc(sizeof(*ctrl), GFP_KERNEL); 34358c2ecf20Sopenharmony_ci if (!ctrl) { 34368c2ecf20Sopenharmony_ci ret = -ENOMEM; 34378c2ecf20Sopenharmony_ci goto out_fail; 34388c2ecf20Sopenharmony_ci } 34398c2ecf20Sopenharmony_ci 34408c2ecf20Sopenharmony_ci idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL); 34418c2ecf20Sopenharmony_ci if (idx < 0) { 34428c2ecf20Sopenharmony_ci ret = -ENOSPC; 34438c2ecf20Sopenharmony_ci goto out_free_ctrl; 34448c2ecf20Sopenharmony_ci } 34458c2ecf20Sopenharmony_ci 34468c2ecf20Sopenharmony_ci /* 34478c2ecf20Sopenharmony_ci * if ctrl_loss_tmo is being enforced and the default reconnect delay 34488c2ecf20Sopenharmony_ci * is being used, change to a shorter reconnect delay for FC. 34498c2ecf20Sopenharmony_ci */ 34508c2ecf20Sopenharmony_ci if (opts->max_reconnects != -1 && 34518c2ecf20Sopenharmony_ci opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY && 34528c2ecf20Sopenharmony_ci opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) { 34538c2ecf20Sopenharmony_ci ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay; 34548c2ecf20Sopenharmony_ci opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO; 34558c2ecf20Sopenharmony_ci opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 34568c2ecf20Sopenharmony_ci opts->reconnect_delay); 34578c2ecf20Sopenharmony_ci } 34588c2ecf20Sopenharmony_ci 34598c2ecf20Sopenharmony_ci ctrl->ctrl.opts = opts; 34608c2ecf20Sopenharmony_ci ctrl->ctrl.nr_reconnects = 0; 34618c2ecf20Sopenharmony_ci if (lport->dev) 34628c2ecf20Sopenharmony_ci ctrl->ctrl.numa_node = dev_to_node(lport->dev); 34638c2ecf20Sopenharmony_ci else 34648c2ecf20Sopenharmony_ci ctrl->ctrl.numa_node = NUMA_NO_NODE; 34658c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&ctrl->ctrl_list); 34668c2ecf20Sopenharmony_ci ctrl->lport = lport; 34678c2ecf20Sopenharmony_ci ctrl->rport = rport; 34688c2ecf20Sopenharmony_ci ctrl->dev = lport->dev; 34698c2ecf20Sopenharmony_ci ctrl->cnum = idx; 34708c2ecf20Sopenharmony_ci ctrl->ioq_live = false; 34718c2ecf20Sopenharmony_ci init_waitqueue_head(&ctrl->ioabort_wait); 34728c2ecf20Sopenharmony_ci 34738c2ecf20Sopenharmony_ci get_device(ctrl->dev); 34748c2ecf20Sopenharmony_ci kref_init(&ctrl->ref); 34758c2ecf20Sopenharmony_ci 34768c2ecf20Sopenharmony_ci INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); 34778c2ecf20Sopenharmony_ci INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); 34788c2ecf20Sopenharmony_ci INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); 34798c2ecf20Sopenharmony_ci spin_lock_init(&ctrl->lock); 34808c2ecf20Sopenharmony_ci 34818c2ecf20Sopenharmony_ci /* io queue count */ 34828c2ecf20Sopenharmony_ci ctrl->ctrl.queue_count = min_t(unsigned int, 34838c2ecf20Sopenharmony_ci opts->nr_io_queues, 34848c2ecf20Sopenharmony_ci lport->ops->max_hw_queues); 34858c2ecf20Sopenharmony_ci ctrl->ctrl.queue_count++; /* +1 for admin queue */ 34868c2ecf20Sopenharmony_ci 34878c2ecf20Sopenharmony_ci ctrl->ctrl.sqsize = opts->queue_size - 1; 34888c2ecf20Sopenharmony_ci ctrl->ctrl.kato = opts->kato; 34898c2ecf20Sopenharmony_ci ctrl->ctrl.cntlid = 0xffff; 34908c2ecf20Sopenharmony_ci 34918c2ecf20Sopenharmony_ci ret = -ENOMEM; 34928c2ecf20Sopenharmony_ci ctrl->queues = kcalloc(ctrl->ctrl.queue_count, 34938c2ecf20Sopenharmony_ci sizeof(struct nvme_fc_queue), GFP_KERNEL); 34948c2ecf20Sopenharmony_ci if (!ctrl->queues) 34958c2ecf20Sopenharmony_ci goto out_free_ida; 34968c2ecf20Sopenharmony_ci 34978c2ecf20Sopenharmony_ci nvme_fc_init_queue(ctrl, 0); 34988c2ecf20Sopenharmony_ci 34998c2ecf20Sopenharmony_ci memset(&ctrl->admin_tag_set, 
0, sizeof(ctrl->admin_tag_set)); 35008c2ecf20Sopenharmony_ci ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; 35018c2ecf20Sopenharmony_ci ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 35028c2ecf20Sopenharmony_ci ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ 35038c2ecf20Sopenharmony_ci ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; 35048c2ecf20Sopenharmony_ci ctrl->admin_tag_set.cmd_size = 35058c2ecf20Sopenharmony_ci struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv, 35068c2ecf20Sopenharmony_ci ctrl->lport->ops->fcprqst_priv_sz); 35078c2ecf20Sopenharmony_ci ctrl->admin_tag_set.driver_data = ctrl; 35088c2ecf20Sopenharmony_ci ctrl->admin_tag_set.nr_hw_queues = 1; 35098c2ecf20Sopenharmony_ci ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; 35108c2ecf20Sopenharmony_ci ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; 35118c2ecf20Sopenharmony_ci 35128c2ecf20Sopenharmony_ci ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); 35138c2ecf20Sopenharmony_ci if (ret) 35148c2ecf20Sopenharmony_ci goto out_free_queues; 35158c2ecf20Sopenharmony_ci ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; 35168c2ecf20Sopenharmony_ci 35178c2ecf20Sopenharmony_ci ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); 35188c2ecf20Sopenharmony_ci if (IS_ERR(ctrl->ctrl.fabrics_q)) { 35198c2ecf20Sopenharmony_ci ret = PTR_ERR(ctrl->ctrl.fabrics_q); 35208c2ecf20Sopenharmony_ci goto out_free_admin_tag_set; 35218c2ecf20Sopenharmony_ci } 35228c2ecf20Sopenharmony_ci 35238c2ecf20Sopenharmony_ci ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 35248c2ecf20Sopenharmony_ci if (IS_ERR(ctrl->ctrl.admin_q)) { 35258c2ecf20Sopenharmony_ci ret = PTR_ERR(ctrl->ctrl.admin_q); 35268c2ecf20Sopenharmony_ci goto out_cleanup_fabrics_q; 35278c2ecf20Sopenharmony_ci } 35288c2ecf20Sopenharmony_ci 35298c2ecf20Sopenharmony_ci /* 35308c2ecf20Sopenharmony_ci * Would have been nice to init io queues tag set as well. 35318c2ecf20Sopenharmony_ci * However, we require interaction from the controller 35328c2ecf20Sopenharmony_ci * for max io queue count before we can do so. 35338c2ecf20Sopenharmony_ci * Defer this to the connect path. 
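 * (The connect path here is nvme_fc_connect_ctrl_work(), scheduled below
 * via connect_work, which calls nvme_fc_create_association().)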
35348c2ecf20Sopenharmony_ci */ 35358c2ecf20Sopenharmony_ci 35368c2ecf20Sopenharmony_ci ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); 35378c2ecf20Sopenharmony_ci if (ret) 35388c2ecf20Sopenharmony_ci goto out_cleanup_admin_q; 35398c2ecf20Sopenharmony_ci 35408c2ecf20Sopenharmony_ci /* at this point, teardown path changes to ref counting on nvme ctrl */ 35418c2ecf20Sopenharmony_ci 35428c2ecf20Sopenharmony_ci spin_lock_irqsave(&rport->lock, flags); 35438c2ecf20Sopenharmony_ci list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); 35448c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&rport->lock, flags); 35458c2ecf20Sopenharmony_ci 35468c2ecf20Sopenharmony_ci if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || 35478c2ecf20Sopenharmony_ci !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { 35488c2ecf20Sopenharmony_ci dev_err(ctrl->ctrl.device, 35498c2ecf20Sopenharmony_ci "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); 35508c2ecf20Sopenharmony_ci goto fail_ctrl; 35518c2ecf20Sopenharmony_ci } 35528c2ecf20Sopenharmony_ci 35538c2ecf20Sopenharmony_ci if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { 35548c2ecf20Sopenharmony_ci dev_err(ctrl->ctrl.device, 35558c2ecf20Sopenharmony_ci "NVME-FC{%d}: failed to schedule initial connect\n", 35568c2ecf20Sopenharmony_ci ctrl->cnum); 35578c2ecf20Sopenharmony_ci goto fail_ctrl; 35588c2ecf20Sopenharmony_ci } 35598c2ecf20Sopenharmony_ci 35608c2ecf20Sopenharmony_ci flush_delayed_work(&ctrl->connect_work); 35618c2ecf20Sopenharmony_ci 35628c2ecf20Sopenharmony_ci dev_info(ctrl->ctrl.device, 35638c2ecf20Sopenharmony_ci "NVME-FC{%d}: new ctrl: NQN \"%s\"\n", 35648c2ecf20Sopenharmony_ci ctrl->cnum, ctrl->ctrl.opts->subsysnqn); 35658c2ecf20Sopenharmony_ci 35668c2ecf20Sopenharmony_ci return &ctrl->ctrl; 35678c2ecf20Sopenharmony_ci 35688c2ecf20Sopenharmony_cifail_ctrl: 35698c2ecf20Sopenharmony_ci nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); 35708c2ecf20Sopenharmony_ci cancel_work_sync(&ctrl->ioerr_work); 35718c2ecf20Sopenharmony_ci cancel_work_sync(&ctrl->ctrl.reset_work); 35728c2ecf20Sopenharmony_ci cancel_delayed_work_sync(&ctrl->connect_work); 35738c2ecf20Sopenharmony_ci 35748c2ecf20Sopenharmony_ci ctrl->ctrl.opts = NULL; 35758c2ecf20Sopenharmony_ci 35768c2ecf20Sopenharmony_ci /* initiate nvme ctrl ref counting teardown */ 35778c2ecf20Sopenharmony_ci nvme_uninit_ctrl(&ctrl->ctrl); 35788c2ecf20Sopenharmony_ci 35798c2ecf20Sopenharmony_ci /* Remove core ctrl ref. */ 35808c2ecf20Sopenharmony_ci nvme_put_ctrl(&ctrl->ctrl); 35818c2ecf20Sopenharmony_ci 35828c2ecf20Sopenharmony_ci /* as we're past the point where we transition to the ref 35838c2ecf20Sopenharmony_ci * counting teardown path, if we return a bad pointer here, 35848c2ecf20Sopenharmony_ci * the calling routine, thinking it's prior to the 35858c2ecf20Sopenharmony_ci * transition, will do an rport put. Since the teardown 35868c2ecf20Sopenharmony_ci * path also does a rport put, we do an extra get here 35878c2ecf20Sopenharmony_ci * so proper order/teardown happens.
35888c2ecf20Sopenharmony_ci */ 35898c2ecf20Sopenharmony_ci nvme_fc_rport_get(rport); 35908c2ecf20Sopenharmony_ci 35918c2ecf20Sopenharmony_ci return ERR_PTR(-EIO); 35928c2ecf20Sopenharmony_ci 35938c2ecf20Sopenharmony_ciout_cleanup_admin_q: 35948c2ecf20Sopenharmony_ci blk_cleanup_queue(ctrl->ctrl.admin_q); 35958c2ecf20Sopenharmony_ciout_cleanup_fabrics_q: 35968c2ecf20Sopenharmony_ci blk_cleanup_queue(ctrl->ctrl.fabrics_q); 35978c2ecf20Sopenharmony_ciout_free_admin_tag_set: 35988c2ecf20Sopenharmony_ci blk_mq_free_tag_set(&ctrl->admin_tag_set); 35998c2ecf20Sopenharmony_ciout_free_queues: 36008c2ecf20Sopenharmony_ci kfree(ctrl->queues); 36018c2ecf20Sopenharmony_ciout_free_ida: 36028c2ecf20Sopenharmony_ci put_device(ctrl->dev); 36038c2ecf20Sopenharmony_ci ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); 36048c2ecf20Sopenharmony_ciout_free_ctrl: 36058c2ecf20Sopenharmony_ci kfree(ctrl); 36068c2ecf20Sopenharmony_ciout_fail: 36078c2ecf20Sopenharmony_ci /* exit via here doesn't follow ctlr ref points */ 36088c2ecf20Sopenharmony_ci return ERR_PTR(ret); 36098c2ecf20Sopenharmony_ci} 36108c2ecf20Sopenharmony_ci 36118c2ecf20Sopenharmony_ci 36128c2ecf20Sopenharmony_cistruct nvmet_fc_traddr { 36138c2ecf20Sopenharmony_ci u64 nn; 36148c2ecf20Sopenharmony_ci u64 pn; 36158c2ecf20Sopenharmony_ci}; 36168c2ecf20Sopenharmony_ci 36178c2ecf20Sopenharmony_cistatic int 36188c2ecf20Sopenharmony_ci__nvme_fc_parse_u64(substring_t *sstr, u64 *val) 36198c2ecf20Sopenharmony_ci{ 36208c2ecf20Sopenharmony_ci u64 token64; 36218c2ecf20Sopenharmony_ci 36228c2ecf20Sopenharmony_ci if (match_u64(sstr, &token64)) 36238c2ecf20Sopenharmony_ci return -EINVAL; 36248c2ecf20Sopenharmony_ci *val = token64; 36258c2ecf20Sopenharmony_ci 36268c2ecf20Sopenharmony_ci return 0; 36278c2ecf20Sopenharmony_ci} 36288c2ecf20Sopenharmony_ci 36298c2ecf20Sopenharmony_ci/* 36308c2ecf20Sopenharmony_ci * This routine validates and extracts the WWN's from the TRADDR string. 36318c2ecf20Sopenharmony_ci * As kernel parsers need the 0x to determine number base, universally 36328c2ecf20Sopenharmony_ci * build string to parse with 0x prefix before parsing name strings. 
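 * Two forms are accepted (the WWN digits below are illustrative only):
 *   nn-0x20000090fa942779:pn-0x10000090fa942779   (NVME_FC_TRADDR_MAXLENGTH)
 *   nn-20000090fa942779:pn-10000090fa942779       (NVME_FC_TRADDR_MINLENGTH)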
36338c2ecf20Sopenharmony_ci */ 36348c2ecf20Sopenharmony_cistatic int 36358c2ecf20Sopenharmony_cinvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen) 36368c2ecf20Sopenharmony_ci{ 36378c2ecf20Sopenharmony_ci char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1]; 36388c2ecf20Sopenharmony_ci substring_t wwn = { name, &name[sizeof(name)-1] }; 36398c2ecf20Sopenharmony_ci int nnoffset, pnoffset; 36408c2ecf20Sopenharmony_ci 36418c2ecf20Sopenharmony_ci /* validate if string is one of the 2 allowed formats */ 36428c2ecf20Sopenharmony_ci if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH && 36438c2ecf20Sopenharmony_ci !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && 36448c2ecf20Sopenharmony_ci !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET], 36458c2ecf20Sopenharmony_ci "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { 36468c2ecf20Sopenharmony_ci nnoffset = NVME_FC_TRADDR_OXNNLEN; 36478c2ecf20Sopenharmony_ci pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET + 36488c2ecf20Sopenharmony_ci NVME_FC_TRADDR_OXNNLEN; 36498c2ecf20Sopenharmony_ci } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH && 36508c2ecf20Sopenharmony_ci !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && 36518c2ecf20Sopenharmony_ci !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET], 36528c2ecf20Sopenharmony_ci "pn-", NVME_FC_TRADDR_NNLEN))) { 36538c2ecf20Sopenharmony_ci nnoffset = NVME_FC_TRADDR_NNLEN; 36548c2ecf20Sopenharmony_ci pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN; 36558c2ecf20Sopenharmony_ci } else 36568c2ecf20Sopenharmony_ci goto out_einval; 36578c2ecf20Sopenharmony_ci 36588c2ecf20Sopenharmony_ci name[0] = '0'; 36598c2ecf20Sopenharmony_ci name[1] = 'x'; 36608c2ecf20Sopenharmony_ci name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0; 36618c2ecf20Sopenharmony_ci 36628c2ecf20Sopenharmony_ci memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN); 36638c2ecf20Sopenharmony_ci if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) 36648c2ecf20Sopenharmony_ci goto out_einval; 36658c2ecf20Sopenharmony_ci 36668c2ecf20Sopenharmony_ci memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN); 36678c2ecf20Sopenharmony_ci if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) 36688c2ecf20Sopenharmony_ci goto out_einval; 36698c2ecf20Sopenharmony_ci 36708c2ecf20Sopenharmony_ci return 0; 36718c2ecf20Sopenharmony_ci 36728c2ecf20Sopenharmony_ciout_einval: 36738c2ecf20Sopenharmony_ci pr_warn("%s: bad traddr string\n", __func__); 36748c2ecf20Sopenharmony_ci return -EINVAL; 36758c2ecf20Sopenharmony_ci} 36768c2ecf20Sopenharmony_ci 36778c2ecf20Sopenharmony_cistatic struct nvme_ctrl * 36788c2ecf20Sopenharmony_cinvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) 36798c2ecf20Sopenharmony_ci{ 36808c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport; 36818c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport; 36828c2ecf20Sopenharmony_ci struct nvme_ctrl *ctrl; 36838c2ecf20Sopenharmony_ci struct nvmet_fc_traddr laddr = { 0L, 0L }; 36848c2ecf20Sopenharmony_ci struct nvmet_fc_traddr raddr = { 0L, 0L }; 36858c2ecf20Sopenharmony_ci unsigned long flags; 36868c2ecf20Sopenharmony_ci int ret; 36878c2ecf20Sopenharmony_ci 36888c2ecf20Sopenharmony_ci ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE); 36898c2ecf20Sopenharmony_ci if (ret || !raddr.nn || !raddr.pn) 36908c2ecf20Sopenharmony_ci return ERR_PTR(-EINVAL); 36918c2ecf20Sopenharmony_ci 36928c2ecf20Sopenharmony_ci ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE); 36938c2ecf20Sopenharmony_ci if (ret || !laddr.nn || !laddr.pn) 36948c2ecf20Sopenharmony_ci return ERR_PTR(-EINVAL); 
36958c2ecf20Sopenharmony_ci 36968c2ecf20Sopenharmony_ci /* find the host and remote ports to connect together */ 36978c2ecf20Sopenharmony_ci spin_lock_irqsave(&nvme_fc_lock, flags); 36988c2ecf20Sopenharmony_ci list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 36998c2ecf20Sopenharmony_ci if (lport->localport.node_name != laddr.nn || 37008c2ecf20Sopenharmony_ci lport->localport.port_name != laddr.pn || 37018c2ecf20Sopenharmony_ci lport->localport.port_state != FC_OBJSTATE_ONLINE) 37028c2ecf20Sopenharmony_ci continue; 37038c2ecf20Sopenharmony_ci 37048c2ecf20Sopenharmony_ci list_for_each_entry(rport, &lport->endp_list, endp_list) { 37058c2ecf20Sopenharmony_ci if (rport->remoteport.node_name != raddr.nn || 37068c2ecf20Sopenharmony_ci rport->remoteport.port_name != raddr.pn || 37078c2ecf20Sopenharmony_ci rport->remoteport.port_state != FC_OBJSTATE_ONLINE) 37088c2ecf20Sopenharmony_ci continue; 37098c2ecf20Sopenharmony_ci 37108c2ecf20Sopenharmony_ci /* if fail to get reference fall through. Will error */ 37118c2ecf20Sopenharmony_ci if (!nvme_fc_rport_get(rport)) 37128c2ecf20Sopenharmony_ci break; 37138c2ecf20Sopenharmony_ci 37148c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&nvme_fc_lock, flags); 37158c2ecf20Sopenharmony_ci 37168c2ecf20Sopenharmony_ci ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); 37178c2ecf20Sopenharmony_ci if (IS_ERR(ctrl)) 37188c2ecf20Sopenharmony_ci nvme_fc_rport_put(rport); 37198c2ecf20Sopenharmony_ci return ctrl; 37208c2ecf20Sopenharmony_ci } 37218c2ecf20Sopenharmony_ci } 37228c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&nvme_fc_lock, flags); 37238c2ecf20Sopenharmony_ci 37248c2ecf20Sopenharmony_ci pr_warn("%s: %s - %s combination not found\n", 37258c2ecf20Sopenharmony_ci __func__, opts->traddr, opts->host_traddr); 37268c2ecf20Sopenharmony_ci return ERR_PTR(-ENOENT); 37278c2ecf20Sopenharmony_ci} 37288c2ecf20Sopenharmony_ci 37298c2ecf20Sopenharmony_ci 37308c2ecf20Sopenharmony_cistatic struct nvmf_transport_ops nvme_fc_transport = { 37318c2ecf20Sopenharmony_ci .name = "fc", 37328c2ecf20Sopenharmony_ci .module = THIS_MODULE, 37338c2ecf20Sopenharmony_ci .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR, 37348c2ecf20Sopenharmony_ci .allowed_opts = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO, 37358c2ecf20Sopenharmony_ci .create_ctrl = nvme_fc_create_ctrl, 37368c2ecf20Sopenharmony_ci}; 37378c2ecf20Sopenharmony_ci 37388c2ecf20Sopenharmony_ci/* Arbitrary successive failures max. 
With lots of subsystems could be high */ 37398c2ecf20Sopenharmony_ci#define DISCOVERY_MAX_FAIL 20 37408c2ecf20Sopenharmony_ci 37418c2ecf20Sopenharmony_cistatic ssize_t nvme_fc_nvme_discovery_store(struct device *dev, 37428c2ecf20Sopenharmony_ci struct device_attribute *attr, const char *buf, size_t count) 37438c2ecf20Sopenharmony_ci{ 37448c2ecf20Sopenharmony_ci unsigned long flags; 37458c2ecf20Sopenharmony_ci LIST_HEAD(local_disc_list); 37468c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport; 37478c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport; 37488c2ecf20Sopenharmony_ci int failcnt = 0; 37498c2ecf20Sopenharmony_ci 37508c2ecf20Sopenharmony_ci spin_lock_irqsave(&nvme_fc_lock, flags); 37518c2ecf20Sopenharmony_cirestart: 37528c2ecf20Sopenharmony_ci list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 37538c2ecf20Sopenharmony_ci list_for_each_entry(rport, &lport->endp_list, endp_list) { 37548c2ecf20Sopenharmony_ci if (!nvme_fc_lport_get(lport)) 37558c2ecf20Sopenharmony_ci continue; 37568c2ecf20Sopenharmony_ci if (!nvme_fc_rport_get(rport)) { 37578c2ecf20Sopenharmony_ci /* 37588c2ecf20Sopenharmony_ci * This is a temporary condition. Upon restart 37598c2ecf20Sopenharmony_ci * this rport will be gone from the list. 37608c2ecf20Sopenharmony_ci * 37618c2ecf20Sopenharmony_ci * Revert the lport put and retry. Anything 37628c2ecf20Sopenharmony_ci * added to the list already will be skipped (as 37638c2ecf20Sopenharmony_ci * they are no longer list_empty). Loops should 37648c2ecf20Sopenharmony_ci * resume at rports that were not yet seen. 37658c2ecf20Sopenharmony_ci */ 37668c2ecf20Sopenharmony_ci nvme_fc_lport_put(lport); 37678c2ecf20Sopenharmony_ci 37688c2ecf20Sopenharmony_ci if (failcnt++ < DISCOVERY_MAX_FAIL) 37698c2ecf20Sopenharmony_ci goto restart; 37708c2ecf20Sopenharmony_ci 37718c2ecf20Sopenharmony_ci pr_err("nvme_discovery: too many reference " 37728c2ecf20Sopenharmony_ci "failures\n"); 37738c2ecf20Sopenharmony_ci goto process_local_list; 37748c2ecf20Sopenharmony_ci } 37758c2ecf20Sopenharmony_ci if (list_empty(&rport->disc_list)) 37768c2ecf20Sopenharmony_ci list_add_tail(&rport->disc_list, 37778c2ecf20Sopenharmony_ci &local_disc_list); 37788c2ecf20Sopenharmony_ci } 37798c2ecf20Sopenharmony_ci } 37808c2ecf20Sopenharmony_ci 37818c2ecf20Sopenharmony_ciprocess_local_list: 37828c2ecf20Sopenharmony_ci while (!list_empty(&local_disc_list)) { 37838c2ecf20Sopenharmony_ci rport = list_first_entry(&local_disc_list, 37848c2ecf20Sopenharmony_ci struct nvme_fc_rport, disc_list); 37858c2ecf20Sopenharmony_ci list_del_init(&rport->disc_list); 37868c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&nvme_fc_lock, flags); 37878c2ecf20Sopenharmony_ci 37888c2ecf20Sopenharmony_ci lport = rport->lport; 37898c2ecf20Sopenharmony_ci /* signal discovery. 
Won't hurt if it repeats */ 37908c2ecf20Sopenharmony_ci nvme_fc_signal_discovery_scan(lport, rport); 37918c2ecf20Sopenharmony_ci nvme_fc_rport_put(rport); 37928c2ecf20Sopenharmony_ci nvme_fc_lport_put(lport); 37938c2ecf20Sopenharmony_ci 37948c2ecf20Sopenharmony_ci spin_lock_irqsave(&nvme_fc_lock, flags); 37958c2ecf20Sopenharmony_ci } 37968c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&nvme_fc_lock, flags); 37978c2ecf20Sopenharmony_ci 37988c2ecf20Sopenharmony_ci return count; 37998c2ecf20Sopenharmony_ci} 38008c2ecf20Sopenharmony_cistatic DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store); 38018c2ecf20Sopenharmony_ci 38028c2ecf20Sopenharmony_cistatic struct attribute *nvme_fc_attrs[] = { 38038c2ecf20Sopenharmony_ci &dev_attr_nvme_discovery.attr, 38048c2ecf20Sopenharmony_ci NULL 38058c2ecf20Sopenharmony_ci}; 38068c2ecf20Sopenharmony_ci 38078c2ecf20Sopenharmony_cistatic struct attribute_group nvme_fc_attr_group = { 38088c2ecf20Sopenharmony_ci .attrs = nvme_fc_attrs, 38098c2ecf20Sopenharmony_ci}; 38108c2ecf20Sopenharmony_ci 38118c2ecf20Sopenharmony_cistatic const struct attribute_group *nvme_fc_attr_groups[] = { 38128c2ecf20Sopenharmony_ci &nvme_fc_attr_group, 38138c2ecf20Sopenharmony_ci NULL 38148c2ecf20Sopenharmony_ci}; 38158c2ecf20Sopenharmony_ci 38168c2ecf20Sopenharmony_cistatic struct class fc_class = { 38178c2ecf20Sopenharmony_ci .name = "fc", 38188c2ecf20Sopenharmony_ci .dev_groups = nvme_fc_attr_groups, 38198c2ecf20Sopenharmony_ci .owner = THIS_MODULE, 38208c2ecf20Sopenharmony_ci}; 38218c2ecf20Sopenharmony_ci 38228c2ecf20Sopenharmony_cistatic int __init nvme_fc_init_module(void) 38238c2ecf20Sopenharmony_ci{ 38248c2ecf20Sopenharmony_ci int ret; 38258c2ecf20Sopenharmony_ci 38268c2ecf20Sopenharmony_ci nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); 38278c2ecf20Sopenharmony_ci if (!nvme_fc_wq) 38288c2ecf20Sopenharmony_ci return -ENOMEM; 38298c2ecf20Sopenharmony_ci 38308c2ecf20Sopenharmony_ci /* 38318c2ecf20Sopenharmony_ci * NOTE: 38328c2ecf20Sopenharmony_ci * It is expected that in the future the kernel will combine 38338c2ecf20Sopenharmony_ci * the FC-isms that are currently under scsi and now being 38348c2ecf20Sopenharmony_ci * added to by NVME into a new standalone FC class. The SCSI 38358c2ecf20Sopenharmony_ci * and NVME protocols and their devices would be under this 38368c2ecf20Sopenharmony_ci * new FC class. 38378c2ecf20Sopenharmony_ci * 38388c2ecf20Sopenharmony_ci * As we need something to post FC-specific udev events to, 38398c2ecf20Sopenharmony_ci * specifically for nvme probe events, start by creating the 38408c2ecf20Sopenharmony_ci * new device class. When the new standalone FC class is 38418c2ecf20Sopenharmony_ci * put in place, this code will move to a more generic 38428c2ecf20Sopenharmony_ci * location for the class. 
38438c2ecf20Sopenharmony_ci */ 38448c2ecf20Sopenharmony_ci ret = class_register(&fc_class); 38458c2ecf20Sopenharmony_ci if (ret) { 38468c2ecf20Sopenharmony_ci pr_err("couldn't register class fc\n"); 38478c2ecf20Sopenharmony_ci goto out_destroy_wq; 38488c2ecf20Sopenharmony_ci } 38498c2ecf20Sopenharmony_ci 38508c2ecf20Sopenharmony_ci /* 38518c2ecf20Sopenharmony_ci * Create a device for the FC-centric udev events 38528c2ecf20Sopenharmony_ci */ 38538c2ecf20Sopenharmony_ci fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL, 38548c2ecf20Sopenharmony_ci "fc_udev_device"); 38558c2ecf20Sopenharmony_ci if (IS_ERR(fc_udev_device)) { 38568c2ecf20Sopenharmony_ci pr_err("couldn't create fc_udev device!\n"); 38578c2ecf20Sopenharmony_ci ret = PTR_ERR(fc_udev_device); 38588c2ecf20Sopenharmony_ci goto out_destroy_class; 38598c2ecf20Sopenharmony_ci } 38608c2ecf20Sopenharmony_ci 38618c2ecf20Sopenharmony_ci ret = nvmf_register_transport(&nvme_fc_transport); 38628c2ecf20Sopenharmony_ci if (ret) 38638c2ecf20Sopenharmony_ci goto out_destroy_device; 38648c2ecf20Sopenharmony_ci 38658c2ecf20Sopenharmony_ci return 0; 38668c2ecf20Sopenharmony_ci 38678c2ecf20Sopenharmony_ciout_destroy_device: 38688c2ecf20Sopenharmony_ci device_destroy(&fc_class, MKDEV(0, 0)); 38698c2ecf20Sopenharmony_ciout_destroy_class: 38708c2ecf20Sopenharmony_ci class_unregister(&fc_class); 38718c2ecf20Sopenharmony_ciout_destroy_wq: 38728c2ecf20Sopenharmony_ci destroy_workqueue(nvme_fc_wq); 38738c2ecf20Sopenharmony_ci 38748c2ecf20Sopenharmony_ci return ret; 38758c2ecf20Sopenharmony_ci} 38768c2ecf20Sopenharmony_ci 38778c2ecf20Sopenharmony_cistatic void 38788c2ecf20Sopenharmony_cinvme_fc_delete_controllers(struct nvme_fc_rport *rport) 38798c2ecf20Sopenharmony_ci{ 38808c2ecf20Sopenharmony_ci struct nvme_fc_ctrl *ctrl; 38818c2ecf20Sopenharmony_ci 38828c2ecf20Sopenharmony_ci spin_lock(&rport->lock); 38838c2ecf20Sopenharmony_ci list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { 38848c2ecf20Sopenharmony_ci dev_warn(ctrl->ctrl.device, 38858c2ecf20Sopenharmony_ci "NVME-FC{%d}: transport unloading: deleting ctrl\n", 38868c2ecf20Sopenharmony_ci ctrl->cnum); 38878c2ecf20Sopenharmony_ci nvme_delete_ctrl(&ctrl->ctrl); 38888c2ecf20Sopenharmony_ci } 38898c2ecf20Sopenharmony_ci spin_unlock(&rport->lock); 38908c2ecf20Sopenharmony_ci} 38918c2ecf20Sopenharmony_ci 38928c2ecf20Sopenharmony_cistatic void 38938c2ecf20Sopenharmony_cinvme_fc_cleanup_for_unload(void) 38948c2ecf20Sopenharmony_ci{ 38958c2ecf20Sopenharmony_ci struct nvme_fc_lport *lport; 38968c2ecf20Sopenharmony_ci struct nvme_fc_rport *rport; 38978c2ecf20Sopenharmony_ci 38988c2ecf20Sopenharmony_ci list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { 38998c2ecf20Sopenharmony_ci list_for_each_entry(rport, &lport->endp_list, endp_list) { 39008c2ecf20Sopenharmony_ci nvme_fc_delete_controllers(rport); 39018c2ecf20Sopenharmony_ci } 39028c2ecf20Sopenharmony_ci } 39038c2ecf20Sopenharmony_ci} 39048c2ecf20Sopenharmony_ci 39058c2ecf20Sopenharmony_cistatic void __exit nvme_fc_exit_module(void) 39068c2ecf20Sopenharmony_ci{ 39078c2ecf20Sopenharmony_ci unsigned long flags; 39088c2ecf20Sopenharmony_ci bool need_cleanup = false; 39098c2ecf20Sopenharmony_ci 39108c2ecf20Sopenharmony_ci spin_lock_irqsave(&nvme_fc_lock, flags); 39118c2ecf20Sopenharmony_ci nvme_fc_waiting_to_unload = true; 39128c2ecf20Sopenharmony_ci if (!list_empty(&nvme_fc_lport_list)) { 39138c2ecf20Sopenharmony_ci need_cleanup = true; 39148c2ecf20Sopenharmony_ci nvme_fc_cleanup_for_unload(); 39158c2ecf20Sopenharmony_ci } 
39168c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&nvme_fc_lock, flags); 39178c2ecf20Sopenharmony_ci if (need_cleanup) { 39188c2ecf20Sopenharmony_ci pr_info("%s: waiting for ctlr deletes\n", __func__); 39198c2ecf20Sopenharmony_ci wait_for_completion(&nvme_fc_unload_proceed); 39208c2ecf20Sopenharmony_ci pr_info("%s: ctrl deletes complete\n", __func__); 39218c2ecf20Sopenharmony_ci } 39228c2ecf20Sopenharmony_ci 39238c2ecf20Sopenharmony_ci nvmf_unregister_transport(&nvme_fc_transport); 39248c2ecf20Sopenharmony_ci 39258c2ecf20Sopenharmony_ci ida_destroy(&nvme_fc_local_port_cnt); 39268c2ecf20Sopenharmony_ci ida_destroy(&nvme_fc_ctrl_cnt); 39278c2ecf20Sopenharmony_ci 39288c2ecf20Sopenharmony_ci device_destroy(&fc_class, MKDEV(0, 0)); 39298c2ecf20Sopenharmony_ci class_unregister(&fc_class); 39308c2ecf20Sopenharmony_ci destroy_workqueue(nvme_fc_wq); 39318c2ecf20Sopenharmony_ci} 39328c2ecf20Sopenharmony_ci 39338c2ecf20Sopenharmony_cimodule_init(nvme_fc_init_module); 39348c2ecf20Sopenharmony_cimodule_exit(nvme_fc_exit_module); 39358c2ecf20Sopenharmony_ci 39368c2ecf20Sopenharmony_ciMODULE_LICENSE("GPL v2"); 3937
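/*
 * Usage sketch (illustrative only, not compiled): controllers on this
 * transport are created through the generic nvme-fabrics interface using
 * the "fc" transport name registered above.  The WWN and NQN values below
 * are placeholders, and the snippet assumes the standard /dev/nvme-fabrics
 * character device provided by the fabrics core:
 *
 *	int fd = open("/dev/nvme-fabrics", O_RDWR);
 *	const char *args =
 *		"transport=fc,"
 *		"traddr=nn-0x20000090fa942779:pn-0x10000090fa942779,"
 *		"host_traddr=nn-0x20000090fa942765:pn-0x10000090fa942765,"
 *		"nqn=nqn.2016-06.io.example:subsys1";
 *	if (fd >= 0)
 *		write(fd, args, strlen(args));
 *
 * nvme-cli's "nvme connect --transport=fc ..." wraps the same interface.
 * Writing any value to /sys/class/fc/fc_udev_device/nvme_discovery (the
 * attribute created above) re-signals a discovery scan for all known
 * FC remote ports.
 */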