Lines matching refs: ctrl

35 	struct nvme_fc_ctrl	*ctrl;
99 struct nvme_fc_ctrl *ctrl;
179 struct nvme_ctrl ctrl;
183 to_fc_ctrl(struct nvme_ctrl *ctrl)
185 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
557 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
559 switch (ctrl->ctrl.state) {
566 dev_info(ctrl->ctrl.device,
568 "Attempting reconnect\n", ctrl->cnum);
570 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
592 struct nvme_fc_ctrl *ctrl;
628 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
629 nvme_fc_resume_controller(ctrl);
789 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
791 dev_info(ctrl->ctrl.device,
793 "Reconnect", ctrl->cnum);
795 switch (ctrl->ctrl.state) {
805 if (nvme_reset_ctrl(&ctrl->ctrl)) {
806 dev_warn(ctrl->ctrl.device,
808 ctrl->cnum);
809 nvme_delete_ctrl(&ctrl->ctrl);
855 struct nvme_fc_ctrl *ctrl;
871 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
874 dev_warn(ctrl->ctrl.device,
876 ctrl->cnum);
877 nvme_delete_ctrl(&ctrl->ctrl);
879 nvme_fc_ctrl_connectivity_loss(ctrl);
1034 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1176 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1188 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1190 dev_info(ctrl->ctrl.device,
1192 ctrl->cnum);
1200 if (ctrl->lport->ops->lsrqst_priv_sz)
1219 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1220 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1222 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1232 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1269 dev_err(ctrl->dev,
1273 spin_lock_irqsave(&ctrl->lock, flags);
1274 ctrl->association_id =
1279 spin_unlock_irqrestore(&ctrl->lock, flags);
1286 dev_err(ctrl->dev,
1293 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1304 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1306 dev_info(ctrl->ctrl.device,
1308 ctrl->cnum);
1316 if (ctrl->lport->ops->lsrqst_priv_sz)
1330 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1347 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1375 dev_err(ctrl->dev,
1388 dev_err(ctrl->dev,
1424 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1434 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1436 dev_info(ctrl->ctrl.device,
1439 ctrl->cnum);
1446 if (ctrl->lport->ops->lsrqst_priv_sz)
1452 ctrl->association_id);
1454 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1510 struct nvme_fc_ctrl *ctrl, *ret = NULL;
1517 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
1518 if (!nvme_fc_ctrl_get(ctrl))
1520 spin_lock(&ctrl->lock);
1521 if (association_id == ctrl->association_id) {
1522 oldls = ctrl->rcv_disconn;
1523 ctrl->rcv_disconn = lsop;
1524 ret = ctrl;
1526 spin_unlock(&ctrl->lock);
1528 /* leave the ctrl get reference */
1530 nvme_fc_ctrl_put(ctrl);
1539 "LS's received\n", ctrl->cnum);
1565 struct nvme_fc_ctrl *ctrl = NULL;
1573 ctrl = nvme_fc_match_disconn_ls(rport, lsop);
1574 if (!ctrl)
1607 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
1610 nvme_fc_ctrl_put(ctrl);
1814 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1817 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1819 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1835 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1840 spin_lock_irqsave(&ctrl->lock, flags);
1844 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
1846 ctrl->iocnt++;
1848 spin_unlock_irqrestore(&ctrl->lock, flags);
1853 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1854 &ctrl->rport->remoteport,
1862 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1864 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1872 __nvme_fc_abort_op(ctrl, aen_op);
1876 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1882 spin_lock_irqsave(&ctrl->lock, flags);
1883 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
1885 if (!--ctrl->iocnt)
1886 wake_up(&ctrl->ioabort_wait);
1888 spin_unlock_irqrestore(&ctrl->lock, flags);
1895 struct nvme_fc_ctrl *ctrl =
1898 nvme_fc_error_recovery(ctrl, "transport detected io error");
1907 struct nvme_fc_ctrl *ctrl = op->ctrl;
1955 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1962 dev_info(ctrl->ctrl.device,
1964 ctrl->cnum, freq->status);
1994 dev_info(ctrl->ctrl.device,
1997 ctrl->cnum, freq->transferred_length,
2016 dev_info(ctrl->ctrl.device,
2020 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
2034 dev_info(ctrl->ctrl.device,
2037 ctrl->cnum, freq->rcv_rsplen);
2045 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
2046 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2049 nvme_fc_ctrl_put(ctrl);
2053 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2058 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
2059 queue_work(nvme_reset_wq, &ctrl->ioerr_work);
2063 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2078 op->ctrl = ctrl;
2092 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2094 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2095 dev_err(ctrl->dev,
2101 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2104 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2105 dev_err(ctrl->dev,
2119 struct nvme_fc_ctrl *ctrl = set->driver_data;
2121 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
2122 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
2125 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
2130 nvme_req(rq)->ctrl = &ctrl->ctrl;
2135 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
2143 aen_op = ctrl->aen_ops;
2145 if (ctrl->lport->ops->fcprqst_priv_sz) {
2146 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2154 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
2174 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
2179 cancel_work_sync(&ctrl->ctrl.async_event_work);
2180 aen_op = ctrl->aen_ops;
2182 __nvme_fc_exit_request(ctrl, aen_op);
2190 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
2193 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
2203 struct nvme_fc_ctrl *ctrl = data;
2205 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
2214 struct nvme_fc_ctrl *ctrl = data;
2216 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
2222 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
2226 queue = &ctrl->queues[idx];
2228 queue->ctrl = ctrl;
2231 queue->dev = ctrl->dev;
2234 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
2276 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2279 if (ctrl->lport->ops->delete_queue)
2280 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2286 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2290 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2291 nvme_fc_free_queue(&ctrl->queues[i]);
2295 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2301 if (ctrl->lport->ops->create_queue)
2302 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2309 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2311 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2314 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2315 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2319 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2321 struct nvme_fc_queue *queue = &ctrl->queues[1];
2324 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2325 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2334 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2339 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2343 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2344 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2348 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
2352 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2359 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2363 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2364 nvme_fc_init_queue(ctrl, i);
2370 struct nvme_fc_ctrl *ctrl =
2374 if (ctrl->ctrl.tagset) {
2375 blk_cleanup_queue(ctrl->ctrl.connect_q);
2376 blk_mq_free_tag_set(&ctrl->tag_set);
2380 spin_lock_irqsave(&ctrl->rport->lock, flags);
2381 list_del(&ctrl->ctrl_list);
2382 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2384 nvme_start_admin_queue(&ctrl->ctrl);
2385 blk_cleanup_queue(ctrl->ctrl.admin_q);
2386 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
2387 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2389 kfree(ctrl->queues);
2391 put_device(ctrl->dev);
2392 nvme_fc_rport_put(ctrl->rport);
2394 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2395 if (ctrl->ctrl.opts)
2396 nvmf_free_options(ctrl->ctrl.opts);
2397 kfree(ctrl);
2401 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2403 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2407 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2409 return kref_get_unless_zero(&ctrl->ref);
2419 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2421 WARN_ON(nctrl != &ctrl->ctrl);
2423 nvme_fc_ctrl_put(ctrl);
2443 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2447 __nvme_fc_abort_op(ctrl, op);
2461 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2469 if (ctrl->ctrl.queue_count > 1) {
2470 for (q = 1; q < ctrl->ctrl.queue_count; q++)
2471 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2473 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2487 if (ctrl->ctrl.queue_count > 1) {
2488 nvme_stop_queues(&ctrl->ctrl);
2489 nvme_sync_io_queues(&ctrl->ctrl);
2490 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2491 nvme_fc_terminate_exchange, &ctrl->ctrl);
2492 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2494 nvme_start_queues(&ctrl->ctrl);
2512 nvme_stop_admin_queue(&ctrl->ctrl);
2513 blk_sync_queue(ctrl->ctrl.admin_q);
2514 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2515 nvme_fc_terminate_exchange, &ctrl->ctrl);
2516 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2520 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2529 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2530 __nvme_fc_abort_outstanding_ios(ctrl, true);
2531 set_bit(ASSOC_FAILED, &ctrl->flags);
2536 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2539 dev_warn(ctrl->ctrl.device,
2541 ctrl->cnum, errmsg);
2542 dev_warn(ctrl->ctrl.device,
2543 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2545 nvme_reset_ctrl(&ctrl->ctrl);
2552 struct nvme_fc_ctrl *ctrl = op->ctrl;
2560 dev_info(ctrl->ctrl.device,
2563 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2565 if (__nvme_fc_abort_op(ctrl, op))
2566 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2577 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2597 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2612 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2620 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2652 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2664 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2667 if (!nvme_fc_ctrl_get(ctrl))
2711 ret = nvme_fc_map_data(ctrl, op->rq, op);
2714 nvme_fc_ctrl_put(ctrl);
2721 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2730 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2731 &ctrl->rport->remoteport,
2748 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2751 nvme_fc_unmap_data(ctrl, op->rq, op);
2755 nvme_fc_ctrl_put(ctrl);
2757 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2773 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2783 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2784 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2785 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2809 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2815 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2819 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2822 aen_op = &ctrl->aen_ops[0];
2824 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2827 dev_err(ctrl->ctrl.device,
2835 struct nvme_fc_ctrl *ctrl = op->ctrl;
2840 nvme_fc_unmap_data(ctrl, rq, op);
2842 nvme_fc_ctrl_put(ctrl);
2856 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2858 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2863 ctrl->lport->ops->max_hw_queues);
2864 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2866 dev_info(ctrl->ctrl.device,
2871 ctrl->ctrl.queue_count = nr_io_queues + 1;
2875 nvme_fc_init_io_queues(ctrl);
2877 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2878 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2879 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2880 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2881 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
2882 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2883 ctrl->tag_set.cmd_size =
2885 ctrl->lport->ops->fcprqst_priv_sz);
2886 ctrl->tag_set.driver_data = ctrl;
2887 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
2888 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2890 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2894 ctrl->ctrl.tagset = &ctrl->tag_set;
2896 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2897 if (IS_ERR(ctrl->ctrl.connect_q)) {
2898 ret = PTR_ERR(ctrl->ctrl.connect_q);
2902 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2906 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2910 ctrl->ioq_live = true;
2915 nvme_fc_delete_hw_io_queues(ctrl);
2917 blk_cleanup_queue(ctrl->ctrl.connect_q);
2919 blk_mq_free_tag_set(&ctrl->tag_set);
2920 nvme_fc_free_io_queues(ctrl);
2923 ctrl->ctrl.tagset = NULL;
2929 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2931 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2932 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2937 ctrl->lport->ops->max_hw_queues);
2938 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2940 dev_info(ctrl->ctrl.device,
2946 dev_info(ctrl->ctrl.device,
2952 ctrl->ctrl.queue_count = nr_io_queues + 1;
2954 if (ctrl->ctrl.queue_count == 1)
2958 dev_info(ctrl->ctrl.device,
2961 nvme_wait_freeze(&ctrl->ctrl);
2962 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2963 nvme_unfreeze(&ctrl->ctrl);
2966 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2970 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2977 nvme_fc_delete_hw_io_queues(ctrl);
2979 nvme_fc_free_io_queues(ctrl);
3003 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
3005 struct nvme_fc_rport *rport = ctrl->rport;
3008 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3019 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3021 struct nvme_fc_rport *rport = ctrl->rport;
3025 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3042 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3044 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3050 ++ctrl->ctrl.nr_reconnects;
3052 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3055 if (nvme_fc_ctlr_active_on_rport(ctrl))
3058 dev_info(ctrl->ctrl.device,
3061 ctrl->cnum, ctrl->lport->localport.port_name,
3062 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3064 clear_bit(ASSOC_FAILED, &ctrl->flags);
3070 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
3075 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3080 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3084 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3089 * todo:- add code to check if ctrl attributes changed from
3093 ret = nvme_enable_ctrl(&ctrl->ctrl);
3094 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3097 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3098 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3101 nvme_start_admin_queue(&ctrl->ctrl);
3103 ret = nvme_init_identify(&ctrl->ctrl);
3104 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3110 if (ctrl->ctrl.icdoff) {
3111 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3112 ctrl->ctrl.icdoff);
3118 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3120 dev_warn(ctrl->ctrl.device,
3121 "queue_size %zu > ctrl maxcmd %u, reducing "
3123 opts->queue_size, ctrl->ctrl.maxcmd);
3124 opts->queue_size = ctrl->ctrl.maxcmd;
3127 if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
3129 dev_warn(ctrl->ctrl.device,
3130 "queue_size %zu > ctrl sqsize %u, reducing "
3132 opts->queue_size, ctrl->ctrl.sqsize + 1);
3133 opts->queue_size = ctrl->ctrl.sqsize + 1;
3136 ret = nvme_fc_init_aen_ops(ctrl);
3144 if (ctrl->ctrl.queue_count > 1) {
3145 if (!ctrl->ioq_live)
3146 ret = nvme_fc_create_io_queues(ctrl);
3148 ret = nvme_fc_recreate_io_queues(ctrl);
3150 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
3153 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3155 ctrl->ctrl.nr_reconnects = 0;
3158 nvme_start_ctrl(&ctrl->ctrl);
3163 nvme_fc_term_aen_ops(ctrl);
3166 nvme_fc_xmt_disconnect_assoc(ctrl);
3167 spin_lock_irqsave(&ctrl->lock, flags);
3168 ctrl->association_id = 0;
3169 disls = ctrl->rcv_disconn;
3170 ctrl->rcv_disconn = NULL;
3171 spin_unlock_irqrestore(&ctrl->lock, flags);
3175 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3177 nvme_fc_free_queue(&ctrl->queues[0]);
3178 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3179 nvme_fc_ctlr_inactive_on_rport(ctrl);
3192 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3197 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3200 spin_lock_irqsave(&ctrl->lock, flags);
3201 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3202 ctrl->iocnt = 0;
3203 spin_unlock_irqrestore(&ctrl->lock, flags);
3205 __nvme_fc_abort_outstanding_ios(ctrl, false);
3208 nvme_fc_abort_aen_ops(ctrl);
3211 spin_lock_irq(&ctrl->lock);
3212 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3213 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3214 spin_unlock_irq(&ctrl->lock);
3216 nvme_fc_term_aen_ops(ctrl);
3224 if (ctrl->association_id)
3225 nvme_fc_xmt_disconnect_assoc(ctrl);
3227 spin_lock_irqsave(&ctrl->lock, flags);
3228 ctrl->association_id = 0;
3229 disls = ctrl->rcv_disconn;
3230 ctrl->rcv_disconn = NULL;
3231 spin_unlock_irqrestore(&ctrl->lock, flags);
3239 if (ctrl->ctrl.tagset) {
3240 nvme_fc_delete_hw_io_queues(ctrl);
3241 nvme_fc_free_io_queues(ctrl);
3244 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3245 nvme_fc_free_queue(&ctrl->queues[0]);
3248 nvme_start_admin_queue(&ctrl->ctrl);
3251 nvme_start_queues(&ctrl->ctrl);
3253 nvme_fc_ctlr_inactive_on_rport(ctrl);
3259 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3261 cancel_work_sync(&ctrl->ioerr_work);
3262 cancel_delayed_work_sync(&ctrl->connect_work);
3267 nvme_fc_delete_association(ctrl);
3271 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3273 struct nvme_fc_rport *rport = ctrl->rport;
3275 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3278 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
3282 dev_info(ctrl->ctrl.device,
3284 ctrl->cnum, status);
3288 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3290 dev_info(ctrl->ctrl.device,
3293 ctrl->cnum, recon_delay / HZ);
3297 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3300 dev_warn(ctrl->ctrl.device,
3303 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3305 dev_warn(ctrl->ctrl.device,
3308 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3309 (ctrl->ctrl.opts->max_reconnects *
3310 ctrl->ctrl.opts->reconnect_delay)));
3311 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3318 struct nvme_fc_ctrl *ctrl =
3319 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3321 nvme_stop_ctrl(&ctrl->ctrl);
3324 nvme_fc_delete_association(ctrl);
3326 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3327 dev_err(ctrl->ctrl.device,
3329 "to CONNECTING\n", ctrl->cnum);
3331 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3332 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3333 dev_err(ctrl->ctrl.device,
3335 "after reset\n", ctrl->cnum);
3337 flush_delayed_work(&ctrl->connect_work);
3340 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3363 struct nvme_fc_ctrl *ctrl =
3367 ret = nvme_fc_create_association(ctrl);
3369 nvme_fc_reconnect_or_delete(ctrl, ret);
3371 dev_info(ctrl->ctrl.device,
3373 ctrl->cnum);
3399 struct nvme_fc_ctrl *ctrl;
3404 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3405 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3418 struct nvme_fc_ctrl *ctrl;
3434 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3435 if (!ctrl) {
3459 ctrl->ctrl.opts = opts;
3460 ctrl->ctrl.nr_reconnects = 0;
3462 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3464 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3465 INIT_LIST_HEAD(&ctrl->ctrl_list);
3466 ctrl->lport = lport;
3467 ctrl->rport = rport;
3468 ctrl->dev = lport->dev;
3469 ctrl->cnum = idx;
3470 ctrl->ioq_live = false;
3471 init_waitqueue_head(&ctrl->ioabort_wait);
3473 get_device(ctrl->dev);
3474 kref_init(&ctrl->ref);
3476 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3477 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3478 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3479 spin_lock_init(&ctrl->lock);
3482 ctrl->ctrl.queue_count = min_t(unsigned int,
3485 ctrl->ctrl.queue_count++; /* +1 for admin queue */
3487 ctrl->ctrl.sqsize = opts->queue_size - 1;
3488 ctrl->ctrl.kato = opts->kato;
3489 ctrl->ctrl.cntlid = 0xffff;
3492 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3494 if (!ctrl->queues)
3497 nvme_fc_init_queue(ctrl, 0);
3499 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
3500 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
3501 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
3502 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
3503 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
3504 ctrl->admin_tag_set.cmd_size =
3506 ctrl->lport->ops->fcprqst_priv_sz);
3507 ctrl->admin_tag_set.driver_data = ctrl;
3508 ctrl->admin_tag_set.nr_hw_queues = 1;
3509 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
3510 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
3512 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
3515 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
3517 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3518 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
3519 ret = PTR_ERR(ctrl->ctrl.fabrics_q);
3523 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
3524 if (IS_ERR(ctrl->ctrl.admin_q)) {
3525 ret = PTR_ERR(ctrl->ctrl.admin_q);
3536 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3540 /* at this point, teardown path changes to ref counting on nvme ctrl */
3543 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3546 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3547 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3548 dev_err(ctrl->ctrl.device,
3549 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3553 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3554 dev_err(ctrl->ctrl.device,
3556 ctrl->cnum);
3560 flush_delayed_work(&ctrl->connect_work);
3562 dev_info(ctrl->ctrl.device,
3563 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3564 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
3566 return &ctrl->ctrl;
3569 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3570 cancel_work_sync(&ctrl->ioerr_work);
3571 cancel_work_sync(&ctrl->ctrl.reset_work);
3572 cancel_delayed_work_sync(&ctrl->connect_work);
3574 ctrl->ctrl.opts = NULL;
3576 /* initiate nvme ctrl ref counting teardown */
3577 nvme_uninit_ctrl(&ctrl->ctrl);
3579 /* Remove core ctrl ref. */
3580 nvme_put_ctrl(&ctrl->ctrl);
3594 blk_cleanup_queue(ctrl->ctrl.admin_q);
3596 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
3598 blk_mq_free_tag_set(&ctrl->admin_tag_set);
3600 kfree(ctrl->queues);
3602 put_device(ctrl->dev);
3603 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
3605 kfree(ctrl);
3682 struct nvme_ctrl *ctrl;
3716 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3717 if (IS_ERR(ctrl))
3719 return ctrl;
3880 struct nvme_fc_ctrl *ctrl;
3883 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3884 dev_warn(ctrl->ctrl.device,
3885 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3886 ctrl->cnum);
3887 nvme_delete_ctrl(&ctrl->ctrl);
3920 pr_info("%s: ctrl deletes complete\n", __func__);
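Two idioms recur throughout the hits above: the embedded-struct cast in to_fc_ctrl() (lines 179-185), which recovers the wrapping nvme_fc_ctrl from a pointer to its embedded nvme_ctrl via container_of(), and the reference counting behind nvme_fc_ctrl_get()/nvme_fc_ctrl_put() (lines 2401-2409, built on kref_get_unless_zero()/kref_put()), which keeps the controller alive while LS requests and FCP ops are in flight. The following is a minimal userspace sketch of both patterns; the types fc_ctrl/core_ctrl and the hand-rolled refcount are simplified stand-ins for illustration, not the driver's actual structures or the kernel kref API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for the kernel's container_of(): given a pointer to a
 * member, step back to the start of the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_ctrl {              /* plays the role of struct nvme_ctrl */
	int state;
};

struct fc_ctrl {                /* plays the role of struct nvme_fc_ctrl */
	int cnum;
	int refcnt;             /* simplified stand-in for struct kref */
	struct core_ctrl ctrl;  /* embedded core controller, as in the driver */
};

/* Mirrors to_fc_ctrl(): recover the wrapper from the embedded member. */
static struct fc_ctrl *to_fc_ctrl(struct core_ctrl *ctrl)
{
	return container_of(ctrl, struct fc_ctrl, ctrl);
}

/* Mirrors the get/put pairing in the listing: callers take a reference
 * before issuing an op and drop it on completion; the last put frees. */
static int fc_ctrl_get(struct fc_ctrl *c)
{
	if (c->refcnt == 0)
		return 0;       /* controller already being torn down */
	c->refcnt++;
	return 1;
}

static void fc_ctrl_put(struct fc_ctrl *c)
{
	if (--c->refcnt == 0) {
		printf("ctrl %d: last reference dropped, freeing\n", c->cnum);
		free(c);
	}
}

int main(void)
{
	struct fc_ctrl *c = calloc(1, sizeof(*c));

	c->cnum = 0;
	c->refcnt = 1;          /* initial reference, as kref_init() would set */

	/* Code holding only the embedded core_ctrl can get back to the
	 * FC-specific wrapper, exactly as to_fc_ctrl() does in the listing. */
	assert(to_fc_ctrl(&c->ctrl) == c);

	if (fc_ctrl_get(c)) {   /* reference held across an in-flight op */
		/* ... op runs ... */
		fc_ctrl_put(c); /* completion path drops it */
	}
	fc_ctrl_put(c);         /* final put frees the controller */
	return 0;
}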