Lines matching refs:ctrl (each entry is prefixed with its source-file line number)

36 	struct nvme_fc_ctrl	*ctrl;
100 struct nvme_fc_ctrl *ctrl;
180 struct nvme_ctrl ctrl;
184 to_fc_ctrl(struct nvme_ctrl *ctrl)
186 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
551 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
553 switch (nvme_ctrl_state(&ctrl->ctrl)) {
560 dev_info(ctrl->ctrl.device,
562 "Attempting reconnect\n", ctrl->cnum);
564 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
586 struct nvme_fc_ctrl *ctrl;
622 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
623 nvme_fc_resume_controller(ctrl);
783 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
785 dev_info(ctrl->ctrl.device,
787 "Reconnect", ctrl->cnum);
789 switch (nvme_ctrl_state(&ctrl->ctrl)) {
799 if (nvme_reset_ctrl(&ctrl->ctrl)) {
800 dev_warn(ctrl->ctrl.device,
802 ctrl->cnum);
803 nvme_delete_ctrl(&ctrl->ctrl);
849 struct nvme_fc_ctrl *ctrl;
865 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
868 dev_warn(ctrl->ctrl.device,
870 ctrl->cnum);
871 nvme_delete_ctrl(&ctrl->ctrl);
873 nvme_fc_ctrl_connectivity_loss(ctrl);
1028 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1170 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
1182 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1184 dev_info(ctrl->ctrl.device,
1186 ctrl->cnum);
1194 if (ctrl->lport->ops->lsrqst_priv_sz)
1213 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
1214 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
1216 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
1226 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1263 dev_err(ctrl->dev,
1267 spin_lock_irqsave(&ctrl->lock, flags);
1268 ctrl->association_id =
1273 spin_unlock_irqrestore(&ctrl->lock, flags);
1280 dev_err(ctrl->dev,
1287 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1298 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1300 dev_info(ctrl->ctrl.device,
1302 ctrl->cnum);
1310 if (ctrl->lport->ops->lsrqst_priv_sz)
1324 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1341 ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
1369 dev_err(ctrl->dev,
1382 dev_err(ctrl->dev,
1418 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1428 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1430 dev_info(ctrl->ctrl.device,
1433 ctrl->cnum);
1440 if (ctrl->lport->ops->lsrqst_priv_sz)
1446 ctrl->association_id);
1448 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
1506 struct nvme_fc_ctrl *ctrl, *ret = NULL;
1513 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
1514 if (!nvme_fc_ctrl_get(ctrl))
1516 spin_lock(&ctrl->lock);
1517 if (association_id == ctrl->association_id) {
1518 oldls = ctrl->rcv_disconn;
1519 ctrl->rcv_disconn = lsop;
1520 ret = ctrl;
1522 spin_unlock(&ctrl->lock);
1524 /* leave the ctrl get reference */
1526 nvme_fc_ctrl_put(ctrl);
1535 "LS's received\n", ctrl->cnum);
1561 struct nvme_fc_ctrl *ctrl = NULL;
1569 ctrl = nvme_fc_match_disconn_ls(rport, lsop);
1570 if (!ctrl)
1603 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received");
1606 nvme_fc_ctrl_put(ctrl);
1821 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1824 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1826 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1842 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1847 spin_lock_irqsave(&ctrl->lock, flags);
1851 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) {
1853 ctrl->iocnt++;
1855 spin_unlock_irqrestore(&ctrl->lock, flags);
1860 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1861 &ctrl->rport->remoteport,
1869 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
1871 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1879 __nvme_fc_abort_op(ctrl, aen_op);
1883 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
1889 spin_lock_irqsave(&ctrl->lock, flags);
1890 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) &&
1892 if (!--ctrl->iocnt)
1893 wake_up(&ctrl->ioabort_wait);
1895 spin_unlock_irqrestore(&ctrl->lock, flags);
1902 struct nvme_fc_ctrl *ctrl =
1905 nvme_fc_error_recovery(ctrl, "transport detected io error");
1932 struct nvme_fc_ctrl *ctrl = op->ctrl;
1980 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1987 dev_info(ctrl->ctrl.device,
1989 ctrl->cnum, freq->status);
2019 dev_info(ctrl->ctrl.device,
2022 ctrl->cnum, freq->transferred_length,
2041 dev_info(ctrl->ctrl.device,
2045 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len),
2059 dev_info(ctrl->ctrl.device,
2062 ctrl->cnum, freq->rcv_rsplen);
2070 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
2071 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2074 nvme_fc_ctrl_put(ctrl);
2078 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2083 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
2084 queue_work(nvme_reset_wq, &ctrl->ioerr_work);
2088 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2103 op->ctrl = ctrl;
2117 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2119 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2120 dev_err(ctrl->dev,
2126 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2129 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2130 dev_err(ctrl->dev,
2144 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
2146 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
2147 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
2150 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
2155 nvme_req(rq)->ctrl = &ctrl->ctrl;
2161 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
2169 aen_op = ctrl->aen_ops;
2171 if (ctrl->lport->ops->fcprqst_priv_sz) {
2172 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2180 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
2200 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
2205 cancel_work_sync(&ctrl->ctrl.async_event_work);
2206 aen_op = ctrl->aen_ops;
2208 __nvme_fc_exit_request(ctrl, aen_op);
2218 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
2219 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
2240 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
2244 queue = &ctrl->queues[idx];
2246 queue->ctrl = ctrl;
2249 queue->dev = ctrl->dev;
2252 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
2294 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
2297 if (ctrl->lport->ops->delete_queue)
2298 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2304 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
2308 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2309 nvme_fc_free_queue(&ctrl->queues[i]);
2313 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
2319 if (ctrl->lport->ops->create_queue)
2320 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2327 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
2329 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
2332 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
2333 __nvme_fc_delete_hw_queue(ctrl, queue, i);
2337 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2339 struct nvme_fc_queue *queue = &ctrl->queues[1];
2342 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
2343 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
2352 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
2357 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2361 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
2362 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
2366 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2370 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2377 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
2381 for (i = 1; i < ctrl->ctrl.queue_count; i++)
2382 nvme_fc_init_queue(ctrl, i);
2388 struct nvme_fc_ctrl *ctrl =
2392 if (ctrl->ctrl.tagset)
2393 nvme_remove_io_tag_set(&ctrl->ctrl);
2396 spin_lock_irqsave(&ctrl->rport->lock, flags);
2397 list_del(&ctrl->ctrl_list);
2398 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
2400 nvme_unquiesce_admin_queue(&ctrl->ctrl);
2401 nvme_remove_admin_tag_set(&ctrl->ctrl);
2403 kfree(ctrl->queues);
2405 put_device(ctrl->dev);
2406 nvme_fc_rport_put(ctrl->rport);
2408 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
2409 if (ctrl->ctrl.opts)
2410 nvmf_free_options(ctrl->ctrl.opts);
2411 kfree(ctrl);
2415 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
2417 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
2421 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
2423 return kref_get_unless_zero(&ctrl->ref);
2433 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2435 WARN_ON(nctrl != &ctrl->ctrl);
2437 nvme_fc_ctrl_put(ctrl);
2456 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2460 __nvme_fc_abort_op(ctrl, op);
2474 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
2482 if (ctrl->ctrl.queue_count > 1) {
2483 for (q = 1; q < ctrl->ctrl.queue_count; q++)
2484 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
2486 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2500 if (ctrl->ctrl.queue_count > 1) {
2501 nvme_quiesce_io_queues(&ctrl->ctrl);
2502 nvme_sync_io_queues(&ctrl->ctrl);
2503 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2504 nvme_fc_terminate_exchange, &ctrl->ctrl);
2505 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
2507 nvme_unquiesce_io_queues(&ctrl->ctrl);
2525 nvme_quiesce_admin_queue(&ctrl->ctrl);
2526 blk_sync_queue(ctrl->ctrl.admin_q);
2527 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2528 nvme_fc_terminate_exchange, &ctrl->ctrl);
2529 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
2531 nvme_unquiesce_admin_queue(&ctrl->ctrl);
2535 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
2544 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
2545 __nvme_fc_abort_outstanding_ios(ctrl, true);
2546 set_bit(ASSOC_FAILED, &ctrl->flags);
2547 dev_warn(ctrl->ctrl.device,
2549 ctrl->cnum);
2554 if (ctrl->ctrl.state != NVME_CTRL_LIVE)
2557 dev_warn(ctrl->ctrl.device,
2559 ctrl->cnum, errmsg);
2560 dev_warn(ctrl->ctrl.device,
2561 "NVME-FC{%d}: resetting controller\n", ctrl->cnum);
2563 nvme_reset_ctrl(&ctrl->ctrl);
2569 struct nvme_fc_ctrl *ctrl = op->ctrl;
2577 dev_info(ctrl->ctrl.device,
2580 ctrl->cnum, op->queue->qnum, sqe->common.opcode,
2582 if (__nvme_fc_abort_op(ctrl, op))
2583 nvme_fc_error_recovery(ctrl, "io timeout abort failed");
2594 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2614 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2629 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2637 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2669 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2681 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
2684 if (!nvme_fc_ctrl_get(ctrl))
2728 ret = nvme_fc_map_data(ctrl, op->rq, op);
2731 nvme_fc_ctrl_put(ctrl);
2738 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2747 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2748 &ctrl->rport->remoteport,
2765 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
2768 nvme_fc_unmap_data(ctrl, op->rq, op);
2772 nvme_fc_ctrl_put(ctrl);
2774 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
2790 struct nvme_fc_ctrl *ctrl = queue->ctrl;
2798 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
2799 !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2800 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2824 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2830 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
2834 if (test_bit(FCCTRL_TERMIO, &ctrl->flags))
2837 aen_op = &ctrl->aen_ops[0];
2839 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
2842 dev_err(ctrl->ctrl.device,
2850 struct nvme_fc_ctrl *ctrl = op->ctrl;
2855 nvme_fc_unmap_data(ctrl, rq, op);
2857 nvme_fc_ctrl_put(ctrl);
2862 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
2874 if (ctrl->lport->ops->map_queues)
2875 ctrl->lport->ops->map_queues(&ctrl->lport->localport,
2893 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2895 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2900 ctrl->lport->ops->max_hw_queues);
2901 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2903 dev_info(ctrl->ctrl.device,
2908 ctrl->ctrl.queue_count = nr_io_queues + 1;
2912 nvme_fc_init_io_queues(ctrl);
2914 ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
2917 ctrl->lport->ops->fcprqst_priv_sz));
2921 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2925 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2929 ctrl->ioq_live = true;
2934 nvme_fc_delete_hw_io_queues(ctrl);
2936 nvme_remove_io_tag_set(&ctrl->ctrl);
2937 nvme_fc_free_io_queues(ctrl);
2940 ctrl->ctrl.tagset = NULL;
2946 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2948 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2949 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2954 ctrl->lport->ops->max_hw_queues);
2955 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
2957 dev_info(ctrl->ctrl.device,
2963 dev_info(ctrl->ctrl.device,
2969 ctrl->ctrl.queue_count = nr_io_queues + 1;
2971 if (ctrl->ctrl.queue_count == 1)
2975 dev_info(ctrl->ctrl.device,
2978 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2981 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2985 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
2992 nvme_fc_delete_hw_io_queues(ctrl);
2994 nvme_fc_free_io_queues(ctrl);
3018 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
3020 struct nvme_fc_rport *rport = ctrl->rport;
3023 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
3034 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
3036 struct nvme_fc_rport *rport = ctrl->rport;
3040 /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */
3057 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
3059 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
3065 ++ctrl->ctrl.nr_reconnects;
3067 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
3070 if (nvme_fc_ctlr_active_on_rport(ctrl))
3073 dev_info(ctrl->ctrl.device,
3076 ctrl->cnum, ctrl->lport->localport.port_name,
3077 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);
3079 clear_bit(ASSOC_FAILED, &ctrl->flags);
3085 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
3090 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
3095 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
3099 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
3104 * todo:- add code to check if ctrl attributes changed from
3108 ret = nvme_enable_ctrl(&ctrl->ctrl);
3109 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
3114 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3115 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
3118 nvme_unquiesce_admin_queue(&ctrl->ctrl);
3120 ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
3121 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
3129 if (ctrl->ctrl.icdoff) {
3130 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
3131 ctrl->ctrl.icdoff);
3137 if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) {
3138 dev_err(ctrl->ctrl.device,
3144 if (opts->queue_size > ctrl->ctrl.maxcmd) {
3146 dev_warn(ctrl->ctrl.device,
3147 "queue_size %zu > ctrl maxcmd %u, reducing "
3149 opts->queue_size, ctrl->ctrl.maxcmd);
3150 opts->queue_size = ctrl->ctrl.maxcmd;
3151 ctrl->ctrl.sqsize = opts->queue_size - 1;
3154 ret = nvme_fc_init_aen_ops(ctrl);
3162 if (ctrl->ctrl.queue_count > 1) {
3163 if (!ctrl->ioq_live)
3164 ret = nvme_fc_create_io_queues(ctrl);
3166 ret = nvme_fc_recreate_io_queues(ctrl);
3168 if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
3173 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
3175 ctrl->ctrl.nr_reconnects = 0;
3178 nvme_start_ctrl(&ctrl->ctrl);
3183 nvme_fc_term_aen_ops(ctrl);
3185 dev_warn(ctrl->ctrl.device,
3187 ctrl->cnum, ctrl->association_id, ret);
3189 nvme_fc_xmt_disconnect_assoc(ctrl);
3190 spin_lock_irqsave(&ctrl->lock, flags);
3191 ctrl->association_id = 0;
3192 disls = ctrl->rcv_disconn;
3193 ctrl->rcv_disconn = NULL;
3194 spin_unlock_irqrestore(&ctrl->lock, flags);
3198 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3200 nvme_fc_free_queue(&ctrl->queues[0]);
3201 clear_bit(ASSOC_ACTIVE, &ctrl->flags);
3202 nvme_fc_ctlr_inactive_on_rport(ctrl);
3215 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
3220 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
3223 spin_lock_irqsave(&ctrl->lock, flags);
3224 set_bit(FCCTRL_TERMIO, &ctrl->flags);
3225 ctrl->iocnt = 0;
3226 spin_unlock_irqrestore(&ctrl->lock, flags);
3228 __nvme_fc_abort_outstanding_ios(ctrl, false);
3231 nvme_fc_abort_aen_ops(ctrl);
3234 spin_lock_irq(&ctrl->lock);
3235 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
3236 clear_bit(FCCTRL_TERMIO, &ctrl->flags);
3237 spin_unlock_irq(&ctrl->lock);
3239 nvme_fc_term_aen_ops(ctrl);
3247 if (ctrl->association_id)
3248 nvme_fc_xmt_disconnect_assoc(ctrl);
3250 spin_lock_irqsave(&ctrl->lock, flags);
3251 ctrl->association_id = 0;
3252 disls = ctrl->rcv_disconn;
3253 ctrl->rcv_disconn = NULL;
3254 spin_unlock_irqrestore(&ctrl->lock, flags);
3262 if (ctrl->ctrl.tagset) {
3263 nvme_fc_delete_hw_io_queues(ctrl);
3264 nvme_fc_free_io_queues(ctrl);
3267 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
3268 nvme_fc_free_queue(&ctrl->queues[0]);
3271 nvme_unquiesce_admin_queue(&ctrl->ctrl);
3274 nvme_unquiesce_io_queues(&ctrl->ctrl);
3276 nvme_fc_ctlr_inactive_on_rport(ctrl);
3282 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
3284 cancel_work_sync(&ctrl->ioerr_work);
3285 cancel_delayed_work_sync(&ctrl->connect_work);
3290 nvme_fc_delete_association(ctrl);
3294 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
3296 struct nvme_fc_rport *rport = ctrl->rport;
3298 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
3301 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_CONNECTING)
3305 dev_info(ctrl->ctrl.device,
3307 ctrl->cnum, status);
3313 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
3315 dev_info(ctrl->ctrl.device,
3318 ctrl->cnum, recon_delay / HZ);
3322 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
3326 dev_warn(ctrl->ctrl.device,
3328 ctrl->cnum);
3330 dev_warn(ctrl->ctrl.device,
3333 ctrl->cnum, ctrl->ctrl.nr_reconnects);
3335 dev_warn(ctrl->ctrl.device,
3338 ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
3339 (ctrl->ctrl.opts->max_reconnects *
3340 ctrl->ctrl.opts->reconnect_delay)));
3341 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
3348 struct nvme_fc_ctrl *ctrl =
3349 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
3351 nvme_stop_ctrl(&ctrl->ctrl);
3354 nvme_fc_delete_association(ctrl);
3356 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
3357 dev_err(ctrl->ctrl.device,
3359 "to CONNECTING\n", ctrl->cnum);
3361 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
3362 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3363 dev_err(ctrl->ctrl.device,
3365 "after reset\n", ctrl->cnum);
3367 flush_delayed_work(&ctrl->connect_work);
3370 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
3393 struct nvme_fc_ctrl *ctrl =
3397 ret = nvme_fc_create_association(ctrl);
3399 nvme_fc_reconnect_or_delete(ctrl, ret);
3401 dev_info(ctrl->ctrl.device,
3403 ctrl->cnum);
3429 struct nvme_fc_ctrl *ctrl;
3434 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3435 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
3448 struct nvme_fc_ctrl *ctrl;
3464 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
3465 if (!ctrl) {
3489 ctrl->ctrl.opts = opts;
3490 ctrl->ctrl.nr_reconnects = 0;
3492 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3494 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3495 INIT_LIST_HEAD(&ctrl->ctrl_list);
3496 ctrl->lport = lport;
3497 ctrl->rport = rport;
3498 ctrl->dev = lport->dev;
3499 ctrl->cnum = idx;
3500 ctrl->ioq_live = false;
3501 init_waitqueue_head(&ctrl->ioabort_wait);
3503 get_device(ctrl->dev);
3504 kref_init(&ctrl->ref);
3506 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
3507 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
3508 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
3509 spin_lock_init(&ctrl->lock);
3512 ctrl->ctrl.queue_count = min_t(unsigned int,
3515 ctrl->ctrl.queue_count++; /* +1 for admin queue */
3517 ctrl->ctrl.sqsize = opts->queue_size - 1;
3518 ctrl->ctrl.kato = opts->kato;
3519 ctrl->ctrl.cntlid = 0xffff;
3522 ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
3524 if (!ctrl->queues)
3527 nvme_fc_init_queue(ctrl, 0);
3536 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
3540 /* at this point, teardown path changes to ref counting on nvme ctrl */
3542 ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
3545 ctrl->lport->ops->fcprqst_priv_sz));
3550 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
3553 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
3554 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
3555 dev_err(ctrl->ctrl.device,
3556 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
3560 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
3561 dev_err(ctrl->ctrl.device,
3563 ctrl->cnum);
3567 flush_delayed_work(&ctrl->connect_work);
3569 dev_info(ctrl->ctrl.device,
3570 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
3571 ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
3573 return &ctrl->ctrl;
3576 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
3577 cancel_work_sync(&ctrl->ioerr_work);
3578 cancel_work_sync(&ctrl->ctrl.reset_work);
3579 cancel_delayed_work_sync(&ctrl->connect_work);
3581 ctrl->ctrl.opts = NULL;
3583 /* initiate nvme ctrl ref counting teardown */
3584 nvme_uninit_ctrl(&ctrl->ctrl);
3586 /* Remove core ctrl ref. */
3587 nvme_put_ctrl(&ctrl->ctrl);
3601 kfree(ctrl->queues);
3603 put_device(ctrl->dev);
3604 ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum);
3606 kfree(ctrl);
3683 struct nvme_ctrl *ctrl;
3717 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
3718 if (IS_ERR(ctrl))
3720 return ctrl;
3937 struct nvme_fc_ctrl *ctrl;
3940 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
3941 dev_warn(ctrl->ctrl.device,
3942 "NVME-FC{%d}: transport unloading: deleting ctrl\n",
3943 ctrl->cnum);
3944 nvme_delete_ctrl(&ctrl->ctrl);
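
Taken together, the matches above trace two idioms that recur throughout the FC transport's controller handling: the core struct nvme_ctrl is embedded inside struct nvme_fc_ctrl and recovered via container_of() (to_fc_ctrl), and the transport controller's lifetime is guarded by a get/put reference count (nvme_fc_ctrl_get / nvme_fc_ctrl_put) whose final put triggers nvme_fc_ctrl_free. What follows is a minimal, standalone userspace sketch of those two idioms only, not the kernel code itself; all demo_* names are hypothetical and the refcount is deliberately simplified (non-atomic) for illustration.

/*
 * Standalone sketch of the embed-and-container_of pattern plus a
 * get/put refcount, compilable with a plain C compiler.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_core_ctrl {			/* stands in for struct nvme_ctrl */
	int instance;
};

struct demo_fc_ctrl {			/* stands in for struct nvme_fc_ctrl */
	struct demo_core_ctrl ctrl;	/* embedded core controller */
	int refcount;			/* stands in for the kref */
};

/* mirrors to_fc_ctrl(): map the embedded core ctrl back to its container */
static struct demo_fc_ctrl *to_demo_fc_ctrl(struct demo_core_ctrl *ctrl)
{
	return container_of(ctrl, struct demo_fc_ctrl, ctrl);
}

static void demo_fc_ctrl_free(struct demo_fc_ctrl *ctrl)
{
	printf("freeing fc ctrl %d\n", ctrl->ctrl.instance);
	free(ctrl);
}

/* get fails once teardown has dropped the last reference */
static int demo_fc_ctrl_get(struct demo_fc_ctrl *ctrl)
{
	if (ctrl->refcount == 0)
		return 0;
	ctrl->refcount++;
	return 1;
}

/* the final put frees the controller */
static void demo_fc_ctrl_put(struct demo_fc_ctrl *ctrl)
{
	if (--ctrl->refcount == 0)
		demo_fc_ctrl_free(ctrl);
}

int main(void)
{
	struct demo_fc_ctrl *fc = calloc(1, sizeof(*fc));

	if (!fc)
		return 1;
	fc->ctrl.instance = 0;
	fc->refcount = 1;		/* initial reference at create time */

	/* a path holding only the core pointer recovers the container */
	struct demo_core_ctrl *core = &fc->ctrl;
	printf("container instance: %d\n",
	       to_demo_fc_ctrl(core)->ctrl.instance);

	demo_fc_ctrl_get(fc);		/* e.g. taken before issuing an op */
	demo_fc_ctrl_put(fc);		/* dropped on op completion */
	demo_fc_ctrl_put(fc);		/* final put frees the controller */
	return 0;
}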