Lines Matching refs:ctrl

37 struct nvme_ctrl ctrl;
42 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
44 return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
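Note: lines 42-44 work because struct nvme_loop_ctrl embeds a struct nvme_ctrl member named ctrl (line 37), so the driver-private controller can be recovered from the generic pointer with container_of(). The following is a minimal user-space sketch of that same pattern, using made-up struct names (inner_ctrl, outer_ctrl) and a hand-rolled container_of_sketch macro; it is an illustration of the idiom, not the kernel implementation.

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: the real driver embeds struct nvme_ctrl
     * inside struct nvme_loop_ctrl in the same way. */
    struct inner_ctrl { int cntlid; };

    struct outer_ctrl {
        int private_state;
        struct inner_ctrl ctrl;   /* embedded member, like line 37 */
    };

    /* Same idea as the kernel's container_of(): subtract the member's
     * offset from the member pointer to get the enclosing structure. */
    #define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct outer_ctrl *to_outer_ctrl(struct inner_ctrl *ctrl)
    {
        return container_of_sketch(ctrl, struct outer_ctrl, ctrl);
    }

    int main(void)
    {
        struct outer_ctrl o = { .private_state = 7, .ctrl = { .cntlid = 1 } };
        struct inner_ctrl *generic = &o.ctrl;

        /* Recover the outer structure from the embedded member pointer. */
        printf("%d\n", to_outer_ctrl(generic)->private_state); /* prints 7 */
        return 0;
    }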
54 struct nvme_loop_ctrl *ctrl;
65 static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
71 return queue - queue->ctrl->queues;
87 return queue->ctrl->admin_tag_set.tags[queue_idx];
88 return queue->ctrl->tag_set.tags[queue_idx - 1];
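Note: lines 87-88 pick a tag set by queue index: queue 0 is the admin queue and uses admin_tag_set, while I/O queue i uses tag_set.tags[i - 1], since the I/O tag set has no slot for the admin queue. A small sketch of that off-by-one mapping, with hypothetical plain arrays standing in for the real blk-mq tag sets:

    #include <assert.h>

    #define NR_IO_QUEUES 4

    /* Hypothetical tag arrays: one admin entry plus NR_IO_QUEUES I/O entries. */
    static int admin_tags[1]         = { 100 };
    static int io_tags[NR_IO_QUEUES] = { 200, 201, 202, 203 };

    /* Queue index 0 is the admin queue; I/O queue i maps to slot i - 1,
     * mirroring the selection on lines 87-88. */
    static int *tags_for_queue(unsigned int queue_idx)
    {
        if (queue_idx == 0)
            return &admin_tags[0];
        return &io_tags[queue_idx - 1];
    }

    int main(void)
    {
        assert(*tags_for_queue(0) == 100);  /* admin queue */
        assert(*tags_for_queue(1) == 200);  /* first I/O queue */
        assert(*tags_for_queue(4) == 203);  /* last I/O queue */
        return 0;
    }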
105 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
112 dev_err(queue->ctrl->ctrl.device,
141 if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
142 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
150 iod->req.port = queue->ctrl->port;
175 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
176 struct nvme_loop_queue *queue = &ctrl->queues[0];
177 struct nvme_loop_iod *iod = &ctrl->async_event_iod;
186 dev_err(ctrl->ctrl.device, "failed async event work\n");
193 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
198 iod->queue = &ctrl->queues[queue_idx];
207 struct nvme_loop_ctrl *ctrl = set->driver_data;
209 nvme_req(req)->ctrl = &ctrl->ctrl;
210 return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
211 (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
219 struct nvme_loop_ctrl *ctrl = data;
220 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
222 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
239 struct nvme_loop_ctrl *ctrl = data;
240 struct nvme_loop_queue *queue = &ctrl->queues[0];
262 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
264 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
266 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
267 blk_cleanup_queue(ctrl->ctrl.admin_q);
268 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
269 blk_mq_free_tag_set(&ctrl->admin_tag_set);
274 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
276 if (list_empty(&ctrl->list))
280 list_del(&ctrl->list);
284 blk_cleanup_queue(ctrl->ctrl.connect_q);
285 blk_mq_free_tag_set(&ctrl->tag_set);
287 kfree(ctrl->queues);
290 kfree(ctrl);
293 static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
297 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
298 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
299 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
301 ctrl->ctrl.queue_count = 1;
304 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
306 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
311 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
315 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
318 ctrl->queues[i].ctrl = ctrl;
319 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
323 ctrl->ctrl.queue_count++;
329 nvme_loop_destroy_io_queues(ctrl);
333 static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
337 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
338 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
341 set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
347 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
351 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
352 ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
353 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
354 ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
355 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
356 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
358 ctrl->admin_tag_set.driver_data = ctrl;
359 ctrl->admin_tag_set.nr_hw_queues = 1;
360 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
361 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
363 ctrl->queues[0].ctrl = ctrl;
364 error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
367 ctrl->ctrl.queue_count = 1;
369 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
372 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
374 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
375 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
376 error = PTR_ERR(ctrl->ctrl.fabrics_q);
380 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
381 if (IS_ERR(ctrl->ctrl.admin_q)) {
382 error = PTR_ERR(ctrl->ctrl.admin_q);
386 clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
388 error = nvmf_connect_admin_queue(&ctrl->ctrl);
392 set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
394 error = nvme_enable_ctrl(&ctrl->ctrl);
398 ctrl->ctrl.max_hw_sectors =
401 nvme_start_admin_queue(&ctrl->ctrl);
403 error = nvme_init_identify(&ctrl->ctrl);
410 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
411 blk_cleanup_queue(ctrl->ctrl.admin_q);
413 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
415 blk_mq_free_tag_set(&ctrl->admin_tag_set);
417 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
421 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
423 if (ctrl->ctrl.queue_count > 1) {
424 nvme_stop_queues(&ctrl->ctrl);
425 blk_mq_tagset_busy_iter(&ctrl->tag_set,
426 nvme_cancel_request, &ctrl->ctrl);
427 blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
428 nvme_loop_destroy_io_queues(ctrl);
431 nvme_stop_admin_queue(&ctrl->ctrl);
432 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
433 nvme_shutdown_ctrl(&ctrl->ctrl);
435 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
436 nvme_cancel_request, &ctrl->ctrl);
437 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
438 nvme_loop_destroy_admin_queue(ctrl);
441 static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
443 nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
448 struct nvme_loop_ctrl *ctrl;
451 list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
452 if (ctrl->ctrl.cntlid == nctrl->cntlid)
453 nvme_delete_ctrl(&ctrl->ctrl);
460 struct nvme_loop_ctrl *ctrl =
461 container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
464 nvme_stop_ctrl(&ctrl->ctrl);
465 nvme_loop_shutdown_ctrl(ctrl);
467 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
468 if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
469 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
470 /* state change failure for non-deleted ctrl? */
475 ret = nvme_loop_configure_admin_queue(ctrl);
479 ret = nvme_loop_init_io_queues(ctrl);
483 ret = nvme_loop_connect_io_queues(ctrl);
487 blk_mq_update_nr_hw_queues(&ctrl->tag_set,
488 ctrl->ctrl.queue_count - 1);
490 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
493 nvme_start_ctrl(&ctrl->ctrl);
498 nvme_loop_destroy_io_queues(ctrl);
500 nvme_loop_destroy_admin_queue(ctrl);
502 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
503 nvme_uninit_ctrl(&ctrl->ctrl);
519 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
523 ret = nvme_loop_init_io_queues(ctrl);
527 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
528 ctrl->tag_set.ops = &nvme_loop_mq_ops;
529 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
530 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
531 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
532 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
533 ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
535 ctrl->tag_set.driver_data = ctrl;
536 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
537 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
538 ctrl->ctrl.tagset = &ctrl->tag_set;
540 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
544 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
545 if (IS_ERR(ctrl->ctrl.connect_q)) {
546 ret = PTR_ERR(ctrl->ctrl.connect_q);
550 ret = nvme_loop_connect_io_queues(ctrl);
557 blk_cleanup_queue(ctrl->ctrl.connect_q);
559 blk_mq_free_tag_set(&ctrl->tag_set);
561 nvme_loop_destroy_io_queues(ctrl);
565 static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
572 if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
573 strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
585 struct nvme_loop_ctrl *ctrl;
588 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
589 if (!ctrl)
591 ctrl->ctrl.opts = opts;
592 INIT_LIST_HEAD(&ctrl->list);
594 INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
596 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
599 kfree(ctrl);
603 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
608 ctrl->ctrl.sqsize = opts->queue_size - 1;
609 ctrl->ctrl.kato = opts->kato;
610 ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
612 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
614 if (!ctrl->queues)
617 ret = nvme_loop_configure_admin_queue(ctrl);
621 if (opts->queue_size > ctrl->ctrl.maxcmd) {
623 dev_warn(ctrl->ctrl.device,
624 "queue_size %zu > ctrl maxcmd %u, clamping down\n",
625 opts->queue_size, ctrl->ctrl.maxcmd);
626 opts->queue_size = ctrl->ctrl.maxcmd;
630 ret = nvme_loop_create_io_queues(ctrl);
635 nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);
637 dev_info(ctrl->ctrl.device,
638 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
640 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
644 list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
647 nvme_start_ctrl(&ctrl->ctrl);
649 return &ctrl->ctrl;
652 nvme_loop_destroy_admin_queue(ctrl);
654 kfree(ctrl->queues);
656 nvme_uninit_ctrl(&ctrl->ctrl);
657 nvme_put_ctrl(&ctrl->ctrl);
720 struct nvme_loop_ctrl *ctrl, *next;
726 list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
727 nvme_delete_ctrl(&ctrl->ctrl);