Lines matching refs:ctrl (references to ctrl in the NVMe loopback target driver, drivers/nvme/target/loop.c):
37 struct nvme_ctrl ctrl;
42 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
44 return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
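
The driver embeds a struct nvme_ctrl inside its private struct nvme_loop_ctrl and recovers the wrapper with container_of() in to_loop_ctrl(). A minimal userspace sketch of the same idiom follows; the struct bodies are simplified stand-ins, not the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct nvme_ctrl { int instance; };     /* simplified stand-in for the core struct */

struct nvme_loop_ctrl {                 /* driver-private wrapper                  */
        struct nvme_ctrl ctrl;          /* embedded member the core passes around  */
        int private_state;
};

static struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

int main(void)
{
        struct nvme_loop_ctrl loop = { .ctrl = { .instance = 7 }, .private_state = 42 };
        struct nvme_ctrl *generic = &loop.ctrl;                 /* what the NVMe core sees */

        printf("%d\n", to_loop_ctrl(generic)->private_state);   /* prints 42 */
        return 0;
}

Because the generic core only ever holds a pointer to the embedded member, the driver can attach arbitrary private state around it without the core needing to know.
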
54 struct nvme_loop_ctrl *ctrl;
65 static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
71 return queue - queue->ctrl->queues;
87 return queue->ctrl->admin_tag_set.tags[queue_idx];
88 return queue->ctrl->tag_set.tags[queue_idx - 1];
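
nvme_loop_queue_idx() derives the queue index by pointer arithmetic against the ctrl->queues array, and the tag-set lookup above mirrors the admin/I-O split: index 0 uses admin_tag_set.tags[0], while I/O queue i uses tag_set.tags[i - 1]. A self-contained sketch of that indexing, with simplified stand-in types:

#include <assert.h>
#include <stddef.h>

struct loop_ctrl;                                    /* simplified stand-in types  */
struct loop_queue { struct loop_ctrl *ctrl; };
struct loop_ctrl  { struct loop_queue queues[4]; };  /* slot 0 is the admin queue  */

/* Pointer difference against the queues[] array yields the queue index. */
static size_t queue_idx(struct loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

int main(void)
{
        struct loop_ctrl c;
        size_t i;

        for (i = 0; i < 4; i++)
                c.queues[i].ctrl = &c;

        assert(queue_idx(&c.queues[0]) == 0);   /* admin queue -> admin_tag_set.tags[0] */
        assert(queue_idx(&c.queues[3]) == 3);   /* I/O queue 3 -> tag_set.tags[3 - 1]   */
        return 0;
}
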
105 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
112 dev_err(queue->ctrl->ctrl.device,
141 if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
142 return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
150 iod->req.port = queue->ctrl->port;
175 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
176 struct nvme_loop_queue *queue = &ctrl->queues[0];
177 struct nvme_loop_iod *iod = &ctrl->async_event_iod;
186 dev_err(ctrl->ctrl.device, "failed async event work\n");
193 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
198 iod->queue = &ctrl->queues[queue_idx];
207 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(set->driver_data);
210 nvme_req(req)->ctrl = &ctrl->ctrl;
212 return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
213 (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
221 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
222 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];
224 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
241 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);
242 struct nvme_loop_queue *queue = &ctrl->queues[0];
264 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
266 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
268 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
269 nvme_remove_admin_tag_set(&ctrl->ctrl);
274 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);
276 if (list_empty(&ctrl->list))
280 list_del(&ctrl->list);
285 kfree(ctrl->queues);
288 kfree(ctrl);
291 static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
295 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
296 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
297 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
299 ctrl->ctrl.queue_count = 1;
302 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
304 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
309 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
313 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);
316 ctrl->queues[i].ctrl = ctrl;
317 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
321 ctrl->ctrl.queue_count++;
327 nvme_loop_destroy_io_queues(ctrl);
331 static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
335 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
336 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
339 set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
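
Both nvme_loop_init_io_queues() and nvme_loop_connect_io_queues() loop from index 1 because ctrl->ctrl.queue_count includes the admin queue in slot 0; only slots 1..queue_count-1 are I/O queues. A hedged sketch of that loop shape; the init/connect/destroy helpers below are illustrative placeholders for the real nvmet_sq_init(), nvmf_connect_io_queue() and nvmet_sq_destroy() calls:

#include <stdio.h>

static int  init_one_queue(int idx)    { printf("init queue %d\n", idx);    return 0; }
static int  connect_one_queue(int idx) { printf("connect queue %d\n", idx); return 0; }
static void destroy_one_queue(int idx) { printf("destroy queue %d\n", idx); }

static int init_io_queues(int *queue_count, int nr_io_queues)
{
        int i, ret;

        for (i = 1; i <= nr_io_queues; i++) {
                ret = init_one_queue(i);
                if (ret)
                        goto out_destroy;
                (*queue_count)++;        /* mirrors ctrl->ctrl.queue_count++ */
        }
        return 0;

out_destroy:
        while (--i >= 1)
                destroy_one_queue(i);
        *queue_count = 1;                /* only the admin queue is left */
        return ret;
}

int main(void)
{
        int queue_count = 1;             /* slot 0: the admin queue */
        int i;

        if (init_io_queues(&queue_count, 3))
                return 1;

        /* The connect loop uses the same 1-based convention. */
        for (i = 1; i < queue_count; i++)
                connect_one_queue(i);
        return 0;
}
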
345 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
349 ctrl->queues[0].ctrl = ctrl;
350 error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
353 ctrl->ctrl.queue_count = 1;
355 error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
363 clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
365 error = nvmf_connect_admin_queue(&ctrl->ctrl);
369 set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
371 error = nvme_enable_ctrl(&ctrl->ctrl);
375 ctrl->ctrl.max_hw_sectors =
378 nvme_unquiesce_admin_queue(&ctrl->ctrl);
380 error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
387 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
388 nvme_remove_admin_tag_set(&ctrl->ctrl);
390 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
394 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
396 if (ctrl->ctrl.queue_count > 1) {
397 nvme_quiesce_io_queues(&ctrl->ctrl);
398 nvme_cancel_tagset(&ctrl->ctrl);
399 nvme_loop_destroy_io_queues(ctrl);
402 nvme_quiesce_admin_queue(&ctrl->ctrl);
403 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
404 nvme_disable_ctrl(&ctrl->ctrl, true);
406 nvme_cancel_admin_tagset(&ctrl->ctrl);
407 nvme_loop_destroy_admin_queue(ctrl);
410 static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
412 nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
417 struct nvme_loop_ctrl *ctrl;
420 list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
421 if (ctrl->ctrl.cntlid == nctrl->cntlid)
422 nvme_delete_ctrl(&ctrl->ctrl);
429 struct nvme_loop_ctrl *ctrl =
430 container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
433 nvme_stop_ctrl(&ctrl->ctrl);
434 nvme_loop_shutdown_ctrl(ctrl);
436 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
437 if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
438 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
439 /* state change failure for non-deleted ctrl? */
444 ret = nvme_loop_configure_admin_queue(ctrl);
448 ret = nvme_loop_init_io_queues(ctrl);
452 ret = nvme_loop_connect_io_queues(ctrl);
456 blk_mq_update_nr_hw_queues(&ctrl->tag_set,
457 ctrl->ctrl.queue_count - 1);
459 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
462 nvme_start_ctrl(&ctrl->ctrl);
467 nvme_loop_destroy_io_queues(ctrl);
469 nvme_loop_destroy_admin_queue(ctrl);
471 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
472 nvme_uninit_ctrl(&ctrl->ctrl);
488 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
492 ret = nvme_loop_init_io_queues(ctrl);
496 ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
503 ret = nvme_loop_connect_io_queues(ctrl);
510 nvme_remove_io_tag_set(&ctrl->ctrl);
512 nvme_loop_destroy_io_queues(ctrl);
516 static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
523 if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
524 strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
536 struct nvme_loop_ctrl *ctrl;
539 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
540 if (!ctrl)
542 ctrl->ctrl.opts = opts;
543 INIT_LIST_HEAD(&ctrl->list);
545 INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);
547 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
550 kfree(ctrl);
554 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
559 ctrl->ctrl.kato = opts->kato;
560 ctrl->port = nvme_loop_find_port(&ctrl->ctrl);
562 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
564 if (!ctrl->queues)
567 ret = nvme_loop_configure_admin_queue(ctrl);
571 if (opts->queue_size > ctrl->ctrl.maxcmd) {
573 dev_warn(ctrl->ctrl.device,
574 "queue_size %zu > ctrl maxcmd %u, clamping down\n",
575 opts->queue_size, ctrl->ctrl.maxcmd);
576 opts->queue_size = ctrl->ctrl.maxcmd;
578 ctrl->ctrl.sqsize = opts->queue_size - 1;
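
In the lines above, the requested queue_size is clamped to the controller's reported maxcmd, and sqsize is then stored as a zero-based value, which is how the NVMe specification encodes queue sizes (a queue of N entries is written as N - 1). A small numeric sketch of that conversion, with made-up example values:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        unsigned int maxcmd = 128;      /* example value a controller might report   */
        size_t queue_size = 256;        /* example value requested via connect opts  */
        unsigned int sqsize;

        /* Clamp the request to what the controller actually supports. */
        if (queue_size > maxcmd) {
                printf("queue_size %zu > ctrl maxcmd %u, clamping down\n",
                       queue_size, maxcmd);
                queue_size = maxcmd;
        }

        /* NVMe queue sizes are zero-based: a 128-entry queue is encoded as 127. */
        sqsize = queue_size - 1;
        printf("sqsize = %u\n", sqsize);
        return 0;
}
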
581 ret = nvme_loop_create_io_queues(ctrl);
586 nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);
588 dev_info(ctrl->ctrl.device,
589 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);
591 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
595 list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
598 nvme_start_ctrl(&ctrl->ctrl);
600 return &ctrl->ctrl;
603 nvme_loop_destroy_admin_queue(ctrl);
605 kfree(ctrl->queues);
607 nvme_uninit_ctrl(&ctrl->ctrl);
608 nvme_put_ctrl(&ctrl->ctrl);
671 struct nvme_loop_ctrl *ctrl, *next;
677 list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
678 nvme_delete_ctrl(&ctrl->ctrl);
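
Module unload walks the global controller list with the _safe list iterator because entries may be unlinked from the list while the walk is still in progress. A userspace sketch of the same delete-while-iterating pattern, using a minimal re-implementation of the kernel's list_head helpers for illustration only:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal userspace re-implementation of the kernel's list_head helpers. */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

struct loop_ctrl {                      /* stand-in for struct nvme_loop_ctrl */
        int id;
        struct list_head list;
};

static struct list_head ctrl_list = LIST_HEAD_INIT(ctrl_list);

int main(void)
{
        struct list_head *pos, *next;
        int i;

        for (i = 0; i < 3; i++) {
                struct loop_ctrl *c = malloc(sizeof(*c));
                c->id = i;
                list_add_tail(&c->list, &ctrl_list);
        }

        /* "Safe" walk: remember the next node before the current entry is
         * unlinked and freed; this is what list_for_each_entry_safe() hides. */
        for (pos = ctrl_list.next; pos != &ctrl_list; pos = next) {
                struct loop_ctrl *c = list_entry(pos, struct loop_ctrl, list);

                next = pos->next;
                printf("deleting ctrl %d\n", c->id);
                list_del(&c->list);
                free(c);
        }
        return 0;
}
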