Lines Matching refs: ctrl
135 struct nvme_tcp_ctrl *ctrl;
163 struct nvme_ctrl ctrl;
178 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
180 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
185 return queue - queue->ctrl->queues;
193 return queue->ctrl->admin_tag_set.tags[queue_idx];
194 return queue->ctrl->tag_set.tags[queue_idx - 1];
214 return req == &req->queue->ctrl->async_req;
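The matches at 135-214 show the embedded-controller pattern: struct nvme_tcp_ctrl embeds a struct nvme_ctrl as its ctrl member, and to_tcp_ctrl() uses container_of() to get back from the generic pointer to the transport-specific one. A minimal stand-alone sketch of the same pattern (the struct and helper names here are illustrative, not the driver's):

    #include <stddef.h>

    /* simplified stand-ins for the driver structures */
    struct generic_ctrl { int state; };          /* plays the role of struct nvme_ctrl */

    struct tcp_ctrl {                            /* plays the role of struct nvme_tcp_ctrl */
            void *queues;
            struct generic_ctrl ctrl;            /* embedded generic controller */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* same recovery step as to_tcp_ctrl(): from &outer->ctrl back to outer */
    static inline struct tcp_ctrl *to_tcp(struct generic_ctrl *c)
    {
            return container_of(c, struct tcp_ctrl, ctrl);
    }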
409 dev_err(queue->ctrl->ctrl.device,
419 dev_err(queue->ctrl->ctrl.device,
438 dev_err(queue->ctrl->ctrl.device,
460 struct nvme_tcp_ctrl *ctrl = set->driver_data;
462 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
463 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
473 nvme_req(rq)->ctrl = &ctrl->ctrl;
481 struct nvme_tcp_ctrl *ctrl = data;
482 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
491 struct nvme_tcp_ctrl *ctrl = data;
492 struct nvme_tcp_queue *queue = &ctrl->queues[0];
515 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
517 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
520 dev_warn(ctrl->device, "starting error recovery\n");
521 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
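The matches at 515-521 cover nearly the whole error-recovery entry point; pieced together (the braces and the early return are assumed, they are not in the listing), the function plausibly reads:

    static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
    {
            /* only one recovery may run at a time; if the state change loses
             * the race, someone else already owns the transition */
            if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
                    return;

            dev_warn(ctrl->device, "starting error recovery\n");
            queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
    }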
531 dev_err(queue->ctrl->ctrl.device,
534 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
552 dev_err(queue->ctrl->ctrl.device,
559 dev_err(queue->ctrl->ctrl.device,
569 dev_err(queue->ctrl->ctrl.device,
572 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
593 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
614 dev_err(queue->ctrl->ctrl.device,
621 dev_err(queue->ctrl->ctrl.device,
629 dev_err(queue->ctrl->ctrl.device,
663 dev_err(queue->ctrl->ctrl.device,
726 dev_err(queue->ctrl->ctrl.device,
763 dev_err(queue->ctrl->ctrl.device,
783 dev_err(queue->ctrl->ctrl.device,
830 dev_err(queue->ctrl->ctrl.device,
871 dev_err(queue->ctrl->ctrl.device,
874 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
922 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
925 dev_info(queue->ctrl->ctrl.device,
945 nvme_complete_async_event(&req->queue->ctrl->ctrl,
1147 dev_err(queue->ctrl->ctrl.device,
1239 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1241 struct nvme_tcp_request *async = &ctrl->async_req;
1246 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1248 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1249 struct nvme_tcp_request *async = &ctrl->async_req;
1258 async->queue = &ctrl->queues[0];
1264 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1265 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1345 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1355 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1383 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1387 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1392 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1397 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1398 ctrl->io_queues[HCTX_TYPE_READ];
1403 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1409 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1410 ctrl->io_queues[HCTX_TYPE_READ] +
1411 ctrl->io_queues[HCTX_TYPE_POLL];
1416 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1423 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1425 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1426 ctrl->io_queues[HCTX_TYPE_READ] - 1;
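Lines 1383-1426 carve the queue-ID space into classes: qid 0 is the admin queue, followed by the default (write) queues, then the read queues, then the poll queues, and the io-cpu helper maps a qid back to its index within its own class. A hedged sketch of that classification (the function name, enum and parameters are illustrative, not from the driver):

    /* illustrative helper mirroring nvme_tcp_default_queue(),
     * nvme_tcp_read_queue() and nvme_tcp_poll_queue(); not driver code */
    enum qclass { QC_ADMIN, QC_DEFAULT, QC_READ, QC_POLL, QC_INVALID };

    static enum qclass classify_qid(int qid, int ndefault, int nread, int npoll)
    {
            if (qid == 0)
                    return QC_ADMIN;                     /* admin queue */
            if (qid < 1 + ndefault)
                    return QC_DEFAULT;                   /* default/write queues */
            if (qid < 1 + ndefault + nread)
                    return QC_READ;                      /* read queues */
            if (qid < 1 + ndefault + nread + npoll)
                    return QC_POLL;                      /* poll queues */
            return QC_INVALID;
    }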
1433 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1434 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1438 queue->ctrl = ctrl;
1451 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1494 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1495 sizeof(ctrl->src_addr));
1526 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1527 sizeof(ctrl->addr), 0);
1578 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1579 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1605 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1606 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1632 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1637 set = &ctrl->admin_tag_set;
1645 set->driver_data = ctrl;
1649 set = &ctrl->tag_set;
1657 set->driver_data = ctrl;
1670 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1672 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1673 cancel_work_sync(&ctrl->async_event_work);
1674 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1675 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1678 nvme_tcp_free_queue(ctrl, 0);
1681 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1685 for (i = 1; i < ctrl->queue_count; i++)
1686 nvme_tcp_free_queue(ctrl, i);
1689 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1693 for (i = 1; i < ctrl->queue_count; i++)
1694 nvme_tcp_stop_queue(ctrl, i);
1697 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1701 for (i = 1; i < ctrl->queue_count; i++) {
1702 ret = nvme_tcp_start_queue(ctrl, i);
1711 nvme_tcp_stop_queue(ctrl, i);
1715 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1719 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1723 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1730 nvme_tcp_free_queue(ctrl, 0);
1734 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1738 for (i = 1; i < ctrl->queue_count; i++) {
1739 ret = nvme_tcp_alloc_queue(ctrl, i,
1740 ctrl->sqsize + 1);
1749 nvme_tcp_free_queue(ctrl, i);
1754 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1758 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1759 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1760 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1768 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1777 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1778 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1779 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1781 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1788 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1790 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1795 ctrl->io_queues[HCTX_TYPE_POLL] =
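Lines 1754-1795 size and then split the I/O queue budget: the request is the sum of the per-type option counts, each capped at num_online_cpus(), and nvme_tcp_set_io_queues() then carves the granted total into READ, DEFAULT and POLL slices. A worked example under assumed option values (not taken from the listing): with 16 online CPUs, nr_io_queues=8, nr_write_queues=4 and nr_poll_queues=2, the driver asks for 8 + 4 + 2 = 14 queues; if all are granted, io_queues[HCTX_TYPE_READ] = 8, io_queues[HCTX_TYPE_DEFAULT] = 4 and io_queues[HCTX_TYPE_POLL] = 2, giving ctrl->queue_count = 15 once the admin queue is counted.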
1800 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1805 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1806 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1811 dev_err(ctrl->device,
1816 ctrl->queue_count = nr_io_queues + 1;
1817 dev_info(ctrl->device,
1820 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1822 return __nvme_tcp_alloc_io_queues(ctrl);
1825 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1827 nvme_tcp_stop_io_queues(ctrl);
1829 blk_cleanup_queue(ctrl->connect_q);
1830 blk_mq_free_tag_set(ctrl->tagset);
1832 nvme_tcp_free_io_queues(ctrl);
1835 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1839 ret = nvme_tcp_alloc_io_queues(ctrl);
1844 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1845 if (IS_ERR(ctrl->tagset)) {
1846 ret = PTR_ERR(ctrl->tagset);
1850 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1851 if (IS_ERR(ctrl->connect_q)) {
1852 ret = PTR_ERR(ctrl->connect_q);
1857 ret = nvme_tcp_start_io_queues(ctrl);
1862 nvme_start_freeze(ctrl);
1863 nvme_start_queues(ctrl);
1864 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1871 nvme_unfreeze(ctrl);
1874 blk_mq_update_nr_hw_queues(ctrl->tagset,
1875 ctrl->queue_count - 1);
1876 nvme_unfreeze(ctrl);
1882 nvme_stop_queues(ctrl);
1883 nvme_sync_io_queues(ctrl);
1884 nvme_tcp_stop_io_queues(ctrl);
1886 nvme_cancel_tagset(ctrl);
1888 blk_cleanup_queue(ctrl->connect_q);
1891 blk_mq_free_tag_set(ctrl->tagset);
1893 nvme_tcp_free_io_queues(ctrl);
1897 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1899 nvme_tcp_stop_queue(ctrl, 0);
1901 blk_cleanup_queue(ctrl->admin_q);
1902 blk_cleanup_queue(ctrl->fabrics_q);
1903 blk_mq_free_tag_set(ctrl->admin_tagset);
1905 nvme_tcp_free_admin_queue(ctrl);
1908 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1912 error = nvme_tcp_alloc_admin_queue(ctrl);
1917 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1918 if (IS_ERR(ctrl->admin_tagset)) {
1919 error = PTR_ERR(ctrl->admin_tagset);
1923 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset);
1924 if (IS_ERR(ctrl->fabrics_q)) {
1925 error = PTR_ERR(ctrl->fabrics_q);
1929 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1930 if (IS_ERR(ctrl->admin_q)) {
1931 error = PTR_ERR(ctrl->admin_q);
1936 error = nvme_tcp_start_queue(ctrl, 0);
1940 error = nvme_enable_ctrl(ctrl);
1944 nvme_start_admin_queue(ctrl);
1946 error = nvme_init_identify(ctrl);
1953 nvme_stop_admin_queue(ctrl);
1954 blk_sync_queue(ctrl->admin_q);
1956 nvme_tcp_stop_queue(ctrl, 0);
1957 nvme_cancel_admin_tagset(ctrl);
1960 blk_cleanup_queue(ctrl->admin_q);
1963 blk_cleanup_queue(ctrl->fabrics_q);
1966 blk_mq_free_tag_set(ctrl->admin_tagset);
1968 nvme_tcp_free_admin_queue(ctrl);
1972 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1975 nvme_stop_admin_queue(ctrl);
1976 blk_sync_queue(ctrl->admin_q);
1977 nvme_tcp_stop_queue(ctrl, 0);
1978 if (ctrl->admin_tagset) {
1979 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1980 nvme_cancel_request, ctrl);
1981 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
1984 nvme_start_admin_queue(ctrl);
1985 nvme_tcp_destroy_admin_queue(ctrl, remove);
1988 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1991 if (ctrl->queue_count <= 1)
1993 nvme_stop_admin_queue(ctrl);
1994 nvme_stop_queues(ctrl);
1995 nvme_sync_io_queues(ctrl);
1996 nvme_tcp_stop_io_queues(ctrl);
1997 if (ctrl->tagset) {
1998 blk_mq_tagset_busy_iter(ctrl->tagset,
1999 nvme_cancel_request, ctrl);
2000 blk_mq_tagset_wait_completed_request(ctrl->tagset);
2003 nvme_start_queues(ctrl);
2004 nvme_tcp_destroy_io_queues(ctrl, remove);
2007 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2010 if (ctrl->state != NVME_CTRL_CONNECTING) {
2011 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2012 ctrl->state == NVME_CTRL_LIVE);
2016 if (nvmf_should_reconnect(ctrl)) {
2017 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2018 ctrl->opts->reconnect_delay);
2019 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2020 ctrl->opts->reconnect_delay * HZ);
2022 dev_info(ctrl->device, "Removing controller...\n");
2023 nvme_delete_ctrl(ctrl);
2027 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2029 struct nvmf_ctrl_options *opts = ctrl->opts;
2032 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2036 if (ctrl->icdoff) {
2037 dev_err(ctrl->device, "icdoff is not supported!\n");
2041 if (opts->queue_size > ctrl->sqsize + 1)
2042 dev_warn(ctrl->device,
2043 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2044 opts->queue_size, ctrl->sqsize + 1);
2046 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2047 dev_warn(ctrl->device,
2048 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2049 ctrl->sqsize + 1, ctrl->maxcmd);
2050 ctrl->sqsize = ctrl->maxcmd - 1;
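Lines 2036-2050 sanity-check the connected controller: icdoff is rejected outright, and the queue depth is clamped from both sides. A worked example with assumed numbers (not from the listing): if opts->queue_size is 128 but the controller reports sqsize = 63 (a 64-entry queue), the warning at 2042 fires and I/O is effectively limited to 64 entries; and if maxcmd were 32, line 2050 would drop sqsize to 31 so that sqsize + 1 never exceeds maxcmd.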
2053 if (ctrl->queue_count > 1) {
2054 ret = nvme_tcp_configure_io_queues(ctrl, new);
2059 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2061 * state change failure is ok if we started ctrl delete,
2065 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2066 ctrl->state != NVME_CTRL_DELETING_NOIO);
2072 nvme_start_ctrl(ctrl);
2076 if (ctrl->queue_count > 1) {
2077 nvme_stop_queues(ctrl);
2078 nvme_sync_io_queues(ctrl);
2079 nvme_tcp_stop_io_queues(ctrl);
2080 nvme_cancel_tagset(ctrl);
2081 nvme_tcp_destroy_io_queues(ctrl, new);
2084 nvme_stop_admin_queue(ctrl);
2085 blk_sync_queue(ctrl->admin_q);
2086 nvme_tcp_stop_queue(ctrl, 0);
2087 nvme_cancel_admin_tagset(ctrl);
2088 nvme_tcp_destroy_admin_queue(ctrl, new);
2096 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2098 ++ctrl->nr_reconnects;
2100 if (nvme_tcp_setup_ctrl(ctrl, false))
2103 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2104 ctrl->nr_reconnects);
2106 ctrl->nr_reconnects = 0;
2111 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2112 ctrl->nr_reconnects);
2113 nvme_tcp_reconnect_or_remove(ctrl);
2120 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2122 nvme_stop_keep_alive(ctrl);
2123 flush_work(&ctrl->async_event_work);
2124 nvme_tcp_teardown_io_queues(ctrl, false);
2126 nvme_start_queues(ctrl);
2127 nvme_tcp_teardown_admin_queue(ctrl, false);
2128 nvme_start_admin_queue(ctrl);
2130 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2131 /* state change failure is ok if we started ctrl delete */
2132 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2133 ctrl->state != NVME_CTRL_DELETING_NOIO);
2137 nvme_tcp_reconnect_or_remove(ctrl);
2140 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2142 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2143 nvme_stop_admin_queue(ctrl);
2145 nvme_shutdown_ctrl(ctrl);
2147 nvme_disable_ctrl(ctrl);
2148 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2151 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2153 nvme_tcp_teardown_ctrl(ctrl, true);
2158 struct nvme_ctrl *ctrl =
2161 nvme_stop_ctrl(ctrl);
2162 nvme_tcp_teardown_ctrl(ctrl, false);
2164 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2165 /* state change failure is ok if we started ctrl delete */
2166 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2167 ctrl->state != NVME_CTRL_DELETING_NOIO);
2171 if (nvme_tcp_setup_ctrl(ctrl, false))
2177 ++ctrl->nr_reconnects;
2178 nvme_tcp_reconnect_or_remove(ctrl);
2181 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2183 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
2184 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2189 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2191 if (list_empty(&ctrl->list))
2195 list_del(&ctrl->list);
2200 kfree(ctrl->queues);
2201 kfree(ctrl);
2219 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2237 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2238 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2239 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2255 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2256 ctrl->async_req.offset = 0;
2257 ctrl->async_req.curr_bio = NULL;
2258 ctrl->async_req.data_len = 0;
2260 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2266 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2268 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2279 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2282 dev_warn(ctrl->device,
2286 if (ctrl->state != NVME_CTRL_LIVE) {
2291 * - ctrl disable/shutdown fabrics requests
2308 nvme_tcp_error_recovery(ctrl);
2376 dev_err(queue->ctrl->ctrl.device,
2402 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2403 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2418 struct nvme_tcp_ctrl *ctrl = set->driver_data;
2419 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2421 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2424 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2427 ctrl->io_queues[HCTX_TYPE_READ];
2429 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2433 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2436 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2442 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2445 ctrl->io_queues[HCTX_TYPE_POLL];
2447 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2448 ctrl->io_queues[HCTX_TYPE_READ];
2452 dev_info(ctrl->ctrl.device,
2454 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2455 ctrl->io_queues[HCTX_TYPE_READ],
2456 ctrl->io_queues[HCTX_TYPE_POLL]);
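Lines 2418-2456 build the blk-mq queue maps from the same per-type counts. With assumed counts io_queues[HCTX_TYPE_DEFAULT] = 4, io_queues[HCTX_TYPE_READ] = 8 and io_queues[HCTX_TYPE_POLL] = 2 (illustrative values only), the default map covers hardware queues 0-3 (offset 0), the read map covers queues 4-11 (offset 4), and the poll map covers queues 12-13 (offset 4 + 8 = 12); when no separate write queues were requested, the default and read maps instead share offset 0.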
2515 struct nvme_tcp_ctrl *ctrl;
2519 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2520 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2532 struct nvme_tcp_ctrl *ctrl;
2535 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2536 if (!ctrl)
2539 INIT_LIST_HEAD(&ctrl->list);
2540 ctrl->ctrl.opts = opts;
2541 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2543 ctrl->ctrl.sqsize = opts->queue_size - 1;
2544 ctrl->ctrl.kato = opts->kato;
2546 INIT_DELAYED_WORK(&ctrl->connect_work,
2548 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2549 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2562 opts->traddr, opts->trsvcid, &ctrl->addr);
2571 opts->host_traddr, NULL, &ctrl->src_addr);
2584 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2586 if (!ctrl->queues) {
2591 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2595 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2601 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2605 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2606 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2609 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2612 return &ctrl->ctrl;
2615 nvme_uninit_ctrl(&ctrl->ctrl);
2616 nvme_put_ctrl(&ctrl->ctrl);
2621 kfree(ctrl->queues);
2623 kfree(ctrl);
2652 struct nvme_tcp_ctrl *ctrl;
2657 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2658 nvme_delete_ctrl(&ctrl->ctrl);