Lines matching references to ctrl — NVMe over RDMA host driver (drivers/nvme/host/rdma.c in the Linux kernel). Each entry below is the source line number followed by the whitespace-trimmed matching line.
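The central pattern behind most of these matches appears at lines 125-132 of the listing: the transport-specific struct nvme_rdma_ctrl embeds the generic struct nvme_ctrl, and to_rdma_ctrl() recovers the wrapper with container_of(). The standalone C sketch below only illustrates that embedding/recovery idiom; the struct fields and the local container_of() definition are illustrative stand-ins for a userspace build, not the driver's actual layout.

/*
 * Minimal userspace sketch of the pattern at listing lines 125-132:
 * the RDMA controller embeds the generic controller, and container_of()
 * recovers the outer struct from a pointer to the inner one.
 * Struct contents are trimmed/hypothetical; container_of() is redefined
 * locally so the example compiles outside the kernel.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvme_ctrl {
	int queue_count;		/* admin queue + I/O queues */
};

struct nvme_rdma_ctrl {
	/* ... transport-specific state elided ... */
	struct nvme_ctrl ctrl;		/* embedded generic controller */
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

int main(void)
{
	struct nvme_rdma_ctrl rdma_ctrl = { .ctrl = { .queue_count = 4 } };
	struct nvme_ctrl *nctrl = &rdma_ctrl.ctrl;

	/* Core code passes nvme_ctrl around; the driver converts back. */
	printf("recovered %p, original %p\n",
	       (void *)to_rdma_ctrl(nctrl), (void *)&rdma_ctrl);
	return 0;
}

This embedding is why nearly every callback in the listing either passes &ctrl->ctrl down to the generic NVMe core or converts an incoming nvme_ctrl pointer back with to_rdma_ctrl().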
89 struct nvme_rdma_ctrl *ctrl;
125 struct nvme_ctrl ctrl;
130 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
161 return queue - queue->ctrl->queues;
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
168 queue->ctrl->io_queues[HCTX_TYPE_READ];
300 struct nvme_rdma_ctrl *ctrl = set->driver_data;
302 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
303 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
305 nvme_req(rq)->ctrl = &ctrl->ctrl;
324 struct nvme_rdma_ctrl *ctrl = data;
325 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
327 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
336 struct nvme_rdma_ctrl *ctrl = data;
337 struct nvme_rdma_queue *queue = &ctrl->queues[0];
539 dev_err(queue->ctrl->ctrl.device,
550 dev_err(queue->ctrl->ctrl.device,
575 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
582 queue = &ctrl->queues[idx];
584 queue->ctrl = ctrl;
585 if (idx && ctrl->ctrl.max_integrity_segments)
592 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
601 dev_info(ctrl->ctrl.device,
607 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
608 src_addr = (struct sockaddr *)&ctrl->src_addr;
612 (struct sockaddr *)&ctrl->addr,
615 dev_info(ctrl->ctrl.device,
622 dev_info(ctrl->ctrl.device,
666 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
670 for (i = 1; i < ctrl->ctrl.queue_count; i++)
671 nvme_rdma_free_queue(&ctrl->queues[i]);
674 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
678 for (i = 1; i < ctrl->ctrl.queue_count; i++)
679 nvme_rdma_stop_queue(&ctrl->queues[i]);
682 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
684 struct nvme_rdma_queue *queue = &ctrl->queues[idx];
689 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
691 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
698 dev_info(ctrl->ctrl.device,
704 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
708 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
709 ret = nvme_rdma_start_queue(ctrl, i);
718 nvme_rdma_stop_queue(&ctrl->queues[i]);
722 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
724 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
725 struct ib_device *ibdev = ctrl->device->dev;
737 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
742 dev_err(ctrl->ctrl.device,
747 ctrl->ctrl.queue_count = nr_io_queues + 1;
748 dev_info(ctrl->ctrl.device,
757 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
758 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
759 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
761 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
768 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
770 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
775 ctrl->io_queues[HCTX_TYPE_POLL] =
779 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
780 ret = nvme_rdma_alloc_queue(ctrl, i,
781 ctrl->ctrl.sqsize + 1);
790 nvme_rdma_free_queue(&ctrl->queues[i]);
798 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
803 set = &ctrl->admin_tag_set;
811 set->driver_data = ctrl;
816 set = &ctrl->tag_set;
828 set->driver_data = ctrl;
841 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
845 blk_cleanup_queue(ctrl->ctrl.admin_q);
846 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
847 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
849 if (ctrl->async_event_sqe.data) {
850 cancel_work_sync(&ctrl->ctrl.async_event_work);
851 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
853 ctrl->async_event_sqe.data = NULL;
855 nvme_rdma_free_queue(&ctrl->queues[0]);
858 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
864 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
868 ctrl->device = ctrl->queues[0].device;
869 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
872 if (ctrl->device->dev->attrs.device_cap_flags &
876 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
884 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
890 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
891 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
892 error = PTR_ERR(ctrl->ctrl.admin_tagset);
896 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
897 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
898 error = PTR_ERR(ctrl->ctrl.fabrics_q);
902 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
903 if (IS_ERR(ctrl->ctrl.admin_q)) {
904 error = PTR_ERR(ctrl->ctrl.admin_q);
909 error = nvme_rdma_start_queue(ctrl, 0);
913 error = nvme_enable_ctrl(&ctrl->ctrl);
917 ctrl->ctrl.max_segments = ctrl->max_fr_pages;
918 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
920 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
922 ctrl->ctrl.max_integrity_segments = 0;
924 nvme_start_admin_queue(&ctrl->ctrl);
926 error = nvme_init_identify(&ctrl->ctrl);
933 nvme_stop_admin_queue(&ctrl->ctrl);
934 blk_sync_queue(ctrl->ctrl.admin_q);
936 nvme_rdma_stop_queue(&ctrl->queues[0]);
937 nvme_cancel_admin_tagset(&ctrl->ctrl);
940 blk_cleanup_queue(ctrl->ctrl.admin_q);
943 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
946 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
948 if (ctrl->async_event_sqe.data) {
949 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
951 ctrl->async_event_sqe.data = NULL;
954 nvme_rdma_free_queue(&ctrl->queues[0]);
958 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
962 blk_cleanup_queue(ctrl->ctrl.connect_q);
963 blk_mq_free_tag_set(ctrl->ctrl.tagset);
965 nvme_rdma_free_io_queues(ctrl);
968 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
972 ret = nvme_rdma_alloc_io_queues(ctrl);
977 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
978 if (IS_ERR(ctrl->ctrl.tagset)) {
979 ret = PTR_ERR(ctrl->ctrl.tagset);
983 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
984 if (IS_ERR(ctrl->ctrl.connect_q)) {
985 ret = PTR_ERR(ctrl->ctrl.connect_q);
990 ret = nvme_rdma_start_io_queues(ctrl);
995 nvme_start_freeze(&ctrl->ctrl);
996 nvme_start_queues(&ctrl->ctrl);
997 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
1004 nvme_unfreeze(&ctrl->ctrl);
1007 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
1008 ctrl->ctrl.queue_count - 1);
1009 nvme_unfreeze(&ctrl->ctrl);
1015 nvme_stop_queues(&ctrl->ctrl);
1016 nvme_sync_io_queues(&ctrl->ctrl);
1017 nvme_rdma_stop_io_queues(ctrl);
1019 nvme_cancel_tagset(&ctrl->ctrl);
1021 blk_cleanup_queue(ctrl->ctrl.connect_q);
1024 blk_mq_free_tag_set(ctrl->ctrl.tagset);
1026 nvme_rdma_free_io_queues(ctrl);
1030 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
1033 nvme_stop_admin_queue(&ctrl->ctrl);
1034 blk_sync_queue(ctrl->ctrl.admin_q);
1035 nvme_rdma_stop_queue(&ctrl->queues[0]);
1036 if (ctrl->ctrl.admin_tagset) {
1037 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
1038 nvme_cancel_request, &ctrl->ctrl);
1039 blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
1042 nvme_start_admin_queue(&ctrl->ctrl);
1043 nvme_rdma_destroy_admin_queue(ctrl, remove);
1046 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
1049 if (ctrl->ctrl.queue_count > 1) {
1050 nvme_stop_queues(&ctrl->ctrl);
1051 nvme_sync_io_queues(&ctrl->ctrl);
1052 nvme_rdma_stop_io_queues(ctrl);
1053 if (ctrl->ctrl.tagset) {
1054 blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
1055 nvme_cancel_request, &ctrl->ctrl);
1056 blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
1059 nvme_start_queues(&ctrl->ctrl);
1060 nvme_rdma_destroy_io_queues(ctrl, remove);
1066 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
1068 cancel_work_sync(&ctrl->err_work);
1069 cancel_delayed_work_sync(&ctrl->reconnect_work);
1074 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
1076 if (list_empty(&ctrl->list))
1080 list_del(&ctrl->list);
1085 kfree(ctrl->queues);
1086 kfree(ctrl);
1089 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
1092 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
1093 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
1094 ctrl->ctrl.state == NVME_CTRL_LIVE);
1098 if (nvmf_should_reconnect(&ctrl->ctrl)) {
1099 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
1100 ctrl->ctrl.opts->reconnect_delay);
1101 queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
1102 ctrl->ctrl.opts->reconnect_delay * HZ);
1104 nvme_delete_ctrl(&ctrl->ctrl);
1108 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
1113 ret = nvme_rdma_configure_admin_queue(ctrl, new);
1117 if (ctrl->ctrl.icdoff) {
1119 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
1123 if (!(ctrl->ctrl.sgls & (1 << 2))) {
1125 dev_err(ctrl->ctrl.device,
1130 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
1131 dev_warn(ctrl->ctrl.device,
1132 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1133 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
1136 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1137 dev_warn(ctrl->ctrl.device,
1138 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1139 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1140 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1143 if (ctrl->ctrl.sgls & (1 << 20))
1144 ctrl->use_inline_data = true;
1146 if (ctrl->ctrl.queue_count > 1) {
1147 ret = nvme_rdma_configure_io_queues(ctrl, new);
1152 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
1155 * state change failure is ok if we started ctrl delete,
1159 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
1160 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
1166 nvme_start_ctrl(&ctrl->ctrl);
1170 if (ctrl->ctrl.queue_count > 1) {
1171 nvme_stop_queues(&ctrl->ctrl);
1172 nvme_sync_io_queues(&ctrl->ctrl);
1173 nvme_rdma_stop_io_queues(ctrl);
1174 nvme_cancel_tagset(&ctrl->ctrl);
1175 nvme_rdma_destroy_io_queues(ctrl, new);
1178 nvme_stop_admin_queue(&ctrl->ctrl);
1179 blk_sync_queue(ctrl->ctrl.admin_q);
1180 nvme_rdma_stop_queue(&ctrl->queues[0]);
1181 nvme_cancel_admin_tagset(&ctrl->ctrl);
1182 nvme_rdma_destroy_admin_queue(ctrl, new);
1188 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
1191 ++ctrl->ctrl.nr_reconnects;
1193 if (nvme_rdma_setup_ctrl(ctrl, false))
1196 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
1197 ctrl->ctrl.nr_reconnects);
1199 ctrl->ctrl.nr_reconnects = 0;
1204 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
1205 ctrl->ctrl.nr_reconnects);
1206 nvme_rdma_reconnect_or_remove(ctrl);
1211 struct nvme_rdma_ctrl *ctrl = container_of(work,
1214 nvme_stop_keep_alive(&ctrl->ctrl);
1215 flush_work(&ctrl->ctrl.async_event_work);
1216 nvme_rdma_teardown_io_queues(ctrl, false);
1217 nvme_start_queues(&ctrl->ctrl);
1218 nvme_rdma_teardown_admin_queue(ctrl, false);
1219 nvme_start_admin_queue(&ctrl->ctrl);
1221 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
1222 /* state change failure is ok if we started ctrl delete */
1223 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
1224 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
1228 nvme_rdma_reconnect_or_remove(ctrl);
1231 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
1233 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
1236 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1237 queue_work(nvme_reset_wq, &ctrl->err_work);
1254 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1256 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
1257 dev_info(ctrl->ctrl.device,
1261 nvme_rdma_error_recovery(ctrl);
1358 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1390 * Align the MR to a 4K page size to match the ctrl page size and
1602 queue->ctrl->use_inline_data &&
1676 dev_err(queue->ctrl->ctrl.device,
1702 dev_err(queue->ctrl->ctrl.device,
1713 return queue->ctrl->admin_tag_set.tags[queue_idx];
1714 return queue->ctrl->tag_set.tags[queue_idx - 1];
1725 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1726 struct nvme_rdma_queue *queue = &ctrl->queues[0];
1728 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1758 dev_err(queue->ctrl->ctrl.device,
1761 nvme_rdma_error_recovery(queue->ctrl);
1772 dev_err(queue->ctrl->ctrl.device,
1775 nvme_rdma_error_recovery(queue->ctrl);
1782 dev_err(queue->ctrl->ctrl.device,
1785 nvme_rdma_error_recovery(queue->ctrl);
1810 dev_err(queue->ctrl->ctrl.device,
1812 nvme_rdma_error_recovery(queue->ctrl);
1825 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1862 dev_err(queue->ctrl->ctrl.device,
1866 dev_err(queue->ctrl->ctrl.device,
1875 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
1882 if (ctrl->opts->tos >= 0)
1883 rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
1886 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
1900 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1931 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1936 dev_err(ctrl->ctrl.device,
1950 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1973 dev_dbg(queue->ctrl->ctrl.device,
1980 dev_dbg(queue->ctrl->ctrl.device,
1982 nvme_rdma_error_recovery(queue->ctrl);
1988 dev_err(queue->ctrl->ctrl.device,
1990 nvme_rdma_error_recovery(queue->ctrl);
2019 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
2021 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
2024 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2029 * - ctrl disable/shutdown fabrics requests
2046 nvme_rdma_error_recovery(ctrl);
2066 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2067 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2098 dev_err(queue->ctrl->ctrl.device,
2184 struct nvme_rdma_ctrl *ctrl = set->driver_data;
2185 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2187 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2190 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2193 ctrl->io_queues[HCTX_TYPE_READ];
2195 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2199 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2202 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2206 ctrl->device->dev, 0);
2208 ctrl->device->dev, 0);
2210 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2213 ctrl->io_queues[HCTX_TYPE_POLL];
2215 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2216 ctrl->io_queues[HCTX_TYPE_READ];
2220 dev_info(ctrl->ctrl.device,
2222 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2223 ctrl->io_queues[HCTX_TYPE_READ],
2224 ctrl->io_queues[HCTX_TYPE_POLL]);
2249 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
2251 nvme_rdma_teardown_io_queues(ctrl, shutdown);
2252 nvme_stop_admin_queue(&ctrl->ctrl);
2254 nvme_shutdown_ctrl(&ctrl->ctrl);
2256 nvme_disable_ctrl(&ctrl->ctrl);
2257 nvme_rdma_teardown_admin_queue(ctrl, shutdown);
2260 static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
2262 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
2267 struct nvme_rdma_ctrl *ctrl =
2268 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
2270 nvme_stop_ctrl(&ctrl->ctrl);
2271 nvme_rdma_shutdown_ctrl(ctrl, false);
2273 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2279 if (nvme_rdma_setup_ctrl(ctrl, false))
2285 ++ctrl->ctrl.nr_reconnects;
2286 nvme_rdma_reconnect_or_remove(ctrl);
2318 struct nvme_rdma_ctrl *ctrl;
2322 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2323 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2335 struct nvme_rdma_ctrl *ctrl;
2339 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2340 if (!ctrl)
2342 ctrl->ctrl.opts = opts;
2343 INIT_LIST_HEAD(&ctrl->list);
2356 opts->traddr, opts->trsvcid, &ctrl->addr);
2365 opts->host_traddr, NULL, &ctrl->src_addr);
2378 INIT_DELAYED_WORK(&ctrl->reconnect_work,
2380 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
2381 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
2383 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2385 ctrl->ctrl.sqsize = opts->queue_size - 1;
2386 ctrl->ctrl.kato = opts->kato;
2389 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2391 if (!ctrl->queues)
2394 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
2399 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
2402 ret = nvme_rdma_setup_ctrl(ctrl, true);
2406 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
2407 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2410 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
2413 return &ctrl->ctrl;
2416 nvme_uninit_ctrl(&ctrl->ctrl);
2417 nvme_put_ctrl(&ctrl->ctrl);
2422 kfree(ctrl->queues);
2424 kfree(ctrl);
2441 struct nvme_rdma_ctrl *ctrl;
2459 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2460 if (ctrl->device->dev != ib_device)
2462 nvme_delete_ctrl(&ctrl->ctrl);
2495 struct nvme_rdma_ctrl *ctrl;
2501 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2502 nvme_delete_ctrl(&ctrl->ctrl);
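A closing note on the queue accounting that recurs through listing lines 161-168, 747-781 and 2184-2224: queues[0] is the admin queue, I/O queues occupy indices 1..queue_count-1, and io_queues[] splits them into DEFAULT, READ and POLL buckets. The standalone sketch below mirrors that arithmetic with hypothetical demo_* structs; only the two helpers reproduce logic actually visible in the listing (the pointer-difference queue index and the "everything past DEFAULT + READ is a poll queue" test).

/*
 * Standalone sketch of the queue-indexing convention, assuming a
 * simplified layout: queues[0] = admin, I/O queues follow, and the
 * io_queues[] buckets are allocated in DEFAULT, READ, POLL order.
 */
#include <stdbool.h>
#include <stdio.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

struct demo_queue;

struct demo_ctrl {
	unsigned int io_queues[HCTX_MAX_TYPES];
	struct demo_queue *queues;	/* queues[0] is the admin queue */
};

struct demo_queue {
	struct demo_ctrl *ctrl;
};

/* Mirrors the pointer arithmetic at listing line 161. */
static int queue_idx(struct demo_queue *queue)
{
	return queue - queue->ctrl->queues;
}

/* Mirrors the poll-queue test at listing lines 167-168. */
static bool is_poll_queue(struct demo_queue *queue)
{
	return queue_idx(queue) > queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
				  queue->ctrl->io_queues[HCTX_TYPE_READ];
}

int main(void)
{
	struct demo_queue queues[8];
	struct demo_ctrl ctrl = {
		.io_queues = { [HCTX_TYPE_DEFAULT] = 4,
			       [HCTX_TYPE_READ]    = 2,
			       [HCTX_TYPE_POLL]    = 1 },
		.queues = queues,		/* queue_count would be 8 */
	};

	for (int i = 0; i < 8; i++)
		queues[i].ctrl = &ctrl;

	for (int i = 0; i < 8; i++)
		printf("queue %d: %s\n", i,
		       is_poll_queue(&queues[i]) ? "poll" : "irq");
	return 0;
}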