Lines matching refs: ctrl
89 struct nvme_rdma_ctrl *ctrl;
125 struct nvme_ctrl ctrl;
130 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
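The to_rdma_ctrl() helper at lines 130-132 recovers the driver-private nvme_rdma_ctrl from its embedded struct nvme_ctrl via container_of(). Below is a minimal user-space sketch of the same embed-and-recover pattern; the struct layouts and names are illustrative stand-ins, not the driver's definitions.

```c
#include <stddef.h>
#include <stdio.h>

struct core_ctrl {               /* stands in for struct nvme_ctrl      */
	int id;
};

struct rdma_ctrl {               /* stands in for struct nvme_rdma_ctrl */
	void *queues;
	struct core_ctrl ctrl;   /* embedded core controller            */
};

/* container_of(): recover the outer struct from a pointer to a member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct rdma_ctrl *to_rdma_ctrl(struct core_ctrl *ctrl)
{
	return container_of(ctrl, struct rdma_ctrl, ctrl);
}

int main(void)
{
	struct rdma_ctrl outer = { .queues = NULL, .ctrl = { .id = 42 } };
	struct core_ctrl *inner = &outer.ctrl;

	/* Same object, recovered through the embedded member. */
	printf("%d\n", to_rdma_ctrl(inner) == &outer); /* prints 1 */
	return 0;
}
```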
161 return queue - queue->ctrl->queues;
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
168 queue->ctrl->io_queues[HCTX_TYPE_READ];
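Lines 161 and 167-168 derive a queue's index by pointer arithmetic against the ctrl->queues array and test whether it falls in the poll region, past the default and read queue counts. A hedged sketch of that indexing with invented counts:

```c
#include <stdio.h>

struct queue { int dummy; };

int main(void)
{
	/* A queue's index is its offset from the base of the queues array,
	 * mirroring "queue - queue->ctrl->queues"; index 0 is the admin queue. */
	struct queue queues[8];
	struct queue *q = &queues[7];
	long idx = (long)(q - queues);

	/* Poll test mirrors lines 167-168: a queue polls if its index lies
	 * beyond the default + read regions.  Counts here are invented. */
	int io_queues_default = 4, io_queues_read = 2;
	int is_poll_queue = idx > io_queues_default + io_queues_read;

	printf("idx=%ld poll=%d\n", idx, is_poll_queue);
	return 0;
}
```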
297 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
299 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
302 nvme_req(rq)->ctrl = &ctrl->ctrl;
322 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
325 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
334 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
335 struct nvme_rdma_queue *queue = &ctrl->queues[0];
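Lines 322-335 map block-layer hardware contexts onto the queues array: IO hctx i uses queues[i + 1], the admin hctx uses queues[0], and line 325 bounds-checks the index with a BUG_ON. A tiny stand-alone illustration of that off-by-one mapping; all values are invented.

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	int queue_count = 5;   /* admin queue + 4 IO queues (illustrative) */
	int hctx_idx = 2;      /* block-layer hardware context index       */

	assert(hctx_idx < queue_count);   /* mirrors the BUG_ON() at line 325 */

	int io_queue = hctx_idx + 1;      /* IO hctx i  -> queues[i + 1] */
	int admin_queue = 0;              /* admin hctx -> queues[0]     */

	printf("hctx %d -> queues[%d], admin -> queues[%d]\n",
	       hctx_idx, io_queue, admin_queue);
	return 0;
}
```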
533 dev_err(queue->ctrl->ctrl.device,
544 dev_err(queue->ctrl->ctrl.device,
569 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
576 queue = &ctrl->queues[idx];
578 queue->ctrl = ctrl;
579 if (idx && ctrl->ctrl.max_integrity_segments)
586 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
595 dev_info(ctrl->ctrl.device,
601 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
602 src_addr = (struct sockaddr *)&ctrl->src_addr;
606 (struct sockaddr *)&ctrl->addr,
609 dev_info(ctrl->ctrl.device,
616 dev_info(ctrl->ctrl.device,
660 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
664 for (i = 1; i < ctrl->ctrl.queue_count; i++)
665 nvme_rdma_free_queue(&ctrl->queues[i]);
668 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
672 for (i = 1; i < ctrl->ctrl.queue_count; i++)
673 nvme_rdma_stop_queue(&ctrl->queues[i]);
676 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx];
682 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
684 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
691 dev_info(ctrl->ctrl.device,
697 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
703 ret = nvme_rdma_start_queue(ctrl, i);
712 nvme_rdma_stop_queue(&ctrl->queues[i]);
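Lines 697-712 show nvme_rdma_start_io_queues() starting queues over a half-open range and, if any connect fails, stopping the ones already started in reverse order. A self-contained sketch of the same loop-plus-rollback shape, with stub start/stop functions standing in for the driver calls:

```c
#include <stdio.h>

/* Stand-ins: start_one() fails on queue 3 to exercise the rollback path. */
static int start_one(int i) { printf("start %d\n", i); return i == 3 ? -1 : 0; }
static void stop_one(int i) { printf("stop  %d\n", i); }

/* Start queues in [first, last); on failure, stop the ones already
 * started, in reverse - the same shape as nvme_rdma_start_io_queues(). */
static int start_range(int first, int last)
{
	int i, ret = 0;

	for (i = first; i < last; i++) {
		ret = start_one(i);
		if (ret)
			goto out_stop;
	}
	return 0;

out_stop:
	for (i--; i >= first; i--)
		stop_one(i);
	return ret;
}

int main(void)
{
	return start_range(1, 5) ? 1 : 0;
}
```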
716 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
718 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
723 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
728 dev_err(ctrl->ctrl.device,
733 ctrl->ctrl.queue_count = nr_io_queues + 1;
734 dev_info(ctrl->ctrl.device,
737 nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
738 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
739 ret = nvme_rdma_alloc_queue(ctrl, i,
740 ctrl->ctrl.sqsize + 1);
749 nvme_rdma_free_queue(&ctrl->queues[i]);
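Lines 716-749 negotiate the IO queue count (the controller may grant fewer than requested), reserve index 0 for the admin queue, and allocate each IO queue with depth sqsize + 1. A small arithmetic sketch of that bookkeeping, using invented numbers:

```c
#include <stdio.h>

int main(void)
{
	/* Illustrative values: the requested count comes from the connect
	 * options, and the controller may grant fewer than requested. */
	int granted_io_queues = 6;
	unsigned int sqsize = 127;   /* 0-based: usable depth is sqsize + 1 */

	if (granted_io_queues == 0) {
		fprintf(stderr, "unable to set any I/O queues\n");
		return 1;
	}

	/* queue_count includes the admin queue at index 0 (line 733). */
	int queue_count = granted_io_queues + 1;

	/* Each IO queue is allocated with depth sqsize + 1 (lines 739-740). */
	for (int i = 1; i < queue_count; i++)
		printf("alloc IO queue %d, depth %u\n", i, sqsize + 1);

	return 0;
}
```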
754 static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
759 if (ctrl->max_integrity_segments)
763 return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
765 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
769 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
771 if (ctrl->async_event_sqe.data) {
772 cancel_work_sync(&ctrl->ctrl.async_event_work);
773 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
775 ctrl->async_event_sqe.data = NULL;
777 nvme_rdma_free_queue(&ctrl->queues[0]);
780 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
786 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
790 ctrl->device = ctrl->queues[0].device;
791 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
794 if (ctrl->device->dev->attrs.kernel_cap_flags &
798 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
806 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
812 error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
813 &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
821 error = nvme_rdma_start_queue(ctrl, 0);
825 error = nvme_enable_ctrl(&ctrl->ctrl);
829 ctrl->ctrl.max_segments = ctrl->max_fr_pages;
830 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
832 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
834 ctrl->ctrl.max_integrity_segments = 0;
836 nvme_unquiesce_admin_queue(&ctrl->ctrl);
838 error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
845 nvme_quiesce_admin_queue(&ctrl->ctrl);
846 blk_sync_queue(ctrl->ctrl.admin_q);
848 nvme_rdma_stop_queue(&ctrl->queues[0]);
849 nvme_cancel_admin_tagset(&ctrl->ctrl);
852 nvme_remove_admin_tag_set(&ctrl->ctrl);
854 if (ctrl->async_event_sqe.data) {
855 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
857 ctrl->async_event_sqe.data = NULL;
860 nvme_rdma_free_queue(&ctrl->queues[0]);
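Lines 780-860 build the admin queue through a chain of setup steps whose failures unwind via goto labels in reverse order of what succeeded. The sketch below reproduces only that goto-ladder shape; the step names are stand-ins that loosely follow the listing, not the driver's actual cleanup sequence.

```c
#include <stdio.h>

/* Stand-in setup/teardown steps; names only loosely follow the listing. */
static int alloc_queue(void)    { puts("alloc admin queue"); return 0; }
static int alloc_async_qe(void) { puts("alloc async event sqe"); return 0; }
static int alloc_tag_set(void)  { puts("alloc admin tag set"); return 0; }
static int start_queue(void)    { puts("start admin queue"); return -1; /* force failure */ }

static void remove_tag_set(void) { puts("remove admin tag set"); }
static void free_async_qe(void)  { puts("free async event sqe"); }
static void free_queue(void)     { puts("free admin queue"); }

/* Each label unwinds exactly the steps that succeeded, in reverse order. */
static int configure_admin(void)
{
	int ret;

	ret = alloc_queue();
	if (ret)
		return ret;
	ret = alloc_async_qe();
	if (ret)
		goto out_free_queue;
	ret = alloc_tag_set();
	if (ret)
		goto out_free_async_qe;
	ret = start_queue();
	if (ret)
		goto out_remove_tag_set;
	return 0;

out_remove_tag_set:
	remove_tag_set();
out_free_async_qe:
	free_async_qe();
out_free_queue:
	free_queue();
	return ret;
}

int main(void)
{
	return configure_admin() ? 1 : 0;
}
```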
864 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
868 ret = nvme_rdma_alloc_io_queues(ctrl);
873 ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
883 nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
884 ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
889 nvme_start_freeze(&ctrl->ctrl);
890 nvme_unquiesce_io_queues(&ctrl->ctrl);
891 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
898 nvme_unfreeze(&ctrl->ctrl);
901 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
902 ctrl->ctrl.queue_count - 1);
903 nvme_unfreeze(&ctrl->ctrl);
910 ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
911 ctrl->tag_set.nr_hw_queues + 1);
918 nvme_quiesce_io_queues(&ctrl->ctrl);
919 nvme_sync_io_queues(&ctrl->ctrl);
920 nvme_rdma_stop_io_queues(ctrl);
922 nvme_cancel_tagset(&ctrl->ctrl);
924 nvme_remove_io_tag_set(&ctrl->ctrl);
926 nvme_rdma_free_io_queues(ctrl);
930 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
933 nvme_quiesce_admin_queue(&ctrl->ctrl);
934 blk_sync_queue(ctrl->ctrl.admin_q);
935 nvme_rdma_stop_queue(&ctrl->queues[0]);
936 nvme_cancel_admin_tagset(&ctrl->ctrl);
938 nvme_unquiesce_admin_queue(&ctrl->ctrl);
939 nvme_remove_admin_tag_set(&ctrl->ctrl);
941 nvme_rdma_destroy_admin_queue(ctrl);
944 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
947 if (ctrl->ctrl.queue_count > 1) {
948 nvme_quiesce_io_queues(&ctrl->ctrl);
949 nvme_sync_io_queues(&ctrl->ctrl);
950 nvme_rdma_stop_io_queues(ctrl);
951 nvme_cancel_tagset(&ctrl->ctrl);
953 nvme_unquiesce_io_queues(&ctrl->ctrl);
954 nvme_remove_io_tag_set(&ctrl->ctrl);
956 nvme_rdma_free_io_queues(ctrl);
962 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
964 flush_work(&ctrl->err_work);
965 cancel_delayed_work_sync(&ctrl->reconnect_work);
970 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
972 if (list_empty(&ctrl->list))
976 list_del(&ctrl->list);
981 kfree(ctrl->queues);
982 kfree(ctrl);
985 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
987 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
995 if (nvmf_should_reconnect(&ctrl->ctrl)) {
996 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
997 ctrl->ctrl.opts->reconnect_delay);
998 queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
999 ctrl->ctrl.opts->reconnect_delay * HZ);
1001 nvme_delete_ctrl(&ctrl->ctrl);
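Lines 985-1001 either schedule a delayed reconnect (reconnect_delay seconds, converted to jiffies with HZ) or delete the controller when reconnecting is no longer allowed. A rough user-space sketch of that decision; the should_reconnect() policy below is a stand-in for nvmf_should_reconnect(), whose exact rules are not visible in this listing.

```c
#include <stdbool.h>
#include <stdio.h>

#define HZ 250 /* illustrative tick rate; the real HZ is a kernel config value */

/* Stand-in policy; the driver delegates this to nvmf_should_reconnect(). */
static bool should_reconnect(int attempts, int max_reconnects)
{
	return attempts < max_reconnects;
}

int main(void)
{
	int reconnect_delay = 10;             /* seconds, from the connect options */
	int attempts = 3, max_reconnects = 5; /* invented values */

	if (should_reconnect(attempts, max_reconnects))
		printf("Reconnecting in %d seconds (%d jiffies)\n",
		       reconnect_delay, reconnect_delay * HZ);
	else
		printf("giving up: delete controller\n");
	return 0;
}
```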
1005 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
1010 ret = nvme_rdma_configure_admin_queue(ctrl, new);
1014 if (ctrl->ctrl.icdoff) {
1016 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
1020 if (!(ctrl->ctrl.sgls & (1 << 2))) {
1022 dev_err(ctrl->ctrl.device,
1027 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
1028 dev_warn(ctrl->ctrl.device,
1029 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1030 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
1033 if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
1034 dev_warn(ctrl->ctrl.device,
1035 "ctrl sqsize %u > max queue size %u, clamping down\n",
1036 ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
1037 ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
1040 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1041 dev_warn(ctrl->ctrl.device,
1042 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1043 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1044 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1047 if (ctrl->ctrl.sgls & (1 << 20))
1048 ctrl->use_inline_data = true;
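Lines 1027-1044 clamp the 0-based sqsize so that the effective queue depth (sqsize + 1) never exceeds the transport maximum or the controller's MAXCMD. A worked sketch of that clamping with invented limits:

```c
#include <stdio.h>

int main(void)
{
	/* sqsize is 0-based: usable queue depth is sqsize + 1.  The limits
	 * below are illustrative stand-ins for NVME_RDMA_MAX_QUEUE_SIZE and
	 * the controller's MAXCMD value. */
	unsigned int sqsize = 255;          /* requested depth 256 */
	unsigned int max_queue_size = 128;  /* transport limit     */
	unsigned int maxcmd = 64;           /* controller limit    */

	if (sqsize + 1 > max_queue_size) {
		printf("clamping depth %u -> %u (transport max)\n",
		       sqsize + 1, max_queue_size);
		sqsize = max_queue_size - 1;
	}
	if (sqsize + 1 > maxcmd) {
		printf("clamping depth %u -> %u (ctrl maxcmd)\n",
		       sqsize + 1, maxcmd);
		sqsize = maxcmd - 1;
	}
	printf("final sqsize %u (depth %u)\n", sqsize, sqsize + 1);
	return 0;
}
```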
1050 if (ctrl->ctrl.queue_count > 1) {
1051 ret = nvme_rdma_configure_io_queues(ctrl, new);
1056 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
1059 * state change failure is ok if we started ctrl delete,
1063 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
1072 nvme_start_ctrl(&ctrl->ctrl);
1076 if (ctrl->ctrl.queue_count > 1) {
1077 nvme_quiesce_io_queues(&ctrl->ctrl);
1078 nvme_sync_io_queues(&ctrl->ctrl);
1079 nvme_rdma_stop_io_queues(ctrl);
1080 nvme_cancel_tagset(&ctrl->ctrl);
1082 nvme_remove_io_tag_set(&ctrl->ctrl);
1083 nvme_rdma_free_io_queues(ctrl);
1086 nvme_quiesce_admin_queue(&ctrl->ctrl);
1087 blk_sync_queue(ctrl->ctrl.admin_q);
1088 nvme_rdma_stop_queue(&ctrl->queues[0]);
1089 nvme_cancel_admin_tagset(&ctrl->ctrl);
1091 nvme_remove_admin_tag_set(&ctrl->ctrl);
1092 nvme_rdma_destroy_admin_queue(ctrl);
1098 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
1101 ++ctrl->ctrl.nr_reconnects;
1103 if (nvme_rdma_setup_ctrl(ctrl, false))
1106 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
1107 ctrl->ctrl.nr_reconnects);
1109 ctrl->ctrl.nr_reconnects = 0;
1114 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
1115 ctrl->ctrl.nr_reconnects);
1116 nvme_rdma_reconnect_or_remove(ctrl);
1121 struct nvme_rdma_ctrl *ctrl = container_of(work,
1124 nvme_stop_keep_alive(&ctrl->ctrl);
1125 flush_work(&ctrl->ctrl.async_event_work);
1126 nvme_rdma_teardown_io_queues(ctrl, false);
1127 nvme_unquiesce_io_queues(&ctrl->ctrl);
1128 nvme_rdma_teardown_admin_queue(ctrl, false);
1129 nvme_unquiesce_admin_queue(&ctrl->ctrl);
1130 nvme_auth_stop(&ctrl->ctrl);
1132 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
1133 /* state change failure is ok if we started ctrl delete */
1134 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
1141 nvme_rdma_reconnect_or_remove(ctrl);
1144 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
1146 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
1149 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1150 queue_work(nvme_reset_wq, &ctrl->err_work);
1167 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1169 if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
1170 dev_info(ctrl->ctrl.device,
1174 nvme_rdma_error_recovery(ctrl);
1278 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1310 * Align the MR to a 4K page size to match the ctrl page size and
1545 queue->ctrl->use_inline_data &&
1608 dev_err(queue->ctrl->ctrl.device,
1634 dev_err(queue->ctrl->ctrl.device,
1645 return queue->ctrl->admin_tag_set.tags[queue_idx];
1646 return queue->ctrl->tag_set.tags[queue_idx - 1];
1657 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1658 struct nvme_rdma_queue *queue = &ctrl->queues[0];
1660 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1690 dev_err(queue->ctrl->ctrl.device,
1693 nvme_rdma_error_recovery(queue->ctrl);
1704 dev_err(queue->ctrl->ctrl.device,
1707 nvme_rdma_error_recovery(queue->ctrl);
1714 dev_err(queue->ctrl->ctrl.device,
1717 nvme_rdma_error_recovery(queue->ctrl);
1742 dev_err(queue->ctrl->ctrl.device,
1744 nvme_rdma_error_recovery(queue->ctrl);
1757 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1794 dev_err(queue->ctrl->ctrl.device,
1798 dev_err(queue->ctrl->ctrl.device,
1807 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
1814 if (ctrl->opts->tos >= 0)
1815 rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
1818 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
1832 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1863 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1868 dev_err(ctrl->ctrl.device,
1882 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1905 dev_dbg(queue->ctrl->ctrl.device,
1912 dev_dbg(queue->ctrl->ctrl.device,
1914 nvme_rdma_error_recovery(queue->ctrl);
1920 dev_err(queue->ctrl->ctrl.device,
1922 nvme_rdma_error_recovery(queue->ctrl);
1947 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1949 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
1952 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
1957 * - ctrl disable/shutdown fabrics requests
1974 nvme_rdma_error_recovery(ctrl);
1994 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
1995 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2026 dev_err(queue->ctrl->ctrl.device,
2114 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
2116 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2139 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
2141 nvme_rdma_teardown_io_queues(ctrl, shutdown);
2142 nvme_quiesce_admin_queue(&ctrl->ctrl);
2143 nvme_disable_ctrl(&ctrl->ctrl, shutdown);
2144 nvme_rdma_teardown_admin_queue(ctrl, shutdown);
2147 static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
2149 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
2154 struct nvme_rdma_ctrl *ctrl =
2155 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
2157 nvme_stop_ctrl(&ctrl->ctrl);
2158 nvme_rdma_shutdown_ctrl(ctrl, false);
2160 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2166 if (nvme_rdma_setup_ctrl(ctrl, false))
2172 ++ctrl->ctrl.nr_reconnects;
2173 nvme_rdma_reconnect_or_remove(ctrl);
2205 struct nvme_rdma_ctrl *ctrl;
2209 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2210 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2222 struct nvme_rdma_ctrl *ctrl;
2226 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2227 if (!ctrl)
2229 ctrl->ctrl.opts = opts;
2230 INIT_LIST_HEAD(&ctrl->list);
2243 opts->traddr, opts->trsvcid, &ctrl->addr);
2252 opts->host_traddr, NULL, &ctrl->src_addr);
2265 INIT_DELAYED_WORK(&ctrl->reconnect_work,
2267 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
2268 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
2270 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2272 ctrl->ctrl.sqsize = opts->queue_size - 1;
2273 ctrl->ctrl.kato = opts->kato;
2276 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2278 if (!ctrl->queues)
2281 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
2286 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
2289 ret = nvme_rdma_setup_ctrl(ctrl, true);
2293 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
2294 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
2297 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
2300 return &ctrl->ctrl;
2303 nvme_uninit_ctrl(&ctrl->ctrl);
2304 nvme_put_ctrl(&ctrl->ctrl);
2309 kfree(ctrl->queues);
2311 kfree(ctrl);
2328 struct nvme_rdma_ctrl *ctrl;
2346 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2347 if (ctrl->device->dev != ib_device)
2349 nvme_delete_ctrl(&ctrl->ctrl);
2382 struct nvme_rdma_ctrl *ctrl;
2388 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2389 nvme_delete_ctrl(&ctrl->ctrl);