Lines matching references to ctrl in the NVMe PCI driver (drivers/nvme/host/pci.c)

141 	struct nvme_ctrl ctrl;
180 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
182 return container_of(ctrl, struct nvme_dev, ctrl);
252 if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP))
336 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
337 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
425 nvme_req(req)->ctrl = set->driver_data;
515 if (!nvme_ctrl_sgl_supported(&dev->ctrl))
786 nvme_ctrl_sgl_supported(&dev->ctrl))
890 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
891 return nvme_fail_nonready_command(&dev->ctrl, req);
924 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
1026 nvme_complete_async_event(&nvmeq->dev->ctrl,
1033 dev_warn(nvmeq->dev->ctrl.device,
1131 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1133 struct nvme_dev *dev = to_nvme_dev(ctrl);
1153 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1176 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1182 struct nvme_ctrl *ctrl = &dev->ctrl;
1191 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
1205 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1222 dev_warn(nvmeq->dev->ctrl.device,
1224 atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1237 switch (nvme_ctrl_state(&dev->ctrl)) {
1263 dev_warn(dev->ctrl.device,
1267 dev_warn(dev->ctrl.device,
1274 dev_warn(dev->ctrl.device,
1276 dev_warn(dev->ctrl.device,
1313 dev_warn(dev->ctrl.device,
1325 switch (nvme_ctrl_state(&dev->ctrl)) {
1327 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1330 dev_warn_ratelimited(dev->ctrl.device,
1348 dev_warn(dev->ctrl.device,
1355 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1356 atomic_inc(&dev->ctrl.abort_limit);
1365 dev_warn(nvmeq->dev->ctrl.device,
1371 abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
1374 atomic_inc(&dev->ctrl.abort_limit);
1391 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
1395 if (nvme_try_sched_reset(&dev->ctrl))
1396 nvme_unquiesce_io_queues(&dev->ctrl);
1420 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
1421 dev->ctrl.queue_count--;
1437 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1438 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
1447 for (i = dev->ctrl.queue_count - 1; i > 0; i--)
1461 for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
1523 if (dev->ctrl.queue_count > qid)
1543 dev->ctrl.queue_count++;
1557 int nr = nvmeq->dev->ctrl.instance;
1597 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_CONNECTING) {
1679 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1685 nvme_unquiesce_admin_queue(&dev->ctrl);
1686 nvme_remove_admin_tag_set(&dev->ctrl);
1727 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1740 result = nvme_disable_ctrl(&dev->ctrl, false);
1748 dev->ctrl.numa_node = dev_to_node(dev->dev);
1758 result = nvme_enable_ctrl(&dev->ctrl);
1779 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1786 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1833 if (NVME_CAP_CMBS(dev->ctrl.cap))
1853 if (NVME_CAP_CMBS(dev->ctrl.cap)) {
1868 dev_warn(dev->ctrl.device,
1898 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1900 dev_warn(dev->ctrl.device,
1945 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1946 max_entries = dev->ctrl.hmmaxd;
2002 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
2020 u64 preferred = (u64)dev->ctrl.hmpre * 4096;
2021 u64 min = (u64)dev->ctrl.hmmin * 4096;
2025 if (!dev->ctrl.hmpre)
2030 dev_warn(dev->ctrl.device,
2049 dev_warn(dev->ctrl.device,
2054 dev_info(dev->ctrl.device,
2132 struct nvme_ctrl *ctrl =
2134 struct nvme_dev *dev = to_nvme_dev(ctrl);
2142 if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
2169 sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
2240 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
2252 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2273 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2298 dev->ctrl.sqsize = result - 1;
2362 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2395 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2492 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2494 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2496 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
2504 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
2515 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
2520 NVME_CAP_MQES(dev->ctrl.cap) == 0) {
2522 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
2530 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
2533 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
2536 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
2577 enum nvme_ctrl_state state = nvme_ctrl_state(&dev->ctrl);
2585 nvme_start_freeze(&dev->ctrl);
2591 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
2594 nvme_quiesce_io_queues(&dev->ctrl);
2596 if (!dead && dev->ctrl.queue_count > 0) {
2598 nvme_disable_ctrl(&dev->ctrl, shutdown);
2608 nvme_cancel_tagset(&dev->ctrl);
2609 nvme_cancel_admin_tagset(&dev->ctrl);
2617 nvme_unquiesce_io_queues(&dev->ctrl);
2618 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
2619 nvme_unquiesce_admin_queue(&dev->ctrl);
2626 if (!nvme_wait_reset(&dev->ctrl))
2672 nvme_remove_io_tag_set(&dev->ctrl);
2673 dev->ctrl.tagset = NULL;
2677 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2679 struct nvme_dev *dev = to_nvme_dev(ctrl);
2690 container_of(work, struct nvme_dev, ctrl.reset_work);
2691 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2694 if (nvme_ctrl_state(&dev->ctrl) != NVME_CTRL_RESETTING) {
2695 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
2696 dev->ctrl.state);
2705 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2707 nvme_sync_queues(&dev->ctrl);
2713 nvme_unquiesce_admin_queue(&dev->ctrl);
2720 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2721 dev_warn(dev->ctrl.device,
2727 result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
2747 nvme_unquiesce_io_queues(&dev->ctrl);
2748 nvme_wait_freeze(&dev->ctrl);
2751 nvme_unfreeze(&dev->ctrl);
2753 dev_warn(dev->ctrl.device, "IO queues lost\n");
2754 nvme_mark_namespaces_dead(&dev->ctrl);
2755 nvme_unquiesce_io_queues(&dev->ctrl);
2756 nvme_remove_namespaces(&dev->ctrl);
2764 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
2765 dev_warn(dev->ctrl.device,
2771 nvme_start_ctrl(&dev->ctrl);
2781 dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n",
2783 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2785 nvme_sync_queues(&dev->ctrl);
2786 nvme_mark_namespaces_dead(&dev->ctrl);
2787 nvme_unquiesce_io_queues(&dev->ctrl);
2788 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
2791 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
2793 *val = readl(to_nvme_dev(ctrl)->bar + off);
2797 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
2799 writel(val, to_nvme_dev(ctrl)->bar + off);
2803 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2805 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
2809 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2811 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
2816 static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
2818 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
2819 struct nvme_subsystem *subsys = ctrl->subsys;
2821 dev_err(ctrl->device,
2830 static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
2832 struct nvme_dev *dev = to_nvme_dev(ctrl);
2934 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
2959 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2964 if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
2975 dev->ctrl.max_hw_sectors = min_t(u32,
2977 dev->ctrl.max_segments = NVME_MAX_SEGS;
2983 dev->ctrl.max_integrity_segments = 1;
3015 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
3021 result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset,
3030 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
3031 dev_warn(dev->ctrl.device,
3037 result = nvme_init_ctrl_finish(&dev->ctrl, false);
3052 nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
3057 if (!dev->ctrl.tagset)
3058 dev_warn(dev->ctrl.device, "IO queues not created\n");
3060 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
3061 dev_warn(dev->ctrl.device,
3069 nvme_start_ctrl(&dev->ctrl);
3070 nvme_put_ctrl(&dev->ctrl);
3071 flush_work(&dev->ctrl.scan_work);
3075 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3088 nvme_uninit_ctrl(&dev->ctrl);
3089 nvme_put_ctrl(&dev->ctrl);
3103 nvme_sync_queues(&dev->ctrl);
3110 if (!nvme_try_sched_reset(&dev->ctrl))
3111 flush_work(&dev->ctrl.reset_work);
3130 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
3134 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
3138 flush_work(&dev->ctrl.reset_work);
3139 nvme_stop_ctrl(&dev->ctrl);
3140 nvme_remove_namespaces(&dev->ctrl);
3149 nvme_uninit_ctrl(&dev->ctrl);
3153 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
3155 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
3158 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
3160 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
3166 struct nvme_ctrl *ctrl = &ndev->ctrl;
3169 nvme_set_power_state(ctrl, ndev->last_ps) != 0)
3171 if (ctrl->hmpre && nvme_setup_host_mem(ndev))
3176 return nvme_try_sched_reset(ctrl);
3183 struct nvme_ctrl *ctrl = &ndev->ctrl;
3201 if (pm_suspend_via_firmware() || !ctrl->npss ||
3203 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
3206 nvme_start_freeze(ctrl);
3207 nvme_wait_freeze(ctrl);
3208 nvme_sync_queues(ctrl);
3210 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
3224 ret = nvme_get_power_state(ctrl, &ndev->last_ps);
3235 ret = nvme_set_power_state(ctrl, ctrl->npss);
3248 ctrl->npss = 0;
3251 nvme_unfreeze(ctrl);
3267 return nvme_try_sched_reset(&ndev->ctrl);
3294 dev_warn(dev->ctrl.device,
3296 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
3303 dev_warn(dev->ctrl.device,
3314 dev_info(dev->ctrl.device, "restart after slot reset\n");
3316 if (!nvme_try_sched_reset(&dev->ctrl))
3317 nvme_unquiesce_io_queues(&dev->ctrl);
3325 flush_work(&dev->ctrl.reset_work);
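
Nearly every match above goes through the embedded struct nvme_ctrl ctrl member of struct nvme_dev, and the to_nvme_dev() helper recovers the enclosing device from a ctrl pointer via the kernel's container_of() idiom. Below is a minimal, self-contained sketch of that idiom; the struct layouts are simplified stand-ins rather than the real driver definitions, and container_of() is open-coded so the example builds outside the kernel tree.

#include <stddef.h>
#include <stdio.h>

/* Open-coded container_of(); in-kernel code uses the macro from
 * <linux/container_of.h> instead. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the real driver structures. */
struct nvme_ctrl {
        int instance;
};

struct nvme_dev {
        int q_depth;
        struct nvme_ctrl ctrl;  /* embedded controller, as in the listing */
};

/* Same shape as the driver's helper: map the embedded ctrl member back
 * to the nvme_dev that contains it. */
static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_dev, ctrl);
}

int main(void)
{
        struct nvme_dev dev = { .q_depth = 1024, .ctrl = { .instance = 0 } };
        struct nvme_ctrl *ctrl = &dev.ctrl;

        /* Recover the enclosing device from the ctrl pointer alone, the
         * way callbacks such as nvme_pci_reg_read32() do when they are
         * handed only a struct nvme_ctrl *. */
        printf("q_depth=%d\n", to_nvme_dev(ctrl)->q_depth);
        return 0;
}

This is why the core NVMe layer can pass a bare struct nvme_ctrl * into the PCI-specific ops (reg_read32, reg_write32, print_device_info, and so on) while the PCI driver still reaches its own per-device state with a single pointer subtraction.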