Lines Matching refs:ctrl

136 	struct nvme_ctrl ctrl;
180 static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
182 return container_of(ctrl, struct nvme_dev, ctrl);
321 if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
322 dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
425 nvme_req(req)->ctrl = &dev->ctrl;
531 if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
852 dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
1008 nvme_complete_async_event(&nvmeq->dev->ctrl,
1015 dev_warn(nvmeq->dev->ctrl.device,
1114 static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
1116 struct nvme_dev *dev = to_nvme_dev(ctrl);
1134 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1158 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1164 struct nvme_ctrl *ctrl = &dev->ctrl;
1173 if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
1188 return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1206 dev_warn(nvmeq->dev->ctrl.device,
1208 atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1220 switch (dev->ctrl.state) {
1246 dev_warn(dev->ctrl.device,
1250 dev_warn(dev->ctrl.device,
1277 nvme_reset_ctrl(&dev->ctrl);
1290 dev_warn(dev->ctrl.device,
1302 switch (dev->ctrl.state) {
1304 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1307 dev_warn_ratelimited(dev->ctrl.device,
1325 dev_warn(dev->ctrl.device,
1330 nvme_reset_ctrl(&dev->ctrl);
1335 if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
1336 atomic_inc(&dev->ctrl.abort_limit);
1346 dev_warn(nvmeq->dev->ctrl.device,
1350 abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
1353 atomic_inc(&dev->ctrl.abort_limit);
1388 for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) {
1389 dev->ctrl.queue_count--;
1407 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1408 nvme_stop_admin_queue(&nvmeq->dev->ctrl);
1418 for (i = dev->ctrl.queue_count - 1; i > 0; i--)
1427 nvme_shutdown_ctrl(&dev->ctrl);
1429 nvme_disable_ctrl(&dev->ctrl);
1444 for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
1506 if (dev->ctrl.queue_count > qid)
1526 dev->ctrl.queue_count++;
1540 int nr = nvmeq->dev->ctrl.instance;
1634 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1640 nvme_start_admin_queue(&dev->ctrl);
1641 blk_cleanup_queue(dev->ctrl.admin_q);
1648 if (!dev->ctrl.admin_q) {
1654 dev->admin_tagset.numa_node = dev->ctrl.numa_node;
1661 dev->ctrl.admin_tagset = &dev->admin_tagset;
1663 dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset);
1664 if (IS_ERR(dev->ctrl.admin_q)) {
1666 dev->ctrl.admin_q = NULL;
1669 if (!blk_get_queue(dev->ctrl.admin_q)) {
1671 dev->ctrl.admin_q = NULL;
1675 nvme_start_admin_queue(&dev->ctrl);
1717 NVME_CAP_NSSRC(dev->ctrl.cap) : 0;
1723 result = nvme_disable_ctrl(&dev->ctrl);
1731 dev->ctrl.numa_node = dev_to_node(dev->dev);
1741 result = nvme_enable_ctrl(&dev->ctrl);
1762 for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
1769 max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1827 if (NVME_CAP_CMBS(dev->ctrl.cap))
1847 if (NVME_CAP_CMBS(dev->ctrl.cap)) {
1862 dev_warn(dev->ctrl.device,
1874 if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
1876 dev_warn(dev->ctrl.device,
1883 sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
1905 ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
1907 dev_warn(dev->ctrl.device,
1950 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1951 max_entries = dev->ctrl.hmmaxd;
2007 u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2);
2025 u64 preferred = (u64)dev->ctrl.hmpre * 4096;
2026 u64 min = (u64)dev->ctrl.hmmin * 4096;
2032 dev_warn(dev->ctrl.device,
2051 dev_warn(dev->ctrl.device,
2056 dev_info(dev->ctrl.device,
2135 if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
2171 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2177 result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
2244 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2271 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2324 if (!dev->ctrl.tagset) {
2331 dev->tagset.numa_node = dev->ctrl.numa_node;
2343 if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2348 dev_warn(dev->ctrl.device,
2352 dev->ctrl.tagset = &dev->tagset;
2390 dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
2392 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2394 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
2395 dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
2403 if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES)
2414 dev_warn(dev->ctrl.device, "detected Apple NVMe controller, "
2419 NVME_CAP_MQES(dev->ctrl.cap) == 0) {
2421 dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, "
2429 if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) &&
2432 dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
2476 if (dev->ctrl.state == NVME_CTRL_LIVE ||
2477 dev->ctrl.state == NVME_CTRL_RESETTING) {
2479 nvme_start_freeze(&dev->ctrl);
2490 nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT);
2492 nvme_stop_queues(&dev->ctrl);
2494 if (!dead && dev->ctrl.queue_count > 0) {
2503 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
2504 blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
2514 nvme_start_queues(&dev->ctrl);
2515 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
2516 nvme_start_admin_queue(&dev->ctrl);
2523 if (!nvme_wait_reset(&dev->ctrl))
2573 dev->ctrl.tagset = NULL;
2577 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
2579 struct nvme_dev *dev = to_nvme_dev(ctrl);
2583 if (dev->ctrl.admin_q)
2584 blk_put_queue(dev->ctrl.admin_q);
2585 free_opal_dev(dev->ctrl.opal_dev);
2598 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2599 nvme_get_ctrl(&dev->ctrl);
2601 nvme_kill_queues(&dev->ctrl);
2603 nvme_put_ctrl(&dev->ctrl);
2609 container_of(work, struct nvme_dev, ctrl.reset_work);
2610 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
2613 if (dev->ctrl.state != NVME_CTRL_RESETTING) {
2614 dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n",
2615 dev->ctrl.state);
2624 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2626 nvme_sync_queues(&dev->ctrl);
2647 dev->ctrl.max_hw_sectors = min_t(u32,
2649 dev->ctrl.max_segments = NVME_MAX_SEGS;
2662 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2663 dev_warn(dev->ctrl.device,
2673 dev->ctrl.max_integrity_segments = 1;
2675 result = nvme_init_identify(&dev->ctrl);
2679 if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2680 if (!dev->ctrl.opal_dev)
2681 dev->ctrl.opal_dev =
2682 init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2684 opal_unlock_from_suspend(dev->ctrl.opal_dev);
2686 free_opal_dev(dev->ctrl.opal_dev);
2687 dev->ctrl.opal_dev = NULL;
2690 if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2697 if (dev->ctrl.hmpre) {
2712 dev_warn(dev->ctrl.device, "IO queues not created\n");
2713 nvme_kill_queues(&dev->ctrl);
2714 nvme_remove_namespaces(&dev->ctrl);
2717 nvme_start_queues(&dev->ctrl);
2718 nvme_wait_freeze(&dev->ctrl);
2720 nvme_unfreeze(&dev->ctrl);
2727 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
2728 dev_warn(dev->ctrl.device,
2734 nvme_start_ctrl(&dev->ctrl);
2741 dev_warn(dev->ctrl.device,
2753 nvme_put_ctrl(&dev->ctrl);
2756 static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
2758 *val = readl(to_nvme_dev(ctrl)->bar + off);
2762 static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
2764 writel(val, to_nvme_dev(ctrl)->bar + off);
2768 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
2770 *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
2774 static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2776 struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
2857 flush_work(&dev->ctrl.reset_work);
2858 flush_work(&dev->ctrl.scan_work);
2859 nvme_put_ctrl(&dev->ctrl);
2873 INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
2897 ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2932 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
2935 nvme_reset_ctrl(&dev->ctrl);
2944 nvme_uninit_ctrl(&dev->ctrl);
2958 nvme_sync_queues(&dev->ctrl);
2965 if (!nvme_try_sched_reset(&dev->ctrl))
2966 flush_work(&dev->ctrl.reset_work);
2985 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
2989 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
2993 flush_work(&dev->ctrl.reset_work);
2994 nvme_stop_ctrl(&dev->ctrl);
2995 nvme_remove_namespaces(&dev->ctrl);
3003 nvme_uninit_ctrl(&dev->ctrl);
3007 static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps)
3009 return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps);
3012 static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps)
3014 return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL);
3020 struct nvme_ctrl *ctrl = &ndev->ctrl;
3023 nvme_set_power_state(ctrl, ndev->last_ps) != 0)
3024 return nvme_try_sched_reset(&ndev->ctrl);
3032 struct nvme_ctrl *ctrl = &ndev->ctrl;
3055 if (pm_suspend_via_firmware() || !ctrl->npss ||
3058 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
3061 nvme_start_freeze(ctrl);
3062 nvme_wait_freeze(ctrl);
3063 nvme_sync_queues(ctrl);
3065 if (ctrl->state != NVME_CTRL_LIVE)
3068 ret = nvme_get_power_state(ctrl, &ndev->last_ps);
3079 ret = nvme_set_power_state(ctrl, ctrl->npss);
3092 ctrl->npss = 0;
3095 nvme_unfreeze(ctrl);
3111 return nvme_try_sched_reset(&ndev->ctrl);
3138 dev_warn(dev->ctrl.device,
3143 dev_warn(dev->ctrl.device,
3154 dev_info(dev->ctrl.device, "restart after slot reset\n");
3156 nvme_reset_ctrl(&dev->ctrl);
3164 flush_work(&dev->ctrl.reset_work);
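
Nearly every match above goes through the same embedding pattern: struct nvme_dev holds a struct nvme_ctrl as its ctrl member (line 136), and code that is handed only the nvme_ctrl pointer climbs back to the owning nvme_dev with container_of() via to_nvme_dev() (lines 180-182). Below is a minimal userspace sketch of that pattern, assuming stand-in struct members rather than the real kernel fields; it is illustrative only, not the driver's actual definitions.

	/*
	 * Sketch of the embedding + container_of() pattern from the listing.
	 * The members here (instance, q_depth) are stand-ins, not the real
	 * kernel fields.
	 */
	#include <stddef.h>
	#include <stdio.h>

	/* Simplified form of the kernel's container_of(), without type checks. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct nvme_ctrl {
		int instance;          /* stand-in for shared core controller state */
	};

	struct nvme_dev {
		int q_depth;           /* stand-in for PCI-specific state */
		struct nvme_ctrl ctrl; /* embedded controller, as on line 136 */
	};

	/* Mirrors to_nvme_dev() on lines 180-182 of the listing. */
	static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
	{
		return container_of(ctrl, struct nvme_dev, ctrl);
	}

	int main(void)
	{
		struct nvme_dev dev = { .q_depth = 1024, .ctrl = { .instance = 0 } };
		struct nvme_ctrl *ctrl = &dev.ctrl;  /* what the core layer sees */

		/* Callbacks receive only *ctrl and recover the outer nvme_dev. */
		printf("q_depth via ctrl: %d\n", to_nvme_dev(ctrl)->q_depth);
		return 0;
	}

This is the usual kernel layering idiom: the transport-specific structure wraps the shared core state, so the core never needs a back-pointer and the PCI callbacks (reg_read32, free_ctrl, submit_async_event, etc. in the matches above) recover their private data from the ctrl argument alone.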