// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
#include <linux/ptrace.h>
#include <linux/nvme_ioctl.h>
#include <linux/pm_qos.h>
#include <asm/unaligned.h>

#include "nvme.h"
#include "fabrics.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define NVME_MINORS		(1U << MINORBITS)

unsigned int admin_timeout = 60;
module_param(admin_timeout, uint, 0644);
MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
EXPORT_SYMBOL_GPL(admin_timeout);

unsigned int nvme_io_timeout = 30;
module_param_named(io_timeout, nvme_io_timeout, uint, 0644);
MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
EXPORT_SYMBOL_GPL(nvme_io_timeout);

static unsigned char shutdown_timeout = 5;
module_param(shutdown_timeout, byte, 0644);
MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");

static u8 nvme_max_retries = 5;
module_param_named(max_retries, nvme_max_retries, byte, 0644);
MODULE_PARM_DESC(max_retries, "max number of retries a command may have");

static unsigned long default_ps_max_latency_us = 100000;
module_param(default_ps_max_latency_us, ulong, 0644);
MODULE_PARM_DESC(default_ps_max_latency_us,
		 "max power saving latency for new devices; use PM QOS to change per device");

static bool force_apst;
module_param(force_apst, bool, 0644);
MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off");

static bool streams;
module_param(streams, bool, 0644);
MODULE_PARM_DESC(streams, "turn on support for Streams write directives");

/*
 * nvme_wq - hosts nvme related works that are not reset or delete
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq hosts controller deletion
 * works which flush reset works for serialization.
 */
struct workqueue_struct *nvme_wq;
EXPORT_SYMBOL_GPL(nvme_wq);

struct workqueue_struct *nvme_reset_wq;
EXPORT_SYMBOL_GPL(nvme_reset_wq);

struct workqueue_struct *nvme_delete_wq;
EXPORT_SYMBOL_GPL(nvme_delete_wq);

static LIST_HEAD(nvme_subsystems);
static DEFINE_MUTEX(nvme_subsystems_lock);

static DEFINE_IDA(nvme_instance_ida);
static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;

static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					   unsigned nsid);

static void nvme_update_bdev_size(struct gendisk *disk)
{
	struct block_device *bdev = bdget_disk(disk, 0);

	if (bdev) {
		bd_set_nr_sectors(bdev, get_capacity(disk));
		bdput(bdev);
	}
}

static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Only queue new scan work when admin and IO queues are both alive
	 */
	if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
		queue_work(nvme_wq, &ctrl->scan_work);
}

/*
 * Use this function to proceed with scheduling reset_work for a controller
 * that had previously been set to the resetting state. This is intended for
 * code paths that can't be interrupted by other reset attempts. A hot removal
 * may prevent this from succeeding.
 */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_try_sched_reset);

int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);

int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
	int ret;

	ret = nvme_reset_ctrl(ctrl);
	if (!ret) {
		flush_work(&ctrl->reset_work);
		if (ctrl->state != NVME_CTRL_LIVE)
			ret = -ENETRESET;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
	dev_info(ctrl->device,
		 "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);

	flush_work(&ctrl->reset_work);
	nvme_stop_ctrl(ctrl);
	nvme_remove_namespaces(ctrl);
	ctrl->ops->delete_ctrl(ctrl);
	nvme_uninit_ctrl(ctrl);
}

static void nvme_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, delete_work);

	nvme_do_delete_ctrl(ctrl);
}

int nvme_delete_ctrl(struct nvme_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		return -EBUSY;
	if (!queue_work(nvme_delete_wq, &ctrl->delete_work))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_delete_ctrl);

static void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
{
	/*
	 * Keep a reference until nvme_do_delete_ctrl() completes,
	 * since ->delete_ctrl can free the controller.
	 */
	nvme_get_ctrl(ctrl);
	if (nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
		nvme_do_delete_ctrl(ctrl);
	nvme_put_ctrl(ctrl);
}

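/*
 * Map an NVMe completion status to a block layer status.  Only the status
 * code and status code type are considered; the do-not-retry, more and
 * command-retry-delay bits are masked off by the 0x7ff mask below.
 */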
static blk_status_t nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return BLK_STS_OK;
	case NVME_SC_CAP_EXCEEDED:
		return BLK_STS_NOSPC;
	case NVME_SC_LBA_RANGE:
	case NVME_SC_CMD_INTERRUPTED:
	case NVME_SC_NS_NOT_READY:
		return BLK_STS_TARGET;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_ONCS_NOT_SUPPORTED:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return BLK_STS_NOTSUPP;
	case NVME_SC_WRITE_FAULT:
	case NVME_SC_READ_ERROR:
	case NVME_SC_UNWRITTEN_BLOCK:
	case NVME_SC_ACCESS_DENIED:
	case NVME_SC_READ_ONLY:
	case NVME_SC_COMPARE_FAILED:
		return BLK_STS_MEDIUM;
	case NVME_SC_GUARD_CHECK:
	case NVME_SC_APPTAG_CHECK:
	case NVME_SC_REFTAG_CHECK:
	case NVME_SC_INVALID_PI:
		return BLK_STS_PROTECTION;
	case NVME_SC_RESERVATION_CONFLICT:
		return BLK_STS_NEXUS;
	case NVME_SC_HOST_PATH_ERROR:
		return BLK_STS_TRANSPORT;
	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
		return BLK_STS_ZONE_ACTIVE_RESOURCE;
	case NVME_SC_ZONE_TOO_MANY_OPEN:
		return BLK_STS_ZONE_OPEN_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static void nvme_retry_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	unsigned long delay = 0;
	u16 crd;

	/* The mask and shift result must be <= 3 */
	crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
	if (ns && crd)
		delay = ns->ctrl->crdt[crd - 1] * 100;

	nvme_req(req)->retries++;
	blk_mq_requeue_request(req, false);
	blk_mq_delay_kick_requeue_list(req->q, delay);
}

enum nvme_disposition {
	COMPLETE,
	RETRY,
	FAILOVER,
};

static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
{
	if (likely(nvme_req(req)->status == 0))
		return COMPLETE;

	if (blk_noretry_request(req) ||
	    (nvme_req(req)->status & NVME_SC_DNR) ||
	    nvme_req(req)->retries >= nvme_max_retries)
		return COMPLETE;

	if (req->cmd_flags & REQ_NVME_MPATH) {
		if (nvme_is_path_error(nvme_req(req)->status) ||
		    blk_queue_dying(req->q))
			return FAILOVER;
	} else {
		if (blk_queue_dying(req->q))
			return COMPLETE;
	}

	return RETRY;
}

static inline void nvme_end_req(struct request *req)
{
	blk_status_t status = nvme_error_status(nvme_req(req)->status);

	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    req_op(req) == REQ_OP_ZONE_APPEND)
		req->__sector = nvme_lba_to_sect(req->q->queuedata,
			le64_to_cpu(nvme_req(req)->result.u64));

	nvme_trace_bio_complete(req, status);
	blk_mq_end_request(req, status);
}

void nvme_complete_rq(struct request *req)
{
	trace_nvme_complete_rq(req);
	nvme_cleanup_cmd(req);

	if (nvme_req(req)->ctrl->kas)
		nvme_req(req)->ctrl->comp_seen = true;

	switch (nvme_decide_disposition(req)) {
	case COMPLETE:
		nvme_end_req(req);
		return;
	case RETRY:
		nvme_retry_req(req);
		return;
	case FAILOVER:
		nvme_failover_req(req);
		return;
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);

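/*
 * Force-complete a single outstanding request; used as the iterator callback
 * for blk_mq_tagset_busy_iter() when a controller's queues are being torn
 * down.  Requests that have already completed are left alone.
 */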
bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
	dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
				"Cancelling I/O %d", req->tag);

	/* don't abort one completed request */
	if (blk_mq_request_completed(req))
		return true;

	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
	blk_mq_complete_request(req);
	return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);

void nvme_cancel_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->tagset) {
		blk_mq_tagset_busy_iter(ctrl->tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_tagset);

void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
{
	if (ctrl->admin_tagset) {
		blk_mq_tagset_busy_iter(ctrl->admin_tagset,
				nvme_cancel_request, ctrl);
		blk_mq_tagset_wait_completed_request(ctrl->admin_tagset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cancel_admin_tagset);

bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state)
{
	enum nvme_ctrl_state old_state;
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);

	old_state = ctrl->state;
	switch (new_state) {
	case NVME_CTRL_LIVE:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_RESETTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_LIVE:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_CONNECTING:
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING:
		switch (old_state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DELETING_NOIO:
		switch (old_state) {
		case NVME_CTRL_DELETING:
		case NVME_CTRL_DEAD:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	case NVME_CTRL_DEAD:
		switch (old_state) {
		case NVME_CTRL_DELETING:
			changed = true;
			fallthrough;
		default:
			break;
		}
		break;
	default:
		break;
	}

	if (changed) {
		ctrl->state = new_state;
		wake_up_all(&ctrl->state_wq);
	}

	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (changed && ctrl->state == NVME_CTRL_LIVE)
		nvme_kick_requeue_lists(ctrl);
	return changed;
}
EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
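
/*
 * Transitions permitted by the state machine above:
 *   NEW        -> LIVE, RESETTING, CONNECTING
 *   LIVE       -> RESETTING, DELETING
 *   RESETTING  -> LIVE, CONNECTING, DELETING
 *   CONNECTING -> LIVE, DELETING
 *   DELETING   -> DELETING_NOIO, DEAD
 *   DEAD       -> DELETING_NOIO
 */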

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (ctrl->state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

/*
 * Waits for the controller state to be resetting, or returns false if it is
 * not possible to ever transition to that state.
 */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}
EXPORT_SYMBOL_GPL(nvme_wait_reset);

static void nvme_free_ns_head(struct kref *ref)
{
	struct nvme_ns_head *head =
		container_of(ref, struct nvme_ns_head, ref);

	nvme_mpath_remove_disk(head);
	ida_simple_remove(&head->subsys->ns_ida, head->instance);
	cleanup_srcu_struct(&head->srcu);
	nvme_put_subsystem(head->subsys);
	kfree(head);
}

static void nvme_put_ns_head(struct nvme_ns_head *head)
{
	kref_put(&head->ref, nvme_free_ns_head);
}

static void nvme_free_ns(struct kref *kref)
{
	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);

	if (ns->ndev)
		nvme_nvm_unregister(ns);

	put_disk(ns->disk);
	nvme_put_ns_head(ns->head);
	nvme_put_ctrl(ns->ctrl);
	kfree(ns);
}

void nvme_put_ns(struct nvme_ns *ns)
{
	kref_put(&ns->kref, nvme_free_ns);
}
EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);

static inline void nvme_clear_nvme_request(struct request *req)
{
	nvme_req(req)->retries = 0;
	nvme_req(req)->flags = 0;
	req->rq_flags |= RQF_DONTPREP;
}

static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
	else /* no queuedata implies admin queue */
		req->timeout = ADMIN_TIMEOUT;

	req->cmd_flags |= REQ_FAILFAST_DRIVER;
	nvme_clear_nvme_request(req);
	nvme_req(req)->cmd = cmd;
}

struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req;

	req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req;

	req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
			qid ? qid - 1 : 0);
	if (!IS_ERR(req))
		nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);

static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));

	c.directive.opcode = nvme_admin_directive_send;
	c.directive.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE;
	c.directive.dtype = NVME_DIR_IDENTIFY;
	c.directive.tdtype = NVME_DIR_STREAMS;
	c.directive.endir = enable ? NVME_DIR_ENDIR : 0;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0);
}

static int nvme_disable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, false);
}

static int nvme_enable_streams(struct nvme_ctrl *ctrl)
{
	return nvme_toggle_streams(ctrl, true);
}

static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
				  struct streams_directive_params *s, u32 nsid)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	memset(s, 0, sizeof(*s));

	c.directive.opcode = nvme_admin_directive_recv;
	c.directive.nsid = cpu_to_le32(nsid);
	c.directive.numd = cpu_to_le32(nvme_bytes_to_numd(sizeof(*s)));
	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
	c.directive.dtype = NVME_DIR_STREAMS;

	return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s));
}

static int nvme_configure_directives(struct nvme_ctrl *ctrl)
{
	struct streams_directive_params s;
	int ret;

	if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES))
		return 0;
	if (!streams)
		return 0;

	ret = nvme_enable_streams(ctrl);
	if (ret)
		return ret;

	ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL);
	if (ret)
		goto out_disable_stream;

	ctrl->nssa = le16_to_cpu(s.nssa);
	if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) {
		dev_info(ctrl->device, "too few streams (%u) available\n",
					ctrl->nssa);
		goto out_disable_stream;
	}

	ctrl->nr_streams = min_t(u16, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1);
	dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams);
	return 0;

out_disable_stream:
	nvme_disable_streams(ctrl);
	return ret;
}

/*
 * Check if 'req' has a write hint associated with it. If it does, assign
 * a valid namespace stream to the write.
 */
static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
				     struct request *req, u16 *control,
				     u32 *dsmgmt)
{
	enum rw_hint streamid = req->write_hint;

	if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE)
		streamid = 0;
	else {
		streamid--;
		if (WARN_ON_ONCE(streamid > ctrl->nr_streams))
			return;

		*control |= NVME_RW_DTYPE_STREAMS;
		*dsmgmt |= streamid << 16;
	}

	if (streamid < ARRAY_SIZE(req->q->write_hints))
		req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
}

static inline void nvme_setup_passthrough(struct request *req,
		struct nvme_command *cmd)
{
	memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
	/* passthru commands should let the driver set the SGL flags */
	cmd->common.flags &= ~NVME_CMD_SGL_ALL;
}

static inline void nvme_setup_flush(struct nvme_ns *ns,
		struct nvme_command *cmnd)
{
	cmnd->common.opcode = nvme_cmd_flush;
	cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}

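/*
 * Translate a REQ_OP_DISCARD request into a DSM/Deallocate command: each bio
 * in the request (or the request as a whole when the queue only allows a
 * single discard segment) becomes one LBA range in the payload.
 */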
static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd)
{
	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
	struct nvme_dsm_range *range;
	struct bio *bio;

	/*
	 * Some devices do not consider the DSM 'Number of Ranges' field when
	 * determining how much data to DMA. Always allocate memory for maximum
	 * number of segments to prevent device reading beyond end of buffer.
	 */
	static const size_t alloc_size = sizeof(*range) * NVME_DSM_MAX_RANGES;

	range = kzalloc(alloc_size, GFP_ATOMIC | __GFP_NOWARN);
	if (!range) {
		/*
		 * If we fail to allocate our range, fall back to the
		 * controller discard page. If that's also busy, it's safe to
		 * return busy, as we know we can make progress once that's
		 * freed.
		 */
		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
			return BLK_STS_RESOURCE;

		range = page_address(ns->ctrl->discard_page);
	}

	if (queue_max_discard_segments(req->q) == 1) {
		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);

		range[0].cattr = cpu_to_le32(0);
		range[0].nlb = cpu_to_le32(nlb);
		range[0].slba = cpu_to_le64(slba);
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

			if (n < segments) {
				range[n].cattr = cpu_to_le32(0);
				range[n].nlb = cpu_to_le32(nlb);
				range[n].slba = cpu_to_le64(slba);
			}
			n++;
		}
	}

	if (WARN_ON_ONCE(n != segments)) {
		if (virt_to_page(range) == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(range);
		return BLK_STS_IOERR;
	}

	cmnd->dsm.opcode = nvme_cmd_dsm;
	cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->dsm.nr = cpu_to_le32(segments - 1);
	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = alloc_size;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return BLK_STS_OK;
}

static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd)
{
	if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		return nvme_setup_discard(ns, req, cmnd);

	cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes;
	cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->write_zeroes.slba =
		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->write_zeroes.length =
		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
	if (nvme_ns_has_pi(ns))
		cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
	else
		cmnd->write_zeroes.control = 0;
	return BLK_STS_OK;
}

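/*
 * Build a Read/Write/Zone Append command: the starting sector is converted
 * to a namespace LBA and the length is encoded as a 0-based block count,
 * e.g. a 4KB request on a 512-byte formatted namespace yields nlb = 7.
 * FUA, limited-retry and access-frequency hints, streams and protection
 * information are folded into the control and dsmgmt fields.
 */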
static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_opcode op)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	u16 control = 0;
	u32 dsmgmt = 0;

	if (req->cmd_flags & REQ_FUA)
		control |= NVME_RW_FUA;
	if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
		control |= NVME_RW_LR;

	if (req->cmd_flags & REQ_RAHEAD)
		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;

	cmnd->rw.opcode = op;
	cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);

	if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
		nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);

	if (ns->ms) {
		/*
		 * If formatted with metadata, the block layer always provides
		 * a metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.
		 * Else we enable the PRACT bit for protection information or
		 * set the namespace capacity to zero to prevent any I/O.
		 */
		if (!blk_integrity_rq(req)) {
			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns)))
				return BLK_STS_NOTSUPP;
			control |= NVME_RW_PRINFO_PRACT;
		}

		switch (ns->pi_type) {
		case NVME_NS_DPS_PI_TYPE3:
			control |= NVME_RW_PRINFO_PRCHK_GUARD;
			break;
		case NVME_NS_DPS_PI_TYPE1:
		case NVME_NS_DPS_PI_TYPE2:
			control |= NVME_RW_PRINFO_PRCHK_GUARD |
					NVME_RW_PRINFO_PRCHK_REF;
			if (op == nvme_cmd_zone_append)
				control |= NVME_RW_APPEND_PIREMAP;
			cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
			break;
		}
	}

	cmnd->rw.control = cpu_to_le16(control);
	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
	return 0;
}

void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		struct nvme_ns *ns = req->rq_disk->private_data;
		struct page *page = req->special_vec.bv_page;

		if (page == ns->ctrl->discard_page)
			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
		else
			kfree(page_address(page) + req->special_vec.bv_offset);
	}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd)
{
	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
	blk_status_t ret = BLK_STS_OK;

	if (!(req->rq_flags & RQF_DONTPREP))
		nvme_clear_nvme_request(req);

	memset(cmd, 0, sizeof(*cmd));
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		nvme_setup_passthrough(req, cmd);
		break;
	case REQ_OP_FLUSH:
		nvme_setup_flush(ns, cmd);
		break;
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_RESET:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_RESET);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_OPEN);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_CLOSE);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = nvme_setup_zone_mgmt_send(ns, req, cmd, NVME_ZONE_FINISH);
		break;
	case REQ_OP_WRITE_ZEROES:
		ret = nvme_setup_write_zeroes(ns, req, cmd);
		break;
	case REQ_OP_DISCARD:
		ret = nvme_setup_discard(ns, req, cmd);
		break;
	case REQ_OP_READ:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_read);
		break;
	case REQ_OP_WRITE:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_write);
		break;
	case REQ_OP_ZONE_APPEND:
		ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append);
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		nvme_req(req)->genctr++;
	cmd->common.command_id = nvme_cid(req);
	trace_nvme_setup_cmd(req, cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);

static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	complete(waiting);
}

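/*
 * Execute a request on a polled queue and spin on the completion instead of
 * sleeping, using REQ_HIPRI and blk_poll(); only valid when the queue has
 * QUEUE_FLAG_POLL set.
 */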
static void nvme_execute_rq_polled(struct request_queue *q,
		struct gendisk *bd_disk, struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));

	rq->cmd_flags |= REQ_HIPRI;
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);

	while (!completion_done(&wait)) {
		blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

/*
 * Returns 0 on success. If the result is negative, it's a Linux error code;
 * if the result is positive, it's an NVM Express status code.
 */
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll)
{
	struct request *req;
	int ret;

	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;

	if (buffer && bufflen) {
		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (poll)
		nvme_execute_rq_polled(req->q, NULL, req, at_head);
	else
		blk_execute_rq(req->q, NULL, req, at_head);
	if (result)
		*result = nvme_req(req)->result;
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
 out:
	blk_mq_free_request(req);
	return ret;
}
EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buffer, unsigned bufflen)
{
	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
			NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);

static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf,
		unsigned len, u32 seed, bool write)
{
	struct bio_integrity_payload *bip;
	int ret = -ENOMEM;
	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		goto out;

	ret = -EFAULT;
	if (write && copy_from_user(buf, ubuf, len))
		goto out_free_meta;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto out_free_meta;
	}

	bip->bip_iter.bi_size = len;
	bip->bip_iter.bi_sector = seed;
	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf));
	if (ret == len)
		return buf;
	ret = -ENOMEM;
out_free_meta:
	kfree(buf);
out:
	return ERR_PTR(ret);
}

static u32 nvme_known_admin_effects(u8 opcode)
{
	switch (opcode) {
	case nvme_admin_format_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
			NVME_CMD_EFFECTS_CSE_MASK;
	case nvme_admin_sanitize_nvm:
		return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
	default:
		break;
	}
	return 0;
}

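/*
 * Look up the effects (LBCC, NCC, NIC, CCC, CSE, ...) a command has on the
 * controller or namespace, combining the Commands Supported and Effects log
 * page with a built-in table for well-known admin commands.
 */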
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
	u32 effects = 0;

	if (ns) {
		if (ns->head->effects)
			effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
		if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
			dev_warn(ctrl->device,
				 "IO command:%02x has unhandled effects:%08x\n",
				 opcode, effects);
		return 0;
	}

	if (ctrl->effects)
		effects = le32_to_cpu(ctrl->effects->acs[opcode]);
	effects |= nvme_known_admin_effects(opcode);

	return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);

static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			       u8 opcode)
{
	u32 effects = nvme_command_effects(ctrl, ns, opcode);

	/*
	 * For simplicity, IO to all namespaces is quiesced even if the command
	 * effects say only one namespace is affected.
	 */
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		mutex_lock(&ctrl->scan_lock);
		mutex_lock(&ctrl->subsys->lock);
		nvme_mpath_start_freeze(ctrl->subsys);
		nvme_mpath_wait_freeze(ctrl->subsys);
		nvme_start_freeze(ctrl);
		nvme_wait_freeze(ctrl);
	}
	return effects;
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
{
	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
		nvme_unfreeze(ctrl);
		nvme_mpath_unfreeze(ctrl->subsys);
		mutex_unlock(&ctrl->subsys->lock);
		nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
		mutex_unlock(&ctrl->scan_lock);
	}
	if (effects & NVME_CMD_EFFECTS_CCC)
		nvme_init_identify(ctrl);
	if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
		nvme_queue_scan(ctrl);
		flush_work(&ctrl->scan_work);
	}
}

void nvme_execute_passthru_rq(struct request *rq)
{
	struct nvme_command *cmd = nvme_req(rq)->cmd;
	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
	struct nvme_ns *ns = rq->q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	u32 effects;

	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
	blk_execute_rq(rq->q, disk, rq, 0);
	nvme_passthru_end(ctrl, effects);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);

static int nvme_submit_user_cmd(struct request_queue *q,
		struct nvme_command *cmd, void __user *ubuffer,
		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
		u32 meta_seed, u64 *result, unsigned timeout)
{
	bool write = nvme_is_write(cmd);
	struct nvme_ns *ns = q->queuedata;
	struct gendisk *disk = ns ? ns->disk : NULL;
	struct request *req;
	struct bio *bio = NULL;
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	if (timeout)
		req->timeout = timeout;
	nvme_req(req)->flags |= NVME_REQ_USERCMD;

	if (ubuffer && bufflen) {
		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
				GFP_KERNEL);
		if (ret)
			goto out;
		bio = req->bio;
		bio->bi_disk = disk;
		if (disk && meta_buffer && meta_len) {
			meta = nvme_add_user_metadata(bio, meta_buffer, meta_len,
					meta_seed, write);
			if (IS_ERR(meta)) {
				ret = PTR_ERR(meta);
				goto out_unmap;
			}
			req->cmd_flags |= REQ_INTEGRITY;
		}
	}

	nvme_execute_passthru_rq(req);
	if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else
		ret = nvme_req(req)->status;
	if (result)
		*result = le64_to_cpu(nvme_req(req)->result.u64);
	if (meta && !ret && !write) {
		if (copy_to_user(meta_buffer, meta, meta_len))
			ret = -EFAULT;
	}
	kfree(meta);
 out_unmap:
	if (bio)
		blk_rq_unmap_user(bio);
 out:
	blk_mq_free_request(req);
	return ret;
}

static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
	struct nvme_ctrl *ctrl = rq->end_io_data;
	unsigned long flags;
	bool startka = false;

	blk_mq_free_request(rq);

	if (status) {
		dev_err(ctrl->device,
			"failed nvme_keep_alive_end_io error=%d\n",
				status);
		return;
	}

	ctrl->comp_seen = false;
	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->state == NVME_CTRL_LIVE ||
	    ctrl->state == NVME_CTRL_CONNECTING)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
	struct request *rq;

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
			BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;

	blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);

	return 0;
}

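/*
 * Periodic keep-alive worker.  With traffic-based keep-alive (TBKAS) enabled,
 * command completions seen since the last run count as keep-alive traffic and
 * the timer is simply rearmed; otherwise a Keep Alive command is sent, and
 * failure to even allocate one triggers a controller reset.
 */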
static void nvme_keep_alive_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvme_ctrl, ka_work);
	bool comp_seen = ctrl->comp_seen;

	if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	if (nvme_keep_alive(ctrl)) {
		/* allocation failure, reset the controller */
		dev_err(ctrl->device, "keep-alive failed\n");
		nvme_reset_ctrl(ctrl);
		return;
	}
}

static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	cancel_delayed_work_sync(&ctrl->ka_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_keep_alive);

/*
 * In NVMe 1.0 the CNS field was just a binary controller or namespace
 * flag, thus sending any new CNS opcodes has a big chance of not working.
 * Qemu unfortunately had that bug after reporting a 1.1 version compliance
 * (but not for any later version).
 */
static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl)
{
	if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)
		return ctrl->vs < NVME_VS(1, 2, 0);
	return ctrl->vs < NVME_VS(1, 1, 0);
}

static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.cns = NVME_ID_CNS_CTRL;

	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(dev->admin_q, &c, *id,
			sizeof(struct nvme_id_ctrl));
	if (error)
		kfree(*id);
	return error;
}

static bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
		struct nvme_ns_id_desc *cur, bool *csi_seen)
{
	const char *warn_str = "ctrl returned bogus length:";
	void *data = cur;

	switch (cur->nidt) {
	case NVME_NIDT_EUI64:
		if (cur->nidl != NVME_NIDT_EUI64_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_EUI64\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_EUI64_LEN;
		memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
		return NVME_NIDT_EUI64_LEN;
	case NVME_NIDT_NGUID:
		if (cur->nidl != NVME_NIDT_NGUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_NGUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_NGUID_LEN;
		memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
		return NVME_NIDT_NGUID_LEN;
	case NVME_NIDT_UUID:
		if (cur->nidl != NVME_NIDT_UUID_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_UUID\n",
				 warn_str, cur->nidl);
			return -1;
		}
		if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
			return NVME_NIDT_UUID_LEN;
		uuid_copy(&ids->uuid, data + sizeof(*cur));
		return NVME_NIDT_UUID_LEN;
	case NVME_NIDT_CSI:
		if (cur->nidl != NVME_NIDT_CSI_LEN) {
			dev_warn(ctrl->device, "%s %d for NVME_NIDT_CSI\n",
				 warn_str, cur->nidl);
			return -1;
		}
		memcpy(&ids->csi, data + sizeof(*cur), NVME_NIDT_CSI_LEN);
		*csi_seen = true;
		return NVME_NIDT_CSI_LEN;
	default:
		/* Skip unknown types */
		return cur->nidl;
	}
}

static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
		struct nvme_ns_ids *ids)
{
	struct nvme_command c = { };
	bool csi_seen = false;
	int status, pos, len;
	void *data;

	if (ctrl->vs < NVME_VS(1, 3, 0) && !nvme_multi_css(ctrl))
		return 0;
	if (ctrl->quirks & NVME_QUIRK_NO_NS_DESC_LIST)
		return 0;

	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS_DESC_LIST;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data,
				      NVME_IDENTIFY_DATA_SIZE);
	if (status) {
		dev_warn(ctrl->device,
			"Identify Descriptors failed (%d)\n", status);
		goto free_data;
	}

	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;

		len = nvme_process_ns_desc(ctrl, ids, cur, &csi_seen);
		if (len < 0)
			break;

		len += sizeof(*cur);
	}

	if (nvme_multi_css(ctrl) && !csi_seen) {
		dev_warn(ctrl->device, "Command set not reported for nsid:%d\n",
			 nsid);
		status = -EINVAL;
	}

free_data:
	kfree(data);
	return status;
}

static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
			struct nvme_ns_ids *ids, struct nvme_id_ns **id)
{
	struct nvme_command c = { };
	int error;

	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.cns = NVME_ID_CNS_NS;

	*id = kmalloc(sizeof(**id), GFP_KERNEL);
	if (!*id)
		return -ENOMEM;

	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id));
	if (error) {
		dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
		goto out_free_id;
	}

	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
	if ((*id)->ncap == 0) /* namespace not allocated or attached */
		goto out_free_id;

	if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
		dev_info(ctrl->device,
			 "Ignoring bogus Namespace Identifiers\n");
	} else {
		if (ctrl->vs >= NVME_VS(1, 1, 0) &&
		    !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
			memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
		if (ctrl->vs >= NVME_VS(1, 2, 0) &&
		    !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
			memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
	}

	return 0;

out_free_id:
	kfree(*id);
	return error;
}

static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
	union nvme_result res = { 0 };
	struct nvme_command c;
	int ret;

	memset(&c, 0, sizeof(c));
	c.features.opcode = op;
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
			buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
	if (ret >= 0 && result)
		*result = le32_to_cpu(res.u32);
	return ret;
}

int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_set_features);

int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result)
{
	return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer,
			     buflen, result);
}
EXPORT_SYMBOL_GPL(nvme_get_features);

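/*
 * Request *count I/O queues via the Number of Queues feature.  Both the
 * requested and returned values are 0-based counts of submission and
 * completion queues packed into one dword: asking for 8 queues sends
 * 0x00070007, and a controller answering 0x000f0003 grants 4 SQs and 16 CQs,
 * so *count is clamped to min(8, 4) = 4.
 */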
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
{
	u32 q_count = (*count - 1) | ((*count - 1) << 16);
	u32 result;
	int status, nr_io_queues;

	status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
			&result);
	if (status < 0)
		return status;

	/*
	 * Degraded controllers might return an error when setting the queue
	 * count. We still want to be able to bring them online and offer
	 * access to the admin queue, as that might be the only way to fix
	 * them up.
	 */
	if (status > 0) {
		dev_err(ctrl->device, "Could not set queue count (%d)\n", status);
		*count = 0;
	} else {
		nr_io_queues = min(result & 0xffff, result >> 16) + 1;
		*count = min(*count, nr_io_queues);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_set_queue_count);

#define NVME_AEN_SUPPORTED \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | \
	 NVME_AEN_CFG_ANA_CHANGE | NVME_AEN_CFG_DISC_CHANGE)

static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
	int status;

	if (!supported_aens)
		return;

	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
			NULL, 0, &result);
	if (status)
		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
			 supported_aens);

	queue_work(nvme_wq, &ctrl->async_event_work);
}

/*
 * Convert integer values from ioctl structures to user pointers, silently
 * ignoring the upper bits in the compat case to match behaviour of 32-bit
 * kernels.
 */
static void __user *nvme_to_user_ptr(uintptr_t ptrval)
{
	if (in_compat_syscall())
		ptrval = (compat_uptr_t)ptrval;
	return (void __user *)ptrval;
}

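/*
 * Handle NVME_IOCTL_SUBMIT_IO: translate a struct nvme_user_io from user
 * space into a read/write/compare command, validating the opcode and working
 * out whether metadata is passed as a separate buffer or interleaved with the
 * data (extended LBAs) before submitting it synchronously.
 */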
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
	struct nvme_user_io io;
	struct nvme_command c;
	unsigned length, meta_len;
	void __user *metadata;

	if (copy_from_user(&io, uio, sizeof(io)))
		return -EFAULT;
	if (io.flags)
		return -EINVAL;

	switch (io.opcode) {
	case nvme_cmd_write:
	case nvme_cmd_read:
	case nvme_cmd_compare:
		break;
	default:
		return -EINVAL;
	}

	length = (io.nblocks + 1) << ns->lba_shift;

	if ((io.control & NVME_RW_PRINFO_PRACT) &&
	    ns->ms == sizeof(struct t10_pi_tuple)) {
		/*
		 * Protection information is stripped/inserted by the
		 * controller.
		 */
		if (nvme_to_user_ptr(io.metadata))
			return -EINVAL;
		meta_len = 0;
		metadata = NULL;
	} else {
		meta_len = (io.nblocks + 1) * ns->ms;
		metadata = nvme_to_user_ptr(io.metadata);
	}

	if (ns->features & NVME_NS_EXT_LBAS) {
		length += meta_len;
		meta_len = 0;
	} else if (meta_len) {
		if ((io.metadata & 3) || !io.metadata)
			return -EINVAL;
	}

	memset(&c, 0, sizeof(c));
	c.rw.opcode = io.opcode;
	c.rw.flags = io.flags;
	c.rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.rw.slba = cpu_to_le64(io.slba);
	c.rw.length = cpu_to_le16(io.nblocks);
	c.rw.control = cpu_to_le16(io.control);
	c.rw.dsmgmt = cpu_to_le32(io.dsmgmt);
	c.rw.reftag = cpu_to_le32(io.reftag);
	c.rw.apptag = cpu_to_le16(io.apptag);
	c.rw.appmask = cpu_to_le16(io.appmask);

	return nvme_submit_user_cmd(ns->queue, &c,
			nvme_to_user_ptr(io.addr), length,
			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}

static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd __user *ucmd)
{
	struct nvme_passthru_cmd cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	u64 result;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &result, timeout);

	if (status >= 0) {
		if (put_user(result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			struct nvme_passthru_cmd64 __user *ucmd)
{
	struct nvme_passthru_cmd64 cmd;
	struct nvme_command c;
	unsigned timeout = 0;
	int status;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
		return -EFAULT;
	if (cmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = cmd.opcode;
	c.common.flags = cmd.flags;
	c.common.nsid = cpu_to_le32(cmd.nsid);
	c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
	c.common.cdw10 = cpu_to_le32(cmd.cdw10);
	c.common.cdw11 = cpu_to_le32(cmd.cdw11);
	c.common.cdw12 = cpu_to_le32(cmd.cdw12);
	c.common.cdw13 = cpu_to_le32(cmd.cdw13);
	c.common.cdw14 = cpu_to_le32(cmd.cdw14);
	c.common.cdw15 = cpu_to_le32(cmd.cdw15);

	if (cmd.timeout_ms)
		timeout = msecs_to_jiffies(cmd.timeout_ms);

	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
			nvme_to_user_ptr(cmd.addr), cmd.data_len,
			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
			0, &cmd.result, timeout);

	if (status >= 0) {
		if (put_user(cmd.result, &ucmd->result))
			return -EFAULT;
	}

	return status;
}

/*
 * Issue ioctl requests on the first available path. Note that unlike normal
 * block layer requests we will not retry failed requests on another
 * controller.
 */
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx)
{
#ifdef CONFIG_NVME_MULTIPATH
	if (disk->fops == &nvme_ns_head_ops) {
		struct nvme_ns *ns;

		*head = disk->private_data;
		*srcu_idx = srcu_read_lock(&(*head)->srcu);
		ns = nvme_find_path(*head);
		if (!ns)
			srcu_read_unlock(&(*head)->srcu, *srcu_idx);
		return ns;
	}
#endif
	*head = NULL;
	*srcu_idx = -1;
	return disk->private_data;
}

void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
{
	if (head)
		srcu_read_unlock(&head->srcu, idx);
}

static bool is_ctrl_ioctl(unsigned int cmd)
{
	if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
		return true;
	if (is_sed_ioctl(cmd))
		return true;
	return false;
}

static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
				  void __user *argp,
				  struct nvme_ns_head *head,
				  int srcu_idx)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	int ret;

	nvme_get_ctrl(ns->ctrl);
	nvme_put_ns_from_disk(head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ADMIN_CMD:
		ret = nvme_user_cmd(ctrl, NULL, argp);
		break;
	case NVME_IOCTL_ADMIN64_CMD:
		ret = nvme_user_cmd64(ctrl, NULL, argp);
		break;
	default:
		ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
		break;
	}
	nvme_put_ctrl(ctrl);
	return ret;
}

static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	struct nvme_ns_head *head = NULL;
	void __user *argp = (void __user *)arg;
	struct nvme_ns *ns;
	int srcu_idx, ret;

	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
	if (unlikely(!ns))
		return -EWOULDBLOCK;

	/*
	 * Handle ioctls that apply to the controller instead of the namespace
	 * separately and drop the ns SRCU reference early. This avoids a
	 * deadlock when deleting namespaces using the passthrough interface.
	 */
	if (is_ctrl_ioctl(cmd))
		return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);

	switch (cmd) {
	case NVME_IOCTL_ID:
		force_successful_syscall_return();
		ret = ns->head->ns_id;
		break;
	case NVME_IOCTL_IO_CMD:
		ret = nvme_user_cmd(ns->ctrl, ns, argp);
		break;
	case NVME_IOCTL_SUBMIT_IO:
		ret = nvme_submit_io(ns, argp);
		break;
	case NVME_IOCTL_IO64_CMD:
		ret = nvme_user_cmd64(ns->ctrl, ns, argp);
		break;
	default:
		if (ns->ndev)
			ret = nvme_nvm_ioctl(ns, cmd, arg);
		else
			ret = -ENOTTY;
	}

	nvme_put_ns_from_disk(head, srcu_idx);
	return ret;
}

#ifdef CONFIG_COMPAT
struct nvme_user_io32 {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
} __attribute__((__packed__));

#define NVME_IOCTL_SUBMIT_IO32	_IOW('N', 0x42, struct nvme_user_io32)

static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg)
{
	/*
	 * Corresponds to the difference of NVME_IOCTL_SUBMIT_IO
	 * between 32 bit programs and 64 bit kernel.
	 * The cause is that the results of sizeof(struct nvme_user_io),
	 * which is used to define NVME_IOCTL_SUBMIT_IO,
	 * are not the same between 32 bit and 64 bit compilers.
	 * NVME_IOCTL_SUBMIT_IO32 is for a 64 bit kernel handling
	 * NVME_IOCTL_SUBMIT_IO issued from 32 bit programs.
	 * Other IOCTL numbers are the same between 32 bit and 64 bit,
	 * so there is nothing to do regarding other IOCTL numbers.
	 */
	if (cmd == NVME_IOCTL_SUBMIT_IO32)
		return nvme_ioctl(bdev, mode, NVME_IOCTL_SUBMIT_IO, arg);

	return nvme_ioctl(bdev, mode, cmd, arg);
}
#else
#define nvme_compat_ioctl	NULL
#endif /* CONFIG_COMPAT */

static int nvme_open(struct block_device *bdev, fmode_t mode)
{
	struct nvme_ns *ns = bdev->bd_disk->private_data;

#ifdef CONFIG_NVME_MULTIPATH
	/* should never be called due to GENHD_FL_HIDDEN */
	if (WARN_ON_ONCE(ns->head->disk))
		goto fail;
#endif
	if (!kref_get_unless_zero(&ns->kref))
		goto fail;
	if (!try_module_get(ns->ctrl->ops->module))
		goto fail_put_ns;

	return 0;

fail_put_ns:
	nvme_put_ns(ns);
fail:
	return -ENXIO;
}

static void nvme_release(struct gendisk *disk, fmode_t mode)
{
	struct nvme_ns *ns = disk->private_data;

	module_put(ns->ctrl->ops->module);
	nvme_put_ns(ns);
}

static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bdev->bd_disk) >> 11;
	return 0;
}

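/*
 * Register a block layer integrity profile matching the namespace's T10 PI
 * type so the kernel can generate and verify protection information; without
 * CONFIG_BLK_DEV_INTEGRITY this is a no-op.
 */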
#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
	struct blk_integrity integrity;

	memset(&integrity, 0, sizeof(integrity));
	switch (pi_type) {
	case NVME_NS_DPS_PI_TYPE3:
		integrity.profile = &t10_pi_type3_crc;
		integrity.tag_size = sizeof(u16) + sizeof(u32);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	case NVME_NS_DPS_PI_TYPE1:
	case NVME_NS_DPS_PI_TYPE2:
		integrity.profile = &t10_pi_type1_crc;
		integrity.tag_size = sizeof(u16);
		integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
		break;
	default:
		integrity.profile = NULL;
		break;
	}
	integrity.tuple_size = ms;
	blk_integrity_register(disk, &integrity);
	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
}
#else
static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type,
				u32 max_integrity_segments)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
{
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct request_queue *queue = disk->queue;
	u32 size = queue_logical_block_size(queue);

	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
		return;
	}

	if (ctrl->nr_streams && ns->sws && ns->sgs)
		size *= ns->sws * ns->sgs;

	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
			NVME_DSM_MAX_RANGES);

	queue->limits.discard_alignment = 0;
	queue->limits.discard_granularity = size;

	/* If discard is already enabled, don't reset queue limits */
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
		return;

	blk_queue_max_discard_sectors(queue, UINT_MAX);
	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);

	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
}

/*
 * Even though the NVMe spec explicitly states that MDTS is not applicable to
 * the write-zeroes command, we are cautious and limit the size to the
 * controller's max_hw_sectors value, which is based on the MDTS field and
 * possibly other limiting factors.
 */
static void nvme_config_write_zeroes(struct request_queue *q,
		struct nvme_ctrl *ctrl)
{
	if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
	    !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
		blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
}

static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
{
	return !uuid_is_null(&ids->uuid) ||
		memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) ||
		memchr_inv(ids->eui64, 0, sizeof(ids->eui64));
}

static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
{
	return uuid_equal(&a->uuid, &b->uuid) &&
		memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 &&
		memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0 &&
		a->csi == b->csi;
}

static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
				 u32 *phys_bs, u32 *io_opt)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		*phys_bs = ns->sws * (1 << ns->lba_shift);
		if (ns->sgs)
			*io_opt = *phys_bs * ns->sgs;
	}

	return 0;
}

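/*
 * Work out the namespace's metadata configuration from Identify Namespace:
 * record the per-block metadata size, whether it is T10 protection
 * information, and whether it is carried in a separate buffer or as part of
 * an extended LBA.
 */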
1990 */ 1991 ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); 1992 if (ns->ms == sizeof(struct t10_pi_tuple)) 1993 ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; 1994 else 1995 ns->pi_type = 0; 1996 1997 ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 1998 if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) 1999 return 0; 2000 if (ctrl->ops->flags & NVME_F_FABRICS) { 2001 /* 2002 * The NVMe over Fabrics specification only supports metadata as 2003 * part of the extended data LBA. We rely on HCA/HBA support to 2004 * remap the separate metadata buffer from the block layer. 2005 */ 2006 if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) 2007 return -EINVAL; 2008 if (ctrl->max_integrity_segments) 2009 ns->features |= 2010 (NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); 2011 } else { 2012 /* 2013 * For PCIe controllers, we can't easily remap the separate 2014 * metadata buffer from the block layer and thus require a 2015 * separate metadata buffer for block layer metadata/PI support. 2016 * We allow extended LBAs for the passthrough interface, though. 2017 */ 2018 if (id->flbas & NVME_NS_FLBAS_META_EXT) 2019 ns->features |= NVME_NS_EXT_LBAS; 2020 else 2021 ns->features |= NVME_NS_METADATA_SUPPORTED; 2022 } 2023 2024 return 0; 2025} 2026 2027static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, 2028 struct request_queue *q) 2029{ 2030 bool vwc = ctrl->vwc & NVME_CTRL_VWC_PRESENT; 2031 2032 if (ctrl->max_hw_sectors) { 2033 u32 max_segments = 2034 (ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> 9)) + 1; 2035 2036 max_segments = min_not_zero(max_segments, ctrl->max_segments); 2037 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); 2038 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); 2039 } 2040 blk_queue_virt_boundary(q, NVME_CTRL_PAGE_SIZE - 1); 2041 blk_queue_dma_alignment(q, 3); 2042 blk_queue_write_cache(q, vwc, vwc); 2043} 2044 2045static void nvme_update_disk_info(struct gendisk *disk, 2046 struct nvme_ns *ns, struct nvme_id_ns *id) 2047{ 2048 sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze)); 2049 unsigned short bs = 1 << ns->lba_shift; 2050 u32 atomic_bs, phys_bs, io_opt = 0; 2051 2052 /* 2053 * The block layer can't support LBA sizes larger than the page size 2054 * or smaller than a sector size yet, so catch this early and don't 2055 * allow block I/O. 2056 */ 2057 if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) { 2058 capacity = 0; 2059 bs = (1 << 9); 2060 } 2061 2062 blk_integrity_unregister(disk); 2063 2064 atomic_bs = phys_bs = bs; 2065 nvme_setup_streams_ns(ns->ctrl, ns, &phys_bs, &io_opt); 2066 if (id->nabo == 0) { 2067 /* 2068 * Bit 1 indicates whether NAWUPF is defined for this namespace 2069 * and whether it should be used instead of AWUPF. If NAWUPF == 2070 * 0 then AWUPF must be used instead. 2071 */ 2072 if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) 2073 atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; 2074 else 2075 atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; 2076 } 2077 2078 if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { 2079 /* NPWG = Namespace Preferred Write Granularity */ 2080 phys_bs = bs * (1 + le16_to_cpu(id->npwg)); 2081 /* NOWS = Namespace Optimal Write Size */ 2082 io_opt = bs * (1 + le16_to_cpu(id->nows)); 2083 } 2084 2085 blk_queue_logical_block_size(disk->queue, bs); 2086 /* 2087 * Linux filesystems assume writing a single physical block is 2088 * an atomic operation. 
Hence limit the physical block size to the 2089 * value of the Atomic Write Unit Power Fail parameter. 2090 */ 2091 blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); 2092 blk_queue_io_min(disk->queue, phys_bs); 2093 blk_queue_io_opt(disk->queue, io_opt); 2094 2095 /* 2096 * Register a metadata profile for PI, or the plain non-integrity NVMe 2097 * metadata masquerading as Type 0 if supported, otherwise reject block 2098 * I/O to namespaces with metadata except when the namespace supports 2099 * PI, as it can strip/insert in that case. 2100 */ 2101 if (ns->ms) { 2102 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && 2103 (ns->features & NVME_NS_METADATA_SUPPORTED)) 2104 nvme_init_integrity(disk, ns->ms, ns->pi_type, 2105 ns->ctrl->max_integrity_segments); 2106 else if (!nvme_ns_has_pi(ns)) 2107 capacity = 0; 2108 } 2109 2110 set_capacity_revalidate_and_notify(disk, capacity, false); 2111 2112 nvme_config_discard(disk, ns); 2113 nvme_config_write_zeroes(disk->queue, ns->ctrl); 2114 2115 if (id->nsattr & NVME_NS_ATTR_RO) 2116 set_disk_ro(disk, true); 2117} 2118 2119static inline bool nvme_first_scan(struct gendisk *disk) 2120{ 2121 /* nvme_alloc_ns() scans the disk prior to adding it */ 2122 return !(disk->flags & GENHD_FL_UP); 2123} 2124 2125static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id) 2126{ 2127 struct nvme_ctrl *ctrl = ns->ctrl; 2128 u32 iob; 2129 2130 if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && 2131 is_power_of_2(ctrl->max_hw_sectors)) 2132 iob = ctrl->max_hw_sectors; 2133 else 2134 iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob)); 2135 2136 if (!iob) 2137 return; 2138 2139 if (!is_power_of_2(iob)) { 2140 if (nvme_first_scan(ns->disk)) 2141 pr_warn("%s: ignoring unaligned IO boundary:%u\n", 2142 ns->disk->disk_name, iob); 2143 return; 2144 } 2145 2146 if (blk_queue_is_zoned(ns->disk->queue)) { 2147 if (nvme_first_scan(ns->disk)) 2148 pr_warn("%s: ignoring zoned namespace IO boundary\n", 2149 ns->disk->disk_name); 2150 return; 2151 } 2152 2153 blk_queue_chunk_sectors(ns->queue, iob); 2154} 2155 2156static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id) 2157{ 2158 unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; 2159 int ret; 2160 2161 blk_mq_freeze_queue(ns->disk->queue); 2162 ns->lba_shift = id->lbaf[lbaf].ds; 2163 nvme_set_queue_limits(ns->ctrl, ns->queue); 2164 2165 if (ns->head->ids.csi == NVME_CSI_ZNS) { 2166 ret = nvme_update_zone_info(ns, lbaf); 2167 if (ret) 2168 goto out_unfreeze; 2169 } 2170 2171 ret = nvme_configure_metadata(ns, id); 2172 if (ret) 2173 goto out_unfreeze; 2174 nvme_set_chunk_sectors(ns, id); 2175 nvme_update_disk_info(ns->disk, ns, id); 2176 blk_mq_unfreeze_queue(ns->disk->queue); 2177 2178 if (blk_queue_is_zoned(ns->queue)) { 2179 ret = nvme_revalidate_zones(ns); 2180 if (ret && !nvme_first_scan(ns->disk)) 2181 return ret; 2182 } 2183 2184#ifdef CONFIG_NVME_MULTIPATH 2185 if (ns->head->disk) { 2186 blk_mq_freeze_queue(ns->head->disk->queue); 2187 nvme_update_disk_info(ns->head->disk, ns, id); 2188 blk_stack_limits(&ns->head->disk->queue->limits, 2189 &ns->queue->limits, 0); 2190 blk_queue_update_readahead(ns->head->disk->queue); 2191 nvme_update_bdev_size(ns->head->disk); 2192 blk_mq_unfreeze_queue(ns->head->disk->queue); 2193 } 2194#endif 2195 return 0; 2196 2197out_unfreeze: 2198 blk_mq_unfreeze_queue(ns->disk->queue); 2199 return ret; 2200} 2201 2202static char nvme_pr_type(enum pr_type type) 2203{ 2204 switch (type) { 2205 case PR_WRITE_EXCLUSIVE: 2206 return 1; 2207 case 
PR_EXCLUSIVE_ACCESS: 2208 return 2; 2209 case PR_WRITE_EXCLUSIVE_REG_ONLY: 2210 return 3; 2211 case PR_EXCLUSIVE_ACCESS_REG_ONLY: 2212 return 4; 2213 case PR_WRITE_EXCLUSIVE_ALL_REGS: 2214 return 5; 2215 case PR_EXCLUSIVE_ACCESS_ALL_REGS: 2216 return 6; 2217 default: 2218 return 0; 2219 } 2220}; 2221 2222static int nvme_pr_command(struct block_device *bdev, u32 cdw10, 2223 u64 key, u64 sa_key, u8 op) 2224{ 2225 struct nvme_ns_head *head = NULL; 2226 struct nvme_ns *ns; 2227 struct nvme_command c; 2228 int srcu_idx, ret; 2229 u8 data[16] = { 0, }; 2230 2231 ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); 2232 if (unlikely(!ns)) 2233 return -EWOULDBLOCK; 2234 2235 put_unaligned_le64(key, &data[0]); 2236 put_unaligned_le64(sa_key, &data[8]); 2237 2238 memset(&c, 0, sizeof(c)); 2239 c.common.opcode = op; 2240 c.common.nsid = cpu_to_le32(ns->head->ns_id); 2241 c.common.cdw10 = cpu_to_le32(cdw10); 2242 2243 ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); 2244 nvme_put_ns_from_disk(head, srcu_idx); 2245 return ret; 2246} 2247 2248static int nvme_pr_register(struct block_device *bdev, u64 old, 2249 u64 new, unsigned flags) 2250{ 2251 u32 cdw10; 2252 2253 if (flags & ~PR_FL_IGNORE_KEY) 2254 return -EOPNOTSUPP; 2255 2256 cdw10 = old ? 2 : 0; 2257 cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; 2258 cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ 2259 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); 2260} 2261 2262static int nvme_pr_reserve(struct block_device *bdev, u64 key, 2263 enum pr_type type, unsigned flags) 2264{ 2265 u32 cdw10; 2266 2267 if (flags & ~PR_FL_IGNORE_KEY) 2268 return -EOPNOTSUPP; 2269 2270 cdw10 = nvme_pr_type(type) << 8; 2271 cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); 2272 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); 2273} 2274 2275static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, 2276 enum pr_type type, bool abort) 2277{ 2278 u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); 2279 2280 return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); 2281} 2282 2283static int nvme_pr_clear(struct block_device *bdev, u64 key) 2284{ 2285 u32 cdw10 = 1 | (key ? 0 : 1 << 3); 2286 2287 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2288} 2289 2290static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) 2291{ 2292 u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 
0 : 1 << 3); 2293 2294 return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); 2295} 2296 2297static const struct pr_ops nvme_pr_ops = { 2298 .pr_register = nvme_pr_register, 2299 .pr_reserve = nvme_pr_reserve, 2300 .pr_release = nvme_pr_release, 2301 .pr_preempt = nvme_pr_preempt, 2302 .pr_clear = nvme_pr_clear, 2303}; 2304 2305#ifdef CONFIG_BLK_SED_OPAL 2306int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, 2307 bool send) 2308{ 2309 struct nvme_ctrl *ctrl = data; 2310 struct nvme_command cmd; 2311 2312 memset(&cmd, 0, sizeof(cmd)); 2313 if (send) 2314 cmd.common.opcode = nvme_admin_security_send; 2315 else 2316 cmd.common.opcode = nvme_admin_security_recv; 2317 cmd.common.nsid = 0; 2318 cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); 2319 cmd.common.cdw11 = cpu_to_le32(len); 2320 2321 return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 2322 ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false); 2323} 2324EXPORT_SYMBOL_GPL(nvme_sec_submit); 2325#endif /* CONFIG_BLK_SED_OPAL */ 2326 2327static const struct block_device_operations nvme_fops = { 2328 .owner = THIS_MODULE, 2329 .ioctl = nvme_ioctl, 2330 .compat_ioctl = nvme_compat_ioctl, 2331 .open = nvme_open, 2332 .release = nvme_release, 2333 .getgeo = nvme_getgeo, 2334 .report_zones = nvme_report_zones, 2335 .pr_ops = &nvme_pr_ops, 2336}; 2337 2338#ifdef CONFIG_NVME_MULTIPATH 2339static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) 2340{ 2341 struct nvme_ns_head *head = bdev->bd_disk->private_data; 2342 2343 if (!kref_get_unless_zero(&head->ref)) 2344 return -ENXIO; 2345 return 0; 2346} 2347 2348static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) 2349{ 2350 nvme_put_ns_head(disk->private_data); 2351} 2352 2353const struct block_device_operations nvme_ns_head_ops = { 2354 .owner = THIS_MODULE, 2355 .submit_bio = nvme_ns_head_submit_bio, 2356 .open = nvme_ns_head_open, 2357 .release = nvme_ns_head_release, 2358 .ioctl = nvme_ioctl, 2359 .compat_ioctl = nvme_compat_ioctl, 2360 .getgeo = nvme_getgeo, 2361 .report_zones = nvme_report_zones, 2362 .pr_ops = &nvme_pr_ops, 2363}; 2364#endif /* CONFIG_NVME_MULTIPATH */ 2365 2366static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) 2367{ 2368 unsigned long timeout = 2369 ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; 2370 u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; 2371 int ret; 2372 2373 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2374 if (csts == ~0) 2375 return -ENODEV; 2376 if ((csts & NVME_CSTS_RDY) == bit) 2377 break; 2378 2379 usleep_range(1000, 2000); 2380 if (fatal_signal_pending(current)) 2381 return -EINTR; 2382 if (time_after(jiffies, timeout)) { 2383 dev_err(ctrl->device, 2384 "Device not ready; aborting %s, CSTS=0x%x\n", 2385 enabled ? "initialisation" : "reset", csts); 2386 return -ENODEV; 2387 } 2388 } 2389 2390 return ret; 2391} 2392 2393/* 2394 * If the device has been passed off to us in an enabled state, just clear 2395 * the enabled bit. The spec says we should set the 'shutdown notification 2396 * bits', but doing so may cause the device to complete commands to the 2397 * admin queue ... and we don't know what memory that might be pointing at! 
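 * Instead we only clear CC.EN and wait for CSTS.RDY to drop back to 0,
 * bounded by the timeout the controller advertises in CAP.TO.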
2398 */ 2399int nvme_disable_ctrl(struct nvme_ctrl *ctrl) 2400{ 2401 int ret; 2402 2403 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2404 ctrl->ctrl_config &= ~NVME_CC_ENABLE; 2405 2406 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2407 if (ret) 2408 return ret; 2409 2410 if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) 2411 msleep(NVME_QUIRK_DELAY_AMOUNT); 2412 2413 return nvme_wait_ready(ctrl, ctrl->cap, false); 2414} 2415EXPORT_SYMBOL_GPL(nvme_disable_ctrl); 2416 2417int nvme_enable_ctrl(struct nvme_ctrl *ctrl) 2418{ 2419 unsigned dev_page_min; 2420 int ret; 2421 2422 ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); 2423 if (ret) { 2424 dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); 2425 return ret; 2426 } 2427 dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; 2428 2429 if (NVME_CTRL_PAGE_SHIFT < dev_page_min) { 2430 dev_err(ctrl->device, 2431 "Minimum device page size %u too large for host (%u)\n", 2432 1 << dev_page_min, 1 << NVME_CTRL_PAGE_SHIFT); 2433 return -ENODEV; 2434 } 2435 2436 if (NVME_CAP_CSS(ctrl->cap) & NVME_CAP_CSS_CSI) 2437 ctrl->ctrl_config = NVME_CC_CSS_CSI; 2438 else 2439 ctrl->ctrl_config = NVME_CC_CSS_NVM; 2440 ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; 2441 ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; 2442 ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; 2443 ctrl->ctrl_config |= NVME_CC_ENABLE; 2444 2445 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2446 if (ret) 2447 return ret; 2448 return nvme_wait_ready(ctrl, ctrl->cap, true); 2449} 2450EXPORT_SYMBOL_GPL(nvme_enable_ctrl); 2451 2452int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) 2453{ 2454 unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); 2455 u32 csts; 2456 int ret; 2457 2458 ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; 2459 ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; 2460 2461 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); 2462 if (ret) 2463 return ret; 2464 2465 while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { 2466 if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) 2467 break; 2468 2469 msleep(100); 2470 if (fatal_signal_pending(current)) 2471 return -EINTR; 2472 if (time_after(jiffies, timeout)) { 2473 dev_err(ctrl->device, 2474 "Device shutdown incomplete; abort shutdown\n"); 2475 return -ENODEV; 2476 } 2477 } 2478 2479 return ret; 2480} 2481EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); 2482 2483static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) 2484{ 2485 __le64 ts; 2486 int ret; 2487 2488 if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) 2489 return 0; 2490 2491 ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); 2492 ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), 2493 NULL); 2494 if (ret) 2495 dev_warn_once(ctrl->device, 2496 "could not set timestamp (%d)\n", ret); 2497 return ret; 2498} 2499 2500static int nvme_configure_acre(struct nvme_ctrl *ctrl) 2501{ 2502 struct nvme_feat_host_behavior *host; 2503 int ret; 2504 2505 /* Don't bother enabling the feature if retry delay is not reported */ 2506 if (!ctrl->crdt[0]) 2507 return 0; 2508 2509 host = kzalloc(sizeof(*host), GFP_KERNEL); 2510 if (!host) 2511 return 0; 2512 2513 host->acre = NVME_ENABLE_ACRE; 2514 ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, 2515 host, sizeof(*host), NULL); 2516 kfree(host); 2517 return ret; 2518} 2519 2520static int nvme_configure_apst(struct nvme_ctrl *ctrl) 2521{ 2522 /* 2523 * APST (Autonomous Power State Transition) lets us 
program a 2524 * table of power state transitions that the controller will 2525 * perform automatically. We configure it with a simple 2526 * heuristic: we are willing to spend at most 2% of the time 2527 * transitioning between power states. Therefore, when running 2528 * in any given state, we will enter the next lower-power 2529 * non-operational state after waiting 50 * (enlat + exlat) 2530 * microseconds, as long as that state's exit latency is under 2531 * the requested maximum latency. 2532 * 2533 * We will not autonomously enter any non-operational state for 2534 * which the total latency exceeds ps_max_latency_us. Users 2535 * can set ps_max_latency_us to zero to turn off APST. 2536 */ 2537 2538 unsigned apste; 2539 struct nvme_feat_auto_pst *table; 2540 u64 max_lat_us = 0; 2541 int max_ps = -1; 2542 int ret; 2543 2544 /* 2545 * If APST isn't supported or if we haven't been initialized yet, 2546 * then don't do anything. 2547 */ 2548 if (!ctrl->apsta) 2549 return 0; 2550 2551 if (ctrl->npss > 31) { 2552 dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); 2553 return 0; 2554 } 2555 2556 table = kzalloc(sizeof(*table), GFP_KERNEL); 2557 if (!table) 2558 return 0; 2559 2560 if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { 2561 /* Turn off APST. */ 2562 apste = 0; 2563 dev_dbg(ctrl->device, "APST disabled\n"); 2564 } else { 2565 __le64 target = cpu_to_le64(0); 2566 int state; 2567 2568 /* 2569 * Walk through all states from lowest- to highest-power. 2570 * According to the spec, lower-numbered states use more 2571 * power. NPSS, despite the name, is the index of the 2572 * lowest-power state, not the number of states. 2573 */ 2574 for (state = (int)ctrl->npss; state >= 0; state--) { 2575 u64 total_latency_us, exit_latency_us, transition_ms; 2576 2577 if (target) 2578 table->entries[state] = target; 2579 2580 /* 2581 * Don't allow transitions to the deepest state 2582 * if it's quirked off. 2583 */ 2584 if (state == ctrl->npss && 2585 (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) 2586 continue; 2587 2588 /* 2589 * Is this state a useful non-operational state for 2590 * higher-power states to autonomously transition to? 2591 */ 2592 if (!(ctrl->psd[state].flags & 2593 NVME_PS_FLAGS_NON_OP_STATE)) 2594 continue; 2595 2596 exit_latency_us = 2597 (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 2598 if (exit_latency_us > ctrl->ps_max_latency_us) 2599 continue; 2600 2601 total_latency_us = 2602 exit_latency_us + 2603 le32_to_cpu(ctrl->psd[state].entry_lat); 2604 2605 /* 2606 * This state is good. Use it as the APST idle 2607 * target for higher power states. 
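			 *
			 * Each APST table entry encodes the Idle Transition
			 * Power State in bits 7:3 and the Idle Time Prior to
			 * Transition, in milliseconds, in bits 31:8, hence the
			 * (state << 3) | (transition_ms << 8) below and the
			 * cap at 2^24 - 1 ms.  The idle time is
			 * 50 * (enlat + exlat) converted to milliseconds and
			 * rounded up, i.e. (total_latency_us + 19) / 20; e.g.
			 * enlat + exlat = 10000 us yields a 500 ms idle
			 * timeout.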
2608 */ 2609 transition_ms = total_latency_us + 19; 2610 do_div(transition_ms, 20); 2611 if (transition_ms > (1 << 24) - 1) 2612 transition_ms = (1 << 24) - 1; 2613 2614 target = cpu_to_le64((state << 3) | 2615 (transition_ms << 8)); 2616 2617 if (max_ps == -1) 2618 max_ps = state; 2619 2620 if (total_latency_us > max_lat_us) 2621 max_lat_us = total_latency_us; 2622 } 2623 2624 apste = 1; 2625 2626 if (max_ps == -1) { 2627 dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); 2628 } else { 2629 dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", 2630 max_ps, max_lat_us, (int)sizeof(*table), table); 2631 } 2632 } 2633 2634 ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, 2635 table, sizeof(*table), NULL); 2636 if (ret) 2637 dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); 2638 2639 kfree(table); 2640 return ret; 2641} 2642 2643static void nvme_set_latency_tolerance(struct device *dev, s32 val) 2644{ 2645 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 2646 u64 latency; 2647 2648 switch (val) { 2649 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: 2650 case PM_QOS_LATENCY_ANY: 2651 latency = U64_MAX; 2652 break; 2653 2654 default: 2655 latency = val; 2656 } 2657 2658 if (ctrl->ps_max_latency_us != latency) { 2659 ctrl->ps_max_latency_us = latency; 2660 if (ctrl->state == NVME_CTRL_LIVE) 2661 nvme_configure_apst(ctrl); 2662 } 2663} 2664 2665struct nvme_core_quirk_entry { 2666 /* 2667 * NVMe model and firmware strings are padded with spaces. For 2668 * simplicity, strings in the quirk table are padded with NULLs 2669 * instead. 2670 */ 2671 u16 vid; 2672 const char *mn; 2673 const char *fr; 2674 unsigned long quirks; 2675}; 2676 2677static const struct nvme_core_quirk_entry core_quirks[] = { 2678 { 2679 /* 2680 * This Toshiba device seems to die using any APST states. See: 2681 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 2682 */ 2683 .vid = 0x1179, 2684 .mn = "THNSF5256GPUK TOSHIBA", 2685 .quirks = NVME_QUIRK_NO_APST, 2686 }, 2687 { 2688 /* 2689 * This LiteON CL1-3D*-Q11 firmware version has a race 2690 * condition associated with actions related to suspend to idle 2691 * LiteON has resolved the problem in future firmware 2692 */ 2693 .vid = 0x14a4, 2694 .fr = "22301111", 2695 .quirks = NVME_QUIRK_SIMPLE_SUSPEND, 2696 }, 2697 { 2698 /* 2699 * This Kioxia CD6-V Series / HPE PE8030 device times out and 2700 * aborts I/O during any load, but more easily reproducible 2701 * with discards (fstrim). 2702 * 2703 * The device is left in a state where it is also not possible 2704 * to use "nvme set-feature" to disable APST, but booting with 2705 * nvme_core.default_ps_max_latency=0 works. 2706 */ 2707 .vid = 0x1e0f, 2708 .mn = "KCD6XVUL6T40", 2709 .quirks = NVME_QUIRK_NO_APST, 2710 }, 2711 { 2712 /* 2713 * The external Samsung X5 SSD fails initialization without a 2714 * delay before checking if it is ready and has a whole set of 2715 * other problems. To make this even more interesting, it 2716 * shares the PCI ID with internal Samsung 970 Evo Plus that 2717 * does not need or want these quirks. 2718 */ 2719 .vid = 0x144d, 2720 .mn = "Samsung Portable SSD X5", 2721 .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 2722 NVME_QUIRK_NO_DEEPEST_PS | 2723 NVME_QUIRK_IGNORE_DEV_SUBNQN, 2724 } 2725}; 2726 2727/* match is null-terminated but idstr is space-padded. 
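 * (the Identify strings are fixed-size fields, e.g. a 40-byte model
 * number, so "THNSF5256GPUK TOSHIBA" above has to match an id->mn that
 * is right-padded with spaces to its full length)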
*/ 2728static bool string_matches(const char *idstr, const char *match, size_t len) 2729{ 2730 size_t matchlen; 2731 2732 if (!match) 2733 return true; 2734 2735 matchlen = strlen(match); 2736 WARN_ON_ONCE(matchlen > len); 2737 2738 if (memcmp(idstr, match, matchlen)) 2739 return false; 2740 2741 for (; matchlen < len; matchlen++) 2742 if (idstr[matchlen] != ' ') 2743 return false; 2744 2745 return true; 2746} 2747 2748static bool quirk_matches(const struct nvme_id_ctrl *id, 2749 const struct nvme_core_quirk_entry *q) 2750{ 2751 return q->vid == le16_to_cpu(id->vid) && 2752 string_matches(id->mn, q->mn, sizeof(id->mn)) && 2753 string_matches(id->fr, q->fr, sizeof(id->fr)); 2754} 2755 2756static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, 2757 struct nvme_id_ctrl *id) 2758{ 2759 size_t nqnlen; 2760 int off; 2761 2762 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { 2763 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2764 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2765 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2766 return; 2767 } 2768 2769 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2770 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2771 } 2772 2773 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2774 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2775 "nqn.2014.08.org.nvmexpress:%04x%04x", 2776 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2777 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2778 off += sizeof(id->sn); 2779 memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); 2780 off += sizeof(id->mn); 2781 memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); 2782} 2783 2784static void nvme_release_subsystem(struct device *dev) 2785{ 2786 struct nvme_subsystem *subsys = 2787 container_of(dev, struct nvme_subsystem, dev); 2788 2789 if (subsys->instance >= 0) 2790 ida_simple_remove(&nvme_instance_ida, subsys->instance); 2791 kfree(subsys); 2792} 2793 2794static void nvme_destroy_subsystem(struct kref *ref) 2795{ 2796 struct nvme_subsystem *subsys = 2797 container_of(ref, struct nvme_subsystem, ref); 2798 2799 mutex_lock(&nvme_subsystems_lock); 2800 list_del(&subsys->entry); 2801 mutex_unlock(&nvme_subsystems_lock); 2802 2803 ida_destroy(&subsys->ns_ida); 2804 device_del(&subsys->dev); 2805 put_device(&subsys->dev); 2806} 2807 2808static void nvme_put_subsystem(struct nvme_subsystem *subsys) 2809{ 2810 kref_put(&subsys->ref, nvme_destroy_subsystem); 2811} 2812 2813static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) 2814{ 2815 struct nvme_subsystem *subsys; 2816 2817 lockdep_assert_held(&nvme_subsystems_lock); 2818 2819 /* 2820 * Fail matches for discovery subsystems. This results 2821 * in each discovery controller bound to a unique subsystem. 2822 * This avoids issues with validating controller values 2823 * that can only be true when there is a single unique subsystem. 2824 * There may be multiple and completely independent entities 2825 * that provide discovery controllers. 
2826 */ 2827 if (!strcmp(subsysnqn, NVME_DISC_SUBSYS_NAME)) 2828 return NULL; 2829 2830 list_for_each_entry(subsys, &nvme_subsystems, entry) { 2831 if (strcmp(subsys->subnqn, subsysnqn)) 2832 continue; 2833 if (!kref_get_unless_zero(&subsys->ref)) 2834 continue; 2835 return subsys; 2836 } 2837 2838 return NULL; 2839} 2840 2841#define SUBSYS_ATTR_RO(_name, _mode, _show) \ 2842 struct device_attribute subsys_attr_##_name = \ 2843 __ATTR(_name, _mode, _show, NULL) 2844 2845static ssize_t nvme_subsys_show_nqn(struct device *dev, 2846 struct device_attribute *attr, 2847 char *buf) 2848{ 2849 struct nvme_subsystem *subsys = 2850 container_of(dev, struct nvme_subsystem, dev); 2851 2852 return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); 2853} 2854static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); 2855 2856#define nvme_subsys_show_str_function(field) \ 2857static ssize_t subsys_##field##_show(struct device *dev, \ 2858 struct device_attribute *attr, char *buf) \ 2859{ \ 2860 struct nvme_subsystem *subsys = \ 2861 container_of(dev, struct nvme_subsystem, dev); \ 2862 return sysfs_emit(buf, "%.*s\n", \ 2863 (int)sizeof(subsys->field), subsys->field); \ 2864} \ 2865static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); 2866 2867nvme_subsys_show_str_function(model); 2868nvme_subsys_show_str_function(serial); 2869nvme_subsys_show_str_function(firmware_rev); 2870 2871static struct attribute *nvme_subsys_attrs[] = { 2872 &subsys_attr_model.attr, 2873 &subsys_attr_serial.attr, 2874 &subsys_attr_firmware_rev.attr, 2875 &subsys_attr_subsysnqn.attr, 2876#ifdef CONFIG_NVME_MULTIPATH 2877 &subsys_attr_iopolicy.attr, 2878#endif 2879 NULL, 2880}; 2881 2882static struct attribute_group nvme_subsys_attrs_group = { 2883 .attrs = nvme_subsys_attrs, 2884}; 2885 2886static const struct attribute_group *nvme_subsys_attrs_groups[] = { 2887 &nvme_subsys_attrs_group, 2888 NULL, 2889}; 2890 2891static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) 2892{ 2893 return ctrl->opts && ctrl->opts->discovery_nqn; 2894} 2895 2896static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, 2897 struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2898{ 2899 struct nvme_ctrl *tmp; 2900 2901 lockdep_assert_held(&nvme_subsystems_lock); 2902 2903 list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { 2904 if (nvme_state_terminal(tmp)) 2905 continue; 2906 2907 if (tmp->cntlid == ctrl->cntlid) { 2908 dev_err(ctrl->device, 2909 "Duplicate cntlid %u with %s, rejecting\n", 2910 ctrl->cntlid, dev_name(tmp->device)); 2911 return false; 2912 } 2913 2914 if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || 2915 nvme_discovery_ctrl(ctrl)) 2916 continue; 2917 2918 dev_err(ctrl->device, 2919 "Subsystem does not support multiple controllers\n"); 2920 return false; 2921 } 2922 2923 return true; 2924} 2925 2926static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) 2927{ 2928 struct nvme_subsystem *subsys, *found; 2929 int ret; 2930 2931 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); 2932 if (!subsys) 2933 return -ENOMEM; 2934 2935 subsys->instance = -1; 2936 mutex_init(&subsys->lock); 2937 kref_init(&subsys->ref); 2938 INIT_LIST_HEAD(&subsys->ctrls); 2939 INIT_LIST_HEAD(&subsys->nsheads); 2940 nvme_init_subnqn(subsys, ctrl, id); 2941 memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); 2942 memcpy(subsys->model, id->mn, sizeof(subsys->model)); 2943 subsys->vendor_id = le16_to_cpu(id->vid); 2944 subsys->cmic = id->cmic; 2945 subsys->awupf = le16_to_cpu(id->awupf); 2946#ifdef CONFIG_NVME_MULTIPATH 
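	/*
	 * Default to NUMA-locality based path selection; this can be
	 * changed at runtime (e.g. to round-robin) through the
	 * subsystem's iopolicy sysfs attribute.
	 */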
2947 subsys->iopolicy = NVME_IOPOLICY_NUMA; 2948#endif 2949 2950 subsys->dev.class = nvme_subsys_class; 2951 subsys->dev.release = nvme_release_subsystem; 2952 subsys->dev.groups = nvme_subsys_attrs_groups; 2953 dev_set_name(&subsys->dev, "nvme-subsys%d", ctrl->instance); 2954 device_initialize(&subsys->dev); 2955 2956 mutex_lock(&nvme_subsystems_lock); 2957 found = __nvme_find_get_subsystem(subsys->subnqn); 2958 if (found) { 2959 put_device(&subsys->dev); 2960 subsys = found; 2961 2962 if (!nvme_validate_cntlid(subsys, ctrl, id)) { 2963 ret = -EINVAL; 2964 goto out_put_subsystem; 2965 } 2966 } else { 2967 ret = device_add(&subsys->dev); 2968 if (ret) { 2969 dev_err(ctrl->device, 2970 "failed to register subsystem device.\n"); 2971 put_device(&subsys->dev); 2972 goto out_unlock; 2973 } 2974 ida_init(&subsys->ns_ida); 2975 list_add_tail(&subsys->entry, &nvme_subsystems); 2976 } 2977 2978 ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, 2979 dev_name(ctrl->device)); 2980 if (ret) { 2981 dev_err(ctrl->device, 2982 "failed to create sysfs link from subsystem.\n"); 2983 goto out_put_subsystem; 2984 } 2985 2986 if (!found) 2987 subsys->instance = ctrl->instance; 2988 ctrl->subsys = subsys; 2989 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); 2990 mutex_unlock(&nvme_subsystems_lock); 2991 return 0; 2992 2993out_put_subsystem: 2994 nvme_put_subsystem(subsys); 2995out_unlock: 2996 mutex_unlock(&nvme_subsystems_lock); 2997 return ret; 2998} 2999 3000int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, 3001 void *log, size_t size, u64 offset) 3002{ 3003 struct nvme_command c = { }; 3004 u32 dwlen = nvme_bytes_to_numd(size); 3005 3006 c.get_log_page.opcode = nvme_admin_get_log_page; 3007 c.get_log_page.nsid = cpu_to_le32(nsid); 3008 c.get_log_page.lid = log_page; 3009 c.get_log_page.lsp = lsp; 3010 c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); 3011 c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); 3012 c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); 3013 c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); 3014 c.get_log_page.csi = csi; 3015 3016 return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); 3017} 3018 3019static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, 3020 struct nvme_effects_log **log) 3021{ 3022 struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); 3023 int ret; 3024 3025 if (cel) 3026 goto out; 3027 3028 cel = kzalloc(sizeof(*cel), GFP_KERNEL); 3029 if (!cel) 3030 return -ENOMEM; 3031 3032 ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, 3033 cel, sizeof(*cel), 0); 3034 if (ret) { 3035 kfree(cel); 3036 return ret; 3037 } 3038 3039 xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); 3040out: 3041 *log = cel; 3042 return 0; 3043} 3044 3045/* 3046 * Initialize the cached copies of the Identify data and various controller 3047 * register in our nvme_ctrl structure. This should be called as soon as 3048 * the admin queue is fully up and running. 
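 *
 * Note that the core quirk table is only consulted the first time a
 * controller is identified (see the !ctrl->identified check below).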
3049 */ 3050int nvme_init_identify(struct nvme_ctrl *ctrl) 3051{ 3052 struct nvme_id_ctrl *id; 3053 int ret, page_shift; 3054 u32 max_hw_sectors; 3055 bool prev_apst_enabled; 3056 3057 ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); 3058 if (ret) { 3059 dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); 3060 return ret; 3061 } 3062 page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; 3063 ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); 3064 3065 if (ctrl->vs >= NVME_VS(1, 1, 0)) 3066 ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); 3067 3068 ret = nvme_identify_ctrl(ctrl, &id); 3069 if (ret) { 3070 dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); 3071 return -EIO; 3072 } 3073 3074 if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { 3075 ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); 3076 if (ret < 0) 3077 goto out_free; 3078 } 3079 3080 if (!(ctrl->ops->flags & NVME_F_FABRICS)) 3081 ctrl->cntlid = le16_to_cpu(id->cntlid); 3082 3083 if (!ctrl->identified) { 3084 int i; 3085 3086 /* 3087 * Check for quirks. Quirk can depend on firmware version, 3088 * so, in principle, the set of quirks present can change 3089 * across a reset. As a possible future enhancement, we 3090 * could re-scan for quirks every time we reinitialize 3091 * the device, but we'd have to make sure that the driver 3092 * behaves intelligently if the quirks change. 3093 */ 3094 for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { 3095 if (quirk_matches(id, &core_quirks[i])) 3096 ctrl->quirks |= core_quirks[i].quirks; 3097 } 3098 3099 ret = nvme_init_subsystem(ctrl, id); 3100 if (ret) 3101 goto out_free; 3102 } 3103 memcpy(ctrl->subsys->firmware_rev, id->fr, 3104 sizeof(ctrl->subsys->firmware_rev)); 3105 3106 if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { 3107 dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); 3108 ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; 3109 } 3110 3111 ctrl->crdt[0] = le16_to_cpu(id->crdt1); 3112 ctrl->crdt[1] = le16_to_cpu(id->crdt2); 3113 ctrl->crdt[2] = le16_to_cpu(id->crdt3); 3114 3115 ctrl->oacs = le16_to_cpu(id->oacs); 3116 ctrl->oncs = le16_to_cpu(id->oncs); 3117 ctrl->mtfa = le16_to_cpu(id->mtfa); 3118 ctrl->oaes = le32_to_cpu(id->oaes); 3119 ctrl->wctemp = le16_to_cpu(id->wctemp); 3120 ctrl->cctemp = le16_to_cpu(id->cctemp); 3121 3122 atomic_set(&ctrl->abort_limit, id->acl + 1); 3123 ctrl->vwc = id->vwc; 3124 if (id->mdts) 3125 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 3126 else 3127 max_hw_sectors = UINT_MAX; 3128 ctrl->max_hw_sectors = 3129 min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); 3130 3131 nvme_set_queue_limits(ctrl, ctrl->admin_q); 3132 ctrl->sgls = le32_to_cpu(id->sgls); 3133 ctrl->kas = le16_to_cpu(id->kas); 3134 ctrl->max_namespaces = le32_to_cpu(id->mnan); 3135 ctrl->ctratt = le32_to_cpu(id->ctratt); 3136 3137 if (id->rtd3e) { 3138 /* us -> s */ 3139 u32 transition_time = le32_to_cpu(id->rtd3e) / USEC_PER_SEC; 3140 3141 ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, 3142 shutdown_timeout, 60); 3143 3144 if (ctrl->shutdown_timeout != shutdown_timeout) 3145 dev_info(ctrl->device, 3146 "Shutdown timeout set to %u seconds\n", 3147 ctrl->shutdown_timeout); 3148 } else 3149 ctrl->shutdown_timeout = shutdown_timeout; 3150 3151 ctrl->npss = id->npss; 3152 ctrl->apsta = id->apsta; 3153 prev_apst_enabled = ctrl->apst_enabled; 3154 if (ctrl->quirks & NVME_QUIRK_NO_APST) { 3155 if (force_apst && id->apsta) { 3156 dev_warn(ctrl->device, 
"forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); 3157 ctrl->apst_enabled = true; 3158 } else { 3159 ctrl->apst_enabled = false; 3160 } 3161 } else { 3162 ctrl->apst_enabled = id->apsta; 3163 } 3164 memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); 3165 3166 if (ctrl->ops->flags & NVME_F_FABRICS) { 3167 ctrl->icdoff = le16_to_cpu(id->icdoff); 3168 ctrl->ioccsz = le32_to_cpu(id->ioccsz); 3169 ctrl->iorcsz = le32_to_cpu(id->iorcsz); 3170 ctrl->maxcmd = le16_to_cpu(id->maxcmd); 3171 3172 /* 3173 * In fabrics we need to verify the cntlid matches the 3174 * admin connect 3175 */ 3176 if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { 3177 dev_err(ctrl->device, 3178 "Mismatching cntlid: Connect %u vs Identify " 3179 "%u, rejecting\n", 3180 ctrl->cntlid, le16_to_cpu(id->cntlid)); 3181 ret = -EINVAL; 3182 goto out_free; 3183 } 3184 3185 if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) { 3186 dev_err(ctrl->device, 3187 "keep-alive support is mandatory for fabrics\n"); 3188 ret = -EINVAL; 3189 goto out_free; 3190 } 3191 } else { 3192 ctrl->hmpre = le32_to_cpu(id->hmpre); 3193 ctrl->hmmin = le32_to_cpu(id->hmmin); 3194 ctrl->hmminds = le32_to_cpu(id->hmminds); 3195 ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); 3196 } 3197 3198 ret = nvme_mpath_init_identify(ctrl, id); 3199 kfree(id); 3200 3201 if (ret < 0) 3202 return ret; 3203 3204 if (ctrl->apst_enabled && !prev_apst_enabled) 3205 dev_pm_qos_expose_latency_tolerance(ctrl->device); 3206 else if (!ctrl->apst_enabled && prev_apst_enabled) 3207 dev_pm_qos_hide_latency_tolerance(ctrl->device); 3208 3209 ret = nvme_configure_apst(ctrl); 3210 if (ret < 0) 3211 return ret; 3212 3213 ret = nvme_configure_timestamp(ctrl); 3214 if (ret < 0) 3215 return ret; 3216 3217 ret = nvme_configure_directives(ctrl); 3218 if (ret < 0) 3219 return ret; 3220 3221 ret = nvme_configure_acre(ctrl); 3222 if (ret < 0) 3223 return ret; 3224 3225 if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { 3226 /* 3227 * Do not return errors unless we are in a controller reset, 3228 * the controller works perfectly fine without hwmon. 
3229 */ 3230 ret = nvme_hwmon_init(ctrl); 3231 if (ret == -EINTR) 3232 return ret; 3233 } 3234 3235 ctrl->identified = true; 3236 3237 return 0; 3238 3239out_free: 3240 kfree(id); 3241 return ret; 3242} 3243EXPORT_SYMBOL_GPL(nvme_init_identify); 3244 3245static int nvme_dev_open(struct inode *inode, struct file *file) 3246{ 3247 struct nvme_ctrl *ctrl = 3248 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3249 3250 switch (ctrl->state) { 3251 case NVME_CTRL_LIVE: 3252 break; 3253 default: 3254 return -EWOULDBLOCK; 3255 } 3256 3257 nvme_get_ctrl(ctrl); 3258 if (!try_module_get(ctrl->ops->module)) { 3259 nvme_put_ctrl(ctrl); 3260 return -EINVAL; 3261 } 3262 3263 file->private_data = ctrl; 3264 return 0; 3265} 3266 3267static int nvme_dev_release(struct inode *inode, struct file *file) 3268{ 3269 struct nvme_ctrl *ctrl = 3270 container_of(inode->i_cdev, struct nvme_ctrl, cdev); 3271 3272 module_put(ctrl->ops->module); 3273 nvme_put_ctrl(ctrl); 3274 return 0; 3275} 3276 3277static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) 3278{ 3279 struct nvme_ns *ns; 3280 int ret; 3281 3282 down_read(&ctrl->namespaces_rwsem); 3283 if (list_empty(&ctrl->namespaces)) { 3284 ret = -ENOTTY; 3285 goto out_unlock; 3286 } 3287 3288 ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); 3289 if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { 3290 dev_warn(ctrl->device, 3291 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); 3292 ret = -EINVAL; 3293 goto out_unlock; 3294 } 3295 3296 dev_warn(ctrl->device, 3297 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); 3298 kref_get(&ns->kref); 3299 up_read(&ctrl->namespaces_rwsem); 3300 3301 ret = nvme_user_cmd(ctrl, ns, argp); 3302 nvme_put_ns(ns); 3303 return ret; 3304 3305out_unlock: 3306 up_read(&ctrl->namespaces_rwsem); 3307 return ret; 3308} 3309 3310static long nvme_dev_ioctl(struct file *file, unsigned int cmd, 3311 unsigned long arg) 3312{ 3313 struct nvme_ctrl *ctrl = file->private_data; 3314 void __user *argp = (void __user *)arg; 3315 3316 switch (cmd) { 3317 case NVME_IOCTL_ADMIN_CMD: 3318 return nvme_user_cmd(ctrl, NULL, argp); 3319 case NVME_IOCTL_ADMIN64_CMD: 3320 return nvme_user_cmd64(ctrl, NULL, argp); 3321 case NVME_IOCTL_IO_CMD: 3322 return nvme_dev_user_cmd(ctrl, argp); 3323 case NVME_IOCTL_RESET: 3324 if (!capable(CAP_SYS_ADMIN)) 3325 return -EACCES; 3326 dev_warn(ctrl->device, "resetting controller\n"); 3327 return nvme_reset_ctrl_sync(ctrl); 3328 case NVME_IOCTL_SUBSYS_RESET: 3329 if (!capable(CAP_SYS_ADMIN)) 3330 return -EACCES; 3331 return nvme_reset_subsystem(ctrl); 3332 case NVME_IOCTL_RESCAN: 3333 if (!capable(CAP_SYS_ADMIN)) 3334 return -EACCES; 3335 nvme_queue_scan(ctrl); 3336 return 0; 3337 default: 3338 return -ENOTTY; 3339 } 3340} 3341 3342static const struct file_operations nvme_dev_fops = { 3343 .owner = THIS_MODULE, 3344 .open = nvme_dev_open, 3345 .release = nvme_dev_release, 3346 .unlocked_ioctl = nvme_dev_ioctl, 3347 .compat_ioctl = compat_ptr_ioctl, 3348}; 3349 3350static ssize_t nvme_sysfs_reset(struct device *dev, 3351 struct device_attribute *attr, const char *buf, 3352 size_t count) 3353{ 3354 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3355 int ret; 3356 3357 ret = nvme_reset_ctrl_sync(ctrl); 3358 if (ret < 0) 3359 return ret; 3360 return count; 3361} 3362static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); 3363 3364static ssize_t nvme_sysfs_rescan(struct device *dev, 3365 struct device_attribute *attr, const char 
*buf, 3366 size_t count) 3367{ 3368 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3369 3370 nvme_queue_scan(ctrl); 3371 return count; 3372} 3373static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); 3374 3375static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) 3376{ 3377 struct gendisk *disk = dev_to_disk(dev); 3378 3379 if (disk->fops == &nvme_fops) 3380 return nvme_get_ns_from_dev(dev)->head; 3381 else 3382 return disk->private_data; 3383} 3384 3385static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, 3386 char *buf) 3387{ 3388 struct nvme_ns_head *head = dev_to_ns_head(dev); 3389 struct nvme_ns_ids *ids = &head->ids; 3390 struct nvme_subsystem *subsys = head->subsys; 3391 int serial_len = sizeof(subsys->serial); 3392 int model_len = sizeof(subsys->model); 3393 3394 if (!uuid_is_null(&ids->uuid)) 3395 return sysfs_emit(buf, "uuid.%pU\n", &ids->uuid); 3396 3397 if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3398 return sysfs_emit(buf, "eui.%16phN\n", ids->nguid); 3399 3400 if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3401 return sysfs_emit(buf, "eui.%8phN\n", ids->eui64); 3402 3403 while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || 3404 subsys->serial[serial_len - 1] == '\0')) 3405 serial_len--; 3406 while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || 3407 subsys->model[model_len - 1] == '\0')) 3408 model_len--; 3409 3410 return sysfs_emit(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, 3411 serial_len, subsys->serial, model_len, subsys->model, 3412 head->ns_id); 3413} 3414static DEVICE_ATTR_RO(wwid); 3415 3416static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, 3417 char *buf) 3418{ 3419 return sysfs_emit(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); 3420} 3421static DEVICE_ATTR_RO(nguid); 3422 3423static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, 3424 char *buf) 3425{ 3426 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3427 3428 /* For backward compatibility expose the NGUID to userspace if 3429 * we have no UUID set 3430 */ 3431 if (uuid_is_null(&ids->uuid)) { 3432 dev_warn_ratelimited(dev, 3433 "No UUID available providing old NGUID\n"); 3434 return sysfs_emit(buf, "%pU\n", ids->nguid); 3435 } 3436 return sysfs_emit(buf, "%pU\n", &ids->uuid); 3437} 3438static DEVICE_ATTR_RO(uuid); 3439 3440static ssize_t eui_show(struct device *dev, struct device_attribute *attr, 3441 char *buf) 3442{ 3443 return sysfs_emit(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); 3444} 3445static DEVICE_ATTR_RO(eui); 3446 3447static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, 3448 char *buf) 3449{ 3450 return sysfs_emit(buf, "%d\n", dev_to_ns_head(dev)->ns_id); 3451} 3452static DEVICE_ATTR_RO(nsid); 3453 3454static struct attribute *nvme_ns_id_attrs[] = { 3455 &dev_attr_wwid.attr, 3456 &dev_attr_uuid.attr, 3457 &dev_attr_nguid.attr, 3458 &dev_attr_eui.attr, 3459 &dev_attr_nsid.attr, 3460#ifdef CONFIG_NVME_MULTIPATH 3461 &dev_attr_ana_grpid.attr, 3462 &dev_attr_ana_state.attr, 3463#endif 3464 NULL, 3465}; 3466 3467static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, 3468 struct attribute *a, int n) 3469{ 3470 struct device *dev = container_of(kobj, struct device, kobj); 3471 struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; 3472 3473 if (a == &dev_attr_uuid.attr) { 3474 if (uuid_is_null(&ids->uuid) && 3475 !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3476 return 0; 3477 } 3478 if (a == &dev_attr_nguid.attr) { 
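		/* hide the nguid attribute if the namespace never reported one */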
3479 if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) 3480 return 0; 3481 } 3482 if (a == &dev_attr_eui.attr) { 3483 if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) 3484 return 0; 3485 } 3486#ifdef CONFIG_NVME_MULTIPATH 3487 if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { 3488 if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ 3489 return 0; 3490 if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) 3491 return 0; 3492 } 3493#endif 3494 return a->mode; 3495} 3496 3497static const struct attribute_group nvme_ns_id_attr_group = { 3498 .attrs = nvme_ns_id_attrs, 3499 .is_visible = nvme_ns_id_attrs_are_visible, 3500}; 3501 3502const struct attribute_group *nvme_ns_id_attr_groups[] = { 3503 &nvme_ns_id_attr_group, 3504#ifdef CONFIG_NVM 3505 &nvme_nvm_attr_group, 3506#endif 3507 NULL, 3508}; 3509 3510#define nvme_show_str_function(field) \ 3511static ssize_t field##_show(struct device *dev, \ 3512 struct device_attribute *attr, char *buf) \ 3513{ \ 3514 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3515 return sysfs_emit(buf, "%.*s\n", \ 3516 (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ 3517} \ 3518static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3519 3520nvme_show_str_function(model); 3521nvme_show_str_function(serial); 3522nvme_show_str_function(firmware_rev); 3523 3524#define nvme_show_int_function(field) \ 3525static ssize_t field##_show(struct device *dev, \ 3526 struct device_attribute *attr, char *buf) \ 3527{ \ 3528 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ 3529 return sysfs_emit(buf, "%d\n", ctrl->field); \ 3530} \ 3531static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); 3532 3533nvme_show_int_function(cntlid); 3534nvme_show_int_function(numa_node); 3535nvme_show_int_function(queue_count); 3536nvme_show_int_function(sqsize); 3537 3538static ssize_t nvme_sysfs_delete(struct device *dev, 3539 struct device_attribute *attr, const char *buf, 3540 size_t count) 3541{ 3542 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3543 3544 if (device_remove_file_self(dev, attr)) 3545 nvme_delete_ctrl_sync(ctrl); 3546 return count; 3547} 3548static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); 3549 3550static ssize_t nvme_sysfs_show_transport(struct device *dev, 3551 struct device_attribute *attr, 3552 char *buf) 3553{ 3554 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3555 3556 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); 3557} 3558static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); 3559 3560static ssize_t nvme_sysfs_show_state(struct device *dev, 3561 struct device_attribute *attr, 3562 char *buf) 3563{ 3564 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3565 static const char *const state_name[] = { 3566 [NVME_CTRL_NEW] = "new", 3567 [NVME_CTRL_LIVE] = "live", 3568 [NVME_CTRL_RESETTING] = "resetting", 3569 [NVME_CTRL_CONNECTING] = "connecting", 3570 [NVME_CTRL_DELETING] = "deleting", 3571 [NVME_CTRL_DELETING_NOIO]= "deleting (no IO)", 3572 [NVME_CTRL_DEAD] = "dead", 3573 }; 3574 3575 if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && 3576 state_name[ctrl->state]) 3577 return sysfs_emit(buf, "%s\n", state_name[ctrl->state]); 3578 3579 return sysfs_emit(buf, "unknown state\n"); 3580} 3581 3582static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); 3583 3584static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, 3585 struct device_attribute *attr, 3586 char *buf) 3587{ 3588 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3589 3590 return snprintf(buf, 
PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); 3591} 3592static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); 3593 3594static ssize_t nvme_sysfs_show_hostnqn(struct device *dev, 3595 struct device_attribute *attr, 3596 char *buf) 3597{ 3598 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3599 3600 return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->opts->host->nqn); 3601} 3602static DEVICE_ATTR(hostnqn, S_IRUGO, nvme_sysfs_show_hostnqn, NULL); 3603 3604static ssize_t nvme_sysfs_show_hostid(struct device *dev, 3605 struct device_attribute *attr, 3606 char *buf) 3607{ 3608 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3609 3610 return snprintf(buf, PAGE_SIZE, "%pU\n", &ctrl->opts->host->id); 3611} 3612static DEVICE_ATTR(hostid, S_IRUGO, nvme_sysfs_show_hostid, NULL); 3613 3614static ssize_t nvme_sysfs_show_address(struct device *dev, 3615 struct device_attribute *attr, 3616 char *buf) 3617{ 3618 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3619 3620 return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); 3621} 3622static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); 3623 3624static ssize_t nvme_ctrl_loss_tmo_show(struct device *dev, 3625 struct device_attribute *attr, char *buf) 3626{ 3627 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3628 struct nvmf_ctrl_options *opts = ctrl->opts; 3629 3630 if (ctrl->opts->max_reconnects == -1) 3631 return sysfs_emit(buf, "off\n"); 3632 return sysfs_emit(buf, "%d\n", 3633 opts->max_reconnects * opts->reconnect_delay); 3634} 3635 3636static ssize_t nvme_ctrl_loss_tmo_store(struct device *dev, 3637 struct device_attribute *attr, const char *buf, size_t count) 3638{ 3639 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3640 struct nvmf_ctrl_options *opts = ctrl->opts; 3641 int ctrl_loss_tmo, err; 3642 3643 err = kstrtoint(buf, 10, &ctrl_loss_tmo); 3644 if (err) 3645 return -EINVAL; 3646 3647 else if (ctrl_loss_tmo < 0) 3648 opts->max_reconnects = -1; 3649 else 3650 opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo, 3651 opts->reconnect_delay); 3652 return count; 3653} 3654static DEVICE_ATTR(ctrl_loss_tmo, S_IRUGO | S_IWUSR, 3655 nvme_ctrl_loss_tmo_show, nvme_ctrl_loss_tmo_store); 3656 3657static ssize_t nvme_ctrl_reconnect_delay_show(struct device *dev, 3658 struct device_attribute *attr, char *buf) 3659{ 3660 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3661 3662 if (ctrl->opts->reconnect_delay == -1) 3663 return sysfs_emit(buf, "off\n"); 3664 return sysfs_emit(buf, "%d\n", ctrl->opts->reconnect_delay); 3665} 3666 3667static ssize_t nvme_ctrl_reconnect_delay_store(struct device *dev, 3668 struct device_attribute *attr, const char *buf, size_t count) 3669{ 3670 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3671 unsigned int v; 3672 int err; 3673 3674 err = kstrtou32(buf, 10, &v); 3675 if (err) 3676 return err; 3677 3678 ctrl->opts->reconnect_delay = v; 3679 return count; 3680} 3681static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, 3682 nvme_ctrl_reconnect_delay_show, nvme_ctrl_reconnect_delay_store); 3683 3684static struct attribute *nvme_dev_attrs[] = { 3685 &dev_attr_reset_controller.attr, 3686 &dev_attr_rescan_controller.attr, 3687 &dev_attr_model.attr, 3688 &dev_attr_serial.attr, 3689 &dev_attr_firmware_rev.attr, 3690 &dev_attr_cntlid.attr, 3691 &dev_attr_delete_controller.attr, 3692 &dev_attr_transport.attr, 3693 &dev_attr_subsysnqn.attr, 3694 &dev_attr_address.attr, 3695 &dev_attr_state.attr, 3696 &dev_attr_numa_node.attr, 3697 &dev_attr_queue_count.attr, 3698 &dev_attr_sqsize.attr, 3699 &dev_attr_hostnqn.attr, 3700 
&dev_attr_hostid.attr, 3701 &dev_attr_ctrl_loss_tmo.attr, 3702 &dev_attr_reconnect_delay.attr, 3703 NULL 3704}; 3705 3706static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, 3707 struct attribute *a, int n) 3708{ 3709 struct device *dev = container_of(kobj, struct device, kobj); 3710 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); 3711 3712 if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) 3713 return 0; 3714 if (a == &dev_attr_address.attr && !ctrl->ops->get_address) 3715 return 0; 3716 if (a == &dev_attr_hostnqn.attr && !ctrl->opts) 3717 return 0; 3718 if (a == &dev_attr_hostid.attr && !ctrl->opts) 3719 return 0; 3720 if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts) 3721 return 0; 3722 if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts) 3723 return 0; 3724 3725 return a->mode; 3726} 3727 3728static struct attribute_group nvme_dev_attrs_group = { 3729 .attrs = nvme_dev_attrs, 3730 .is_visible = nvme_dev_attrs_are_visible, 3731}; 3732 3733static const struct attribute_group *nvme_dev_attr_groups[] = { 3734 &nvme_dev_attrs_group, 3735 NULL, 3736}; 3737 3738static struct nvme_ns_head *nvme_find_ns_head(struct nvme_subsystem *subsys, 3739 unsigned nsid) 3740{ 3741 struct nvme_ns_head *h; 3742 3743 lockdep_assert_held(&subsys->lock); 3744 3745 list_for_each_entry(h, &subsys->nsheads, entry) { 3746 if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) 3747 return h; 3748 } 3749 3750 return NULL; 3751} 3752 3753static int nvme_subsys_check_duplicate_ids(struct nvme_subsystem *subsys, 3754 struct nvme_ns_ids *ids) 3755{ 3756 struct nvme_ns_head *h; 3757 3758 lockdep_assert_held(&subsys->lock); 3759 3760 list_for_each_entry(h, &subsys->nsheads, entry) { 3761 if (nvme_ns_ids_valid(ids) && nvme_ns_ids_equal(ids, &h->ids)) 3762 return -EINVAL; 3763 } 3764 3765 return 0; 3766} 3767 3768static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, 3769 unsigned nsid, struct nvme_ns_ids *ids) 3770{ 3771 struct nvme_ns_head *head; 3772 size_t size = sizeof(*head); 3773 int ret = -ENOMEM; 3774 3775#ifdef CONFIG_NVME_MULTIPATH 3776 size += num_possible_nodes() * sizeof(struct nvme_ns *); 3777#endif 3778 3779 head = kzalloc(size, GFP_KERNEL); 3780 if (!head) 3781 goto out; 3782 ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); 3783 if (ret < 0) 3784 goto out_free_head; 3785 head->instance = ret; 3786 INIT_LIST_HEAD(&head->list); 3787 ret = init_srcu_struct(&head->srcu); 3788 if (ret) 3789 goto out_ida_remove; 3790 head->subsys = ctrl->subsys; 3791 head->ns_id = nsid; 3792 head->ids = *ids; 3793 kref_init(&head->ref); 3794 3795 ret = nvme_subsys_check_duplicate_ids(ctrl->subsys, &head->ids); 3796 if (ret) { 3797 dev_err(ctrl->device, 3798 "duplicate IDs for nsid %d\n", nsid); 3799 goto out_cleanup_srcu; 3800 } 3801 3802 if (head->ids.csi) { 3803 ret = nvme_get_effects_log(ctrl, head->ids.csi, &head->effects); 3804 if (ret) 3805 goto out_cleanup_srcu; 3806 } else 3807 head->effects = ctrl->effects; 3808 3809 ret = nvme_mpath_alloc_disk(ctrl, head); 3810 if (ret) 3811 goto out_cleanup_srcu; 3812 3813 list_add_tail(&head->entry, &ctrl->subsys->nsheads); 3814 3815 kref_get(&ctrl->subsys->ref); 3816 3817 return head; 3818out_cleanup_srcu: 3819 cleanup_srcu_struct(&head->srcu); 3820out_ida_remove: 3821 ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); 3822out_free_head: 3823 kfree(head); 3824out: 3825 if (ret > 0) 3826 ret = blk_status_to_errno(nvme_error_status(ret)); 3827 return ERR_PTR(ret); 3828} 3829 3830static int nvme_init_ns_head(struct 
nvme_ns *ns, unsigned nsid, 3831 struct nvme_ns_ids *ids, bool is_shared) 3832{ 3833 struct nvme_ctrl *ctrl = ns->ctrl; 3834 struct nvme_ns_head *head = NULL; 3835 int ret = 0; 3836 3837 mutex_lock(&ctrl->subsys->lock); 3838 head = nvme_find_ns_head(ctrl->subsys, nsid); 3839 if (!head) { 3840 head = nvme_alloc_ns_head(ctrl, nsid, ids); 3841 if (IS_ERR(head)) { 3842 ret = PTR_ERR(head); 3843 goto out_unlock; 3844 } 3845 head->shared = is_shared; 3846 } else { 3847 ret = -EINVAL; 3848 if (!is_shared || !head->shared) { 3849 dev_err(ctrl->device, 3850 "Duplicate unshared namespace %d\n", nsid); 3851 goto out_put_ns_head; 3852 } 3853 if (!nvme_ns_ids_equal(&head->ids, ids)) { 3854 dev_err(ctrl->device, 3855 "IDs don't match for shared namespace %d\n", 3856 nsid); 3857 goto out_put_ns_head; 3858 } 3859 } 3860 3861 list_add_tail(&ns->siblings, &head->list); 3862 ns->head = head; 3863 mutex_unlock(&ctrl->subsys->lock); 3864 return 0; 3865 3866out_put_ns_head: 3867 nvme_put_ns_head(head); 3868out_unlock: 3869 mutex_unlock(&ctrl->subsys->lock); 3870 return ret; 3871} 3872 3873struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3874{ 3875 struct nvme_ns *ns, *ret = NULL; 3876 3877 down_read(&ctrl->namespaces_rwsem); 3878 list_for_each_entry(ns, &ctrl->namespaces, list) { 3879 if (ns->head->ns_id == nsid) { 3880 if (!kref_get_unless_zero(&ns->kref)) 3881 continue; 3882 ret = ns; 3883 break; 3884 } 3885 if (ns->head->ns_id > nsid) 3886 break; 3887 } 3888 up_read(&ctrl->namespaces_rwsem); 3889 return ret; 3890} 3891EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 3892 3893/* 3894 * Add the namespace to the controller list while keeping the list ordered. 3895 */ 3896static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 3897{ 3898 struct nvme_ns *tmp; 3899 3900 list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 3901 if (tmp->head->ns_id < ns->head->ns_id) { 3902 list_add(&ns->list, &tmp->list); 3903 return; 3904 } 3905 } 3906 list_add(&ns->list, &ns->ctrl->namespaces); 3907} 3908 3909static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid, 3910 struct nvme_ns_ids *ids) 3911{ 3912 struct nvme_ns *ns; 3913 struct gendisk *disk; 3914 struct nvme_id_ns *id; 3915 char disk_name[DISK_NAME_LEN]; 3916 int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; 3917 3918 if (nvme_identify_ns(ctrl, nsid, ids, &id)) 3919 return; 3920 3921 ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); 3922 if (!ns) 3923 goto out_free_id; 3924 3925 ns->queue = blk_mq_init_queue(ctrl->tagset); 3926 if (IS_ERR(ns->queue)) 3927 goto out_free_ns; 3928 3929 if (ctrl->opts && ctrl->opts->data_digest) 3930 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); 3931 3932 blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); 3933 if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) 3934 blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); 3935 3936 ns->queue->queuedata = ns; 3937 ns->ctrl = ctrl; 3938 kref_init(&ns->kref); 3939 3940 ret = nvme_init_ns_head(ns, nsid, ids, id->nmic & NVME_NS_NMIC_SHARED); 3941 if (ret) 3942 goto out_free_queue; 3943 nvme_set_disk_name(disk_name, ns, ctrl, &flags); 3944 3945 disk = alloc_disk_node(0, node); 3946 if (!disk) 3947 goto out_unlink_ns; 3948 3949 disk->fops = &nvme_fops; 3950 disk->private_data = ns; 3951 disk->queue = ns->queue; 3952 disk->flags = flags; 3953 memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); 3954 ns->disk = disk; 3955 3956 if (nvme_update_ns_info(ns, id)) 3957 goto out_put_disk; 3958 3959 if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && 
id->vs[0] == 0x1) { 3960 ret = nvme_nvm_register(ns, disk_name, node); 3961 if (ret) { 3962 dev_warn(ctrl->device, "LightNVM init failure\n"); 3963 goto out_put_disk; 3964 } 3965 } 3966 3967 down_write(&ctrl->namespaces_rwsem); 3968 nvme_ns_add_to_ctrl_list(ns); 3969 up_write(&ctrl->namespaces_rwsem); 3970 nvme_get_ctrl(ctrl); 3971 3972 device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); 3973 3974 nvme_mpath_add_disk(ns, id); 3975 nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); 3976 kfree(id); 3977 3978 return; 3979 out_put_disk: 3980 /* prevent double queue cleanup */ 3981 ns->disk->queue = NULL; 3982 put_disk(ns->disk); 3983 out_unlink_ns: 3984 mutex_lock(&ctrl->subsys->lock); 3985 list_del_rcu(&ns->siblings); 3986 if (list_empty(&ns->head->list)) 3987 list_del_init(&ns->head->entry); 3988 mutex_unlock(&ctrl->subsys->lock); 3989 nvme_put_ns_head(ns->head); 3990 out_free_queue: 3991 blk_cleanup_queue(ns->queue); 3992 out_free_ns: 3993 kfree(ns); 3994 out_free_id: 3995 kfree(id); 3996} 3997 3998static void nvme_ns_remove(struct nvme_ns *ns) 3999{ 4000 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) 4001 return; 4002 4003 set_capacity(ns->disk, 0); 4004 nvme_fault_inject_fini(&ns->fault_inject); 4005 4006 mutex_lock(&ns->ctrl->subsys->lock); 4007 list_del_rcu(&ns->siblings); 4008 if (list_empty(&ns->head->list)) 4009 list_del_init(&ns->head->entry); 4010 mutex_unlock(&ns->ctrl->subsys->lock); 4011 4012 synchronize_rcu(); /* guarantee not available in head->list */ 4013 nvme_mpath_clear_current_path(ns); 4014 synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ 4015 4016 if (ns->disk->flags & GENHD_FL_UP) { 4017 del_gendisk(ns->disk); 4018 blk_cleanup_queue(ns->queue); 4019 if (blk_get_integrity(ns->disk)) 4020 blk_integrity_unregister(ns->disk); 4021 } 4022 4023 down_write(&ns->ctrl->namespaces_rwsem); 4024 list_del_init(&ns->list); 4025 up_write(&ns->ctrl->namespaces_rwsem); 4026 4027 nvme_mpath_check_last_path(ns); 4028 nvme_put_ns(ns); 4029} 4030 4031static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) 4032{ 4033 struct nvme_ns *ns = nvme_find_get_ns(ctrl, nsid); 4034 4035 if (ns) { 4036 nvme_ns_remove(ns); 4037 nvme_put_ns(ns); 4038 } 4039} 4040 4041static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids) 4042{ 4043 struct nvme_id_ns *id; 4044 int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 4045 4046 if (test_bit(NVME_NS_DEAD, &ns->flags)) 4047 goto out; 4048 4049 ret = nvme_identify_ns(ns->ctrl, ns->head->ns_id, ids, &id); 4050 if (ret) 4051 goto out; 4052 4053 ret = NVME_SC_INVALID_NS | NVME_SC_DNR; 4054 if (!nvme_ns_ids_equal(&ns->head->ids, ids)) { 4055 dev_err(ns->ctrl->device, 4056 "identifiers changed for nsid %d\n", ns->head->ns_id); 4057 goto out_free_id; 4058 } 4059 4060 ret = nvme_update_ns_info(ns, id); 4061 4062out_free_id: 4063 kfree(id); 4064out: 4065 /* 4066 * Only remove the namespace if we got a fatal error back from the 4067 * device, otherwise ignore the error and just move on. 4068 * 4069 * TODO: we should probably schedule a delayed retry here. 
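	 *
	 * For example, a completion status of (NVME_SC_INVALID_NS | NVME_SC_DNR)
	 * from Identify means the namespace is permanently gone and is removed
	 * below, while a negative transport error leaves it in place.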
4070 */ 4071 if (ret > 0 && (ret & NVME_SC_DNR)) 4072 nvme_ns_remove(ns); 4073 else 4074 revalidate_disk_size(ns->disk, true); 4075} 4076 4077static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) 4078{ 4079 struct nvme_ns_ids ids = { }; 4080 struct nvme_ns *ns; 4081 4082 if (nvme_identify_ns_descs(ctrl, nsid, &ids)) 4083 return; 4084 4085 ns = nvme_find_get_ns(ctrl, nsid); 4086 if (ns) { 4087 nvme_validate_ns(ns, &ids); 4088 nvme_put_ns(ns); 4089 return; 4090 } 4091 4092 switch (ids.csi) { 4093 case NVME_CSI_NVM: 4094 nvme_alloc_ns(ctrl, nsid, &ids); 4095 break; 4096 case NVME_CSI_ZNS: 4097 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { 4098 dev_warn(ctrl->device, 4099 "nsid %u not supported without CONFIG_BLK_DEV_ZONED\n", 4100 nsid); 4101 break; 4102 } 4103 if (!nvme_multi_css(ctrl)) { 4104 dev_warn(ctrl->device, 4105 "command set not reported for nsid: %d\n", 4106 nsid); 4107 break; 4108 } 4109 nvme_alloc_ns(ctrl, nsid, &ids); 4110 break; 4111 default: 4112 dev_warn(ctrl->device, "unknown csi %u for nsid %u\n", 4113 ids.csi, nsid); 4114 break; 4115 } 4116} 4117 4118static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, 4119 unsigned nsid) 4120{ 4121 struct nvme_ns *ns, *next; 4122 LIST_HEAD(rm_list); 4123 4124 down_write(&ctrl->namespaces_rwsem); 4125 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { 4126 if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) 4127 list_move_tail(&ns->list, &rm_list); 4128 } 4129 up_write(&ctrl->namespaces_rwsem); 4130 4131 list_for_each_entry_safe(ns, next, &rm_list, list) 4132 nvme_ns_remove(ns); 4133 4134} 4135 4136static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) 4137{ 4138 const int nr_entries = NVME_IDENTIFY_DATA_SIZE / sizeof(__le32); 4139 __le32 *ns_list; 4140 u32 prev = 0; 4141 int ret = 0, i; 4142 4143 if (nvme_ctrl_limited_cns(ctrl)) 4144 return -EOPNOTSUPP; 4145 4146 ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); 4147 if (!ns_list) 4148 return -ENOMEM; 4149 4150 for (;;) { 4151 struct nvme_command cmd = { 4152 .identify.opcode = nvme_admin_identify, 4153 .identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST, 4154 .identify.nsid = cpu_to_le32(prev), 4155 }; 4156 4157 ret = nvme_submit_sync_cmd(ctrl->admin_q, &cmd, ns_list, 4158 NVME_IDENTIFY_DATA_SIZE); 4159 if (ret) 4160 goto free; 4161 4162 for (i = 0; i < nr_entries; i++) { 4163 u32 nsid = le32_to_cpu(ns_list[i]); 4164 4165 if (!nsid) /* end of the list? 
 */
				goto out;
			nvme_validate_or_alloc_ns(ctrl, nsid);
			while (++prev < nsid)
				nvme_ns_remove_by_nsid(ctrl, prev);
		}
	}
 out:
	nvme_remove_invalid_namespaces(ctrl, prev);
 free:
	kfree(ns_list);
	return ret;
}

static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl)
{
	struct nvme_id_ctrl *id;
	u32 nn, i;

	if (nvme_identify_ctrl(ctrl, &id))
		return;
	nn = le32_to_cpu(id->nn);
	kfree(id);

	for (i = 1; i <= nn; i++)
		nvme_validate_or_alloc_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}

static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
	size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
	__le32 *log;
	int error;

	log = kzalloc(log_size, GFP_KERNEL);
	if (!log)
		return;

	/*
	 * We need to read the log to clear the AEN, but we don't want to rely
	 * on it for the changed namespace information as userspace could have
	 * raced with us in reading the log page, which could cause us to miss
	 * updates.
	 */
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0,
			NVME_CSI_NVM, log, log_size, 0);
	if (error)
		dev_warn(ctrl->device,
			"reading changed ns log failed: %d\n", error);

	kfree(log);
}

static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);

	/* No tagset on a live ctrl means IO queues could not be created */
	if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
		return;

	if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
		dev_info(ctrl->device, "rescanning namespaces.\n");
		nvme_clear_changed_ns_log(ctrl);
	}

	mutex_lock(&ctrl->scan_lock);
	if (nvme_scan_ns_list(ctrl) != 0)
		nvme_scan_ns_sequential(ctrl);
	mutex_unlock(&ctrl->scan_lock);
}

/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;
	LIST_HEAD(ns_list);

	/*
	 * make sure to requeue I/O to all namespaces as these
	 * might result from the scan itself and must complete
	 * for the scan_work to make progress
	 */
	nvme_mpath_clear_ctrl_paths(ctrl);

	/* prevent racing with ns scanning */
	flush_work(&ctrl->scan_work);

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
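	 *
	 * (A controller that was surprise removed is a typical way we end up
	 * observing the dead state here.)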
4265 */ 4266 if (ctrl->state == NVME_CTRL_DEAD) 4267 nvme_kill_queues(ctrl); 4268 4269 /* this is a no-op when called from the controller reset handler */ 4270 nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING_NOIO); 4271 4272 down_write(&ctrl->namespaces_rwsem); 4273 list_splice_init(&ctrl->namespaces, &ns_list); 4274 up_write(&ctrl->namespaces_rwsem); 4275 4276 list_for_each_entry_safe(ns, next, &ns_list, list) 4277 nvme_ns_remove(ns); 4278} 4279EXPORT_SYMBOL_GPL(nvme_remove_namespaces); 4280 4281static int nvme_class_uevent(struct device *dev, struct kobj_uevent_env *env) 4282{ 4283 struct nvme_ctrl *ctrl = 4284 container_of(dev, struct nvme_ctrl, ctrl_device); 4285 struct nvmf_ctrl_options *opts = ctrl->opts; 4286 int ret; 4287 4288 ret = add_uevent_var(env, "NVME_TRTYPE=%s", ctrl->ops->name); 4289 if (ret) 4290 return ret; 4291 4292 if (opts) { 4293 ret = add_uevent_var(env, "NVME_TRADDR=%s", opts->traddr); 4294 if (ret) 4295 return ret; 4296 4297 ret = add_uevent_var(env, "NVME_TRSVCID=%s", 4298 opts->trsvcid ?: "none"); 4299 if (ret) 4300 return ret; 4301 4302 ret = add_uevent_var(env, "NVME_HOST_TRADDR=%s", 4303 opts->host_traddr ?: "none"); 4304 } 4305 return ret; 4306} 4307 4308static void nvme_aen_uevent(struct nvme_ctrl *ctrl) 4309{ 4310 char *envp[2] = { NULL, NULL }; 4311 u32 aen_result = ctrl->aen_result; 4312 4313 ctrl->aen_result = 0; 4314 if (!aen_result) 4315 return; 4316 4317 envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); 4318 if (!envp[0]) 4319 return; 4320 kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); 4321 kfree(envp[0]); 4322} 4323 4324static void nvme_async_event_work(struct work_struct *work) 4325{ 4326 struct nvme_ctrl *ctrl = 4327 container_of(work, struct nvme_ctrl, async_event_work); 4328 4329 nvme_aen_uevent(ctrl); 4330 4331 /* 4332 * The transport drivers must guarantee AER submission here is safe by 4333 * flushing ctrl async_event_work after changing the controller state 4334 * from LIVE and before freeing the admin queue. 
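	 *
	 * A transport reset or teardown path typically satisfies this with a
	 * sequence along the lines of (illustrative sketch only):
	 *
	 *	nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING);
	 *	nvme_stop_ctrl(ctrl);	(flushes async_event_work)
	 *	(tear down and free the admin queue)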
4335 */ 4336 if (ctrl->state == NVME_CTRL_LIVE) 4337 ctrl->ops->submit_async_event(ctrl); 4338} 4339 4340static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) 4341{ 4342 4343 u32 csts; 4344 4345 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) 4346 return false; 4347 4348 if (csts == ~0) 4349 return false; 4350 4351 return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); 4352} 4353 4354static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) 4355{ 4356 struct nvme_fw_slot_info_log *log; 4357 4358 log = kmalloc(sizeof(*log), GFP_KERNEL); 4359 if (!log) 4360 return; 4361 4362 if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM, 4363 log, sizeof(*log), 0)) 4364 dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); 4365 kfree(log); 4366} 4367 4368static void nvme_fw_act_work(struct work_struct *work) 4369{ 4370 struct nvme_ctrl *ctrl = container_of(work, 4371 struct nvme_ctrl, fw_act_work); 4372 unsigned long fw_act_timeout; 4373 4374 if (ctrl->mtfa) 4375 fw_act_timeout = jiffies + 4376 msecs_to_jiffies(ctrl->mtfa * 100); 4377 else 4378 fw_act_timeout = jiffies + 4379 msecs_to_jiffies(admin_timeout * 1000); 4380 4381 nvme_stop_queues(ctrl); 4382 while (nvme_ctrl_pp_status(ctrl)) { 4383 if (time_after(jiffies, fw_act_timeout)) { 4384 dev_warn(ctrl->device, 4385 "Fw activation timeout, reset controller\n"); 4386 nvme_try_sched_reset(ctrl); 4387 return; 4388 } 4389 msleep(100); 4390 } 4391 4392 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) 4393 return; 4394 4395 nvme_start_queues(ctrl); 4396 /* read FW slot information to clear the AER */ 4397 nvme_get_fw_slot_info(ctrl); 4398} 4399 4400static u32 nvme_aer_type(u32 result) 4401{ 4402 return result & 0x7; 4403} 4404 4405static u32 nvme_aer_subtype(u32 result) 4406{ 4407 return (result & 0xff00) >> 8; 4408} 4409 4410static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) 4411{ 4412 u32 aer_notice_type = nvme_aer_subtype(result); 4413 4414 switch (aer_notice_type) { 4415 case NVME_AER_NOTICE_NS_CHANGED: 4416 set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); 4417 nvme_queue_scan(ctrl); 4418 break; 4419 case NVME_AER_NOTICE_FW_ACT_STARTING: 4420 /* 4421 * We are (ab)using the RESETTING state to prevent subsequent 4422 * recovery actions from interfering with the controller's 4423 * firmware activation. 
4424 */ 4425 if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 4426 queue_work(nvme_wq, &ctrl->fw_act_work); 4427 break; 4428#ifdef CONFIG_NVME_MULTIPATH 4429 case NVME_AER_NOTICE_ANA: 4430 if (!ctrl->ana_log_buf) 4431 break; 4432 queue_work(nvme_wq, &ctrl->ana_work); 4433 break; 4434#endif 4435 case NVME_AER_NOTICE_DISC_CHANGED: 4436 ctrl->aen_result = result; 4437 break; 4438 default: 4439 dev_warn(ctrl->device, "async event result %08x\n", result); 4440 } 4441} 4442 4443static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) 4444{ 4445 dev_warn(ctrl->device, "resetting controller due to AER\n"); 4446 nvme_reset_ctrl(ctrl); 4447} 4448 4449void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, 4450 volatile union nvme_result *res) 4451{ 4452 u32 result = le32_to_cpu(res->u32); 4453 u32 aer_type = nvme_aer_type(result); 4454 u32 aer_subtype = nvme_aer_subtype(result); 4455 4456 if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) 4457 return; 4458 4459 trace_nvme_async_event(ctrl, result); 4460 switch (aer_type) { 4461 case NVME_AER_NOTICE: 4462 nvme_handle_aen_notice(ctrl, result); 4463 break; 4464 case NVME_AER_ERROR: 4465 /* 4466 * For a persistent internal error, don't run async_event_work 4467 * to submit a new AER. The controller reset will do it. 4468 */ 4469 if (aer_subtype == NVME_AER_ERROR_PERSIST_INT_ERR) { 4470 nvme_handle_aer_persistent_error(ctrl); 4471 return; 4472 } 4473 fallthrough; 4474 case NVME_AER_SMART: 4475 case NVME_AER_CSS: 4476 case NVME_AER_VS: 4477 ctrl->aen_result = result; 4478 break; 4479 default: 4480 break; 4481 } 4482 queue_work(nvme_wq, &ctrl->async_event_work); 4483} 4484EXPORT_SYMBOL_GPL(nvme_complete_async_event); 4485 4486void nvme_stop_ctrl(struct nvme_ctrl *ctrl) 4487{ 4488 nvme_mpath_stop(ctrl); 4489 nvme_stop_keep_alive(ctrl); 4490 flush_work(&ctrl->async_event_work); 4491 cancel_work_sync(&ctrl->fw_act_work); 4492 if (ctrl->ops->stop_ctrl) 4493 ctrl->ops->stop_ctrl(ctrl); 4494} 4495EXPORT_SYMBOL_GPL(nvme_stop_ctrl); 4496 4497void nvme_start_ctrl(struct nvme_ctrl *ctrl) 4498{ 4499 nvme_start_keep_alive(ctrl); 4500 4501 nvme_enable_aen(ctrl); 4502 4503 if (ctrl->queue_count > 1) { 4504 nvme_queue_scan(ctrl); 4505 nvme_start_queues(ctrl); 4506 nvme_mpath_update(ctrl); 4507 } 4508} 4509EXPORT_SYMBOL_GPL(nvme_start_ctrl); 4510 4511void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) 4512{ 4513 nvme_hwmon_exit(ctrl); 4514 nvme_fault_inject_fini(&ctrl->fault_inject); 4515 dev_pm_qos_hide_latency_tolerance(ctrl->device); 4516 cdev_device_del(&ctrl->cdev, ctrl->device); 4517 nvme_put_ctrl(ctrl); 4518} 4519EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); 4520 4521static void nvme_free_cels(struct nvme_ctrl *ctrl) 4522{ 4523 struct nvme_effects_log *cel; 4524 unsigned long i; 4525 4526 xa_for_each (&ctrl->cels, i, cel) { 4527 xa_erase(&ctrl->cels, i); 4528 kfree(cel); 4529 } 4530 4531 xa_destroy(&ctrl->cels); 4532} 4533 4534static void nvme_free_ctrl(struct device *dev) 4535{ 4536 struct nvme_ctrl *ctrl = 4537 container_of(dev, struct nvme_ctrl, ctrl_device); 4538 struct nvme_subsystem *subsys = ctrl->subsys; 4539 4540 if (!subsys || ctrl->instance != subsys->instance) 4541 ida_simple_remove(&nvme_instance_ida, ctrl->instance); 4542 4543 nvme_free_cels(ctrl); 4544 nvme_mpath_uninit(ctrl); 4545 __free_page(ctrl->discard_page); 4546 4547 if (subsys) { 4548 mutex_lock(&nvme_subsystems_lock); 4549 list_del(&ctrl->subsys_entry); 4550 sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); 4551 mutex_unlock(&nvme_subsystems_lock); 4552 } 4553 4554 
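	/* Let the transport release its private per-controller resources. */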
	ctrl->ops->free_ctrl(ctrl);

	if (subsys)
		nvme_put_subsystem(subsys);
}

/*
 * Initialize an NVMe controller structure. This needs to be called during
 * earliest initialization so that we have the initialized structure around
 * during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	mutex_init(&ctrl->scan_lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	xa_init(&ctrl->cels);
	init_rwsem(&ctrl->namespaces_rwsem);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	ctrl->numa_node = NUMA_NO_NODE;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
	init_waitqueue_head(&ctrl->state_wq);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;

	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
			PAGE_SIZE);
	ctrl->discard_page = alloc_page(GFP_KERNEL);
	if (!ctrl->discard_page) {
		ret = -ENOMEM;
		goto out;
	}

	ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out;
	ctrl->instance = ret;

	device_initialize(&ctrl->ctrl_device);
	ctrl->device = &ctrl->ctrl_device;
	ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance);
	ctrl->device->class = nvme_class;
	ctrl->device->parent = ctrl->dev;
	ctrl->device->groups = nvme_dev_attr_groups;
	ctrl->device->release = nvme_free_ctrl;
	dev_set_drvdata(ctrl->device, ctrl);
	ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance);
	if (ret)
		goto out_release_instance;

	nvme_get_ctrl(ctrl);
	cdev_init(&ctrl->cdev, &nvme_dev_fops);
	ctrl->cdev.owner = ops->module;
	ret = cdev_device_add(&ctrl->cdev, ctrl->device);
	if (ret)
		goto out_free_name;

	/*
	 * Initialize latency tolerance controls. The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
	nvme_mpath_init_ctrl(ctrl);

	return 0;
out_free_name:
	nvme_put_ctrl(ctrl);
	kfree_const(ctrl->device->kobj.name);
out_release_instance:
	ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
	if (ctrl->discard_page)
		__free_page(ctrl->discard_page);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
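/*
 * A minimal sketch of how a transport probe path is expected to use the
 * helper above (illustrative only: "foo" and foo_ctrl_ops stand in for a
 * transport-private container and its nvme_ctrl_ops):
 *
 *	ret = nvme_init_ctrl(&foo->ctrl, dev, &foo_ctrl_ops, quirks);
 *	if (ret)
 *		return ret;
 *	(set up transport resources, create the admin queue, ...)
 *	ret = nvme_reset_ctrl(&foo->ctrl);
 *	if (ret) {
 *		nvme_uninit_ctrl(&foo->ctrl);
 *		nvme_put_ctrl(&foo->ctrl);
 *		return ret;
 *	}
 *
 * The exact ordering differs between transports; the important parts are
 * that nvme_init_ctrl() runs before any other core helper touches the
 * controller, and that teardown ends with nvme_uninit_ctrl() followed by a
 * final nvme_put_ctrl().
 */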
static void nvme_start_ns_queue(struct nvme_ns *ns)
{
	if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_unquiesce_queue(ns->queue);
}

static void nvme_stop_ns_queue(struct nvme_ns *ns)
{
	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
		blk_mq_quiesce_queue(ns->queue);
}

/*
 * Prepare a queue for teardown.
 *
 * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
 * the capacity to 0 after that to avoid blocking dispatchers that may be
 * holding bd_mutex.  This will end buffered writers dirtying pages that can't
 * be synced.
 */
static void nvme_set_queue_dying(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
		return;

	blk_set_queue_dying(ns->queue);
	nvme_start_ns_queue(ns);

	set_capacity(ns->disk, 0);
	nvme_update_bdev_size(ns->disk);
}

/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
		nvme_start_admin_queue(ctrl);

	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_set_queue_dying(ns);

	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);

void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);

int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	up_read(&ctrl->namespaces_rwsem);
	return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);

void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);

void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);

void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_stop_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		nvme_start_ns_queue(ns);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
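/*
 * The freeze and quiesce helpers above are used in matched pairs around a
 * controller reset.  A rough sketch of the usual pattern (illustrative only,
 * the ordering details differ per transport):
 *
 *	nvme_start_freeze(ctrl);
 *	nvme_stop_queues(ctrl);
 *	(tear down and re-establish the I/O queues)
 *	nvme_start_queues(ctrl);
 *	nvme_wait_freeze(ctrl);
 *	(update the tag set if the queue count changed)
 *	nvme_unfreeze(ctrl);
 *
 * nvme_wait_freeze_timeout() is the variant to use when the caller must not
 * block indefinitely on a controller that may never complete its requests.
 */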
void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
{
	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_quiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);

void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
{
	if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
		blk_mq_unquiesce_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_start_admin_queue);

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_sync_io_queues);

void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	nvme_sync_io_queues(ctrl);
	if (ctrl->admin_q)
		blk_sync_queue(ctrl->admin_q);
}
EXPORT_SYMBOL_GPL(nvme_sync_queues);
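/*
 * blk_sync_queue() cancels and waits for the queue's timeout work, so
 * transports typically call nvme_sync_queues() while tearing a controller
 * down (for example early in a reset handler) to ensure no nvme timeout
 * handler is still running against queues that are about to be disabled.
 */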
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file)
{
	if (file->f_op != &nvme_dev_fops)
		return NULL;
	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU);

/*
 * Check we didn't inadvertently grow the command structure sizes:
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_identify) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_zns) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64);
}

static int __init nvme_core_init(void)
{
	int result = -ENOMEM;

	_nvme_check_size();

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		goto out;

	nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_reset_wq)
		goto destroy_wq;

	nvme_delete_wq = alloc_workqueue("nvme-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_delete_wq)
		goto destroy_reset_wq;

	result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme");
	if (result < 0)
		goto destroy_delete_wq;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}
	nvme_class->dev_uevent = nvme_class_uevent;

	nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem");
	if (IS_ERR(nvme_subsys_class)) {
		result = PTR_ERR(nvme_subsys_class);
		goto destroy_class;
	}
	return 0;

destroy_class:
	class_destroy(nvme_class);
unregister_chrdev:
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
destroy_delete_wq:
	destroy_workqueue(nvme_delete_wq);
destroy_reset_wq:
	destroy_workqueue(nvme_reset_wq);
destroy_wq:
	destroy_workqueue(nvme_wq);
out:
	return result;
}

static void __exit nvme_core_exit(void)
{
	class_destroy(nvme_subsys_class);
	class_destroy(nvme_class);
	unregister_chrdev_region(nvme_chr_devt, NVME_MINORS);
	destroy_workqueue(nvme_delete_wq);
	destroy_workqueue(nvme_reset_wq);
	destroy_workqueue(nvme_wq);
	ida_destroy(&nvme_instance_ida);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);
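/*
 * Note: the character device region and class registered above back the
 * per-controller /dev/nvme<instance> nodes set up in nvme_init_ctrl(), while
 * namespace block devices (e.g. /dev/nvme0n1) come from the gendisks
 * allocated in nvme_alloc_ns().
 */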