// SPDX-License-Identifier: GPL-2.0
/*
 * nvme-lightnvm.c - LightNVM NVMe device
 *
 * Copyright (C) 2014-2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <mb@lightnvm.io>
 */

#include "nvme.h"

#include <linux/nvme.h>
#include <linux/bitops.h>
#include <linux/lightnvm.h>
#include <linux/vmalloc.h>
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>

enum nvme_nvm_admin_opcode {
	nvme_nvm_admin_identity		= 0xe2,
	nvme_nvm_admin_get_bb_tbl	= 0xf2,
	nvme_nvm_admin_set_bb_tbl	= 0xf1,
};

enum nvme_nvm_log_page {
	NVME_NVM_LOG_REPORT_CHUNK	= 0xca,
};

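/*
 * Vendor-specific OCSSD command layouts. Each structure below mirrors the
 * 64-byte NVMe submission queue entry (enforced in _nvme_nvm_check_size())
 * and is wrapped in struct nvme_nvm_command so it can be cast to a regular
 * struct nvme_command for submission.
 */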
struct nvme_nvm_ph_rw {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd2;
	__le64			metadata;
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_erase_blk {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			length;
	__le16			control;
	__le32			dsmgmt;
	__le64			resv;
};

struct nvme_nvm_identity {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__u32			rsvd11[6];
};

struct nvme_nvm_getbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__u64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__u32			rsvd4[4];
};

struct nvme_nvm_setbbtbl {
	__u8			opcode;
	__u8			flags;
	__u16			command_id;
	__le32			nsid;
	__le64			rsvd[2];
	__le64			prp1;
	__le64			prp2;
	__le64			spba;
	__le16			nlb;
	__u8			value;
	__u8			rsvd3;
	__u32			rsvd4[3];
};

struct nvme_nvm_command {
	union {
		struct nvme_common_command common;
		struct nvme_nvm_ph_rw ph_rw;
		struct nvme_nvm_erase_blk erase;
		struct nvme_nvm_identity identity;
		struct nvme_nvm_getbbtbl get_bb;
		struct nvme_nvm_setbbtbl set_bb;
	};
};

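/*
 * OCSSD 1.2 identify data. The response is a single identify page
 * (NVME_IDENTIFY_DATA_SIZE) holding the PPA address format and one
 * configuration group that describes channels, LUNs, planes, blocks and
 * pages.
 */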
struct nvme_nvm_id12_grp {
	__u8			mtype;
	__u8			fmtype;
	__le16			res16;
	__u8			num_ch;
	__u8			num_lun;
	__u8			num_pln;
	__u8			rsvd1;
	__le16			num_chk;
	__le16			num_pg;
	__le16			fpg_sz;
	__le16			csecs;
	__le16			sos;
	__le16			rsvd2;
	__le32			trdt;
	__le32			trdm;
	__le32			tprt;
	__le32			tprm;
	__le32			tbet;
	__le32			tbem;
	__le32			mpos;
	__le32			mccap;
	__le16			cpar;
	__u8			reserved[906];
} __packed;

struct nvme_nvm_id12_addrf {
	__u8			ch_offset;
	__u8			ch_len;
	__u8			lun_offset;
	__u8			lun_len;
	__u8			pln_offset;
	__u8			pln_len;
	__u8			blk_offset;
	__u8			blk_len;
	__u8			pg_offset;
	__u8			pg_len;
	__u8			sec_offset;
	__u8			sec_len;
	__u8			res[4];
} __packed;

struct nvme_nvm_id12 {
	__u8			ver_id;
	__u8			vmnt;
	__u8			cgrps;
	__u8			res;
	__le32			cap;
	__le32			dom;
	struct nvme_nvm_id12_addrf ppaf;
	__u8			resv[228];
	struct nvme_nvm_id12_grp grp;
	__u8			resv2[2880];
} __packed;

struct nvme_nvm_bb_tbl {
	__u8	tblid[4];
	__le16	verid;
	__le16	revid;
	__le32	rvsd1;
	__le32	tblks;
	__le32	tfact;
	__le32	tgrown;
	__le32	tdresv;
	__le32	thresv;
	__le32	rsvd2[8];
	__u8	blk[];
};

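/*
 * OCSSD 2.0 identify data. Same identify page size as 1.2, but the media is
 * described in terms of groups, parallel units, chunks and logical blocks.
 */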
struct nvme_nvm_id20_addrf {
	__u8			grp_len;
	__u8			pu_len;
	__u8			chk_len;
	__u8			lba_len;
	__u8			resv[4];
};

struct nvme_nvm_id20 {
	__u8			mjr;
	__u8			mnr;
	__u8			resv[6];

	struct nvme_nvm_id20_addrf lbaf;

	__le32			mccap;
	__u8			resv2[12];

	__u8			wit;
	__u8			resv3[31];

	/* Geometry */
	__le16			num_grp;
	__le16			num_pu;
	__le32			num_chk;
	__le32			clba;
	__u8			resv4[52];

	/* Write data requirements */
	__le32			ws_min;
	__le32			ws_opt;
	__le32			mw_cunits;
	__le32			maxoc;
	__le32			maxocpu;
	__u8			resv5[44];

	/* Performance related metrics */
	__le32			trdt;
	__le32			trdm;
	__le32			twrt;
	__le32			twrm;
	__le32			tcrst;
	__le32			tcrsm;
	__u8			resv6[40];

	/* Reserved area */
	__u8			resv7[2816];

	/* Vendor specific */
	__u8			vs[1024];
};

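/*
 * Entry of the 2.0 report chunk log page. The layout intentionally matches
 * struct nvm_chk_meta (see _nvme_nvm_check_size()), so translation is a
 * per-field copy plus endianness conversion.
 */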
struct nvme_nvm_chk_meta {
	__u8	state;
	__u8	type;
	__u8	wi;
	__u8	rsvd[5];
	__le64	slba;
	__le64	cnlb;
	__le64	wp;
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_nvm_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_grp) != 960);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12_addrf) != 16);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id12) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id20_addrf) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_id20) != NVME_IDENTIFY_DATA_SIZE);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) != 32);
	BUILD_BUG_ON(sizeof(struct nvme_nvm_chk_meta) !=
						sizeof(struct nvm_chk_meta));
}

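/*
 * Translate the 1.2 PPA address format into the generic lightnvm
 * representation: copy the per-field bit lengths and offsets and derive the
 * matching extraction masks.
 */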
static void nvme_nvm_set_addr_12(struct nvm_addrf_12 *dst,
				 struct nvme_nvm_id12_addrf *src)
{
	dst->ch_len = src->ch_len;
	dst->lun_len = src->lun_len;
	dst->blk_len = src->blk_len;
	dst->pg_len = src->pg_len;
	dst->pln_len = src->pln_len;
	dst->sec_len = src->sec_len;

	dst->ch_offset = src->ch_offset;
	dst->lun_offset = src->lun_offset;
	dst->blk_offset = src->blk_offset;
	dst->pg_offset = src->pg_offset;
	dst->pln_offset = src->pln_offset;
	dst->sec_offset = src->sec_offset;

	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->blk_mask = ((1ULL << dst->blk_len) - 1) << dst->blk_offset;
	dst->pg_mask = ((1ULL << dst->pg_len) - 1) << dst->pg_offset;
	dst->pln_mask = ((1ULL << dst->pln_len) - 1) << dst->pln_offset;
	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

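/*
 * Build the generic geometry from a 1.2 identify page. Only a single
 * configuration group with memory type 0 is supported; page, plane and
 * sector counts are folded into the chunk-oriented view (clba, num_chk)
 * used by the upper layers.
 */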
static int nvme_nvm_setup_12(struct nvme_nvm_id12 *id,
			     struct nvm_geo *geo)
{
	struct nvme_nvm_id12_grp *src;
	int sec_per_pg, sec_per_pl, pg_per_blk;

	if (id->cgrps != 1)
		return -EINVAL;

	src = &id->grp;

	if (src->mtype != 0) {
		pr_err("nvm: memory type not supported\n");
		return -EINVAL;
	}

	/* 1.2 spec. only reports a single version id - unfold */
	geo->major_ver_id = id->ver_id;
	geo->minor_ver_id = 2;

	/* Set compacted version for upper layers */
	geo->version = NVM_OCSSD_SPEC_12;

	geo->num_ch = src->num_ch;
	geo->num_lun = src->num_lun;
	geo->all_luns = geo->num_ch * geo->num_lun;

	geo->num_chk = le16_to_cpu(src->num_chk);

	geo->csecs = le16_to_cpu(src->csecs);
	geo->sos = le16_to_cpu(src->sos);

	pg_per_blk = le16_to_cpu(src->num_pg);
	sec_per_pg = le16_to_cpu(src->fpg_sz) / geo->csecs;
	sec_per_pl = sec_per_pg * src->num_pln;
	geo->clba = sec_per_pl * pg_per_blk;

	geo->all_chunks = geo->all_luns * geo->num_chk;
	geo->total_secs = geo->clba * geo->all_chunks;

	geo->ws_min = sec_per_pg;
	geo->ws_opt = sec_per_pg;
	geo->mw_cunits = geo->ws_opt << 3;	/* default to MLC safe values */

	/* Do not impose values for maximum number of open blocks as it is
	 * unspecified in 1.2. Users of 1.2 must be aware of this and eventually
	 * specify these values through a quirk if restrictions apply.
	 */
	geo->maxoc = geo->all_luns * geo->num_chk;
	geo->maxocpu = geo->num_chk;

	geo->mccap = le32_to_cpu(src->mccap);

	geo->trdt = le32_to_cpu(src->trdt);
	geo->trdm = le32_to_cpu(src->trdm);
	geo->tprt = le32_to_cpu(src->tprt);
	geo->tprm = le32_to_cpu(src->tprm);
	geo->tbet = le32_to_cpu(src->tbet);
	geo->tbem = le32_to_cpu(src->tbem);

	/* 1.2 compatibility */
	geo->vmnt = id->vmnt;
	geo->cap = le32_to_cpu(id->cap);
	geo->dom = le32_to_cpu(id->dom);

	geo->mtype = src->mtype;
	geo->fmtype = src->fmtype;

	geo->cpar = le16_to_cpu(src->cpar);
	geo->mpos = le32_to_cpu(src->mpos);

	geo->pln_mode = NVM_PLANE_SINGLE;

	if (geo->mpos & 0x020202) {
		geo->pln_mode = NVM_PLANE_DOUBLE;
		geo->ws_opt <<= 1;
	} else if (geo->mpos & 0x040404) {
		geo->pln_mode = NVM_PLANE_QUAD;
		geo->ws_opt <<= 2;
	}

	geo->num_pln = src->num_pln;
	geo->num_pg = le16_to_cpu(src->num_pg);
	geo->fpg_sz = le16_to_cpu(src->fpg_sz);

	nvme_nvm_set_addr_12((struct nvm_addrf_12 *)&geo->addrf, &id->ppaf);

	return 0;
}

static void nvme_nvm_set_addr_20(struct nvm_addrf *dst,
				 struct nvme_nvm_id20_addrf *src)
{
	dst->ch_len = src->grp_len;
	dst->lun_len = src->pu_len;
	dst->chk_len = src->chk_len;
	dst->sec_len = src->lba_len;

	dst->sec_offset = 0;
	dst->chk_offset = dst->sec_len;
	dst->lun_offset = dst->chk_offset + dst->chk_len;
	dst->ch_offset = dst->lun_offset + dst->lun_len;

	dst->ch_mask = ((1ULL << dst->ch_len) - 1) << dst->ch_offset;
	dst->lun_mask = ((1ULL << dst->lun_len) - 1) << dst->lun_offset;
	dst->chk_mask = ((1ULL << dst->chk_len) - 1) << dst->chk_offset;
	dst->sec_mask = ((1ULL << dst->sec_len) - 1) << dst->sec_offset;
}

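/*
 * Build the generic geometry from a 2.0 identify page. The 2.0 fields map
 * almost directly onto struct nvm_geo, so this is mostly endianness
 * conversion; write and reset timings land in the tprt/tprm and tbet/tbem
 * fields that 1.2 used for program and erase timings.
 */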
static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id,
			     struct nvm_geo *geo)
{
	geo->major_ver_id = id->mjr;
	geo->minor_ver_id = id->mnr;

	/* Set compacted version for upper layers */
	geo->version = NVM_OCSSD_SPEC_20;

	geo->num_ch = le16_to_cpu(id->num_grp);
	geo->num_lun = le16_to_cpu(id->num_pu);
	geo->all_luns = geo->num_ch * geo->num_lun;

	geo->num_chk = le32_to_cpu(id->num_chk);
	geo->clba = le32_to_cpu(id->clba);

	geo->all_chunks = geo->all_luns * geo->num_chk;
	geo->total_secs = geo->clba * geo->all_chunks;

	geo->ws_min = le32_to_cpu(id->ws_min);
	geo->ws_opt = le32_to_cpu(id->ws_opt);
	geo->mw_cunits = le32_to_cpu(id->mw_cunits);
	geo->maxoc = le32_to_cpu(id->maxoc);
	geo->maxocpu = le32_to_cpu(id->maxocpu);

	geo->trdt = le32_to_cpu(id->trdt);
	geo->trdm = le32_to_cpu(id->trdm);
	geo->tprt = le32_to_cpu(id->twrt);
	geo->tprm = le32_to_cpu(id->twrm);
	geo->tbet = le32_to_cpu(id->tcrst);
	geo->tbem = le32_to_cpu(id->tcrsm);

	nvme_nvm_set_addr_20(&geo->addrf, &id->lbaf);

	return 0;
}

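/*
 * Issue the vendor-specific identify command and dispatch to the 1.2 or
 * 2.0 parser based on the version byte, which sits at the same offset in
 * both layouts.
 */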
static int nvme_nvm_identity(struct nvm_dev *nvmdev)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_id12 *id;
	struct nvme_nvm_command c = {};
	int ret;

	c.identity.opcode = nvme_nvm_admin_identity;
	c.identity.nsid = cpu_to_le32(ns->head->ns_id);

	id = kmalloc(sizeof(struct nvme_nvm_id12), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
				id, sizeof(struct nvme_nvm_id12));
	if (ret) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The 1.2 and 2.0 specifications share the first byte in their geometry
	 * command to make it possible to know what version a device implements.
	 */
	switch (id->ver_id) {
	case 1:
		ret = nvme_nvm_setup_12(id, &nvmdev->geo);
		break;
	case 2:
		ret = nvme_nvm_setup_20((struct nvme_nvm_id20 *)id,
							&nvmdev->geo);
		break;
	default:
		dev_err(ns->ctrl->device, "OCSSD revision not supported (%d)\n",
							id->ver_id);
		ret = -EINVAL;
	}

out:
	kfree(id);
	return ret;
}

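/*
 * Read the 1.2 bad block table for the LUN addressed by @ppa. The device
 * returns one state byte per block (num_chk * num_pln entries), which is
 * copied into @blks after the table header has been validated.
 */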
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
								u8 *blks)
{
	struct request_queue *q = nvmdev->q;
	struct nvm_geo *geo = &nvmdev->geo;
	struct nvme_ns *ns = q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_command c = {};
	struct nvme_nvm_bb_tbl *bb_tbl;
	int nr_blks = geo->num_chk * geo->num_pln;
	int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
	int ret = 0;

	c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
	c.get_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.get_bb.spba = cpu_to_le64(ppa.ppa);

	bb_tbl = kzalloc(tblsz, GFP_KERNEL);
	if (!bb_tbl)
		return -ENOMEM;

	ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
								bb_tbl, tblsz);
	if (ret) {
		dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
		ret = -EIO;
		goto out;
	}

	if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
		bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
		dev_err(ctrl->device, "bbt format mismatch\n");
		ret = -EINVAL;
		goto out;
	}

	if (le16_to_cpu(bb_tbl->verid) != 1) {
		ret = -EINVAL;
		dev_err(ctrl->device, "bbt version not supported\n");
		goto out;
	}

	if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
		ret = -EINVAL;
		dev_err(ctrl->device,
				"bbt unexpected blocks returned (%u!=%u)",
				le32_to_cpu(bb_tbl->tblks), nr_blks);
		goto out;
	}

	memcpy(blks, bb_tbl->blk, geo->num_chk * geo->num_pln);
out:
	kfree(bb_tbl);
	return ret;
}

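/*
 * Update the 1.2 bad block table for @nr_ppas blocks starting at @ppas.
 * The block count is passed to the device as a zero-based value, hence
 * nr_ppas - 1.
 */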
static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
							int nr_ppas, int type)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;
	struct nvme_nvm_command c = {};
	int ret = 0;

	c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
	c.set_bb.nsid = cpu_to_le32(ns->head->ns_id);
	c.set_bb.spba = cpu_to_le64(ppas->ppa);
	c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
	c.set_bb.value = type;

	ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
								NULL, 0);
	if (ret)
		dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
									ret);
	return ret;
}

/*
 * Expect the lba in device format. It is translated to a generic address
 * to compute the byte offset into the report chunk log page.
 */
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
				 sector_t slba, int nchks,
				 struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &ndev->geo;
	struct nvme_ns *ns = ndev->q->queuedata;
	struct nvme_ctrl *ctrl = ns->ctrl;
	struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
	struct ppa_addr ppa;
	size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
	size_t log_pos, offset, len;
	int i, max_len;
	int ret = 0;

	/*
	 * Limit requests to a maximum of 256K to avoid issuing arbitrarily
	 * large requests when the device does not specify a maximum transfer
	 * size.
	 */
	max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);

	dev_meta = kmalloc(max_len, GFP_KERNEL);
	if (!dev_meta)
		return -ENOMEM;

	/* Normalize lba address space to obtain log offset */
	ppa.ppa = slba;
	ppa = dev_to_generic_addr(ndev, ppa);

	log_pos = ppa.m.chk;
	log_pos += ppa.m.pu * geo->num_chk;
	log_pos += ppa.m.grp * geo->num_lun * geo->num_chk;

	offset = log_pos * sizeof(struct nvme_nvm_chk_meta);

	while (left) {
		len = min_t(unsigned int, left, max_len);

		memset(dev_meta, 0, max_len);
		dev_meta_off = dev_meta;

		ret = nvme_get_log(ctrl, ns->head->ns_id,
				NVME_NVM_LOG_REPORT_CHUNK, 0, NVME_CSI_NVM,
				dev_meta, len, offset);
		if (ret) {
			dev_err(ctrl->device, "Get REPORT CHUNK log error\n");
			break;
		}

		for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
			meta->state = dev_meta_off->state;
			meta->type = dev_meta_off->type;
			meta->wi = dev_meta_off->wi;
			meta->slba = le64_to_cpu(dev_meta_off->slba);
			meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
			meta->wp = le64_to_cpu(dev_meta_off->wp);

			meta++;
			dev_meta_off++;
		}

		offset += len;
		left -= len;
	}

	kfree(dev_meta);

	return ret;
}

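/*
 * Translate a lightnvm request into a vendor-specific read/write/erase
 * command: PPA (or PPA list) address, metadata DMA address, control flags
 * and a zero-based sector count.
 */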
static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
				    struct nvme_nvm_command *c)
{
	c->ph_rw.opcode = rqd->opcode;
	c->ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
	c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
	c->ph_rw.control = cpu_to_le16(rqd->flags);
	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
}

static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
{
	struct nvm_rq *rqd = rq->end_io_data;

	rqd->ppa_status = le64_to_cpu(nvme_req(rq)->result.u64);
	rqd->error = nvme_req(rq)->status;
	nvm_end_io(rqd);

	kfree(nvme_req(rq)->cmd);
	blk_mq_free_request(rq);
}

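/*
 * Build a block layer request around the translated command. The caller's
 * bio, if any, is attached; otherwise a default best-effort I/O priority is
 * set.
 */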
static struct request *nvme_nvm_alloc_request(struct request_queue *q,
					      struct nvm_rq *rqd,
					      struct nvme_nvm_command *cmd)
{
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;

	nvme_nvm_rqtocmd(rqd, ns, cmd);

	rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0);
	if (IS_ERR(rq))
		return rq;

	rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;

	if (rqd->bio)
		blk_rq_append_bio(rq, &rqd->bio);
	else
		rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	return rq;
}

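/*
 * Submit an asynchronous lightnvm I/O. The command is allocated separately
 * from the request and freed in the completion handler; an optional kernel
 * buffer is mapped for the data transfer.
 */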
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;
	int ret;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_free_cmd;
	}

	if (buf) {
		ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
				GFP_KERNEL);
		if (ret)
			goto err_free_cmd;
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;

err_free_cmd:
	kfree(cmd);
	return ret;
}

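/*
 * DMA pool helpers used by the lightnvm core for small buffers, such as PPA
 * lists and metadata, that are handed to the device by DMA address.
 */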
static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
					int size)
{
	struct nvme_ns *ns = nvmdev->q->queuedata;

	return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
}

static void nvme_nvm_destroy_dma_pool(void *pool)
{
	struct dma_pool *dma_pool = pool;

	dma_pool_destroy(dma_pool);
}

static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
				    gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return dma_pool_alloc(pool, mem_flags, dma_handler);
}

static void nvme_nvm_dev_dma_free(void *pool, void *addr,
							dma_addr_t dma_handler)
{
	dma_pool_free(pool, addr, dma_handler);
}

static struct nvm_dev_ops nvme_nvm_dev_ops = {
	.identity		= nvme_nvm_identity,

	.get_bb_tbl		= nvme_nvm_get_bb_tbl,
	.set_bb_tbl		= nvme_nvm_set_bb_tbl,

	.get_chk_meta		= nvme_nvm_get_chk_meta,

	.submit_io		= nvme_nvm_submit_io,

	.create_dma_pool	= nvme_nvm_create_dma_pool,
	.destroy_dma_pool	= nvme_nvm_destroy_dma_pool,
	.dev_dma_alloc		= nvme_nvm_dev_dma_alloc,
	.dev_dma_free		= nvme_nvm_dev_dma_free,
};

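/*
 * Execute a user-space vector command synchronously. The PPA list and
 * metadata buffers are bounced through the device DMA pool, while the data
 * buffer is mapped directly from user memory.
 */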
static int nvme_nvm_submit_user_cmd(struct request_queue *q,
				struct nvme_ns *ns,
				struct nvme_nvm_command *vcmd,
				void __user *ubuf, unsigned int bufflen,
				void __user *meta_buf, unsigned int meta_len,
				void __user *ppa_buf, unsigned int ppa_len,
				u32 *result, u64 *status, unsigned int timeout)
{
	bool write = nvme_is_write((struct nvme_command *)vcmd);
	struct nvm_dev *dev = ns->ndev;
	struct gendisk *disk = ns->disk;
	struct request *rq;
	struct bio *bio = NULL;
	__le64 *ppa_list = NULL;
	dma_addr_t ppa_dma;
	__le64 *metadata = NULL;
	dma_addr_t metadata_dma;
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0);
	if (IS_ERR(rq)) {
		ret = -ENOMEM;
		goto err_cmd;
	}

	if (timeout)
		rq->timeout = timeout;

	if (ppa_buf && ppa_len) {
		ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
		if (!ppa_list) {
			ret = -ENOMEM;
			goto err_rq;
		}
		if (copy_from_user(ppa_list, (void __user *)ppa_buf,
						sizeof(u64) * (ppa_len + 1))) {
			ret = -EFAULT;
			goto err_ppa;
		}
		vcmd->ph_rw.spba = cpu_to_le64(ppa_dma);
	} else {
		vcmd->ph_rw.spba = cpu_to_le64((uintptr_t)ppa_buf);
	}

	if (ubuf && bufflen) {
		ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
		if (ret)
			goto err_ppa;
		bio = rq->bio;

		if (meta_buf && meta_len) {
			metadata = dma_pool_alloc(dev->dma_pool, GFP_KERNEL,
								&metadata_dma);
			if (!metadata) {
				ret = -ENOMEM;
				goto err_map;
			}

			if (write) {
				if (copy_from_user(metadata,
						(void __user *)meta_buf,
						meta_len)) {
					ret = -EFAULT;
					goto err_meta;
				}
			}
			vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
		}

		bio->bi_disk = disk;
	}

	blk_execute_rq(q, NULL, rq, 0);

	if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
		ret = -EINTR;
	else if (nvme_req(rq)->status & 0x7ff)
		ret = -EIO;
	if (result)
		*result = nvme_req(rq)->status & 0x7ff;
	if (status)
		*status = le64_to_cpu(nvme_req(rq)->result.u64);

	if (metadata && !ret && !write) {
		if (copy_to_user(meta_buf, (void *)metadata, meta_len))
			ret = -EFAULT;
	}
err_meta:
	if (meta_buf && meta_len)
		dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map:
	if (bio)
		blk_rq_unmap_user(bio);
err_ppa:
	if (ppa_buf && ppa_len)
		dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);
err_rq:
	blk_mq_free_request(rq);
err_cmd:
	return ret;
}

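/*
 * NVME_NVM_IOCTL_SUBMIT_VIO: build a vector read/write command from the
 * user descriptor and run it on the I/O queue.
 */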
static int nvme_nvm_submit_vio(struct nvme_ns *ns,
					struct nvm_user_vio __user *uvio)
{
	struct nvm_user_vio vio;
	struct nvme_nvm_command c;
	unsigned int length;
	int ret;

	if (copy_from_user(&vio, uvio, sizeof(vio)))
		return -EFAULT;
	if (vio.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.ph_rw.opcode = vio.opcode;
	c.ph_rw.nsid = cpu_to_le32(ns->head->ns_id);
	c.ph_rw.control = cpu_to_le16(vio.control);
	c.ph_rw.length = cpu_to_le16(vio.nppas);

	length = (vio.nppas + 1) << ns->lba_shift;

	ret = nvme_nvm_submit_user_cmd(ns->queue, ns, &c,
			(void __user *)(uintptr_t)vio.addr, length,
			(void __user *)(uintptr_t)vio.metadata,
							vio.metadata_len,
			(void __user *)(uintptr_t)vio.ppa_list, vio.nppas,
			&vio.result, &vio.status, 0);

	if (ret && copy_to_user(uvio, &vio, sizeof(vio)))
		return -EFAULT;

	return ret;
}

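/*
 * Admin and I/O vector passthrough ioctls. Only the get bad block table
 * opcode (0xF2) is allowed without CAP_SYS_ADMIN.
 */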
static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
					struct nvm_passthru_vio __user *uvcmd)
{
	struct nvm_passthru_vio vcmd;
	struct nvme_nvm_command c;
	struct request_queue *q;
	unsigned int timeout = 0;
	int ret;

	if (copy_from_user(&vcmd, uvcmd, sizeof(vcmd)))
		return -EFAULT;
	if ((vcmd.opcode != 0xF2) && (!capable(CAP_SYS_ADMIN)))
		return -EACCES;
	if (vcmd.flags)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.common.opcode = vcmd.opcode;
	c.common.nsid = cpu_to_le32(ns->head->ns_id);
	c.common.cdw2[0] = cpu_to_le32(vcmd.cdw2);
	c.common.cdw2[1] = cpu_to_le32(vcmd.cdw3);
	/* cdw11-12 */
	c.ph_rw.length = cpu_to_le16(vcmd.nppas);
	c.ph_rw.control  = cpu_to_le16(vcmd.control);
	c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
	c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
	c.common.cdw15 = cpu_to_le32(vcmd.cdw15);

	if (vcmd.timeout_ms)
		timeout = msecs_to_jiffies(vcmd.timeout_ms);

	q = admin ? ns->ctrl->admin_q : ns->queue;

	ret = nvme_nvm_submit_user_cmd(q, ns,
			(struct nvme_nvm_command *)&c,
			(void __user *)(uintptr_t)vcmd.addr, vcmd.data_len,
			(void __user *)(uintptr_t)vcmd.metadata,
							vcmd.metadata_len,
			(void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,
			&vcmd.result, &vcmd.status, timeout);

	if (ret && copy_to_user(uvcmd, &vcmd, sizeof(vcmd)))
		return -EFAULT;

	return ret;
}

int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NVME_NVM_IOCTL_ADMIN_VIO:
		return nvme_nvm_user_vcmd(ns, 1, (void __user *)arg);
	case NVME_NVM_IOCTL_IO_VIO:
		return nvme_nvm_user_vcmd(ns, 0, (void __user *)arg);
	case NVME_NVM_IOCTL_SUBMIT_VIO:
		return nvme_nvm_submit_vio(ns, (void __user *)arg);
	default:
		return -ENOTTY;
	}
}

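/*
 * Register the namespace with the lightnvm subsystem. Sector and OOB sizes
 * are seeded from the NVMe namespace format and may be overridden later by
 * the 1.2 identify data.
 */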
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
	struct request_queue *q = ns->queue;
	struct nvm_dev *dev;
	struct nvm_geo *geo;

	_nvme_nvm_check_size();

	dev = nvm_alloc_dev(node);
	if (!dev)
		return -ENOMEM;

	/* Note that csecs and sos will be overridden if it is a 1.2 drive. */
	geo = &dev->geo;
	geo->csecs = 1 << ns->lba_shift;
	geo->sos = ns->ms;
	if (ns->features & NVME_NS_EXT_LBAS)
		geo->ext = true;
	else
		geo->ext = false;
	geo->mdts = ns->ctrl->max_hw_sectors;

	dev->q = q;
	memcpy(dev->name, disk_name, DISK_NAME_LEN);
	dev->ops = &nvme_nvm_dev_ops;
	dev->private_data = ns;
	ns->ndev = dev;

	return nvm_register(dev);
}

void nvme_nvm_unregister(struct nvme_ns *ns)
{
	nvm_unregister(ns->ndev);
}

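/*
 * Sysfs support: one show routine for the version-agnostic attributes and
 * one each for the 1.2 and 2.0 geometry attributes, all exposed through the
 * "lightnvm" attribute group.
 */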
static ssize_t nvm_dev_attr_show(struct device *dev,
		struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_geo *geo = &ndev->geo;
	struct attribute *attr;

	if (!ndev)
		return 0;

	attr = &dattr->attr;

	if (strcmp(attr->name, "version") == 0) {
		if (geo->major_ver_id == 1)
			return scnprintf(page, PAGE_SIZE, "%u\n",
						geo->major_ver_id);
		else
			return scnprintf(page, PAGE_SIZE, "%u.%u\n",
						geo->major_ver_id,
						geo->minor_ver_id);
	} else if (strcmp(attr->name, "capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->cap);
	} else if (strcmp(attr->name, "read_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdt);
	} else if (strcmp(attr->name, "read_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->trdm);
	} else {
		return scnprintf(page,
				 PAGE_SIZE,
				 "Unhandled attr(%s) in `%s`\n",
				 attr->name, __func__);
	}
}

static ssize_t nvm_dev_attr_show_ppaf(struct nvm_addrf_12 *ppaf, char *page)
{
	return scnprintf(page, PAGE_SIZE,
		"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
				ppaf->ch_offset, ppaf->ch_len,
				ppaf->lun_offset, ppaf->lun_len,
				ppaf->pln_offset, ppaf->pln_len,
				ppaf->blk_offset, ppaf->blk_len,
				ppaf->pg_offset, ppaf->pg_len,
				ppaf->sec_offset, ppaf->sec_len);
}

static ssize_t nvm_dev_attr_show_12(struct device *dev,
		struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_geo *geo = &ndev->geo;
	struct attribute *attr;

	if (!ndev)
		return 0;

	attr = &dattr->attr;

	if (strcmp(attr->name, "vendor_opcode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->vmnt);
	} else if (strcmp(attr->name, "device_mode") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->dom);
	/* kept for compatibility */
	} else if (strcmp(attr->name, "media_manager") == 0) {
		return scnprintf(page, PAGE_SIZE, "%s\n", "gennvm");
	} else if (strcmp(attr->name, "ppa_format") == 0) {
		return nvm_dev_attr_show_ppaf((void *)&geo->addrf, page);
	} else if (strcmp(attr->name, "media_type") == 0) {	/* u8 */
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->mtype);
	} else if (strcmp(attr->name, "flash_media_type") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->fmtype);
	} else if (strcmp(attr->name, "num_channels") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
	} else if (strcmp(attr->name, "num_luns") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
	} else if (strcmp(attr->name, "num_planes") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pln);
	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
	} else if (strcmp(attr->name, "num_pages") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_pg);
	} else if (strcmp(attr->name, "page_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->fpg_sz);
	} else if (strcmp(attr->name, "hw_sector_size") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->csecs);
	} else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->sos);
	} else if (strcmp(attr->name, "prog_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
	} else if (strcmp(attr->name, "prog_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
	} else if (strcmp(attr->name, "erase_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
	} else if (strcmp(attr->name, "erase_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
	} else if (strcmp(attr->name, "multiplane_modes") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mpos);
	} else if (strcmp(attr->name, "media_capabilities") == 0) {
		return scnprintf(page, PAGE_SIZE, "0x%08x\n", geo->mccap);
	} else if (strcmp(attr->name, "max_phys_secs") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", NVM_MAX_VLBA);
	} else {
		return scnprintf(page, PAGE_SIZE,
			"Unhandled attr(%s) in `%s`\n",
			attr->name, __func__);
	}
}

static ssize_t nvm_dev_attr_show_20(struct device *dev,
		struct device_attribute *dattr, char *page)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
	struct nvm_dev *ndev = ns->ndev;
	struct nvm_geo *geo = &ndev->geo;
	struct attribute *attr;

	if (!ndev)
		return 0;

	attr = &dattr->attr;

	if (strcmp(attr->name, "groups") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_ch);
	} else if (strcmp(attr->name, "punits") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_lun);
	} else if (strcmp(attr->name, "chunks") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->num_chk);
	} else if (strcmp(attr->name, "clba") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->clba);
	} else if (strcmp(attr->name, "ws_min") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_min);
	} else if (strcmp(attr->name, "ws_opt") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->ws_opt);
	} else if (strcmp(attr->name, "maxoc") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxoc);
	} else if (strcmp(attr->name, "maxocpu") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->maxocpu);
	} else if (strcmp(attr->name, "mw_cunits") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->mw_cunits);
	} else if (strcmp(attr->name, "write_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprt);
	} else if (strcmp(attr->name, "write_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tprm);
	} else if (strcmp(attr->name, "reset_typ") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbet);
	} else if (strcmp(attr->name, "reset_max") == 0) {
		return scnprintf(page, PAGE_SIZE, "%u\n", geo->tbem);
	} else {
		return scnprintf(page, PAGE_SIZE,
			"Unhandled attr(%s) in `%s`\n",
			attr->name, __func__);
	}
}

#define NVM_DEV_ATTR_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL)
#define NVM_DEV_ATTR_12_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_12, NULL)
#define NVM_DEV_ATTR_20_RO(_name)					\
	DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show_20, NULL)

/* general attributes */
static NVM_DEV_ATTR_RO(version);
static NVM_DEV_ATTR_RO(capabilities);

static NVM_DEV_ATTR_RO(read_typ);
static NVM_DEV_ATTR_RO(read_max);

/* 1.2 values */
static NVM_DEV_ATTR_12_RO(vendor_opcode);
static NVM_DEV_ATTR_12_RO(device_mode);
static NVM_DEV_ATTR_12_RO(ppa_format);
static NVM_DEV_ATTR_12_RO(media_manager);
static NVM_DEV_ATTR_12_RO(media_type);
static NVM_DEV_ATTR_12_RO(flash_media_type);
static NVM_DEV_ATTR_12_RO(num_channels);
static NVM_DEV_ATTR_12_RO(num_luns);
static NVM_DEV_ATTR_12_RO(num_planes);
static NVM_DEV_ATTR_12_RO(num_blocks);
static NVM_DEV_ATTR_12_RO(num_pages);
static NVM_DEV_ATTR_12_RO(page_size);
static NVM_DEV_ATTR_12_RO(hw_sector_size);
static NVM_DEV_ATTR_12_RO(oob_sector_size);
static NVM_DEV_ATTR_12_RO(prog_typ);
static NVM_DEV_ATTR_12_RO(prog_max);
static NVM_DEV_ATTR_12_RO(erase_typ);
static NVM_DEV_ATTR_12_RO(erase_max);
static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);

/* 2.0 values */
static NVM_DEV_ATTR_20_RO(groups);
static NVM_DEV_ATTR_20_RO(punits);
static NVM_DEV_ATTR_20_RO(chunks);
static NVM_DEV_ATTR_20_RO(clba);
static NVM_DEV_ATTR_20_RO(ws_min);
static NVM_DEV_ATTR_20_RO(ws_opt);
static NVM_DEV_ATTR_20_RO(maxoc);
static NVM_DEV_ATTR_20_RO(maxocpu);
static NVM_DEV_ATTR_20_RO(mw_cunits);
static NVM_DEV_ATTR_20_RO(write_typ);
static NVM_DEV_ATTR_20_RO(write_max);
static NVM_DEV_ATTR_20_RO(reset_typ);
static NVM_DEV_ATTR_20_RO(reset_max);

static struct attribute *nvm_dev_attrs[] = {
	/* version agnostic attrs */
	&dev_attr_version.attr,
	&dev_attr_capabilities.attr,
	&dev_attr_read_typ.attr,
	&dev_attr_read_max.attr,

	/* 1.2 attrs */
	&dev_attr_vendor_opcode.attr,
	&dev_attr_device_mode.attr,
	&dev_attr_media_manager.attr,
	&dev_attr_ppa_format.attr,
	&dev_attr_media_type.attr,
	&dev_attr_flash_media_type.attr,
	&dev_attr_num_channels.attr,
	&dev_attr_num_luns.attr,
	&dev_attr_num_planes.attr,
	&dev_attr_num_blocks.attr,
	&dev_attr_num_pages.attr,
	&dev_attr_page_size.attr,
	&dev_attr_hw_sector_size.attr,
	&dev_attr_oob_sector_size.attr,
	&dev_attr_prog_typ.attr,
	&dev_attr_prog_max.attr,
	&dev_attr_erase_typ.attr,
	&dev_attr_erase_max.attr,
	&dev_attr_multiplane_modes.attr,
	&dev_attr_media_capabilities.attr,
	&dev_attr_max_phys_secs.attr,

	/* 2.0 attrs */
	&dev_attr_groups.attr,
	&dev_attr_punits.attr,
	&dev_attr_chunks.attr,
	&dev_attr_clba.attr,
	&dev_attr_ws_min.attr,
	&dev_attr_ws_opt.attr,
	&dev_attr_maxoc.attr,
	&dev_attr_maxocpu.attr,
	&dev_attr_mw_cunits.attr,

	&dev_attr_write_typ.attr,
	&dev_attr_write_max.attr,
	&dev_attr_reset_typ.attr,
	&dev_attr_reset_max.attr,

	NULL,
};

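/*
 * Expose only the attribute set that matches the detected OCSSD revision;
 * the version-agnostic attributes are always visible.
 */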
static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
				     struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct nvme_ns *ns = disk->private_data;
	struct nvm_dev *ndev = ns->ndev;
	struct device_attribute *dev_attr =
		container_of(attr, typeof(*dev_attr), attr);

	if (!ndev)
		return 0;

	if (dev_attr->show == nvm_dev_attr_show)
		return attr->mode;

	switch (ndev->geo.major_ver_id) {
	case 1:
		if (dev_attr->show == nvm_dev_attr_show_12)
			return attr->mode;
		break;
	case 2:
		if (dev_attr->show == nvm_dev_attr_show_20)
			return attr->mode;
		break;
	}

	return 0;
}

const struct attribute_group nvme_nvm_attr_group = {
	.name		= "lightnvm",
	.attrs		= nvm_dev_attrs,
	.is_visible	= nvm_dev_attrs_visible,
};