// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

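/*
 * Reserve the LUN range [lun_begin, lun_end] in the device LUN bitmap.
 * If any LUN in the range is already taken, roll back the bits set so
 * far and return -EBUSY.
 */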
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

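/*
 * Tear down a target device: optionally clear the target's LUNs in the
 * parent device's LUN bitmap, then free the per-channel offset tables,
 * the device map and the target device itself.
 */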
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

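/*
 * Build a target device that exposes the LUN range [lun_begin, lun_end]
 * of the parent device. A per-channel table of LUN offsets is created and
 * mirrored in the parent's reverse map, so that addresses can later be
 * translated between the target and device views.
 */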
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* If op is not set, fall back to the target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

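/*
 * Create a target on top of @dev as described by the create ioctl:
 * validate the configuration, reserve the LUN range, build the target
 * device, allocate a gendisk and request queue, and hand the result to
 * the target type's init function.
 */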
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue(dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = tt->bops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

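/*
 * Allocate the device's reverse map: one entry per channel, with one
 * LUN offset slot per LUN, all initialized to -1 (unmapped).
 */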
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap->chnls);
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

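/*
 * Translate a target-relative address to a device-relative one by adding
 * the channel and LUN offsets recorded in the target's map.
 * nvm_map_to_tgt() below performs the inverse translation using the
 * device's reverse map.
 */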
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

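/*
 * Set up the PPA list of a request. A single address on a single-plane
 * device is stored inline; otherwise a DMA-able list is allocated and
 * every address is replicated across all planes.
 */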
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

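/*
 * Derive the 1.2 per-command flags (plane mode, scrambling, suspend)
 * from the geometry and the request; 2.0 devices take no flags.
 */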
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with the addresses converted back to the
	 * target format.
	 */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}

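/*
 * Read a single sector at @ppa to probe its state. Returns a negative
 * errno on submission failure, 0 if valid data was read, or the positive
 * device status (e.g. NVM_RSP_ERR_EMPTYPAGE) otherwise.
 */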
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scan the first and last page of a 1.2 chunk to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Fold a bad block list from its plane representation into its
 * chunk representation.
 *
 * If any of the planes is marked bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

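/*
 * Emulate the 2.0 chunk report on a 1.2 device: walk the bad block table
 * of every LUN from @slba onwards and fold each one into chunk metadata.
 */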
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

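/*
 * Initialize the per-device state: the LUN bitmap, the target and area
 * lists, the locks and the reverse mapping table.
 */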
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

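/*
 * Register a device with the subsystem: identify it, set up the core
 * structures, create the DMA pool used for PPA lists and out-of-band
 * data, and add it to the global device list.
 */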
int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

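/*
 * Look up the device named in the create ioctl, take a reference on it
 * and create the target; the reference is dropped again if creation
 * fails.
 */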
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("max %zd devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);