1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
 * Userspace block device - a block device whose IO is handled from userspace
 *
 * Makes full use of the io_uring passthrough command for communicating with
 * the ublk userspace daemon (ublksrvd) for handling basic IO requests.
7 *
8 * Copyright 2022 Ming Lei <ming.lei@redhat.com>
9 *
10 * (part of code stolen from loop.c)
11 */
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/sched.h>
15#include <linux/fs.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/stat.h>
19#include <linux/errno.h>
20#include <linux/major.h>
21#include <linux/wait.h>
22#include <linux/blkdev.h>
23#include <linux/init.h>
24#include <linux/swap.h>
25#include <linux/slab.h>
26#include <linux/compat.h>
27#include <linux/mutex.h>
28#include <linux/writeback.h>
29#include <linux/completion.h>
30#include <linux/highmem.h>
31#include <linux/sysfs.h>
32#include <linux/miscdevice.h>
33#include <linux/falloc.h>
34#include <linux/uio.h>
35#include <linux/ioprio.h>
36#include <linux/sched/mm.h>
37#include <linux/uaccess.h>
38#include <linux/cdev.h>
39#include <linux/io_uring.h>
40#include <linux/blk-mq.h>
41#include <linux/delay.h>
42#include <linux/mm.h>
43#include <asm/page.h>
44#include <linux/task_work.h>
45#include <linux/namei.h>
46#include <linux/kref.h>
47#include <uapi/linux/ublk_cmd.h>
48
49#define UBLK_MINORS		(1U << MINORBITS)
50
51/* All UBLK_F_* have to be included into UBLK_F_ALL */
52#define UBLK_F_ALL (UBLK_F_SUPPORT_ZERO_COPY \
53		| UBLK_F_URING_CMD_COMP_IN_TASK \
54		| UBLK_F_NEED_GET_DATA \
55		| UBLK_F_USER_RECOVERY \
56		| UBLK_F_USER_RECOVERY_REISSUE \
57		| UBLK_F_UNPRIVILEGED_DEV \
58		| UBLK_F_CMD_IOCTL_ENCODE \
59		| UBLK_F_USER_COPY \
60		| UBLK_F_ZONED)
61
62/* All UBLK_PARAM_TYPE_* should be included here */
63#define UBLK_PARAM_TYPE_ALL                                \
64	(UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD | \
65	 UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED)
66
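/*
 * Per-request driver data, allocated as the blk-mq pdu (see tag_set.cmd_size):
 * @node queues the request on ubq->io_cmds for dispatch to the daemon,
 * @ref is the request reference used with UBLK_F_USER_COPY, and the
 * remaining fields carry REPORT_ZONES parameters for zoned devices.
 */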
67struct ublk_rq_data {
68	struct llist_node node;
69
70	struct kref ref;
71	__u64 sector;
72	__u32 operation;
73	__u32 nr_zones;
74};
75
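/* driver-private pdu stored inside io_uring_cmd, see ublk_get_uring_cmd_pdu() */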
76struct ublk_uring_cmd_pdu {
77	struct ublk_queue *ubq;
78};
79
/*
 * io command is active: sqe cmd is received, and its cqe isn't done
 *
 * If the flag is set, the io command is owned by the ublk driver, and is
 * waiting for an incoming blk-mq request from the ublk block device.
 *
 * If the flag is cleared, the io command will be completed, and owned by
 * the ublk server.
 */
89#define UBLK_IO_FLAG_ACTIVE	0x01
90
/*
 * IO command is completed via cqe, and it is being handled by ublksrv, and
 * not committed yet
 *
 * Basically mutually exclusive with UBLK_IO_FLAG_ACTIVE, so it can be used
 * for cross verification
 */
98#define UBLK_IO_FLAG_OWNED_BY_SRV 0x02
99
100/*
101 * IO command is aborted, so this flag is set in case of
102 * !UBLK_IO_FLAG_ACTIVE.
103 *
104 * After this flag is observed, any pending or new incoming request
105 * associated with this io command will be failed immediately
106 */
107#define UBLK_IO_FLAG_ABORTED 0x04
108
/*
 * UBLK_IO_FLAG_NEED_GET_DATA is set when the IO command requires a data
 * buffer address from ublksrv.
 *
 * Then bio data can be copied into this data buffer for a WRITE request
 * after the IO command is issued again and UBLK_IO_FLAG_NEED_GET_DATA is unset.
 */
116#define UBLK_IO_FLAG_NEED_GET_DATA 0x08
117
118/* atomic RW with ubq->cancel_lock */
119#define UBLK_IO_FLAG_CANCELED	0x80000000
120
121struct ublk_io {
122	/* userspace buffer address from io cmd */
123	__u64	addr;
124	unsigned int flags;
125	int res;
126
127	struct io_uring_cmd *cmd;
128};
129
130struct ublk_queue {
131	int q_id;
132	int q_depth;
133
134	unsigned long flags;
135	struct task_struct	*ubq_daemon;
136	char *io_cmd_buf;
137
138	struct llist_head	io_cmds;
139
140	unsigned long io_addr;	/* mapped vm address */
141	unsigned int max_io_sz;
142	bool force_abort;
143	bool timeout;
144	unsigned short nr_io_ready;	/* how many ios setup */
145	spinlock_t		cancel_lock;
146	struct ublk_device *dev;
147	struct ublk_io ios[];
148};
149
150#define UBLK_DAEMON_MONITOR_PERIOD	(5 * HZ)
151
152struct ublk_device {
153	struct gendisk		*ub_disk;
154
155	char	*__queues;
156
157	unsigned int	queue_size;
158	struct ublksrv_ctrl_dev_info	dev_info;
159
160	struct blk_mq_tag_set	tag_set;
161
162	struct cdev		cdev;
163	struct device		cdev_dev;
164
165#define UB_STATE_OPEN		0
166#define UB_STATE_USED		1
167#define UB_STATE_DELETED	2
168	unsigned long		state;
169	int			ub_number;
170
171	struct mutex		mutex;
172
173	spinlock_t		mm_lock;
174	struct mm_struct	*mm;
175
176	struct ublk_params	params;
177
178	struct completion	completion;
179	unsigned int		nr_queues_ready;
180	unsigned int		nr_privileged_daemon;
181
	/*
	 * Our ubq->ubq_daemon may be killed without any notification, so
	 * monitor each queue's daemon periodically
	 */
186	struct delayed_work	monitor_work;
187	struct work_struct	quiesce_work;
188	struct work_struct	stop_work;
189};
190
191/* header of ublk_params */
192struct ublk_params_header {
193	__u32	len;
194	__u32	types;
195};
196
197static inline unsigned int ublk_req_build_flags(struct request *req);
198static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
199						   int tag);
200
201static inline bool ublk_dev_is_user_copy(const struct ublk_device *ub)
202{
203	return ub->dev_info.flags & UBLK_F_USER_COPY;
204}
205
206static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
207{
208	return ub->dev_info.flags & UBLK_F_ZONED;
209}
210
211static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
212{
213	return ubq->flags & UBLK_F_ZONED;
214}
215
216#ifdef CONFIG_BLK_DEV_ZONED
217
218static int ublk_get_nr_zones(const struct ublk_device *ub)
219{
220	const struct ublk_param_basic *p = &ub->params.basic;
221
222	/* Zone size is a power of 2 */
223	return p->dev_sectors >> ilog2(p->chunk_sectors);
224}
225
226static int ublk_revalidate_disk_zones(struct ublk_device *ub)
227{
228	return blk_revalidate_disk_zones(ub->ub_disk, NULL);
229}
230
231static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
232{
233	const struct ublk_param_zoned *p = &ub->params.zoned;
234	int nr_zones;
235
236	if (!ublk_dev_is_zoned(ub))
237		return -EINVAL;
238
239	if (!p->max_zone_append_sectors)
240		return -EINVAL;
241
242	nr_zones = ublk_get_nr_zones(ub);
243
244	if (p->max_active_zones > nr_zones)
245		return -EINVAL;
246
247	if (p->max_open_zones > nr_zones)
248		return -EINVAL;
249
250	return 0;
251}
252
253static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
254{
255	const struct ublk_param_zoned *p = &ub->params.zoned;
256
257	disk_set_zoned(ub->ub_disk, BLK_ZONED_HM);
258	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ub->ub_disk->queue);
259	blk_queue_required_elevator_features(ub->ub_disk->queue,
260					     ELEVATOR_F_ZBD_SEQ_WRITE);
261	disk_set_max_active_zones(ub->ub_disk, p->max_active_zones);
262	disk_set_max_open_zones(ub->ub_disk, p->max_open_zones);
263	blk_queue_max_zone_append_sectors(ub->ub_disk->queue, p->max_zone_append_sectors);
264
265	ub->ub_disk->nr_zones = ublk_get_nr_zones(ub);
266
267	return 0;
268}
269
270/* Based on virtblk_alloc_report_buffer */
271static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
272				      unsigned int nr_zones, size_t *buflen)
273{
274	struct request_queue *q = ublk->ub_disk->queue;
275	size_t bufsize;
276	void *buf;
277
278	nr_zones = min_t(unsigned int, nr_zones,
279			 ublk->ub_disk->nr_zones);
280
281	bufsize = nr_zones * sizeof(struct blk_zone);
282	bufsize =
283		min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT);
284
285	while (bufsize >= sizeof(struct blk_zone)) {
286		buf = kvmalloc(bufsize, GFP_KERNEL | __GFP_NORETRY);
287		if (buf) {
288			*buflen = bufsize;
289			return buf;
290		}
291		bufsize >>= 1;
292	}
293
294	*buflen = 0;
295	return NULL;
296}
297
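/*
 * Implement report_zones by sending UBLK_IO_OP_REPORT_ZONES to ublksrv via
 * REQ_OP_DRV_IN requests; the struct blk_zone array filled by the server is
 * then passed to the report_zones_cb callback one zone at a time.
 */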
298static int ublk_report_zones(struct gendisk *disk, sector_t sector,
299		      unsigned int nr_zones, report_zones_cb cb, void *data)
300{
301	struct ublk_device *ub = disk->private_data;
302	unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
303	unsigned int first_zone = sector >> ilog2(zone_size_sectors);
304	unsigned int done_zones = 0;
305	unsigned int max_zones_per_request;
306	int ret;
307	struct blk_zone *buffer;
308	size_t buffer_length;
309
310	nr_zones = min_t(unsigned int, ub->ub_disk->nr_zones - first_zone,
311			 nr_zones);
312
313	buffer = ublk_alloc_report_buffer(ub, nr_zones, &buffer_length);
314	if (!buffer)
315		return -ENOMEM;
316
317	max_zones_per_request = buffer_length / sizeof(struct blk_zone);
318
319	while (done_zones < nr_zones) {
320		unsigned int remaining_zones = nr_zones - done_zones;
321		unsigned int zones_in_request =
322			min_t(unsigned int, remaining_zones, max_zones_per_request);
323		struct request *req;
324		struct ublk_rq_data *pdu;
325		blk_status_t status;
326
327		memset(buffer, 0, buffer_length);
328
329		req = blk_mq_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
330		if (IS_ERR(req)) {
331			ret = PTR_ERR(req);
332			goto out;
333		}
334
335		pdu = blk_mq_rq_to_pdu(req);
336		pdu->operation = UBLK_IO_OP_REPORT_ZONES;
337		pdu->sector = sector;
338		pdu->nr_zones = zones_in_request;
339
340		ret = blk_rq_map_kern(disk->queue, req, buffer, buffer_length,
341					GFP_KERNEL);
342		if (ret) {
343			blk_mq_free_request(req);
344			goto out;
345		}
346
347		status = blk_execute_rq(req, 0);
348		ret = blk_status_to_errno(status);
349		blk_mq_free_request(req);
350		if (ret)
351			goto out;
352
353		for (unsigned int i = 0; i < zones_in_request; i++) {
354			struct blk_zone *zone = buffer + i;
355
356			/* A zero length zone means no more zones in this response */
357			if (!zone->len)
358				break;
359
360			ret = cb(zone, i, data);
361			if (ret)
362				goto out;
363
364			done_zones++;
365			sector += zone_size_sectors;
366
367		}
368	}
369
370	ret = done_zones;
371
372out:
373	kvfree(buffer);
374	return ret;
375}
376
377static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
378					 struct request *req)
379{
380	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
381	struct ublk_io *io = &ubq->ios[req->tag];
382	struct ublk_rq_data *pdu = blk_mq_rq_to_pdu(req);
383	u32 ublk_op;
384
385	switch (req_op(req)) {
386	case REQ_OP_ZONE_OPEN:
387		ublk_op = UBLK_IO_OP_ZONE_OPEN;
388		break;
389	case REQ_OP_ZONE_CLOSE:
390		ublk_op = UBLK_IO_OP_ZONE_CLOSE;
391		break;
392	case REQ_OP_ZONE_FINISH:
393		ublk_op = UBLK_IO_OP_ZONE_FINISH;
394		break;
395	case REQ_OP_ZONE_RESET:
396		ublk_op = UBLK_IO_OP_ZONE_RESET;
397		break;
398	case REQ_OP_ZONE_APPEND:
399		ublk_op = UBLK_IO_OP_ZONE_APPEND;
400		break;
401	case REQ_OP_ZONE_RESET_ALL:
402		ublk_op = UBLK_IO_OP_ZONE_RESET_ALL;
403		break;
404	case REQ_OP_DRV_IN:
405		ublk_op = pdu->operation;
406		switch (ublk_op) {
407		case UBLK_IO_OP_REPORT_ZONES:
408			iod->op_flags = ublk_op | ublk_req_build_flags(req);
409			iod->nr_zones = pdu->nr_zones;
410			iod->start_sector = pdu->sector;
411			return BLK_STS_OK;
412		default:
413			return BLK_STS_IOERR;
414		}
415	case REQ_OP_DRV_OUT:
416		/* We do not support drv_out */
417		return BLK_STS_NOTSUPP;
418	default:
419		return BLK_STS_IOERR;
420	}
421
422	iod->op_flags = ublk_op | ublk_req_build_flags(req);
423	iod->nr_sectors = blk_rq_sectors(req);
424	iod->start_sector = blk_rq_pos(req);
425	iod->addr = io->addr;
426
427	return BLK_STS_OK;
428}
429
430#else
431
432#define ublk_report_zones (NULL)
433
434static int ublk_dev_param_zoned_validate(const struct ublk_device *ub)
435{
436	return -EOPNOTSUPP;
437}
438
439static int ublk_dev_param_zoned_apply(struct ublk_device *ub)
440{
441	return -EOPNOTSUPP;
442}
443
444static int ublk_revalidate_disk_zones(struct ublk_device *ub)
445{
446	return 0;
447}
448
449static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
450					 struct request *req)
451{
452	return BLK_STS_NOTSUPP;
453}
454
455#endif
456
457static inline void __ublk_complete_rq(struct request *req);
458static void ublk_complete_rq(struct kref *ref);
459
460static dev_t ublk_chr_devt;
461static const struct class ublk_chr_class = {
462	.name = "ublk-char",
463};
464
465static DEFINE_IDR(ublk_index_idr);
466static DEFINE_SPINLOCK(ublk_idr_lock);
467static wait_queue_head_t ublk_idr_wq;	/* wait until one idr is freed */
468
469static DEFINE_MUTEX(ublk_ctl_mutex);
470
/*
 * Max number of ublk devices allowed to be added
 *
 * It can be extended to a per-user limit in the future, or even be
 * controlled by cgroup.
 */
477static unsigned int ublks_max = 64;
478static unsigned int ublks_added;	/* protected by ublk_ctl_mutex */
479
480static struct miscdevice ublk_misc;
481
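/*
 * With UBLK_F_USER_COPY, the ublk server transfers IO data by reading from or
 * writing to the ublk char device at a file position which encodes the target
 * io, i.e.
 *
 *	pos = UBLKSRV_IO_BUF_OFFSET + ((__u64)q_id << UBLK_QID_OFF) +
 *	      ((__u64)tag << UBLK_TAG_OFF) + byte_offset_in_io
 *
 * The helpers below decode the hw queue id, in-io buffer offset and tag from
 * such a position.
 */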
482static inline unsigned ublk_pos_to_hwq(loff_t pos)
483{
484	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_QID_OFF) &
485		UBLK_QID_BITS_MASK;
486}
487
488static inline unsigned ublk_pos_to_buf_off(loff_t pos)
489{
490	return (pos - UBLKSRV_IO_BUF_OFFSET) & UBLK_IO_BUF_BITS_MASK;
491}
492
493static inline unsigned ublk_pos_to_tag(loff_t pos)
494{
495	return ((pos - UBLKSRV_IO_BUF_OFFSET) >> UBLK_TAG_OFF) &
496		UBLK_TAG_BITS_MASK;
497}
498
499static void ublk_dev_param_basic_apply(struct ublk_device *ub)
500{
501	struct request_queue *q = ub->ub_disk->queue;
502	const struct ublk_param_basic *p = &ub->params.basic;
503
504	blk_queue_logical_block_size(q, 1 << p->logical_bs_shift);
505	blk_queue_physical_block_size(q, 1 << p->physical_bs_shift);
506	blk_queue_io_min(q, 1 << p->io_min_shift);
507	blk_queue_io_opt(q, 1 << p->io_opt_shift);
508
509	blk_queue_write_cache(q, p->attrs & UBLK_ATTR_VOLATILE_CACHE,
510			p->attrs & UBLK_ATTR_FUA);
511	if (p->attrs & UBLK_ATTR_ROTATIONAL)
512		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
513	else
514		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
515
516	blk_queue_max_hw_sectors(q, p->max_sectors);
517	blk_queue_chunk_sectors(q, p->chunk_sectors);
518	blk_queue_virt_boundary(q, p->virt_boundary_mask);
519
520	if (p->attrs & UBLK_ATTR_READ_ONLY)
521		set_disk_ro(ub->ub_disk, true);
522
523	set_capacity(ub->ub_disk, p->dev_sectors);
524}
525
526static void ublk_dev_param_discard_apply(struct ublk_device *ub)
527{
528	struct request_queue *q = ub->ub_disk->queue;
529	const struct ublk_param_discard *p = &ub->params.discard;
530
531	q->limits.discard_alignment = p->discard_alignment;
532	q->limits.discard_granularity = p->discard_granularity;
533	blk_queue_max_discard_sectors(q, p->max_discard_sectors);
534	blk_queue_max_write_zeroes_sectors(q,
535			p->max_write_zeroes_sectors);
536	blk_queue_max_discard_segments(q, p->max_discard_segments);
537}
538
539static int ublk_validate_params(const struct ublk_device *ub)
540{
541	/* basic param is the only one which must be set */
542	if (ub->params.types & UBLK_PARAM_TYPE_BASIC) {
543		const struct ublk_param_basic *p = &ub->params.basic;
544
545		if (p->logical_bs_shift > PAGE_SHIFT || p->logical_bs_shift < 9)
546			return -EINVAL;
547
548		if (p->logical_bs_shift > p->physical_bs_shift)
549			return -EINVAL;
550
551		if (p->max_sectors > (ub->dev_info.max_io_buf_bytes >> 9))
552			return -EINVAL;
553
554		if (ublk_dev_is_zoned(ub) && !p->chunk_sectors)
555			return -EINVAL;
556	} else
557		return -EINVAL;
558
559	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD) {
560		const struct ublk_param_discard *p = &ub->params.discard;
561
562		/* So far, only support single segment discard */
563		if (p->max_discard_sectors && p->max_discard_segments != 1)
564			return -EINVAL;
565
566		if (!p->discard_granularity)
567			return -EINVAL;
568	}
569
570	/* dev_t is read-only */
571	if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
572		return -EINVAL;
573
574	if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
575		return ublk_dev_param_zoned_validate(ub);
576	else if (ublk_dev_is_zoned(ub))
577		return -EINVAL;
578
579	return 0;
580}
581
582static int ublk_apply_params(struct ublk_device *ub)
583{
584	if (!(ub->params.types & UBLK_PARAM_TYPE_BASIC))
585		return -EINVAL;
586
587	ublk_dev_param_basic_apply(ub);
588
589	if (ub->params.types & UBLK_PARAM_TYPE_DISCARD)
590		ublk_dev_param_discard_apply(ub);
591
592	if (ub->params.types & UBLK_PARAM_TYPE_ZONED)
593		return ublk_dev_param_zoned_apply(ub);
594
595	return 0;
596}
597
598static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
599{
600	return ubq->flags & UBLK_F_USER_COPY;
601}
602
603static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
604{
	/*
	 * read()/write() is involved in user copy, so a request reference
	 * has to be grabbed
	 */
609	return ublk_support_user_copy(ubq);
610}
611
612static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
613		struct request *req)
614{
615	if (ublk_need_req_ref(ubq)) {
616		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
617
618		kref_init(&data->ref);
619	}
620}
621
622static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
623		struct request *req)
624{
625	if (ublk_need_req_ref(ubq)) {
626		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
627
628		return kref_get_unless_zero(&data->ref);
629	}
630
631	return true;
632}
633
634static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
635		struct request *req)
636{
637	if (ublk_need_req_ref(ubq)) {
638		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
639
640		kref_put(&data->ref, ublk_complete_rq);
641	} else {
642		__ublk_complete_rq(req);
643	}
644}
645
646static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
647{
648	return ubq->flags & UBLK_F_NEED_GET_DATA;
649}
650
651static struct ublk_device *ublk_get_device(struct ublk_device *ub)
652{
653	if (kobject_get_unless_zero(&ub->cdev_dev.kobj))
654		return ub;
655	return NULL;
656}
657
658static void ublk_put_device(struct ublk_device *ub)
659{
660	put_device(&ub->cdev_dev);
661}
662
663static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
664		int qid)
665{
	return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
667}
668
669static inline bool ublk_rq_has_data(const struct request *rq)
670{
671	return bio_has_data(rq->bio);
672}
673
674static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
675		int tag)
676{
677	return (struct ublksrv_io_desc *)
678		&(ubq->io_cmd_buf[tag * sizeof(struct ublksrv_io_desc)]);
679}
680
681static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)
682{
683	return ublk_get_queue(ub, q_id)->io_cmd_buf;
684}
685
686static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
687{
688	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
689
690	return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc),
691			PAGE_SIZE);
692}
693
694static inline bool ublk_queue_can_use_recovery_reissue(
695		struct ublk_queue *ubq)
696{
697	return (ubq->flags & UBLK_F_USER_RECOVERY) &&
698			(ubq->flags & UBLK_F_USER_RECOVERY_REISSUE);
699}
700
701static inline bool ublk_queue_can_use_recovery(
702		struct ublk_queue *ubq)
703{
704	return ubq->flags & UBLK_F_USER_RECOVERY;
705}
706
707static inline bool ublk_can_use_recovery(struct ublk_device *ub)
708{
709	return ub->dev_info.flags & UBLK_F_USER_RECOVERY;
710}
711
712static void ublk_free_disk(struct gendisk *disk)
713{
714	struct ublk_device *ub = disk->private_data;
715
716	clear_bit(UB_STATE_USED, &ub->state);
717	put_device(&ub->cdev_dev);
718}
719
720static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
721		unsigned int *owner_gid)
722{
723	kuid_t uid;
724	kgid_t gid;
725
726	current_uid_gid(&uid, &gid);
727
728	*owner_uid = from_kuid(&init_user_ns, uid);
729	*owner_gid = from_kgid(&init_user_ns, gid);
730}
731
732static int ublk_open(struct gendisk *disk, blk_mode_t mode)
733{
734	struct ublk_device *ub = disk->private_data;
735
736	if (capable(CAP_SYS_ADMIN))
737		return 0;
738
	/*
	 * If it is an unprivileged device, only its owner can open
	 * the disk. Otherwise it could be a trap set by a malicious
	 * user who deliberately grants this disk's privileges to
	 * other users.
	 *
	 * This policy is also reasonable given that anyone can create
	 * an unprivileged device without needing anyone else's grant.
	 */
748	if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
749		unsigned int curr_uid, curr_gid;
750
751		ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
752
753		if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
754				ub->dev_info.owner_gid)
755			return -EPERM;
756	}
757
758	return 0;
759}
760
761static const struct block_device_operations ub_fops = {
762	.owner =	THIS_MODULE,
763	.open =		ublk_open,
764	.free_disk =	ublk_free_disk,
765	.report_zones =	ublk_report_zones,
766};
767
768#define UBLK_MAX_PIN_PAGES	32
769
770struct ublk_io_iter {
771	struct page *pages[UBLK_MAX_PIN_PAGES];
772	struct bio *bio;
773	struct bvec_iter iter;
774};
775
/* copy 'total' bytes between the pinned pages and the request's bio vectors */
777static void ublk_copy_io_pages(struct ublk_io_iter *data,
778		size_t total, size_t pg_off, int dir)
779{
780	unsigned done = 0;
781	unsigned pg_idx = 0;
782
783	while (done < total) {
784		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
785		unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
786				(unsigned)(PAGE_SIZE - pg_off));
787		void *bv_buf = bvec_kmap_local(&bv);
788		void *pg_buf = kmap_local_page(data->pages[pg_idx]);
789
790		if (dir == ITER_DEST)
791			memcpy(pg_buf + pg_off, bv_buf, bytes);
792		else
793			memcpy(bv_buf, pg_buf + pg_off, bytes);
794
795		kunmap_local(pg_buf);
796		kunmap_local(bv_buf);
797
798		/* advance page array */
799		pg_off += bytes;
800		if (pg_off == PAGE_SIZE) {
801			pg_idx += 1;
802			pg_off = 0;
803		}
804
805		done += bytes;
806
807		/* advance bio */
808		bio_advance_iter_single(data->bio, &data->iter, bytes);
809		if (!data->iter.bi_size) {
810			data->bio = data->bio->bi_next;
811			if (data->bio == NULL)
812				break;
813			data->iter = data->bio->bi_iter;
814		}
815	}
816}
817
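/*
 * Position 'iter' at byte 'offset' from the start of the request's bio chain;
 * returns false if the offset is beyond the request data.
 */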
818static bool ublk_advance_io_iter(const struct request *req,
819		struct ublk_io_iter *iter, unsigned int offset)
820{
821	struct bio *bio = req->bio;
822
823	for_each_bio(bio) {
824		if (bio->bi_iter.bi_size > offset) {
825			iter->bio = bio;
826			iter->iter = bio->bi_iter;
827			bio_advance_iter(iter->bio, &iter->iter, offset);
828			return true;
829		}
830		offset -= bio->bi_iter.bi_size;
831	}
832	return false;
833}
834
/*
 * Copy data between request pages and the iov_iter; 'offset' is the
 * linear byte offset into the request at which the copy starts.
 */
839static size_t ublk_copy_user_pages(const struct request *req,
840		unsigned offset, struct iov_iter *uiter, int dir)
841{
842	struct ublk_io_iter iter;
843	size_t done = 0;
844
845	if (!ublk_advance_io_iter(req, &iter, offset))
846		return 0;
847
848	while (iov_iter_count(uiter) && iter.bio) {
849		unsigned nr_pages;
850		ssize_t len;
851		size_t off;
852		int i;
853
854		len = iov_iter_get_pages2(uiter, iter.pages,
855				iov_iter_count(uiter),
856				UBLK_MAX_PIN_PAGES, &off);
857		if (len <= 0)
858			return done;
859
860		ublk_copy_io_pages(&iter, len, off, dir);
861		nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
862		for (i = 0; i < nr_pages; i++) {
863			if (dir == ITER_DEST)
864				set_page_dirty(iter.pages[i]);
865			put_page(iter.pages[i]);
866		}
867		done += len;
868	}
869
870	return done;
871}
872
873static inline bool ublk_need_map_req(const struct request *req)
874{
875	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
876}
877
878static inline bool ublk_need_unmap_req(const struct request *req)
879{
880	return ublk_rq_has_data(req) &&
881	       (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
882}
883
884static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
885		struct ublk_io *io)
886{
887	const unsigned int rq_bytes = blk_rq_bytes(req);
888
889	if (ublk_support_user_copy(ubq))
890		return rq_bytes;
891
	/*
	 * No zero copy: copying WRITE request data is deferred to the
	 * ublksrv context, and the big benefit is that pinning pages in
	 * the current context is pretty fast, see ublk_copy_user_pages()
	 */
897	if (ublk_need_map_req(req)) {
898		struct iov_iter iter;
899		struct iovec iov;
900		const int dir = ITER_DEST;
901
902		import_single_range(dir, u64_to_user_ptr(io->addr), rq_bytes,
903				&iov, &iter);
904
905		return ublk_copy_user_pages(req, 0, &iter, dir);
906	}
907	return rq_bytes;
908}
909
910static int ublk_unmap_io(const struct ublk_queue *ubq,
911		const struct request *req,
912		struct ublk_io *io)
913{
914	const unsigned int rq_bytes = blk_rq_bytes(req);
915
916	if (ublk_support_user_copy(ubq))
917		return rq_bytes;
918
919	if (ublk_need_unmap_req(req)) {
920		struct iov_iter iter;
921		struct iovec iov;
922		const int dir = ITER_SOURCE;
923
924		WARN_ON_ONCE(io->res > rq_bytes);
925
926		import_single_range(dir, u64_to_user_ptr(io->addr), io->res,
927				&iov, &iter);
928		return ublk_copy_user_pages(req, 0, &iter, dir);
929	}
930	return rq_bytes;
931}
932
933static inline unsigned int ublk_req_build_flags(struct request *req)
934{
935	unsigned flags = 0;
936
937	if (req->cmd_flags & REQ_FAILFAST_DEV)
938		flags |= UBLK_IO_F_FAILFAST_DEV;
939
940	if (req->cmd_flags & REQ_FAILFAST_TRANSPORT)
941		flags |= UBLK_IO_F_FAILFAST_TRANSPORT;
942
943	if (req->cmd_flags & REQ_FAILFAST_DRIVER)
944		flags |= UBLK_IO_F_FAILFAST_DRIVER;
945
946	if (req->cmd_flags & REQ_META)
947		flags |= UBLK_IO_F_META;
948
949	if (req->cmd_flags & REQ_FUA)
950		flags |= UBLK_IO_F_FUA;
951
952	if (req->cmd_flags & REQ_NOUNMAP)
953		flags |= UBLK_IO_F_NOUNMAP;
954
955	if (req->cmd_flags & REQ_SWAP)
956		flags |= UBLK_IO_F_SWAP;
957
958	return flags;
959}
960
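/*
 * Build the io descriptor (struct ublksrv_io_desc) for this request in the
 * per-queue command buffer shared with ublksrv, translating the blk-mq op
 * and flags into UBLK_IO_OP_* and UBLK_IO_F_* values.
 */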
961static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
962{
963	struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
964	struct ublk_io *io = &ubq->ios[req->tag];
965	enum req_op op = req_op(req);
966	u32 ublk_op;
967
968	if (!ublk_queue_is_zoned(ubq) &&
969	    (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
970		return BLK_STS_IOERR;
971
972	switch (req_op(req)) {
973	case REQ_OP_READ:
974		ublk_op = UBLK_IO_OP_READ;
975		break;
976	case REQ_OP_WRITE:
977		ublk_op = UBLK_IO_OP_WRITE;
978		break;
979	case REQ_OP_FLUSH:
980		ublk_op = UBLK_IO_OP_FLUSH;
981		break;
982	case REQ_OP_DISCARD:
983		ublk_op = UBLK_IO_OP_DISCARD;
984		break;
985	case REQ_OP_WRITE_ZEROES:
986		ublk_op = UBLK_IO_OP_WRITE_ZEROES;
987		break;
988	default:
989		if (ublk_queue_is_zoned(ubq))
990			return ublk_setup_iod_zoned(ubq, req);
991		return BLK_STS_IOERR;
992	}
993
	/* translate REQ_OP_* into UBLK_IO_OP_* since kernel values may change */
995	iod->op_flags = ublk_op | ublk_req_build_flags(req);
996	iod->nr_sectors = blk_rq_sectors(req);
997	iod->start_sector = blk_rq_pos(req);
998	iod->addr = io->addr;
999
1000	return BLK_STS_OK;
1001}
1002
1003static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
1004		struct io_uring_cmd *ioucmd)
1005{
1006	return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
1007}
1008
1009static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
1010{
1011	return ubq->ubq_daemon->flags & PF_EXITING;
1012}
1013
1014/* todo: handle partial completion */
1015static inline void __ublk_complete_rq(struct request *req)
1016{
1017	struct ublk_queue *ubq = req->mq_hctx->driver_data;
1018	struct ublk_io *io = &ubq->ios[req->tag];
1019	unsigned int unmapped_bytes;
1020	blk_status_t res = BLK_STS_OK;
1021
1022	/* called from ublk_abort_queue() code path */
1023	if (io->flags & UBLK_IO_FLAG_ABORTED) {
1024		res = BLK_STS_IOERR;
1025		goto exit;
1026	}
1027
1028	/* failed read IO if nothing is read */
1029	if (!io->res && req_op(req) == REQ_OP_READ)
1030		io->res = -EIO;
1031
1032	if (io->res < 0) {
1033		res = errno_to_blk_status(io->res);
1034		goto exit;
1035	}
1036
	/*
	 * FLUSH, DISCARD or WRITE_ZEROES usually won't return a byte count,
	 * so end them directly.
	 *
	 * None of them needs to be unmapped either.
	 */
1043	if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&
1044	    req_op(req) != REQ_OP_DRV_IN)
1045		goto exit;
1046
1047	/* for READ request, writing data in iod->addr to rq buffers */
1048	unmapped_bytes = ublk_unmap_io(ubq, req, io);
1049
	/*
	 * Extremely unlikely since the data was filled in just before.
	 *
	 * Clamp io->res simply for this unlikely case.
	 */
1055	if (unlikely(unmapped_bytes < io->res))
1056		io->res = unmapped_bytes;
1057
1058	if (blk_update_request(req, BLK_STS_OK, io->res))
1059		blk_mq_requeue_request(req, true);
1060	else
1061		__blk_mq_end_request(req, BLK_STS_OK);
1062
1063	return;
1064exit:
1065	blk_mq_end_request(req, res);
1066}
1067
1068static void ublk_complete_rq(struct kref *ref)
1069{
1070	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
1071			ref);
1072	struct request *req = blk_mq_rq_from_pdu(data);
1073
1074	__ublk_complete_rq(req);
1075}
1076
1077/*
1078 * Since __ublk_rq_task_work always fails requests immediately during
1079 * exiting, __ublk_fail_req() is only called from abort context during
1080 * exiting. So lock is unnecessary.
1081 *
1082 * Also aborting may not be started yet, keep in mind that one failed
1083 * request may be issued by block layer again.
1084 */
1085static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
1086		struct request *req)
1087{
1088	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
1089
1090	if (!(io->flags & UBLK_IO_FLAG_ABORTED)) {
1091		io->flags |= UBLK_IO_FLAG_ABORTED;
1092		if (ublk_queue_can_use_recovery_reissue(ubq))
1093			blk_mq_requeue_request(req, false);
1094		else
1095			ublk_put_req_ref(ubq, req);
1096	}
1097}
1098
1099static void ubq_complete_io_cmd(struct ublk_io *io, int res,
1100				unsigned issue_flags)
1101{
1102	/* mark this cmd owned by ublksrv */
1103	io->flags |= UBLK_IO_FLAG_OWNED_BY_SRV;
1104
	/*
	 * Clear ACTIVE since we are done with this sqe/cmd slot.
	 * We can only accept an io cmd while not active.
	 */
1109	io->flags &= ~UBLK_IO_FLAG_ACTIVE;
1110
1111	/* tell ublksrv one io request is coming */
1112	io_uring_cmd_done(io->cmd, res, 0, issue_flags);
1113}
1114
1115#define UBLK_REQUEUE_DELAY_MS	3
1116
1117static inline void __ublk_abort_rq(struct ublk_queue *ubq,
1118		struct request *rq)
1119{
1120	/* We cannot process this rq so just requeue it. */
1121	if (ublk_queue_can_use_recovery(ubq))
1122		blk_mq_requeue_request(rq, false);
1123	else
1124		blk_mq_end_request(rq, BLK_STS_IOERR);
1125
1126	mod_delayed_work(system_wq, &ubq->dev->monitor_work, 0);
1127}
1128
1129static inline void __ublk_rq_task_work(struct request *req,
1130				       unsigned issue_flags)
1131{
1132	struct ublk_queue *ubq = req->mq_hctx->driver_data;
1133	int tag = req->tag;
1134	struct ublk_io *io = &ubq->ios[tag];
1135	unsigned int mapped_bytes;
1136
1137	pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
1138			__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1139			ublk_get_iod(ubq, req->tag)->addr);
1140
1141	/*
1142	 * Task is exiting if either:
1143	 *
1144	 * (1) current != ubq_daemon.
1145	 * io_uring_cmd_complete_in_task() tries to run task_work
1146	 * in a workqueue if ubq_daemon(cmd's task) is PF_EXITING.
1147	 *
1148	 * (2) current->flags & PF_EXITING.
1149	 */
1150	if (unlikely(current != ubq->ubq_daemon || current->flags & PF_EXITING)) {
1151		__ublk_abort_rq(ubq, req);
1152		return;
1153	}
1154
1155	if (ublk_need_get_data(ubq) && ublk_need_map_req(req)) {
		/*
		 * We have not handled UBLK_IO_NEED_GET_DATA command yet,
		 * so immediately pass UBLK_IO_RES_NEED_GET_DATA to ublksrv
		 * and notify it.
		 */
1161		if (!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA)) {
1162			io->flags |= UBLK_IO_FLAG_NEED_GET_DATA;
1163			pr_devel("%s: need get data. op %d, qid %d tag %d io_flags %x\n",
1164					__func__, io->cmd->cmd_op, ubq->q_id,
1165					req->tag, io->flags);
1166			ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags);
1167			return;
1168		}
1169		/*
1170		 * We have handled UBLK_IO_NEED_GET_DATA command,
1171		 * so clear UBLK_IO_FLAG_NEED_GET_DATA now and just
1172		 * do the copy work.
1173		 */
1174		io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
1175		/* update iod->addr because ublksrv may have passed a new io buffer */
1176		ublk_get_iod(ubq, req->tag)->addr = io->addr;
1177		pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
1178				__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
1179				ublk_get_iod(ubq, req->tag)->addr);
1180	}
1181
1182	mapped_bytes = ublk_map_io(ubq, req, io);
1183
1184	/* partially mapped, update io descriptor */
1185	if (unlikely(mapped_bytes != blk_rq_bytes(req))) {
1186		/*
1187		 * Nothing mapped, retry until we succeed.
1188		 *
1189		 * We may never succeed in mapping any bytes here because
1190		 * of OOM. TODO: reserve one buffer with single page pinned
1191		 * for providing forward progress guarantee.
1192		 */
1193		if (unlikely(!mapped_bytes)) {
1194			blk_mq_requeue_request(req, false);
1195			blk_mq_delay_kick_requeue_list(req->q,
1196					UBLK_REQUEUE_DELAY_MS);
1197			return;
1198		}
1199
1200		ublk_get_iod(ubq, req->tag)->nr_sectors =
1201			mapped_bytes >> 9;
1202	}
1203
1204	ublk_init_req_ref(ubq, req);
1205	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
1206}
1207
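/*
 * Runs in the ubq_daemon task context via io_uring task work: drain
 * ubq->io_cmds and handle the queued requests in submission order.
 */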
1208static inline void ublk_forward_io_cmds(struct ublk_queue *ubq,
1209					unsigned issue_flags)
1210{
1211	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1212	struct ublk_rq_data *data, *tmp;
1213
1214	io_cmds = llist_reverse_order(io_cmds);
1215	llist_for_each_entry_safe(data, tmp, io_cmds, node)
1216		__ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags);
1217}
1218
1219static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
1220{
1221	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
1222	struct ublk_rq_data *data, *tmp;
1223
1224	llist_for_each_entry_safe(data, tmp, io_cmds, node)
1225		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
1226}
1227
1228static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
1229{
1230	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1231	struct ublk_queue *ubq = pdu->ubq;
1232
1233	ublk_forward_io_cmds(ubq, issue_flags);
1234}
1235
1236static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
1237{
1238	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
1239	struct ublk_io *io;
1240
1241	if (!llist_add(&data->node, &ubq->io_cmds))
1242		return;
1243
1244	io = &ubq->ios[rq->tag];
	/*
	 * If the check passes, we know that this is a re-issued request aborted
	 * previously in monitor_work because the ubq_daemon (cmd's task) is
	 * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
	 * because this ioucmd's io_uring context may be freed now if no inflight
	 * ioucmd exists. Otherwise we may cause a null-deref in ctx->fallback_work.
	 *
	 * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request
	 * (releasing the tag). Then the request is re-started (allocating the tag)
	 * and we are here. Since releasing/allocating a tag implies smp_mb(),
	 * finding UBLK_IO_FLAG_ABORTED guarantees that this is a re-issued
	 * request aborted previously.
	 */
1257	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
1258		ublk_abort_io_cmds(ubq);
1259	} else {
1260		struct io_uring_cmd *cmd = io->cmd;
1261		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
1262
1263		pdu->ubq = ubq;
1264		io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
1265	}
1266}
1267
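/*
 * For an unprivileged device, a request timeout kills the daemon (which may
 * be stuck or malicious) so that outstanding requests can be cleaned up;
 * otherwise just reset the timer and keep waiting.
 */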
1268static enum blk_eh_timer_return ublk_timeout(struct request *rq)
1269{
1270	struct ublk_queue *ubq = rq->mq_hctx->driver_data;
1271
1272	if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
1273		if (!ubq->timeout) {
1274			send_sig(SIGKILL, ubq->ubq_daemon, 0);
1275			ubq->timeout = true;
1276		}
1277
1278		return BLK_EH_DONE;
1279	}
1280
1281	return BLK_EH_RESET_TIMER;
1282}
1283
1284static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
1285		const struct blk_mq_queue_data *bd)
1286{
1287	struct ublk_queue *ubq = hctx->driver_data;
1288	struct request *rq = bd->rq;
1289	blk_status_t res;
1290
1291	/* fill iod to slot in io cmd buffer */
1292	res = ublk_setup_iod(ubq, rq);
1293	if (unlikely(res != BLK_STS_OK))
1294		return BLK_STS_IOERR;
1295
	/* With the recovery feature enabled, force_abort is set in
	 * ublk_stop_dev() before calling del_gendisk(). We have to
	 * abort all requeued and new rqs here to let del_gendisk()
	 * move on. Besides, we cannot call io_uring_cmd_complete_in_task()
	 * here, to avoid a UAF on the io_uring ctx.
	 *
	 * Note: force_abort is guaranteed to be seen because it is set
	 * before the request queue is unquiesced.
	 */
1305	if (ublk_queue_can_use_recovery(ubq) && unlikely(ubq->force_abort))
1306		return BLK_STS_IOERR;
1307
1308	blk_mq_start_request(bd->rq);
1309
1310	if (unlikely(ubq_daemon_is_dying(ubq))) {
1311		__ublk_abort_rq(ubq, rq);
1312		return BLK_STS_OK;
1313	}
1314
1315	ublk_queue_cmd(ubq, rq);
1316
1317	return BLK_STS_OK;
1318}
1319
1320static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1321		unsigned int hctx_idx)
1322{
1323	struct ublk_device *ub = driver_data;
1324	struct ublk_queue *ubq = ublk_get_queue(ub, hctx->queue_num);
1325
1326	hctx->driver_data = ubq;
1327	return 0;
1328}
1329
1330static const struct blk_mq_ops ublk_mq_ops = {
1331	.queue_rq       = ublk_queue_rq,
1332	.init_hctx	= ublk_init_hctx,
1333	.timeout	= ublk_timeout,
1334};
1335
1336static int ublk_ch_open(struct inode *inode, struct file *filp)
1337{
1338	struct ublk_device *ub = container_of(inode->i_cdev,
1339			struct ublk_device, cdev);
1340
1341	if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
1342		return -EBUSY;
1343	filp->private_data = ub;
1344	return 0;
1345}
1346
1347static int ublk_ch_release(struct inode *inode, struct file *filp)
1348{
1349	struct ublk_device *ub = filp->private_data;
1350
1351	clear_bit(UB_STATE_OPEN, &ub->state);
1352	return 0;
1353}
1354
1355/* map pre-allocated per-queue cmd buffer to ublksrv daemon */
1356static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
1357{
1358	struct ublk_device *ub = filp->private_data;
1359	size_t sz = vma->vm_end - vma->vm_start;
1360	unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc);
1361	unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT;
1362	int q_id, ret = 0;
1363
1364	spin_lock(&ub->mm_lock);
1365	if (!ub->mm)
1366		ub->mm = current->mm;
1367	if (current->mm != ub->mm)
1368		ret = -EINVAL;
1369	spin_unlock(&ub->mm_lock);
1370
1371	if (ret)
1372		return ret;
1373
1374	if (vma->vm_flags & VM_WRITE)
1375		return -EPERM;
1376
1377	end = UBLKSRV_CMD_BUF_OFFSET + ub->dev_info.nr_hw_queues * max_sz;
1378	if (phys_off < UBLKSRV_CMD_BUF_OFFSET || phys_off >= end)
1379		return -EINVAL;
1380
1381	q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;
1382	pr_devel("%s: qid %d, pid %d, addr %lx pg_off %lx sz %lu\n",
1383			__func__, q_id, current->pid, vma->vm_start,
1384			phys_off, (unsigned long)sz);
1385
1386	if (sz != ublk_queue_cmd_buf_size(ub, q_id))
1387		return -EINVAL;
1388
1389	pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1390	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
1391}
1392
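/*
 * COMMIT_AND_FETCH_REQ: take ownership of the io slot back from ublksrv,
 * record the server's result and complete the corresponding request.
 */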
1393static void ublk_commit_completion(struct ublk_device *ub,
1394		const struct ublksrv_io_cmd *ub_cmd)
1395{
1396	u32 qid = ub_cmd->q_id, tag = ub_cmd->tag;
1397	struct ublk_queue *ubq = ublk_get_queue(ub, qid);
1398	struct ublk_io *io = &ubq->ios[tag];
1399	struct request *req;
1400
	/* now this cmd slot is owned by the ublk driver */
1402	io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
1403	io->res = ub_cmd->result;
1404
1405	/* find the io request and complete */
1406	req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
1407	if (WARN_ON_ONCE(unlikely(!req)))
1408		return;
1409
1410	if (req_op(req) == REQ_OP_ZONE_APPEND)
1411		req->__sector = ub_cmd->zone_append_lba;
1412
1413	if (likely(!blk_should_fake_timeout(req->q)))
1414		ublk_put_req_ref(ubq, req);
1415}
1416
/*
 * When ->ubq_daemon is exiting, either a new request is ended immediately,
 * or any queued io command is drained, so it is safe to abort the queue
 * locklessly
 */
1422static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
1423{
1424	int i;
1425
1426	if (!ublk_get_device(ub))
1427		return;
1428
1429	for (i = 0; i < ubq->q_depth; i++) {
1430		struct ublk_io *io = &ubq->ios[i];
1431
1432		if (!(io->flags & UBLK_IO_FLAG_ACTIVE)) {
1433			struct request *rq;
1434
			/*
			 * Either we fail the request or __ublk_rq_task_work()
			 * will do it
			 */
1439			rq = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], i);
1440			if (rq)
1441				__ublk_fail_req(ubq, io, rq);
1442		}
1443	}
1444	ublk_put_device(ub);
1445}
1446
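/*
 * Periodically check whether any ubq_daemon is exiting; if so, schedule
 * quiesce work (with recovery enabled) or stop work (without), and abort the
 * queue so in-flight requests can make forward progress.
 */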
1447static void ublk_daemon_monitor_work(struct work_struct *work)
1448{
1449	struct ublk_device *ub =
1450		container_of(work, struct ublk_device, monitor_work.work);
1451	int i;
1452
1453	for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
1454		struct ublk_queue *ubq = ublk_get_queue(ub, i);
1455
1456		if (ubq_daemon_is_dying(ubq)) {
1457			if (ublk_queue_can_use_recovery(ubq))
1458				schedule_work(&ub->quiesce_work);
1459			else
1460				schedule_work(&ub->stop_work);
1461
1462			/* abort queue is for making forward progress */
1463			ublk_abort_queue(ub, ubq);
1464		}
1465	}
1466
	/*
	 * We can't schedule monitor work once ub's state is no longer
	 * UBLK_S_DEV_LIVE, i.e. after ublk_remove() or __ublk_quiesce_dev()
	 * is started.
	 *
	 * No need for ub->mutex: monitor work is canceled after the state is
	 * marked as not LIVE, so the new state is observed reliably.
	 */
1474	if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1475		schedule_delayed_work(&ub->monitor_work,
1476				UBLK_DAEMON_MONITOR_PERIOD);
1477}
1478
1479static inline bool ublk_queue_ready(struct ublk_queue *ubq)
1480{
1481	return ubq->nr_io_ready == ubq->q_depth;
1482}
1483
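/*
 * Complete every io command still owned by the driver (UBLK_IO_FLAG_ACTIVE)
 * with UBLK_IO_RES_ABORT so the daemon's pending uring cmds are released;
 * UBLK_IO_FLAG_CANCELED prevents completing the same cmd twice.
 */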
1484static void ublk_cancel_queue(struct ublk_queue *ubq)
1485{
1486	int i;
1487
1488	for (i = 0; i < ubq->q_depth; i++) {
1489		struct ublk_io *io = &ubq->ios[i];
1490
1491		if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1492			bool done;
1493
1494			spin_lock(&ubq->cancel_lock);
1495			done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
1496			if (!done)
1497				io->flags |= UBLK_IO_FLAG_CANCELED;
1498			spin_unlock(&ubq->cancel_lock);
1499
1500			if (!done)
1501				io_uring_cmd_done(io->cmd,
1502						UBLK_IO_RES_ABORT, 0,
1503						IO_URING_F_UNLOCKED);
1504		}
1505	}
1506}
1507
1508/* Cancel all pending commands, must be called after del_gendisk() returns */
1509static void ublk_cancel_dev(struct ublk_device *ub)
1510{
1511	int i;
1512
1513	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1514		ublk_cancel_queue(ublk_get_queue(ub, i));
1515}
1516
1517static bool ublk_check_inflight_rq(struct request *rq, void *data)
1518{
1519	bool *idle = data;
1520
1521	if (blk_mq_request_started(rq)) {
1522		*idle = false;
1523		return false;
1524	}
1525	return true;
1526}
1527
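/*
 * Wait until no started request remains in the tagset; the queue must be
 * quiesced already so no new request can be started while we wait.
 */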
1528static void ublk_wait_tagset_rqs_idle(struct ublk_device *ub)
1529{
1530	bool idle;
1531
1532	WARN_ON_ONCE(!blk_queue_quiesced(ub->ub_disk->queue));
1533	while (true) {
1534		idle = true;
1535		blk_mq_tagset_busy_iter(&ub->tag_set,
1536				ublk_check_inflight_rq, &idle);
1537		if (idle)
1538			break;
1539		msleep(UBLK_REQUEUE_DELAY_MS);
1540	}
1541}
1542
1543static void __ublk_quiesce_dev(struct ublk_device *ub)
1544{
1545	pr_devel("%s: quiesce ub: dev_id %d state %s\n",
1546			__func__, ub->dev_info.dev_id,
1547			ub->dev_info.state == UBLK_S_DEV_LIVE ?
1548			"LIVE" : "QUIESCED");
1549	blk_mq_quiesce_queue(ub->ub_disk->queue);
1550	ublk_wait_tagset_rqs_idle(ub);
1551	ub->dev_info.state = UBLK_S_DEV_QUIESCED;
	/* We are going to release the task_struct of ubq_daemon and reset
	 * ->ubq_daemon to NULL. So in monitor_work, checking ubq_daemon would cause
	 * a UAF. Besides, monitor_work is not necessary in the QUIESCED state since
	 * we have already scheduled quiesce_work and quiesced all ubqs.
	 *
	 * Do not let monitor_work schedule itself if the state is QUIESCED. We cancel
	 * it here and re-schedule it in END_USER_RECOVERY to avoid UAF.
	 */
1560	cancel_delayed_work_sync(&ub->monitor_work);
1561}
1562
1563static void ublk_quiesce_work_fn(struct work_struct *work)
1564{
1565	struct ublk_device *ub =
1566		container_of(work, struct ublk_device, quiesce_work);
1567
1568	mutex_lock(&ub->mutex);
1569	if (ub->dev_info.state != UBLK_S_DEV_LIVE)
1570		goto unlock;
1571	__ublk_quiesce_dev(ub);
1572 unlock:
1573	mutex_unlock(&ub->mutex);
1574	ublk_cancel_dev(ub);
1575}
1576
1577static void ublk_unquiesce_dev(struct ublk_device *ub)
1578{
1579	int i;
1580
1581	pr_devel("%s: unquiesce ub: dev_id %d state %s\n",
1582			__func__, ub->dev_info.dev_id,
1583			ub->dev_info.state == UBLK_S_DEV_LIVE ?
1584			"LIVE" : "QUIESCED");
	/* quiesce_work has run. We let requeued rqs be aborted
	 * before running fallback_wq. "force_abort" must be seen
	 * after the request queue is unquiesced. Then del_gendisk()
	 * can move on.
	 */
1590	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
1591		ublk_get_queue(ub, i)->force_abort = true;
1592
1593	blk_mq_unquiesce_queue(ub->ub_disk->queue);
1594	/* We may have requeued some rqs in ublk_quiesce_queue() */
1595	blk_mq_kick_requeue_list(ub->ub_disk->queue);
1596}
1597
1598static void ublk_stop_dev(struct ublk_device *ub)
1599{
1600	mutex_lock(&ub->mutex);
1601	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1602		goto unlock;
1603	if (ublk_can_use_recovery(ub)) {
1604		if (ub->dev_info.state == UBLK_S_DEV_LIVE)
1605			__ublk_quiesce_dev(ub);
1606		ublk_unquiesce_dev(ub);
1607	}
1608	del_gendisk(ub->ub_disk);
1609	ub->dev_info.state = UBLK_S_DEV_DEAD;
1610	ub->dev_info.ublksrv_pid = -1;
1611	put_disk(ub->ub_disk);
1612	ub->ub_disk = NULL;
1613 unlock:
1614	mutex_unlock(&ub->mutex);
1615	ublk_cancel_dev(ub);
1616	cancel_delayed_work_sync(&ub->monitor_work);
1617}
1618
1619/* device can only be started after all IOs are ready */
1620static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
1621{
1622	mutex_lock(&ub->mutex);
1623	ubq->nr_io_ready++;
1624	if (ublk_queue_ready(ubq)) {
1625		ubq->ubq_daemon = current;
1626		get_task_struct(ubq->ubq_daemon);
1627		ub->nr_queues_ready++;
1628
1629		if (capable(CAP_SYS_ADMIN))
1630			ub->nr_privileged_daemon++;
1631	}
1632	if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
1633		complete_all(&ub->completion);
1634	mutex_unlock(&ub->mutex);
1635}
1636
1637static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
1638		int tag)
1639{
1640	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1641	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
1642
1643	ublk_queue_cmd(ubq, req);
1644}
1645
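/*
 * Only ioctl-encoded opcodes (_IOC_TYPE 'u') are accepted; legacy plain
 * opcodes (type 0) are allowed only when CONFIG_BLKDEV_UBLK_LEGACY_OPCODES
 * is enabled.
 */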
1646static inline int ublk_check_cmd_op(u32 cmd_op)
1647{
1648	u32 ioc_type = _IOC_TYPE(cmd_op);
1649
1650	if (!IS_ENABLED(CONFIG_BLKDEV_UBLK_LEGACY_OPCODES) && ioc_type != 'u')
1651		return -EOPNOTSUPP;
1652
1653	if (ioc_type != 'u' && ioc_type != 0)
1654		return -EOPNOTSUPP;
1655
1656	return 0;
1657}
1658
1659static inline void ublk_fill_io_cmd(struct ublk_io *io,
1660		struct io_uring_cmd *cmd, unsigned long buf_addr)
1661{
1662	io->cmd = cmd;
1663	io->flags |= UBLK_IO_FLAG_ACTIVE;
1664	io->addr = buf_addr;
1665}
1666
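/*
 * Handle the FETCH_REQ, COMMIT_AND_FETCH_REQ and NEED_GET_DATA commands
 * issued by ublksrv via io_uring passthrough. Always returns -EIOCBQUEUED:
 * the cqe is posted either later through io_uring_cmd_done() or immediately
 * on the error path.
 */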
1667static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
1668			       unsigned int issue_flags,
1669			       const struct ublksrv_io_cmd *ub_cmd)
1670{
1671	struct ublk_device *ub = cmd->file->private_data;
1672	struct ublk_queue *ubq;
1673	struct ublk_io *io;
1674	u32 cmd_op = cmd->cmd_op;
1675	unsigned tag = ub_cmd->tag;
1676	int ret = -EINVAL;
1677	struct request *req;
1678
1679	pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
1680			__func__, cmd->cmd_op, ub_cmd->q_id, tag,
1681			ub_cmd->result);
1682
1683	if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
1684		goto out;
1685
1686	ubq = ublk_get_queue(ub, ub_cmd->q_id);
1687	if (!ubq || ub_cmd->q_id != ubq->q_id)
1688		goto out;
1689
1690	if (ubq->ubq_daemon && ubq->ubq_daemon != current)
1691		goto out;
1692
1693	if (tag >= ubq->q_depth)
1694		goto out;
1695
1696	io = &ubq->ios[tag];
1697
1698	/* there is pending io cmd, something must be wrong */
1699	if (io->flags & UBLK_IO_FLAG_ACTIVE) {
1700		ret = -EBUSY;
1701		goto out;
1702	}
1703
	/*
	 * Ensure that the user issues UBLK_IO_NEED_GET_DATA
	 * iff the driver has set UBLK_IO_FLAG_NEED_GET_DATA.
	 */
1708	if ((!!(io->flags & UBLK_IO_FLAG_NEED_GET_DATA))
1709			^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
1710		goto out;
1711
1712	ret = ublk_check_cmd_op(cmd_op);
1713	if (ret)
1714		goto out;
1715
1716	ret = -EINVAL;
1717	switch (_IOC_NR(cmd_op)) {
1718	case UBLK_IO_FETCH_REQ:
1719		/* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
1720		if (ublk_queue_ready(ubq)) {
1721			ret = -EBUSY;
1722			goto out;
1723		}
1724		/*
1725		 * The io is being handled by server, so COMMIT_RQ is expected
1726		 * instead of FETCH_REQ
1727		 */
1728		if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
1729			goto out;
1730
1731		if (!ublk_support_user_copy(ubq)) {
			/*
			 * FETCH_REQ has to provide an IO buffer if NEED GET
			 * DATA is not enabled
			 */
1736			if (!ub_cmd->addr && !ublk_need_get_data(ubq))
1737				goto out;
1738		} else if (ub_cmd->addr) {
1739			/* User copy requires addr to be unset */
1740			ret = -EINVAL;
1741			goto out;
1742		}
1743
1744		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1745		ublk_mark_io_ready(ub, ubq);
1746		break;
1747	case UBLK_IO_COMMIT_AND_FETCH_REQ:
1748		req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
1749
1750		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1751			goto out;
1752
1753		if (!ublk_support_user_copy(ubq)) {
			/*
			 * COMMIT_AND_FETCH_REQ has to provide an IO buffer if
			 * NEED GET DATA is not enabled or it is a READ IO.
			 */
1758			if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
1759						req_op(req) == REQ_OP_READ))
1760				goto out;
1761		} else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
1762			/*
1763			 * User copy requires addr to be unset when command is
1764			 * not zone append
1765			 */
1766			ret = -EINVAL;
1767			goto out;
1768		}
1769
1770		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1771		ublk_commit_completion(ub, ub_cmd);
1772		break;
1773	case UBLK_IO_NEED_GET_DATA:
1774		if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
1775			goto out;
1776		ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
1777		ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag);
1778		break;
1779	default:
1780		goto out;
1781	}
1782	return -EIOCBQUEUED;
1783
1784 out:
1785	io_uring_cmd_done(cmd, ret, 0, issue_flags);
1786	pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
1787			__func__, cmd_op, tag, ret, io->flags);
1788	return -EIOCBQUEUED;
1789}
1790
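/*
 * Look up the request addressed by (q_id, tag) for user copy and grab a
 * reference on it; fails unless the request has been started, has data and
 * 'offset' falls within it.
 */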
1791static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
1792		struct ublk_queue *ubq, int tag, size_t offset)
1793{
1794	struct request *req;
1795
1796	if (!ublk_need_req_ref(ubq))
1797		return NULL;
1798
1799	req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
1800	if (!req)
1801		return NULL;
1802
1803	if (!ublk_get_req_ref(ubq, req))
1804		return NULL;
1805
1806	if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
1807		goto fail_put;
1808
1809	if (!ublk_rq_has_data(req))
1810		goto fail_put;
1811
1812	if (offset > blk_rq_bytes(req))
1813		goto fail_put;
1814
1815	return req;
1816fail_put:
1817	ublk_put_req_ref(ubq, req);
1818	return NULL;
1819}
1820
1821static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
1822{
1823	/*
1824	 * Not necessary for async retry, but let's keep it simple and always
1825	 * copy the values to avoid any potential reuse.
1826	 */
1827	const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
1828	const struct ublksrv_io_cmd ub_cmd = {
1829		.q_id = READ_ONCE(ub_src->q_id),
1830		.tag = READ_ONCE(ub_src->tag),
1831		.result = READ_ONCE(ub_src->result),
1832		.addr = READ_ONCE(ub_src->addr)
1833	};
1834
1835	return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
1836}
1837
1838static inline bool ublk_check_ubuf_dir(const struct request *req,
1839		int ubuf_dir)
1840{
1841	/* copy ubuf to request pages */
1842	if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) &&
1843	    ubuf_dir == ITER_SOURCE)
1844		return true;
1845
1846	/* copy request pages to ubuf */
1847	if ((req_op(req) == REQ_OP_WRITE ||
1848	     req_op(req) == REQ_OP_ZONE_APPEND) &&
1849	    ubuf_dir == ITER_DEST)
1850		return true;
1851
1852	return false;
1853}
1854
1855static struct request *ublk_check_and_get_req(struct kiocb *iocb,
1856		struct iov_iter *iter, size_t *off, int dir)
1857{
1858	struct ublk_device *ub = iocb->ki_filp->private_data;
1859	struct ublk_queue *ubq;
1860	struct request *req;
1861	size_t buf_off;
1862	u16 tag, q_id;
1863
1864	if (!ub)
1865		return ERR_PTR(-EACCES);
1866
1867	if (!user_backed_iter(iter))
1868		return ERR_PTR(-EACCES);
1869
1870	if (ub->dev_info.state == UBLK_S_DEV_DEAD)
1871		return ERR_PTR(-EACCES);
1872
1873	tag = ublk_pos_to_tag(iocb->ki_pos);
1874	q_id = ublk_pos_to_hwq(iocb->ki_pos);
1875	buf_off = ublk_pos_to_buf_off(iocb->ki_pos);
1876
1877	if (q_id >= ub->dev_info.nr_hw_queues)
1878		return ERR_PTR(-EINVAL);
1879
1880	ubq = ublk_get_queue(ub, q_id);
1881	if (!ubq)
1882		return ERR_PTR(-EINVAL);
1883
1884	if (tag >= ubq->q_depth)
1885		return ERR_PTR(-EINVAL);
1886
1887	req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
1888	if (!req)
1889		return ERR_PTR(-EINVAL);
1890
1891	if (!req->mq_hctx || !req->mq_hctx->driver_data)
1892		goto fail;
1893
1894	if (!ublk_check_ubuf_dir(req, dir))
1895		goto fail;
1896
1897	*off = buf_off;
1898	return req;
1899fail:
1900	ublk_put_req_ref(ubq, req);
1901	return ERR_PTR(-EACCES);
1902}
1903
1904static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
1905{
1906	struct ublk_queue *ubq;
1907	struct request *req;
1908	size_t buf_off;
1909	size_t ret;
1910
1911	req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
1912	if (IS_ERR(req))
1913		return PTR_ERR(req);
1914
1915	ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
1916	ubq = req->mq_hctx->driver_data;
1917	ublk_put_req_ref(ubq, req);
1918
1919	return ret;
1920}
1921
1922static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
1923{
1924	struct ublk_queue *ubq;
1925	struct request *req;
1926	size_t buf_off;
1927	size_t ret;
1928
1929	req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
1930	if (IS_ERR(req))
1931		return PTR_ERR(req);
1932
1933	ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
1934	ubq = req->mq_hctx->driver_data;
1935	ublk_put_req_ref(ubq, req);
1936
1937	return ret;
1938}
1939
1940static const struct file_operations ublk_ch_fops = {
1941	.owner = THIS_MODULE,
1942	.open = ublk_ch_open,
1943	.release = ublk_ch_release,
1944	.llseek = no_llseek,
1945	.read_iter = ublk_ch_read_iter,
1946	.write_iter = ublk_ch_write_iter,
1947	.uring_cmd = ublk_ch_uring_cmd,
1948	.mmap = ublk_ch_mmap,
1949};
1950
1951static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
1952{
1953	int size = ublk_queue_cmd_buf_size(ub, q_id);
1954	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1955
1956	if (ubq->ubq_daemon)
1957		put_task_struct(ubq->ubq_daemon);
1958	if (ubq->io_cmd_buf)
1959		free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
1960}
1961
1962static int ublk_init_queue(struct ublk_device *ub, int q_id)
1963{
1964	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
1965	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
1966	void *ptr;
1967	int size;
1968
1969	spin_lock_init(&ubq->cancel_lock);
1970	ubq->flags = ub->dev_info.flags;
1971	ubq->q_id = q_id;
1972	ubq->q_depth = ub->dev_info.queue_depth;
1973	size = ublk_queue_cmd_buf_size(ub, q_id);
1974
1975	ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
1976	if (!ptr)
1977		return -ENOMEM;
1978
1979	ubq->io_cmd_buf = ptr;
1980	ubq->dev = ub;
1981	return 0;
1982}
1983
1984static void ublk_deinit_queues(struct ublk_device *ub)
1985{
1986	int nr_queues = ub->dev_info.nr_hw_queues;
1987	int i;
1988
1989	if (!ub->__queues)
1990		return;
1991
1992	for (i = 0; i < nr_queues; i++)
1993		ublk_deinit_queue(ub, i);
1994	kfree(ub->__queues);
1995}
1996
1997static int ublk_init_queues(struct ublk_device *ub)
1998{
1999	int nr_queues = ub->dev_info.nr_hw_queues;
2000	int depth = ub->dev_info.queue_depth;
2001	int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
2002	int i, ret = -ENOMEM;
2003
2004	ub->queue_size = ubq_size;
2005	ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
2006	if (!ub->__queues)
2007		return ret;
2008
2009	for (i = 0; i < nr_queues; i++) {
2010		if (ublk_init_queue(ub, i))
2011			goto fail;
2012	}
2013
2014	init_completion(&ub->completion);
2015	return 0;
2016
2017 fail:
2018	ublk_deinit_queues(ub);
2019	return ret;
2020}
2021
2022static int ublk_alloc_dev_number(struct ublk_device *ub, int idx)
2023{
2024	int i = idx;
2025	int err;
2026
2027	spin_lock(&ublk_idr_lock);
	/* allocate id; if @idx >= 0, we're requesting that specific id */
2029	if (i >= 0) {
2030		err = idr_alloc(&ublk_index_idr, ub, i, i + 1, GFP_NOWAIT);
2031		if (err == -ENOSPC)
2032			err = -EEXIST;
2033	} else {
2034		err = idr_alloc(&ublk_index_idr, ub, 0, 0, GFP_NOWAIT);
2035	}
2036	spin_unlock(&ublk_idr_lock);
2037
2038	if (err >= 0)
2039		ub->ub_number = err;
2040
2041	return err;
2042}
2043
2044static void ublk_free_dev_number(struct ublk_device *ub)
2045{
2046	spin_lock(&ublk_idr_lock);
2047	idr_remove(&ublk_index_idr, ub->ub_number);
2048	wake_up_all(&ublk_idr_wq);
2049	spin_unlock(&ublk_idr_lock);
2050}
2051
2052static void ublk_cdev_rel(struct device *dev)
2053{
2054	struct ublk_device *ub = container_of(dev, struct ublk_device, cdev_dev);
2055
2056	blk_mq_free_tag_set(&ub->tag_set);
2057	ublk_deinit_queues(ub);
2058	ublk_free_dev_number(ub);
2059	mutex_destroy(&ub->mutex);
2060	kfree(ub);
2061}
2062
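/*
 * Register the per-device char device /dev/ublkcN, which the ublk server
 * opens for issuing io_uring commands, mmap() and user copy. Final cleanup
 * happens in ublk_cdev_rel() once the last reference to cdev_dev is dropped.
 */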
2063static int ublk_add_chdev(struct ublk_device *ub)
2064{
2065	struct device *dev = &ub->cdev_dev;
2066	int minor = ub->ub_number;
2067	int ret;
2068
2069	dev->parent = ublk_misc.this_device;
2070	dev->devt = MKDEV(MAJOR(ublk_chr_devt), minor);
2071	dev->class = &ublk_chr_class;
2072	dev->release = ublk_cdev_rel;
2073	device_initialize(dev);
2074
2075	ret = dev_set_name(dev, "ublkc%d", minor);
2076	if (ret)
2077		goto fail;
2078
2079	cdev_init(&ub->cdev, &ublk_ch_fops);
2080	ret = cdev_device_add(&ub->cdev, dev);
2081	if (ret)
2082		goto fail;
2083
2084	ublks_added++;
2085	return 0;
2086 fail:
2087	put_device(dev);
2088	return ret;
2089}
2090
2091static void ublk_stop_work_fn(struct work_struct *work)
2092{
2093	struct ublk_device *ub =
2094		container_of(work, struct ublk_device, stop_work);
2095
2096	ublk_stop_dev(ub);
2097}
2098
2099/* align max io buffer size with PAGE_SIZE */
2100static void ublk_align_max_io_size(struct ublk_device *ub)
2101{
2102	unsigned int max_io_bytes = ub->dev_info.max_io_buf_bytes;
2103
2104	ub->dev_info.max_io_buf_bytes =
2105		round_down(max_io_bytes, PAGE_SIZE);
2106}
2107
2108static int ublk_add_tag_set(struct ublk_device *ub)
2109{
2110	ub->tag_set.ops = &ublk_mq_ops;
2111	ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
2112	ub->tag_set.queue_depth = ub->dev_info.queue_depth;
2113	ub->tag_set.numa_node = NUMA_NO_NODE;
2114	ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
2115	ub->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2116	ub->tag_set.driver_data = ub;
2117	return blk_mq_alloc_tag_set(&ub->tag_set);
2118}
2119
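/*
 * Tear down one device: stop the disk, cancel any pending stop/quiesce work,
 * remove /dev/ublkcN and drop the char device reference so the device can be
 * released.
 */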
2120static void ublk_remove(struct ublk_device *ub)
2121{
2122	ublk_stop_dev(ub);
2123	cancel_work_sync(&ub->stop_work);
2124	cancel_work_sync(&ub->quiesce_work);
2125	cdev_device_del(&ub->cdev, &ub->cdev_dev);
2126	put_device(&ub->cdev_dev);
2127	ublks_added--;
2128}
2129
2130static struct ublk_device *ublk_get_device_from_id(int idx)
2131{
2132	struct ublk_device *ub = NULL;
2133
2134	if (idx < 0)
2135		return NULL;
2136
2137	spin_lock(&ublk_idr_lock);
2138	ub = idr_find(&ublk_index_idr, idx);
2139	if (ub)
2140		ub = ublk_get_device(ub);
2141	spin_unlock(&ublk_idr_lock);
2142
2143	return ub;
2144}
2145
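/*
 * START_DEV: wait until the ublk server has marked every queue ready (i.e.
 * it has issued a FETCH_REQ command for each tag), then allocate the block
 * device ublkbN, apply the staged parameters and add the disk.
 */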
2146static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
2147{
2148	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2149	int ublksrv_pid = (int)header->data[0];
2150	struct gendisk *disk;
2151	int ret = -EINVAL;
2152
2153	if (ublksrv_pid <= 0)
2154		return -EINVAL;
2155
2156	if (wait_for_completion_interruptible(&ub->completion) != 0)
2157		return -EINTR;
2158
2159	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2160
2161	mutex_lock(&ub->mutex);
2162	if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
2163	    test_bit(UB_STATE_USED, &ub->state)) {
2164		ret = -EEXIST;
2165		goto out_unlock;
2166	}
2167
2168	disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
2169	if (IS_ERR(disk)) {
2170		ret = PTR_ERR(disk);
2171		goto out_unlock;
2172	}
2173	sprintf(disk->disk_name, "ublkb%d", ub->ub_number);
2174	disk->fops = &ub_fops;
2175	disk->private_data = ub;
2176
2177	ub->dev_info.ublksrv_pid = ublksrv_pid;
2178	ub->ub_disk = disk;
2179
2180	ret = ublk_apply_params(ub);
2181	if (ret)
2182		goto out_put_disk;
2183
	/* don't probe partitions if any ubq daemon is untrusted */
2185	if (ub->nr_privileged_daemon != ub->nr_queues_ready)
2186		set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
2187
2188	get_device(&ub->cdev_dev);
2189	ub->dev_info.state = UBLK_S_DEV_LIVE;
2190
2191	if (ublk_dev_is_zoned(ub)) {
2192		ret = ublk_revalidate_disk_zones(ub);
2193		if (ret)
2194			goto out_put_cdev;
2195	}
2196
2197	ret = add_disk(disk);
2198	if (ret)
2199		goto out_put_cdev;
2200
2201	set_bit(UB_STATE_USED, &ub->state);
2202
2203out_put_cdev:
2204	if (ret) {
2205		ub->dev_info.state = UBLK_S_DEV_DEAD;
2206		ublk_put_device(ub);
2207	}
2208out_put_disk:
2209	if (ret)
2210		put_disk(disk);
2211out_unlock:
2212	mutex_unlock(&ub->mutex);
2213	return ret;
2214}
2215
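/*
 * GET_QUEUE_AFFINITY: report which CPUs are mapped to the given hw queue,
 * so the ublk server can pin the corresponding per-queue daemon. The result
 * is copied to header->addr as a cpumask bitmap; any tail beyond the kernel
 * cpumask size is zeroed.
 */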
2216static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
2217		struct io_uring_cmd *cmd)
2218{
2219	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2220	void __user *argp = (void __user *)(unsigned long)header->addr;
2221	cpumask_var_t cpumask;
2222	unsigned long queue;
2223	unsigned int retlen;
2224	unsigned int i;
2225	int ret;
2226
2227	if (header->len * BITS_PER_BYTE < nr_cpu_ids)
2228		return -EINVAL;
2229	if (header->len & (sizeof(unsigned long)-1))
2230		return -EINVAL;
2231	if (!header->addr)
2232		return -EINVAL;
2233
2234	queue = header->data[0];
2235	if (queue >= ub->dev_info.nr_hw_queues)
2236		return -EINVAL;
2237
2238	if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
2239		return -ENOMEM;
2240
2241	for_each_possible_cpu(i) {
2242		if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
2243			cpumask_set_cpu(i, cpumask);
2244	}
2245
2246	ret = -EFAULT;
2247	retlen = min_t(unsigned short, header->len, cpumask_size());
2248	if (copy_to_user(argp, cpumask, retlen))
2249		goto out_free_cpumask;
2250	if (retlen != header->len &&
2251	    clear_user(argp + retlen, header->len - retlen))
2252		goto out_free_cpumask;
2253
2254	ret = 0;
2255out_free_cpumask:
2256	free_cpumask_var(cpumask);
2257	return ret;
2258}
2259
2260static inline void ublk_dump_dev_info(struct ublksrv_ctrl_dev_info *info)
2261{
2262	pr_devel("%s: dev id %d flags %llx\n", __func__,
2263			info->dev_id, info->flags);
2264	pr_devel("\t nr_hw_queues %d queue_depth %d\n",
2265			info->nr_hw_queues, info->queue_depth);
2266}
2267
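/*
 * ADD_DEV: validate the ublksrv_ctrl_dev_info passed via header->addr,
 * allocate a device number, set up the queues and the blk-mq tag set, and
 * register the /dev/ublkcN char device. The (possibly adjusted) dev_info is
 * copied back so that userspace sees the negotiated features and the
 * allocated device id.
 */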
2268static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
2269{
2270	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2271	void __user *argp = (void __user *)(unsigned long)header->addr;
2272	struct ublksrv_ctrl_dev_info info;
2273	struct ublk_device *ub;
2274	int ret = -EINVAL;
2275
2276	if (header->len < sizeof(info) || !header->addr)
2277		return -EINVAL;
2278	if (header->queue_id != (u16)-1) {
2279		pr_warn("%s: queue_id is wrong %x\n",
2280			__func__, header->queue_id);
2281		return -EINVAL;
2282	}
2283
2284	if (copy_from_user(&info, argp, sizeof(info)))
2285		return -EFAULT;
2286
2287	if (capable(CAP_SYS_ADMIN))
2288		info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
2289	else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
2290		return -EPERM;
2291
	/*
	 * An unprivileged device can't be trusted, but RECOVERY and
	 * RECOVERY_REISSUE may still hang error handling, so the recovery
	 * features can't be supported for unprivileged ublk yet.
	 *
	 * TODO: provide forward progress for the RECOVERY handler, so that
	 * unprivileged devices can benefit from it.
	 */
2300	if (info.flags & UBLK_F_UNPRIVILEGED_DEV)
2301		info.flags &= ~(UBLK_F_USER_RECOVERY_REISSUE |
2302				UBLK_F_USER_RECOVERY);
2303
	/* the created device is always owned by the current user */
2305	ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
2306
2307	if (header->dev_id != info.dev_id) {
2308		pr_warn("%s: dev id not match %u %u\n",
2309			__func__, header->dev_id, info.dev_id);
2310		return -EINVAL;
2311	}
2312
2313	ublk_dump_dev_info(&info);
2314
2315	ret = mutex_lock_killable(&ublk_ctl_mutex);
2316	if (ret)
2317		return ret;
2318
2319	ret = -EACCES;
2320	if (ublks_added >= ublks_max)
2321		goto out_unlock;
2322
2323	ret = -ENOMEM;
2324	ub = kzalloc(sizeof(*ub), GFP_KERNEL);
2325	if (!ub)
2326		goto out_unlock;
2327	mutex_init(&ub->mutex);
2328	spin_lock_init(&ub->mm_lock);
2329	INIT_WORK(&ub->quiesce_work, ublk_quiesce_work_fn);
2330	INIT_WORK(&ub->stop_work, ublk_stop_work_fn);
2331	INIT_DELAYED_WORK(&ub->monitor_work, ublk_daemon_monitor_work);
2332
2333	ret = ublk_alloc_dev_number(ub, header->dev_id);
2334	if (ret < 0)
2335		goto out_free_ub;
2336
2337	memcpy(&ub->dev_info, &info, sizeof(info));
2338
2339	/* update device id */
2340	ub->dev_info.dev_id = ub->ub_number;
2341
	/*
	 * The 64-bit flags are copied back to userspace as the feature
	 * negotiation result, so clear any flags which the driver doesn't
	 * support yet; userspace then gets the correct set of flags
	 * (features) to handle.
	 */
2348	ub->dev_info.flags &= UBLK_F_ALL;
2349
2350	ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
2351		UBLK_F_URING_CMD_COMP_IN_TASK;
2352
2353	/* GET_DATA isn't needed any more with USER_COPY */
2354	if (ublk_dev_is_user_copy(ub))
2355		ub->dev_info.flags &= ~UBLK_F_NEED_GET_DATA;
2356
2357	/* Zoned storage support requires user copy feature */
2358	if (ublk_dev_is_zoned(ub) &&
2359	    (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || !ublk_dev_is_user_copy(ub))) {
2360		ret = -EINVAL;
2361		goto out_free_dev_number;
2362	}
2363
2364	/* We are not ready to support zero copy */
2365	ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
2366
2367	ub->dev_info.nr_hw_queues = min_t(unsigned int,
2368			ub->dev_info.nr_hw_queues, nr_cpu_ids);
2369	ublk_align_max_io_size(ub);
2370
2371	ret = ublk_init_queues(ub);
2372	if (ret)
2373		goto out_free_dev_number;
2374
2375	ret = ublk_add_tag_set(ub);
2376	if (ret)
2377		goto out_deinit_queues;
2378
2379	ret = -EFAULT;
2380	if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
2381		goto out_free_tag_set;
2382
	/*
	 * Add the char dev so that the ublksrv daemon can be set up.
	 * ublk_add_chdev() will clean up everything if it fails.
	 */
2387	ret = ublk_add_chdev(ub);
2388	goto out_unlock;
2389
2390out_free_tag_set:
2391	blk_mq_free_tag_set(&ub->tag_set);
2392out_deinit_queues:
2393	ublk_deinit_queues(ub);
2394out_free_dev_number:
2395	ublk_free_dev_number(ub);
2396out_free_ub:
2397	mutex_destroy(&ub->mutex);
2398	kfree(ub);
2399out_unlock:
2400	mutex_unlock(&ublk_ctl_mutex);
2401	return ret;
2402}
2403
2404static inline bool ublk_idr_freed(int id)
2405{
2406	void *ptr;
2407
2408	spin_lock(&ublk_idr_lock);
2409	ptr = idr_find(&ublk_index_idr, id);
2410	spin_unlock(&ublk_idr_lock);
2411
2412	return ptr == NULL;
2413}
2414
2415static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
2416{
2417	struct ublk_device *ub = *p_ub;
2418	int idx = ub->ub_number;
2419	int ret;
2420
2421	ret = mutex_lock_killable(&ublk_ctl_mutex);
2422	if (ret)
2423		return ret;
2424
2425	if (!test_bit(UB_STATE_DELETED, &ub->state)) {
2426		ublk_remove(ub);
2427		set_bit(UB_STATE_DELETED, &ub->state);
2428	}
2429
2430	/* Mark the reference as consumed */
2431	*p_ub = NULL;
2432	ublk_put_device(ub);
2433	mutex_unlock(&ublk_ctl_mutex);
2434
	/*
	 * Wait until the idr entry is removed, so the device number can be
	 * reused after the DEL_DEV command returns.
	 *
	 * If we return early because of a user interrupt, a future delete
	 * command may still arrive:
	 *
	 * - if the device number isn't freed yet, the device won't (and
	 *   needn't) be deleted again, since UB_STATE_DELETED is set, and
	 *   the device will be released after the last reference is dropped
	 *
	 * - if the device number is already freed, we will not find this
	 *   device via ublk_get_device_from_id()
	 */
2449	if (wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx)))
2450		return -EINTR;
2451	return 0;
2452}
2453
2454static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
2455{
2456	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2457
2458	pr_devel("%s: cmd_op %x, dev id %d qid %d data %llx buf %llx len %u\n",
2459			__func__, cmd->cmd_op, header->dev_id, header->queue_id,
2460			header->data[0], header->addr, header->len);
2461}
2462
2463static int ublk_ctrl_stop_dev(struct ublk_device *ub)
2464{
2465	ublk_stop_dev(ub);
2466	cancel_work_sync(&ub->stop_work);
2467	cancel_work_sync(&ub->quiesce_work);
2468
2469	return 0;
2470}
2471
2472static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
2473		struct io_uring_cmd *cmd)
2474{
2475	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2476	void __user *argp = (void __user *)(unsigned long)header->addr;
2477
2478	if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
2479		return -EINVAL;
2480
2481	if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
2482		return -EFAULT;
2483
2484	return 0;
2485}
2486
2487/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
2488static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
2489{
2490	ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
2491	ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
2492
2493	if (ub->ub_disk) {
2494		ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
2495		ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
2496	} else {
2497		ub->params.devt.disk_major = 0;
2498		ub->params.devt.disk_minor = 0;
2499	}
2500	ub->params.types |= UBLK_PARAM_TYPE_DEVT;
2501}
2502
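/*
 * GET_PARAMS/SET_PARAMS exchange a struct ublk_params that starts with a
 * struct ublk_params_header: userspace fills in ph.len (and ph.types for
 * SET_PARAMS), and the driver copies at most sizeof(struct ublk_params),
 * so differently sized userspace and kernel versions can interoperate.
 */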
2503static int ublk_ctrl_get_params(struct ublk_device *ub,
2504		struct io_uring_cmd *cmd)
2505{
2506	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2507	void __user *argp = (void __user *)(unsigned long)header->addr;
2508	struct ublk_params_header ph;
2509	int ret;
2510
2511	if (header->len <= sizeof(ph) || !header->addr)
2512		return -EINVAL;
2513
2514	if (copy_from_user(&ph, argp, sizeof(ph)))
2515		return -EFAULT;
2516
2517	if (ph.len > header->len || !ph.len)
2518		return -EINVAL;
2519
2520	if (ph.len > sizeof(struct ublk_params))
2521		ph.len = sizeof(struct ublk_params);
2522
2523	mutex_lock(&ub->mutex);
2524	ublk_ctrl_fill_params_devt(ub);
2525	if (copy_to_user(argp, &ub->params, ph.len))
2526		ret = -EFAULT;
2527	else
2528		ret = 0;
2529	mutex_unlock(&ub->mutex);
2530
2531	return ret;
2532}
2533
2534static int ublk_ctrl_set_params(struct ublk_device *ub,
2535		struct io_uring_cmd *cmd)
2536{
2537	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2538	void __user *argp = (void __user *)(unsigned long)header->addr;
2539	struct ublk_params_header ph;
2540	int ret = -EFAULT;
2541
2542	if (header->len <= sizeof(ph) || !header->addr)
2543		return -EINVAL;
2544
2545	if (copy_from_user(&ph, argp, sizeof(ph)))
2546		return -EFAULT;
2547
2548	if (ph.len > header->len || !ph.len || !ph.types)
2549		return -EINVAL;
2550
2551	if (ph.len > sizeof(struct ublk_params))
2552		ph.len = sizeof(struct ublk_params);
2553
2554	/* parameters can only be changed when device isn't live */
2555	mutex_lock(&ub->mutex);
2556	if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
2557		ret = -EACCES;
2558	} else if (copy_from_user(&ub->params, argp, ph.len)) {
2559		ret = -EFAULT;
2560	} else {
2561		/* clear all we don't support yet */
2562		ub->params.types &= UBLK_PARAM_TYPE_ALL;
2563		ret = ublk_validate_params(ub);
2564		if (ret)
2565			ub->params.types = 0;
2566	}
2567	mutex_unlock(&ub->mutex);
2568
2569	return ret;
2570}
2571
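/*
 * Reset one queue for user recovery: drop the reference to the dying daemon
 * task and clear the per-tag io state, so that a fresh ublk server can
 * re-attach by issuing FETCH_REQ again. Called with ub->mutex held from
 * ublk_ctrl_start_recovery().
 */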
2572static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
2573{
2574	int i;
2575
2576	WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
2577
2578	/* All old ioucmds have to be completed */
2579	ubq->nr_io_ready = 0;
2580	/* old daemon is PF_EXITING, put it now */
2581	put_task_struct(ubq->ubq_daemon);
2582	/* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
2583	ubq->ubq_daemon = NULL;
2584	ubq->timeout = false;
2585
2586	for (i = 0; i < ubq->q_depth; i++) {
2587		struct ublk_io *io = &ubq->ios[i];
2588
2589		/* forget everything now and be ready for new FETCH_REQ */
2590		io->flags = 0;
2591		io->cmd = NULL;
2592		io->addr = 0;
2593	}
2594}
2595
2596static int ublk_ctrl_start_recovery(struct ublk_device *ub,
2597		struct io_uring_cmd *cmd)
2598{
2599	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2600	int ret = -EINVAL;
2601	int i;
2602
2603	mutex_lock(&ub->mutex);
2604	if (!ublk_can_use_recovery(ub))
2605		goto out_unlock;
	/*
	 * START_RECOVERY is only allowed after:
	 *
	 * (1) UB_STATE_OPEN is not set, which means the dying process has
	 *     exited and the related io_uring ctx is freed, so the file
	 *     struct of /dev/ublkcX is released.
	 *
	 * (2) UBLK_S_DEV_QUIESCED is set, which means the quiesce_work:
	 *     (a) has quiesced the request queue
	 *     (b) has requeued every inflight rq whose io_flags is ACTIVE
	 *     (c) has requeued/aborted every inflight rq whose io_flags is NOT ACTIVE
	 *     (d) has completed/canceled all ioucmds owned by the dying process
	 */
2619	if (test_bit(UB_STATE_OPEN, &ub->state) ||
2620			ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2621		ret = -EBUSY;
2622		goto out_unlock;
2623	}
2624	pr_devel("%s: start recovery for dev id %d.\n", __func__, header->dev_id);
2625	for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
2626		ublk_queue_reinit(ub, ublk_get_queue(ub, i));
2627	/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
2628	ub->mm = NULL;
2629	ub->nr_queues_ready = 0;
2630	ub->nr_privileged_daemon = 0;
2631	init_completion(&ub->completion);
2632	ret = 0;
2633 out_unlock:
2634	mutex_unlock(&ub->mutex);
2635	return ret;
2636}
2637
2638static int ublk_ctrl_end_recovery(struct ublk_device *ub,
2639		struct io_uring_cmd *cmd)
2640{
2641	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2642	int ublksrv_pid = (int)header->data[0];
2643	int ret = -EINVAL;
2644
	pr_devel("%s: Waiting for new ubq_daemons(nr: %d) to be ready, dev id %d...\n",
			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
	/* wait until the new ubq_daemons have sent all their FETCH_REQ commands */
2648	if (wait_for_completion_interruptible(&ub->completion))
2649		return -EINTR;
2650
2651	pr_devel("%s: All new ubq_daemons(nr: %d) are ready, dev id %d\n",
2652			__func__, ub->dev_info.nr_hw_queues, header->dev_id);
2653
2654	mutex_lock(&ub->mutex);
2655	if (!ublk_can_use_recovery(ub))
2656		goto out_unlock;
2657
2658	if (ub->dev_info.state != UBLK_S_DEV_QUIESCED) {
2659		ret = -EBUSY;
2660		goto out_unlock;
2661	}
2662	ub->dev_info.ublksrv_pid = ublksrv_pid;
2663	pr_devel("%s: new ublksrv_pid %d, dev id %d\n",
2664			__func__, ublksrv_pid, header->dev_id);
2665	blk_mq_unquiesce_queue(ub->ub_disk->queue);
2666	pr_devel("%s: queue unquiesced, dev id %d.\n",
2667			__func__, header->dev_id);
2668	blk_mq_kick_requeue_list(ub->ub_disk->queue);
2669	ub->dev_info.state = UBLK_S_DEV_LIVE;
2670	schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
2671	ret = 0;
2672 out_unlock:
2673	mutex_unlock(&ub->mutex);
2674	return ret;
2675}
2676
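/*
 * GET_FEATURES: report the feature flags supported by the driver so that
 * the ublk server can negotiate features before ADD_DEV. Zero copy is
 * masked out because it isn't implemented yet.
 */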
2677static int ublk_ctrl_get_features(struct io_uring_cmd *cmd)
2678{
2679	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2680	void __user *argp = (void __user *)(unsigned long)header->addr;
2681	u64 features = UBLK_F_ALL & ~UBLK_F_SUPPORT_ZERO_COPY;
2682
2683	if (header->len != UBLK_FEATURES_LEN || !header->addr)
2684		return -EINVAL;
2685
2686	if (copy_to_user(argp, &features, UBLK_FEATURES_LEN))
2687		return -EFAULT;
2688
2689	return 0;
2690}
2691
2692/*
2693 * All control commands are sent via /dev/ublk-control, so we have to check
2694 * the destination device's permission
2695 */
2696static int ublk_char_dev_permission(struct ublk_device *ub,
2697		const char *dev_path, int mask)
2698{
2699	int err;
2700	struct path path;
2701	struct kstat stat;
2702
2703	err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
2704	if (err)
2705		return err;
2706
2707	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
2708	if (err)
2709		goto exit;
2710
2711	err = -EPERM;
2712	if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
2713		goto exit;
2714
2715	err = inode_permission(&nop_mnt_idmap,
2716			d_backing_inode(path.dentry), mask);
2717exit:
2718	path_put(&path);
2719	return err;
2720}
2721
2722static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
2723		struct io_uring_cmd *cmd)
2724{
2725	struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)io_uring_sqe_cmd(cmd->sqe);
2726	bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
2727	void __user *argp = (void __user *)(unsigned long)header->addr;
2728	char *dev_path = NULL;
2729	int ret = 0;
2730	int mask;
2731
2732	if (!unprivileged) {
2733		if (!capable(CAP_SYS_ADMIN))
2734			return -EPERM;
		/*
		 * The newly added UBLK_CMD_GET_DEV_INFO2 command includes
		 * char_dev_path in its payload too, since userspace may not
		 * know whether the specified device was created in
		 * unprivileged mode.
		 */
2741		if (_IOC_NR(cmd->cmd_op) != UBLK_CMD_GET_DEV_INFO2)
2742			return 0;
2743	}
2744
	/*
	 * The user has to provide the char device path for unprivileged ublk.
	 *
	 * header->addr always points to the dev path buffer, and
	 * header->dev_path_len records the length of the dev path buffer.
	 */
2751	if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
2752		return -EINVAL;
2753
2754	if (header->len < header->dev_path_len)
2755		return -EINVAL;
2756
2757	dev_path = memdup_user_nul(argp, header->dev_path_len);
2758	if (IS_ERR(dev_path))
2759		return PTR_ERR(dev_path);
2760
2761	ret = -EINVAL;
2762	switch (_IOC_NR(cmd->cmd_op)) {
2763	case UBLK_CMD_GET_DEV_INFO:
2764	case UBLK_CMD_GET_DEV_INFO2:
2765	case UBLK_CMD_GET_QUEUE_AFFINITY:
2766	case UBLK_CMD_GET_PARAMS:
2767	case (_IOC_NR(UBLK_U_CMD_GET_FEATURES)):
2768		mask = MAY_READ;
2769		break;
2770	case UBLK_CMD_START_DEV:
2771	case UBLK_CMD_STOP_DEV:
2772	case UBLK_CMD_ADD_DEV:
2773	case UBLK_CMD_DEL_DEV:
2774	case UBLK_CMD_SET_PARAMS:
2775	case UBLK_CMD_START_USER_RECOVERY:
2776	case UBLK_CMD_END_USER_RECOVERY:
2777		mask = MAY_READ | MAY_WRITE;
2778		break;
2779	default:
2780		goto exit;
2781	}
2782
2783	ret = ublk_char_dev_permission(ub, dev_path, mask);
2784	if (!ret) {
2785		header->len -= header->dev_path_len;
2786		header->addr += header->dev_path_len;
2787	}
2788	pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
2789			__func__, ub->ub_number, cmd->cmd_op,
2790			ub->dev_info.owner_uid, ub->dev_info.owner_gid,
2791			dev_path, ret);
2792exit:
2793	kfree(dev_path);
2794	return ret;
2795}
2796
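/*
 * Entry point for all control commands sent to /dev/ublk-control. Commands
 * must use 128-byte SQEs; every command except ADD_DEV operates on an
 * existing device looked up by header->dev_id and goes through the
 * permission check above. The result is always delivered via
 * io_uring_cmd_done().
 */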
2797static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
2798		unsigned int issue_flags)
2799{
2800	const struct ublksrv_ctrl_cmd *header = io_uring_sqe_cmd(cmd->sqe);
2801	struct ublk_device *ub = NULL;
2802	u32 cmd_op = cmd->cmd_op;
2803	int ret = -EINVAL;
2804
2805	if (issue_flags & IO_URING_F_NONBLOCK)
2806		return -EAGAIN;
2807
2808	ublk_ctrl_cmd_dump(cmd);
2809
2810	if (!(issue_flags & IO_URING_F_SQE128))
2811		goto out;
2812
2813	ret = ublk_check_cmd_op(cmd_op);
2814	if (ret)
2815		goto out;
2816
2817	if (cmd_op == UBLK_U_CMD_GET_FEATURES) {
2818		ret = ublk_ctrl_get_features(cmd);
2819		goto out;
2820	}
2821
2822	if (_IOC_NR(cmd_op) != UBLK_CMD_ADD_DEV) {
2823		ret = -ENODEV;
2824		ub = ublk_get_device_from_id(header->dev_id);
2825		if (!ub)
2826			goto out;
2827
2828		ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
2829		if (ret)
2830			goto put_dev;
2831	}
2832
2833	switch (_IOC_NR(cmd_op)) {
2834	case UBLK_CMD_START_DEV:
2835		ret = ublk_ctrl_start_dev(ub, cmd);
2836		break;
2837	case UBLK_CMD_STOP_DEV:
2838		ret = ublk_ctrl_stop_dev(ub);
2839		break;
2840	case UBLK_CMD_GET_DEV_INFO:
2841	case UBLK_CMD_GET_DEV_INFO2:
2842		ret = ublk_ctrl_get_dev_info(ub, cmd);
2843		break;
2844	case UBLK_CMD_ADD_DEV:
2845		ret = ublk_ctrl_add_dev(cmd);
2846		break;
2847	case UBLK_CMD_DEL_DEV:
2848		ret = ublk_ctrl_del_dev(&ub);
2849		break;
2850	case UBLK_CMD_GET_QUEUE_AFFINITY:
2851		ret = ublk_ctrl_get_queue_affinity(ub, cmd);
2852		break;
2853	case UBLK_CMD_GET_PARAMS:
2854		ret = ublk_ctrl_get_params(ub, cmd);
2855		break;
2856	case UBLK_CMD_SET_PARAMS:
2857		ret = ublk_ctrl_set_params(ub, cmd);
2858		break;
2859	case UBLK_CMD_START_USER_RECOVERY:
2860		ret = ublk_ctrl_start_recovery(ub, cmd);
2861		break;
2862	case UBLK_CMD_END_USER_RECOVERY:
2863		ret = ublk_ctrl_end_recovery(ub, cmd);
2864		break;
2865	default:
2866		ret = -ENOTSUPP;
2867		break;
2868	}
2869
2870 put_dev:
2871	if (ub)
2872		ublk_put_device(ub);
2873 out:
2874	io_uring_cmd_done(cmd, ret, 0, issue_flags);
2875	pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
2876			__func__, ret, cmd->cmd_op, header->dev_id, header->queue_id);
2877	return -EIOCBQUEUED;
2878}
2879
2880static const struct file_operations ublk_ctl_fops = {
2881	.open		= nonseekable_open,
2882	.uring_cmd      = ublk_ctrl_uring_cmd,
2883	.owner		= THIS_MODULE,
2884	.llseek		= noop_llseek,
2885};
2886
2887static struct miscdevice ublk_misc = {
2888	.minor		= MISC_DYNAMIC_MINOR,
2889	.name		= "ublk-control",
2890	.fops		= &ublk_ctl_fops,
2891};
2892
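/*
 * Module init: register the /dev/ublk-control misc device, reserve the char
 * device region used for the /dev/ublkcN nodes, and register their device
 * class.
 */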
2893static int __init ublk_init(void)
2894{
2895	int ret;
2896
2897	BUILD_BUG_ON((u64)UBLKSRV_IO_BUF_OFFSET +
2898			UBLKSRV_IO_BUF_TOTAL_SIZE < UBLKSRV_IO_BUF_OFFSET);
2899
2900	init_waitqueue_head(&ublk_idr_wq);
2901
2902	ret = misc_register(&ublk_misc);
2903	if (ret)
2904		return ret;
2905
2906	ret = alloc_chrdev_region(&ublk_chr_devt, 0, UBLK_MINORS, "ublk-char");
2907	if (ret)
2908		goto unregister_mis;
2909
2910	ret = class_register(&ublk_chr_class);
2911	if (ret)
2912		goto free_chrdev_region;
2913
2914	return 0;
2915
2916free_chrdev_region:
2917	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2918unregister_mis:
2919	misc_deregister(&ublk_misc);
2920	return ret;
2921}
2922
2923static void __exit ublk_exit(void)
2924{
2925	struct ublk_device *ub;
2926	int id;
2927
2928	idr_for_each_entry(&ublk_index_idr, ub, id)
2929		ublk_remove(ub);
2930
2931	class_unregister(&ublk_chr_class);
2932	misc_deregister(&ublk_misc);
2933
2934	idr_destroy(&ublk_index_idr);
2935	unregister_chrdev_region(ublk_chr_devt, UBLK_MINORS);
2936}
2937
2938module_init(ublk_init);
2939module_exit(ublk_exit);
2940
2941module_param(ublks_max, int, 0444);
MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to be added (default: 64)");
2943
2944MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
2945MODULE_LICENSE("GPL");
2946