// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
#include <uapi/linux/vhost_types.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

struct vdpasim_mm_work {
	struct kthread_work work;
	struct vdpasim *vdpasim;
	struct mm_struct *mm_to_bind;
	int ret;
};

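/*
 * Runs on the simulator's kthread worker: the mm binding is updated by
 * the same thread that later adopts it in vdpasim_work_fn(), so no extra
 * synchronization around mm_bound is needed.
 */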
static void vdpasim_mm_work_fn(struct kthread_work *work)
{
	struct vdpasim_mm_work *mm_work =
		container_of(work, struct vdpasim_mm_work, work);
	struct vdpasim *vdpasim = mm_work->vdpasim;

	mm_work->ret = 0;

	//TODO: should we attach the cgroup of the mm owner?
	vdpasim->mm_bound = mm_work->mm_to_bind;
}

static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
					  struct vdpasim_mm_work *mm_work)
{
	struct kthread_work *work = &mm_work->work;

	kthread_init_work(work, vdpasim_mm_work_fn);
	kthread_queue_work(vdpasim->worker, work);

	kthread_flush_work(work);
}

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

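/*
 * (Re)initialize the vring of @idx from the addresses programmed by the
 * driver, using the VA variant of the vringh IOTLB helpers when the
 * device operates on virtual addresses and an mm is bound.
 */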
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	struct vring_desc *desc = (struct vring_desc *)
				  (uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)
				    (uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)
				  (uintptr_t)vq->device_addr;

	if (use_va && vdpasim->mm_bound) {
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	} else {
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);
	}

	vq->vring.last_avail_idx = last_avail_idx;

	/*
	 * Since vdpa_sim does not support receiving inflight descriptors as
	 * the destination of a migration, set both avail_idx and used_idx to
	 * the same value at vq start.  This is how vhost-user works in a
	 * VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix would be to set last_used_idx in
	 * vdpasim_set_vq_state, it would be reset by vdpasim_queue_ready.
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

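/*
 * Return a virtqueue to its post-reset state: not ready, no ring
 * addresses, no callback.
 */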
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

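/*
 * Device-level reset: reset every virtqueue, point all vrings back at
 * address space 0 and restore each IOTLB to a 1:1 passthrough mapping.
 */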
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_reset(&vdpasim->iommu[i]);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
				      0, VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	vdpasim->running = false;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

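/*
 * Worker entry point: when the device uses VA and an mm is bound, take a
 * reference on that mm and adopt it for the duration of the
 * device-specific work so that vring accesses through userspace
 * addresses can succeed.
 */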
static void vdpasim_work_fn(struct kthread_work *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct mm_struct *mm = vdpasim->mm_bound;

	if (use_va && mm) {
		if (!mmget_not_zero(mm))
			return;
		kthread_use_mm(mm);
	}

	vdpasim->dev_attr.work_fn(vdpasim);

	if (use_va && mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

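/*
 * vdpasim_create - allocate and partially initialize a simulated vDPA device
 * @dev_attr: device attributes supplied by the device simulator
 * @config: initial configuration from the management interface
 *
 * The caller keeps ownership of registration and of the config/work
 * callbacks in @dev_attr.  As a rough sketch (the caller-side names
 * below are illustrative, not part of this API), a device simulator's
 * dev_add callback might do:
 *
 *	dev_attr.mgmt_dev = mdev;
 *	dev_attr.name = name;
 *	...
 *	simdev = vdpasim_create(&dev_attr, config);
 *	if (IS_ERR(simdev))
 *		return PTR_ERR(simdev);
 *	ret = _vdpa_register_device(&simdev->vdpa, nvqs);
 */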
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, use_va);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	dev = &vdpasim->vdpa.dev;

	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
	vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
						dev_attr->name);
	if (IS_ERR(vdpasim->worker))
		goto err_iommu;

	mutex_init(&vdpasim->mutex);
	spin_lock_init(&vdpasim->iommu_lock);

	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

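/* Queue the device-specific datapath work on the simulator's worker. */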
void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
	kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

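/*
 * Driver kick: if the device is suspended but DRIVER_OK is set, remember
 * the kick so that vdpasim_resume() can replay it.
 */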
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		vdpasim_schedule_work(vdpasim);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	mutex_lock(&vdpasim->mutex);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	mutex_unlock(&vdpasim->mutex);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

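/*
 * Only the avail index is restored here; the used index is derived from
 * it in vdpasim_queue_ready() (see the comment there about inflight
 * descriptors).
 */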
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	mutex_lock(&vdpasim->mutex);
	vrh->last_avail_idx = state->split.avail_index;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0; CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
{
	return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	mutex_lock(&vdpasim->mutex);
	status = vdpasim->status;
	mutex_unlock(&vdpasim->mutex);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = status;
	vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
	mutex_unlock(&vdpasim->mutex);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = false;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

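/* Resume the device and replay any kick that arrived while suspended. */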
static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

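/*
 * Attach every virtqueue in @group to the IOTLB of address space @asid,
 * so that its descriptors are translated through that mapping.
 */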
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

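/*
 * Replace the whole IOTLB of @asid with the mappings in @iotlb (batched
 * mapping mode).  On failure the IOTLB is left empty.
 */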
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

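/*
 * Bind/unbind the mm used for VA translation.  The update runs on the
 * worker itself (see vdpasim_worker_change_mm_sync()) and is flushed
 * before returning.
 */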
static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = mm;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);

	return mm_work.ret;
}

static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = NULL;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}

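/*
 * Incremental map/unmap (non-batched mode).  The first explicit mapping
 * tears down the 1:1 passthrough mapping installed at reset time.
 */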
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

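/*
 * Teardown counterpart of vdpasim_create(): stop the worker, release
 * per-vq iov state and the device-specific resources, then free the
 * per-device allocations.
 */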
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	kthread_cancel_work_sync(&vdpasim->work);
	kthread_destroy_worker(vdpasim->worker);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	vdpasim->dev_attr.free(vdpasim);

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.get_backend_features   = vdpasim_get_backend_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);