// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");

static char *macaddr;
module_param(macaddr, charp, 0);
MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
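/*
 * Example (hypothetical command line, assuming the simulator is built as
 * the "vdpa_sim" module):
 *
 *   modprobe vdpa_sim macaddr="52:54:00:12:34:56" batch_mapping=0
 *
 * When no MAC address is given, a random locally administered address is
 * generated at device creation time.
 */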

static u8 macaddr_buf[ETH_ALEN];

struct vdpasim_virtqueue {
	struct vringh vring;
	struct vringh_kiov iov;
	unsigned short head;
	bool ready;
	u64 desc_addr;
	u64 device_addr;
	u64 driver_addr;
	u32 num;
	void *private;
	irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
			      (1ULL << VIRTIO_F_VERSION_1)  |
			      (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			      (1ULL << VIRTIO_NET_F_MAC);

struct vdpasim;

struct vdpasim_dev_attr {
	size_t config_size;
	int nvqs;
	void (*get_config)(struct vdpasim *vdpasim, void *config);
};

/* State of each vdpasim device */
struct vdpasim {
	struct vdpa_device vdpa;
	struct vdpasim_virtqueue *vqs;
	struct work_struct work;
	struct vdpasim_dev_attr dev_attr;
	/* spinlock to synchronize virtqueue state */
	spinlock_t lock;
	/* virtio config according to device type */
	void *config;
	struct vhost_iotlb *iommu;
	void *buffer;
	u32 status;
	u32 generation;
	u64 features;
	/* spinlock to synchronize iommu table */
	spinlock_t iommu_lock;
};

/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
{
	return virtio_legacy_is_little_endian() ||
		(vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
}

static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
{
	return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
}

static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
{
	return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
}

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

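/*
 * Bind a virtqueue to the addresses programmed by the driver: set up the
 * vringh instance that the simulator uses to access the descriptor,
 * available and used rings through the software IOTLB.
 */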
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);
}

static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
			  false, NULL, NULL, NULL);
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(&vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

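/*
 * Datapath worker: loop back packets from the TX queue (vqs[1]) into the
 * RX queue (vqs[0]) through the page-sized bounce buffer, complete the
 * used descriptors, fire the virtqueue callbacks, and reschedule itself
 * after a handful of packets so it does not monopolize the workqueue.
 */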
static void vdpasim_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
	ssize_t read, write;
	size_t total_write;
	int pkts = 0;
	int err;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	if (!txq->ready || !rxq->ready)
		goto out;

	while (true) {
		total_write = 0;
		err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err <= 0)
			break;

		err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
					   &rxq->head, GFP_ATOMIC);
		if (err <= 0) {
			vringh_complete_iotlb(&txq->vring, txq->head, 0);
			break;
		}

		while (true) {
			read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
						     vdpasim->buffer,
						     PAGE_SIZE);
			if (read <= 0)
				break;

			write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
						      vdpasim->buffer, read);
			if (write <= 0)
				break;

			total_write += write;
		}

		/* Make sure data is written before advancing index */
		smp_wmb();

		vringh_complete_iotlb(&txq->vring, txq->head, 0);
		vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

		/* Make sure used is visible before raising the interrupt. */
		smp_wmb();

		local_bh_disable();
		if (txq->cb)
			txq->cb(txq->private);
		if (rxq->cb)
			rxq->cb(rxq->private);
		local_bh_enable();

		if (++pkts > 4) {
			schedule_work(&vdpasim->work);
			goto out;
		}
	}

out:
	spin_unlock(&vdpasim->lock);
}

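/* Translate a DMA direction into the matching vhost IOTLB access flags. */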
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/* For simplicity, use an identity mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, pa, perm);
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else {
			*dma_addr = (dma_addr_t)pa;
		}
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}

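/*
 * DMA ops installed on the simulated device: since there is no real
 * hardware IOMMU, every mapping is recorded as an identity entry in the
 * software IOTLB that the vringh accessors consult.
 */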
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_net_config_ops;
static const struct vdpa_config_ops vdpasim_net_batch_config_ops;

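/*
 * Allocate and register a simulated vDPA device: pick the config ops
 * variant according to the batch_mapping parameter, then allocate the
 * config space, virtqueues, software IOTLB and bounce buffer described
 * by @dev_attr, and set up the device MAC address.
 */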
static struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_net_batch_config_ops;
	else
		ops = &vdpasim_net_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->nvqs);
	if (!vdpasim)
		goto err_alloc;

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, vdpasim_work);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	if (macaddr) {
		mac_pton(macaddr, macaddr_buf);
		if (!is_valid_ether_addr(macaddr_buf)) {
			ret = -EADDRNOTAVAIL;
			goto err_iommu;
		}
	} else {
		eth_random_addr(macaddr_buf);
	}

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;
	ret = vdpa_register_device(&vdpasim->vdpa);
	if (ret)
		goto err_iommu;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	return vdpasim_features;
}

static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim_features;

	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	return VDPASIM_DEVICE_ID;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	/* No writable config supported by vdpasim */
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

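/*
 * Batched mapping: replace the whole software IOTLB with the translations
 * provided through the incoming iotlb in one go. On failure the IOTLB is
 * left empty rather than half-populated.
 */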
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

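/*
 * Two config ops tables are exposed: they are identical except that the
 * default one offers incremental dma_map/dma_unmap while the batch variant
 * offers set_map instead, selected at device creation time by the
 * batch_mapping parameter.
 */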
static const struct vdpa_config_ops vdpasim_net_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_features           = vdpasim_get_features,
	.set_features           = vdpasim_set_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_features           = vdpasim_get_features,
	.set_features           = vdpasim_set_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_map                = vdpasim_set_map,
	.free                   = vdpasim_free,
};

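/*
 * Fill the virtio-net config space: report a 1500 byte MTU, link up, and
 * the MAC address chosen at module load time.
 */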
static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_net_config *net_config =
		(struct virtio_net_config *)config;

	net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
	net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
	memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
}

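/*
 * Module init/exit: create a single simulated virtio-net device (two
 * virtqueues, virtio_net_config config space) and unregister it again on
 * unload.
 */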
static int __init vdpasim_dev_init(void)
{
	struct vdpasim_dev_attr dev_attr = {};

	dev_attr.nvqs = VDPASIM_VQ_NUM;
	dev_attr.config_size = sizeof(struct virtio_net_config);
	dev_attr.get_config = vdpasim_net_get_config;

	vdpasim_dev = vdpasim_create(&dev_attr);

	if (!IS_ERR(vdpasim_dev))
		return 0;

	return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
	struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

	vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);