Lines matching refs: RINGSIZE

24 #define RINGSIZE 256
154 mapsize = vring_size(RINGSIZE, ALIGN)
155 + RINGSIZE * 2 * sizeof(int)
156 + RINGSIZE * 6 * sizeof(struct vring_desc);
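
The three matched lines at 154-156 are a single expression. A minimal sketch of that sizing, assuming these matches come from the kernel's tools/virtio vringh selftest and that ALIGN is the test's usual page-sized 4096 (the listing only shows RINGSIZE itself):

/* Hedged sketch, not the test's literal code: how the matched lines above
 * add up to the size of the shared mapping. ALIGN = 4096 is an assumption. */
#include <stddef.h>
#include <linux/virtio_ring.h>  /* vring_size(), struct vring_desc */

#define RINGSIZE 256
#define ALIGN    4096           /* assumed ring alignment */

static size_t map_size(void)
{
        return vring_size(RINGSIZE, ALIGN)               /* the vring proper */
             + RINGSIZE * 2 * sizeof(int)                /* two scratch ints per entry */
             + RINGSIZE * 6 * sizeof(struct vring_desc); /* six indirect descs per entry */
}

vring_size() covers only the ring itself; the two extra terms reserve the scratch and indirect areas that the later matches carve out.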
188 vring_init(&vrh.vring, RINGSIZE, host_map, ALIGN);
189 vringh_init_user(&vrh, features, RINGSIZE, true,
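
Line 189 is cut off by the listing. A hedged completion of this host-side setup, assuming the tools/virtio harness headers and the (desc, avail, used) trailing arguments of vringh_init_user() rather than quoting them from the test:

#include <stdint.h>
#include <linux/vringh.h>       /* struct vringh, vringh_init_user(); harness shims assumed */

#define RINGSIZE 256
#define ALIGN    4096           /* assumed, as above */

static int host_ring_setup(struct vringh *vrh, void *host_map, uint64_t features)
{
        /* Lay the ring out in the shared mapping... */
        vring_init(&vrh->vring, RINGSIZE, host_map, ALIGN);
        /* ...then point the host-side vringh at the same desc/avail/used arrays.
         * The trailing three arguments are an assumption about the signature. */
        return vringh_init_user(vrh, features, RINGSIZE, true,
                                vrh->vring.desc, vrh->vring.avail,
                                vrh->vring.used);
}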
299 /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */
300 data = guest_map + vring_size(RINGSIZE, ALIGN);
301 indirects = (void *)data + (RINGSIZE + 1) * 2 * sizeof(int);
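
A small sketch of the carve-up these matches describe, with assumed pointer types (int * for data, struct vring_desc * for indirects, consistent with the arithmetic at lines 327 and 380):

#include <linux/virtio_ring.h>  /* vring_size(), struct vring_desc */

#define RINGSIZE 256
#define ALIGN    4096           /* assumed */

static void carve_guest_area(void *guest_map, int **data,
                             struct vring_desc **indirects)
{
        /* Scratch data slots start right after the vring... */
        *data = guest_map + vring_size(RINGSIZE, ALIGN);
        /* ...and the indirect-descriptor pool after RINGSIZE + 1 two-int
         * slots (the "+ 1" matches the quoted comment on line 299). */
        *indirects = (void *)*data + (RINGSIZE + 1) * 2 * sizeof(int);
}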
319 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &gvdev.vdev, true,
327 __kfree_ignore_end = indirects + RINGSIZE * 6;
345 dbuf = data + (xfers % (RINGSIZE + 1));
380 __kmalloc_fake = indirects + (xfers % RINGSIZE) * 4;
444 struct scatterlist guest_sg[RINGSIZE], *sgs[2];
447 struct vring_used_elem used[RINGSIZE];
485 memset(__user_addr_min, 0, vring_size(RINGSIZE, ALIGN));
488 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true, false,
494 vring_init(&vrh.vring, RINGSIZE, __user_addr_min, ALIGN);
495 vringh_init_user(&vrh, vdev.features, RINGSIZE, true,
513 __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
563 __kfree_ignore_start = __user_addr_min + vring_size(RINGSIZE, ALIGN);
571 sg_init_table(guest_sg, RINGSIZE);
572 for (i = 0; i < RINGSIZE; i++) {
582 __kmalloc_fake = __user_addr_min + vring_size(RINGSIZE, ALIGN);
583 err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
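
Lines 571-583 build one output buffer out of RINGSIZE scatterlist entries. A hedged sketch of that pattern; buffer addresses and lengths are illustrative, the &err token is as quoted, and GFP_KERNEL comes from the harness headers:

#include <linux/scatterlist.h>  /* sg_init_table(), sg_set_buf(); harness shims assumed */
#include <linux/virtio.h>       /* struct virtqueue, virtqueue_add_outbuf() */

#define RINGSIZE 256

static int queue_wide_outbuf(struct virtqueue *vq, char *base)
{
        struct scatterlist guest_sg[RINGSIZE];
        int err, i;

        sg_init_table(guest_sg, RINGSIZE);
        for (i = 0; i < RINGSIZE; i++)
                sg_set_buf(&guest_sg[i], base + i * 4, 4); /* illustrative 4-byte chunks */

        /* One buffer, RINGSIZE sg entries: with VIRTIO_RING_F_INDIRECT_DESC
         * negotiated this takes a single ring slot via an indirect table.
         * The &err token mirrors the quoted test line. */
        err = virtqueue_add_outbuf(vq, guest_sg, RINGSIZE, &err, GFP_KERNEL);
        if (err)
                return err;

        virtqueue_kick(vq);
        return 0;
}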
599 assert(riov.used == RINGSIZE);
601 assert(riov.used == RINGSIZE * USER_MEM/4);
607 for (i = 0; i < RINGSIZE * USER_MEM/4; i += 3) {
609 if (err != 3 && i + err != RINGSIZE * USER_MEM/4)
634 for (i = 0; i < RINGSIZE; i++) {
644 for (i = 0; i < RINGSIZE; i++) {
652 assert(vrh.vring.used->idx % RINGSIZE != 0);
653 err = vringh_complete_multi_user(&vrh, used, RINGSIZE);
658 for (i = 0; i < RINGSIZE; i++) {
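
The matches at 599-658 belong to the host side pulling descriptor chains and then completing them in one batch. A sketch of that flow under the vringh user API as I understand it (vringh_getdesc_user() returning 1 per chain, vringh_complete_multi_user() publishing the whole used[] array in one used->idx update); the getrange callback and the lengths are placeholders, not the test's values:

#include <linux/vringh.h>       /* vringh_getdesc_user(), vringh_complete_multi_user() */

#define RINGSIZE 256

static int drain_and_complete(struct vringh *vrh,
                              bool (*getrange)(struct vringh *, u64,
                                               struct vringh_range *))
{
        struct iovec r_stack[RINGSIZE], w_stack[RINGSIZE];
        struct vringh_iov riov, wiov;
        struct vring_used_elem used[RINGSIZE];
        u16 head;
        int i, err;

        for (i = 0; i < RINGSIZE; i++) {
                /* Fresh iov state for each descriptor chain. */
                vringh_iov_init(&riov, r_stack, RINGSIZE);
                vringh_iov_init(&wiov, w_stack, RINGSIZE);

                err = vringh_getdesc_user(vrh, &riov, &wiov, getrange, &head);
                if (err != 1)           /* 1 means a chain was returned */
                        return err ? err : -1;

                used[i].id = head;
                used[i].len = 0;        /* illustrative: nothing written back */
        }

        /* Publish all RINGSIZE completions at once. */
        return vringh_complete_multi_user(vrh, used, RINGSIZE);
}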
671 vq = vring_new_virtqueue(0, RINGSIZE, ALIGN, &vdev, true,
687 vring_init(&vring, RINGSIZE, __user_addr_min, ALIGN);