// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}
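
/*
 * A short worked example of the index arithmetic above, assuming num == 256:
 * the avail index and *last_avail_idx are free-running 16-bit counters, so
 * with *last_avail_idx == 0xffff and avail->idx == 0x0002 there are
 * (u16)(0x0002 - 0xffff) == 3 entries pending even though 2 < 0xffff, and
 * the next head is read from slot 0xffff & (256 - 1) == 255.  This is why
 * vringh_init_*() insists on a power-of-2 ring size.
 */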

/* Copy some bytes to/from the iovec. Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}

static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}
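
/*
 * The getrange() callback passed to vringh_getdesc_user() fills in the
 * struct vringh_range covering @addr.  A minimal sketch, assuming guest
 * memory is one contiguous region described by a hypothetical
 * struct my_region { u64 gpa; u64 size; void *hva; }:
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		struct my_region *m = my_region_of(vrh);	// hypothetical lookup
 *
 *		if (addr < m->gpa || addr >= m->gpa + m->size)
 *			return false;		// unmapped: descriptor is rejected
 *		r->start = m->gpa;
 *		r->end_incl = m->gpa + m->size - 1;
 *		r->offset = (u64)(unsigned long)m->hva - m->gpa;
 *		return true;
 *	}
 *
 * range_check() then adds r->offset to descriptor addresses to obtain
 * pointers it can hand to the user-copy accessors.
 */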

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, indirect_count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = 0;
	if (wiov)
		wiov->i = wiov->used = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (up_next == -1)
			count++;
		else
			indirect_count++;

		if (count > vrh->vring.num || indirect_count > desc_max) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->used)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
				indirect_count = 0;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}
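
/*
 * A worked example of what __vringh_iov() hands back: for a virtio-blk
 * style chain of three descriptors (a 16-byte readable header, a 4096-byte
 * writable data buffer and a 1-byte writable status), the caller ends up
 * with riov->used == 1 ({header, 16}) and wiov->used == 2 ({data, 4096},
 * {status, 1}), each address already adjusted by range->offset (or taken
 * as-is by the no_range_check variant).  A descriptor that straddles two
 * ranges is split into several iov entries via the "again:" path.
 */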

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}
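
/*
 * A short worked example of the wraparound split above, assuming num == 8,
 * off == 6 and num_used == 3: part = 8 - 6 = 2, so used[0] and used[1] land
 * in ring slots 6 and 7, and the remaining num_used - part == 1 element
 * lands in slot 0.  The used index itself keeps running freely and is only
 * published after the entries are visible (the virtio_wmb() above).
 */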

static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}
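
/*
 * vring_need_event(event_idx, new_idx, old_idx) reports whether the index
 * we just published crossed the event index written by the other side.  A
 * worked example: with last_used_idx == 10 and completed == 4 (new index
 * 14), a guest-written used_event of 12 has been crossed, so
 * __vringh_need_notify() returns 1; with used_event == 20 it returns 0 and
 * the interrupt stays suppressed until more buffers are consumed.
 */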

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);
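
/*
 * A minimal sketch of calling vringh_init_user(), assuming a hypothetical
 * device that negotiated a VIRTIO 1.0 (little-endian) ring with event
 * indices; only these two feature bits matter to vringh itself:
 *
 *	u64 features = (1ULL << VIRTIO_F_VERSION_1) |
 *		       (1ULL << VIRTIO_RING_F_EVENT_IDX);
 *
 *	err = vringh_init_user(&vrh, features, 256, true,
 *			       (vring_desc_t __user *)desc_uaddr,
 *			       (vring_avail_t __user *)avail_uaddr,
 *			       (vring_used_t __user *)used_uaddr);
 *
 * where desc_uaddr/avail_uaddr/used_uaddr are hypothetical userspace
 * addresses the caller has already validated.
 */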

/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work. */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);

/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. num
 *	 vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
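
/*
 * A sketch of a host-side service loop built on the userspace API above,
 * assuming the hypothetical my_getrange() shown earlier plus hypothetical
 * my_handle_request()/my_kick_guest() helpers; error handling is
 * abbreviated:
 *
 *	struct vringh_iov riov, wiov;
 *	struct iovec r_stack[8], w_stack[8];
 *	u8 buf[64];
 *	u32 len;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, r_stack, ARRAY_SIZE(r_stack));
 *	vringh_iov_init(&wiov, w_stack, ARRAY_SIZE(w_stack));
 *
 *	for (;;) {
 *		err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
 *		if (err < 0)
 *			break;			// bad descriptor or bad ring
 *		if (err == 0) {			// empty: re-enable and re-check
 *			if (vringh_notify_enable_user(&vrh))
 *				break;		// still empty, safe to sleep/poll
 *			vringh_notify_disable_user(&vrh);
 *			continue;		// one slipped in, keep going
 *		}
 *		vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *		len = my_handle_request(buf, &wiov);
 *		vringh_complete_user(&vrh, head, len);
 *		if (vringh_need_notify_user(&vrh) > 0)
 *			my_kick_guest();	// e.g. signal an eventfd
 *	}
 *	vringh_iov_cleanup(&riov);
 *	vringh_iov_cleanup(&wiov);
 */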

/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. num
 *	 vringh_getdesc_kern() calls to undo).
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);

/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);

/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);
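
/*
 * A sketch of in-kernel usage (the shape a consumer such as caif_virtio
 * follows), assuming a ring already set up with vringh_init_kern() and a
 * hypothetical process_buffers()/notify_guest() pair; error handling is
 * abbreviated:
 *
 *	struct vringh_kiov riov;
 *	struct kvec stack_kvec[8];
 *	u16 head;
 *	int err;
 *
 *	vringh_kiov_init(&riov, stack_kvec, ARRAY_SIZE(stack_kvec));
 *
 *	for (;;) {
 *		err = vringh_getdesc_kern(&vrh, &riov, NULL, &head, GFP_ATOMIC);
 *		if (err <= 0)
 *			break;			// empty ring or error
 *		process_buffers(&riov);		// e.g. via vringh_iov_pull_kern()
 *		vringh_complete_kern(&vrh, head, 0);
 *	}
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		notify_guest();			// hypothetical doorbell/irq
 *	vringh_kiov_cleanup(&riov);
 */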

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0, last = addr + len - 1;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr, last);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}
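
/*
 * A worked example of the translation above, assuming PAGE_SIZE == 4096 and
 * two hypothetical IOTLB maps, guest [0x1000, 0x2fff] -> host 0x40001000
 * and guest [0x3000, 0x3fff] -> host 0x7000: translating addr == 0x2800,
 * len == 0x1000 returns 2, with
 *
 *	iov[0] = { page of host 0x40002800, .bv_offset = 0x800, .bv_len = 0x800 }
 *	iov[1] = { page of host 0x7000,     .bv_offset = 0x000, .bv_len = 0x800 }
 *
 * i.e. a guest range that straddles two maps becomes two bio_vecs.
 */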

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, size);
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the descriptor pointer (as seen through the IOTLB).
 * @avail: the avail pointer (as seen through the IOTLB).
 * @used: the used pointer (as seen through the IOTLB).
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - associate an IOTLB with this vringh.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);
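
/*
 * A minimal sketch of wiring up the IOTLB variant, assuming a single RW
 * mapping added up front and hypothetical desc_addr/driver_addr/device_addr
 * values supplied by the caller (this is roughly what a vDPA device
 * simulator does):
 *
 *	struct vhost_iotlb *iotlb = vhost_iotlb_alloc(2048, 0);
 *
 *	err = vhost_iotlb_add_range(iotlb, start, last, host_pa, VHOST_MAP_RW);
 *	err = vringh_init_iotlb(&vrh, features, 256, true,
 *				(struct vring_desc *)desc_addr,
 *				(struct vring_avail *)driver_addr,
 *				(struct vring_used *)device_addr);
 *	vringh_set_iotlb(&vrh, iotlb);
 *
 * After this, every ring access made through the *_iotlb entry points below
 * is translated via the attached iotlb before memory is touched.
 */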

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with
 * IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. num
 *	 vringh_getdesc_iotlb() calls to undo).
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);
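
/*
 * A sketch of the usual service pattern with the *_iotlb API (roughly the
 * loop a vDPA simulator's worker runs), assuming riov/wiov were prepared
 * with vringh_kiov_init() and hypothetical handle_one()/raise_interrupt()
 * helpers; error handling is abbreviated:
 *
 *	vringh_notify_disable_iotlb(&vrh);
 *	while (vringh_getdesc_iotlb(&vrh, &riov, &wiov, &head, GFP_ATOMIC) == 1) {
 *		written = handle_one(&vrh, &riov, &wiov);
 *		vringh_complete_iotlb(&vrh, head, written);
 *	}
 *	if (!vringh_notify_enable_iotlb(&vrh))
 *		;	// more buffers arrived while re-enabling: run the loop again
 *	if (vringh_need_notify_iotlb(&vrh) > 0)
 *		raise_interrupt();
 */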

#endif

MODULE_LICENSE("GPL");