Lines Matching refs:range

375 	 * Fall back to flushing entire TLBs if the architecture range-based
544 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
577 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
584 const struct kvm_hva_range *range)
592 if (WARN_ON_ONCE(range->end <= range->start))
596 if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
597 IS_KVM_NULL_FN(range->handler)))
607 range->start, range->end - 1) {
611 hva_start = max(range->start, slot->userspace_addr);
612 hva_end = min(range->end, slot->userspace_addr +
617 * range is covered by zero or one memslots, don't
621 gfn_range.arg = range->arg;
622 gfn_range.may_block = range->may_block;
635 if (!IS_KVM_NULL_FN(range->on_lock))
636 range->on_lock(kvm, range->start, range->end);
637 if (IS_KVM_NULL_FN(range->handler))
640 ret |= range->handler(kvm, &gfn_range);
644 if (range->flush_on_ret && ret)
649 if (!IS_KVM_NULL_FN(range->on_unlock))
650 range->on_unlock(kvm);
666 const struct kvm_hva_range range = {
677 return __kvm_handle_hva_range(kvm, &range);
686 const struct kvm_hva_range range = {
696 return __kvm_handle_hva_range(kvm, &range);
699 static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
710 if (range->slot->flags & KVM_MEMSLOT_INVALID)
713 return kvm_set_spte_gfn(kvm, range);
756 * returns. Keep things simple and just find the minimal range
758 * enough information to subtract a range after its invalidate
771 const struct mmu_notifier_range *range)
775 .start = range->start,
776 .end = range->end,
781 .may_block = mmu_notifier_range_blockable(range),
784 trace_kvm_unmap_hva_range(range->start, range->end);
808 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
835 const struct mmu_notifier_range *range)
839 .start = range->start,
840 .end = range->end,
845 .may_block = mmu_notifier_range_blockable(range),
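
The matches around file lines 607-622 above are the core of __kvm_handle_hva_range: each memslot overlapping the notifier's HVA range is clamped to the slot's userspace mapping (the max/min at lines 611-612) and converted into a GFN range for the per-arch handler. The following is a small, self-contained sketch of just that clamping and conversion; the struct, field, and function names are invented for illustration and are not the kernel's (the arithmetic is roughly what the kernel's hva_to_gfn_memslot helper does on the clamped bounds).

/*
 * Simplified sketch of the HVA-range clamping seen at lines 611-612:
 * intersect [hva_start, hva_end) with a memslot's userspace mapping and
 * convert the result to a GFN range.  Hypothetical types, not KVM's.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_memslot {
	uint64_t userspace_addr;	/* HVA where the slot is mapped */
	uint64_t npages;		/* slot size in 4KiB pages */
	uint64_t base_gfn;		/* first guest frame number */
};

/* Returns 1 and fills [gfn_start, gfn_end) if the ranges intersect. */
static int hva_range_to_gfn_range(const struct fake_memslot *slot,
				  uint64_t hva_start, uint64_t hva_end,
				  uint64_t *gfn_start, uint64_t *gfn_end)
{
	uint64_t slot_start = slot->userspace_addr;
	uint64_t slot_end = slot->userspace_addr + (slot->npages << 12);
	uint64_t start = hva_start > slot_start ? hva_start : slot_start;
	uint64_t end = hva_end < slot_end ? hva_end : slot_end;

	if (start >= end)
		return 0;

	/* Round outward so every page touched by the range is covered. */
	*gfn_start = slot->base_gfn + ((start - slot_start) >> 12);
	*gfn_end = slot->base_gfn + ((end - slot_start + 0xfff) >> 12);
	return 1;
}

int main(void)
{
	struct fake_memslot slot = { .userspace_addr = 0x7f0000000000ull,
				     .npages = 256, .base_gfn = 0x1000 };
	uint64_t s, e;

	if (hva_range_to_gfn_range(&slot, 0x7f0000001800ull,
				   0x7f0000003000ull, &s, &e))
		printf("gfns [%#llx, %#llx)\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}

The outward rounding mirrors why the real handler builds a per-slot GFN range rather than passing raw HVAs down: the arch code invalidates whole pages, so a partially covered page at either end must still be included.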
1492 	 * If @new is non-NULL, its hva_node[slots_idx] range has to be set
1517 * Initialize @new's hva range. Do this even when replacing an @old
5376 struct kvm_io_device *pos = bus->range[i].dev;
5416 struct kvm_io_range *range, key;
5424 range = bsearch(&key, bus->range, bus->dev_count,
5426 if (range == NULL)
5429 off = range - bus->range;
5431 while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5438 struct kvm_io_range *range, const void *val)
5442 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5447 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5448 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5449 range->len, val))
5462 struct kvm_io_range range;
5465 range = (struct kvm_io_range) {
5473 r = __kvm_io_bus_write(vcpu, bus, &range, val);
5483 struct kvm_io_range range;
5485 range = (struct kvm_io_range) {
5496 (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5497 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5505 return __kvm_io_bus_write(vcpu, bus, &range, val);
5509 struct kvm_io_range *range, void *val)
5513 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5518 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5519 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5520 range->len, val))
5533 struct kvm_io_range range;
5536 range = (struct kvm_io_range) {
5544 r = __kvm_io_bus_read(vcpu, bus, &range, val);
5554 struct kvm_io_range range;
5564 new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5569 range = (struct kvm_io_range) {
5576 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5581 new_bus->range[i] = range;
5582 memcpy(new_bus->range + i + 1, bus->range + i,
5604 if (bus->range[i].dev == dev) {
5612 new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5615 memcpy(new_bus, bus, struct_size(bus, range, i));
5617 memcpy(new_bus->range + i, bus->range + i + 1,
5618 flex_array_size(new_bus, range, new_bus->dev_count - i));
5656 iodev = bus->range[dev_idx].dev;
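
Most of the remaining matches (file lines 5376-5656) come from the kvm_io_bus code, which keeps an address-sorted array of kvm_io_range entries and locates the first device overlapping an access with bsearch() plus a short backwards walk (lines 5424-5431). Below is a standalone sketch of that lookup pattern; the types, the overlap rule in the comparator, and the example devices are simplified stand-ins, not the kernel's definitions.

/*
 * Sketch of the sorted-range lookup used by kvm_io_bus_get_first_dev:
 * bsearch for any overlapping entry, then back up to the first one.
 * Hypothetical simplified types; not the kernel's kvm_io_range/kvm_io_bus.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct io_range {
	uint64_t addr;
	uint32_t len;
	const char *dev;	/* stand-in for struct kvm_io_device * */
};

/* Treat overlapping ranges as "equal" so bsearch lands on any overlap. */
static int io_range_cmp(const void *k, const void *e)
{
	const struct io_range *key = k, *ent = e;

	if (key->addr + key->len <= ent->addr)
		return -1;
	if (ent->addr + ent->len <= key->addr)
		return 1;
	return 0;
}

/* Index of the first range overlapping [addr, addr + len), or -1. */
static int first_matching_range(const struct io_range *ranges, size_t n,
				uint64_t addr, uint32_t len)
{
	struct io_range key = { .addr = addr, .len = len };
	const struct io_range *hit;
	int off;

	hit = bsearch(&key, ranges, n, sizeof(*ranges), io_range_cmp);
	if (!hit)
		return -1;

	/* bsearch may land anywhere in a run of overlaps; rewind to the first. */
	off = hit - ranges;
	while (off > 0 && io_range_cmp(&key, &ranges[off - 1]) == 0)
		off--;
	return off;
}

int main(void)
{
	/* Must stay sorted by address, as the registration path keeps it. */
	const struct io_range bus[] = {
		{ 0x1000, 0x10, "devA" },
		{ 0x1008, 0x10, "devB" },
		{ 0x2000, 0x04, "devC" },
	};
	int i = first_matching_range(bus, 3, 0x100c, 4);

	if (i >= 0)
		printf("first match: %s\n", bus[i].dev);
	return 0;
}

The backwards walk matters because registered ranges may overlap: bsearch() only guarantees landing on some matching entry, so the caller rewinds to the lowest-indexed match and the write/read paths (lines 5442-5449 and 5513-5520) then iterate forward through every entry that still compares equal.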