Lines Matching defs:start

89 /* The start value to grow halt_poll_ns from */
546 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
552 unsigned long start;
577 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
578 #define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
579 for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
581 node = interval_tree_iter_next(node, start, last)) \
592 if (WARN_ON_ONCE(range->end <= range->start))
607 range->start, range->end - 1) {
611 hva_start = max(range->start, slot->userspace_addr);
628 gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
636 range->on_lock(kvm, range->start, range->end);
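The macro defined at 577-581 drives the handler fragments at 592-636: it walks the memslot interval tree, and each hit is clamped to the slot before the HVA window is converted to a GFN range. A condensed sketch of that loop, modeled on the lines above (variable names follow the handler; locking, TLB flushing and the on_lock callback at 636 are omitted, so treat this as a sketch rather than the full function):

	struct interval_tree_node *node;

	kvm_for_each_memslot_in_hva_range(node, slots,
					  range->start, range->end - 1) {
		struct kvm_memory_slot *slot;
		unsigned long hva_start, hva_end;
		struct kvm_gfn_range gfn_range;

		slot = container_of(node, struct kvm_memory_slot,
				    hva_node[slots->node_idx]);

		/* Clamp the notifier range to this slot... */
		hva_start = max(range->start, slot->userspace_addr);
		hva_end = min(range->end, slot->userspace_addr +
					  (slot->npages << PAGE_SHIFT));

		/* ...then convert the clamped HVA window to a GFN range. */
		gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
		gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
		gfn_range.slot = slot;
	}
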
660 unsigned long start,
667 .start = start,
681 unsigned long start,
687 .start = start,
703 * surrounded by invalidate_range_{start,end}(), which is currently
727 * .change_pte() must be surrounded by .invalidate_range_{start,end}().
730 * start(); rechecking memslots here is unnecessary. Note, a false
741 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
751 kvm->mmu_invalidate_range_start = start;
764 min(kvm->mmu_invalidate_range_start, start);
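Lines 741-764 track the window covered by in-flight invalidations: the first begin records the range verbatim, and nested begins only widen it. Roughly (a simplified sketch; the range_start assignment and the min() widening come from lines 751 and 764, while the in_progress counter and the symmetric max() on the end are assumed here):

	kvm->mmu_invalidate_in_progress++;	/* counter assumed, not shown above */
	if (kvm->mmu_invalidate_in_progress == 1) {
		/* First invalidation: record the range as-is. */
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/* Nested invalidations: keep the minimal range covering all of them. */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
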
775 .start = range->start,
784 trace_kvm_unmap_hva_range(range->start, range->end);
808 gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
816 void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
839 .start = range->start,
868 unsigned long start,
871 trace_kvm_age_hva(start, end);
873 return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
879 unsigned long start,
882 trace_kvm_age_hva(start, end);
897 return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
1520 new->hva_node[idx].start = new->userspace_addr;
1929 gfn_t start, gfn_t end)
1933 kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
3511 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3515 u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3561 ktime_t start, cur, poll_end;
3571 start = cur = poll_end = ktime_get();
3573 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3594 halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3602 update_halt_poll_stats(vcpu, start, poll_end, !waited);
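The halt-polling fragments at 3511-3602 are plain ktime arithmetic: take a start stamp, busy-poll until a budget of halt_poll_ns expires, then report how long the poll and the overall halt took. A condensed sketch (the wakeup check is a hypothetical stand-in, and the blocking wait is elided; the timing math mirrors lines 3515, 3571-3573 and 3594):

	ktime_t start, cur, poll_end, stop;
	u64 poll_ns, halt_ns;

	start = cur = poll_end = ktime_get();
	stop = ktime_add_ns(start, vcpu->halt_poll_ns);

	/* Busy-poll phase: spin until work arrives or the poll budget runs out. */
	do {
		if (vcpu_work_pending(vcpu))	/* hypothetical wakeup check */
			break;
		poll_end = cur = ktime_get();
	} while (ktime_before(cur, stop));

	/* (The real path may then block and reschedule; that part is elided.) */
	cur = ktime_get();

	/* Poll duration, as computed in update_halt_poll_stats (line 3515)... */
	poll_ns = ktime_to_ns(ktime_sub(poll_end, start));
	/* ...and total time spent halted (line 3594). */
	halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);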