Lines Matching refs:ghc (virt/kvm/kvm_main.c)
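For context: the structure these matches manipulate, struct gfn_to_hva_cache, memoizes a guest-physical range together with its resolved host virtual address and the memslots generation the translation was made under. A sketch of its definition, assuming the layout in include/linux/kvm_types.h from this era of the source:

    struct gfn_to_hva_cache {
        u64 generation;                   /* slots->generation at init time */
        gpa_t gpa;                        /* start of the cached guest range */
        unsigned long hva;                /* resolved host virtual address */
        unsigned long len;                /* length of the cached range */
        struct kvm_memory_slot *memslot;  /* NULL when the range spans pages */
    };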

2617 struct gfn_to_hva_cache *ghc,
2626 /* Update ghc->generation before performing any error checks. */
2627 ghc->generation = slots->generation;
2630 ghc->hva = KVM_HVA_ERR_BAD;
2639 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
2640 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
2642 if (kvm_is_error_hva(ghc->hva))
2648 ghc->hva += offset;
2650 ghc->memslot = NULL;
2652 ghc->gpa = gpa;
2653 ghc->len = len;
2657 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2661 return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
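The init matches above give the contract: __kvm_gfn_to_hva_cache_init publishes the new generation before any error check (2626/2627), resolves the gfn to an hva per memslot (2639/2640), leaves ghc->hva as KVM_HVA_ERR_BAD or ghc->memslot as NULL on the bad and cross-page paths (2630/2650), and records gpa and len only on success (2652/2653). A minimal caller-side sketch; my_dev and its cache field are hypothetical, not from the source:

    /* Hypothetical kernel-internal user embedding a cache. */
    struct my_dev {
        struct gfn_to_hva_cache cache;
    };

    static int my_dev_attach(struct kvm *kvm, struct my_dev *dev, gpa_t gpa)
    {
        /*
         * Resolve gpa -> hva once under the current memslots generation;
         * later cached reads/writes skip the lookup until it changes.
         */
        return kvm_gfn_to_hva_cache_init(kvm, &dev->cache, gpa, sizeof(u64));
    }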
2665 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2671 gpa_t gpa = ghc->gpa + offset;
2673 if (WARN_ON_ONCE(len + offset > ghc->len))
2676 if (slots->generation != ghc->generation) {
2677 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2681 if (kvm_is_error_hva(ghc->hva))
2684 if (unlikely(!ghc->memslot))
2687 r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
2690 mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
2696 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2699 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
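The write-side matches show the fast path guarded three ways: a re-init when the memslots generation has moved on (2676/2677), an -EFAULT bail-out on a bad cached hva (2681), and a fallback for the cross-page case where init left ghc->memslot NULL (2684); a successful __copy_to_user (2687) is followed by mark_page_dirty_in_slot (2690) so dirty logging stays accurate. A hedged usage sketch, reusing the hypothetical my_dev above:

    static int my_dev_publish(struct kvm *kvm, struct my_dev *dev, u64 val)
    {
        /* Fast path: one __copy_to_user through the cached hva. */
        return kvm_write_guest_cached(kvm, &dev->cache, &val, sizeof(val));
    }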
2703 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2709 gpa_t gpa = ghc->gpa + offset;
2711 if (WARN_ON_ONCE(len + offset > ghc->len))
2714 if (slots->generation != ghc->generation) {
2715 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
2719 if (kvm_is_error_hva(ghc->hva))
2722 if (unlikely(!ghc->memslot))
2725 r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
2733 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
2736 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
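The read side mirrors the writer minus the dirty tracking: the same length check (2711), generation-triggered re-init (2714/2715), and bad-hva/cross-page guards (2719/2722), with __copy_from_user doing the transfer (2725). A matching sketch:

    static int my_dev_poll(struct kvm *kvm, struct my_dev *dev, u64 *val)
    {
        /* -EFAULT if the cached translation is stale or unmappable. */
        return kvm_read_guest_cached(kvm, &dev->cache, val, sizeof(*val));
    }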