Lines Matching refs:ghc (virt/kvm/kvm_main.c)

3207					       struct gfn_to_hva_cache *ghc,
3216		/* Update ghc->generation before performing any error checks. */
3217		ghc->generation = slots->generation;
3220			ghc->hva = KVM_HVA_ERR_BAD;
3229			ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3230			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3232			if (kvm_is_error_hva(ghc->hva))
3238			ghc->hva += offset;
3240			ghc->memslot = NULL;
3242		ghc->gpa = gpa;
3243		ghc->len = len;
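These lines in __kvm_gfn_to_hva_cache_init() populate the fields of struct gfn_to_hva_cache. For reference, the structure is declared in include/linux/kvm_types.h, in this era of the tree roughly as:

	struct gfn_to_hva_cache {
		u64 generation;		/* memslots generation the hva was resolved against */
		gpa_t gpa;		/* guest physical base of the cached region */
		unsigned long hva;	/* host virtual address, or KVM_HVA_ERR_BAD */
		unsigned long len;	/* length validated at init time */
		struct kvm_memory_slot *memslot; /* NULL if the region spans pages */
	};

Note the cross-page case: when the region needs more than one page, line 3240 leaves ghc->memslot NULL, which steers later cached reads and writes onto the slow, uncached path.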
3247	int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3251		return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
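kvm_gfn_to_hva_cache_init() is the exported wrapper that callers use to prime a cache once, before entering a hot path. A minimal sketch of a kernel-side caller follows; my_rec and my_map_shared_rec are hypothetical names for illustration, not kernel API:

	/* A guest-shared record we expect to touch often (illustrative). */
	struct my_rec {
		u64 seq;
		u64 val;
	};

	static int my_map_shared_rec(struct kvm_vcpu *vcpu, gpa_t gpa,
				     struct gfn_to_hva_cache *ghc)
	{
		/* Validates gpa .. gpa + len - 1 and caches memslot + hva. */
		return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa,
						 sizeof(struct my_rec));
	}

This is the same pattern the x86 pvclock code uses to prime vcpu->arch.pv_time for its guest-shared time structure.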
3255	int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3261		gpa_t gpa = ghc->gpa + offset;
3263		if (WARN_ON_ONCE(len + offset > ghc->len))
3266		if (slots->generation != ghc->generation) {
3267			if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3271		if (kvm_is_error_hva(ghc->hva))
3274		if (unlikely(!ghc->memslot))
3277		r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3280		mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
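The write path shows the cache's whole lifecycle: if the memslots generation changed (line 3266) the cache is re-initialized in place; a NULL memslot (line 3274) diverts cross-page regions to the uncached kvm_write_guest(); and a successful fast-path write marks the page dirty (line 3280) so dirty logging and migration still see it. A sketch of a single-field update, continuing the hypothetical my_rec example above:

	/*
	 * Update only my_rec.seq inside the cached region. offsetof()
	 * keeps len + offset within ghc->len, satisfying the
	 * WARN_ON_ONCE() check at line 3263. Illustrative only.
	 */
	static int my_bump_seq(struct kvm_vcpu *vcpu,
			       struct gfn_to_hva_cache *ghc, u64 seq)
	{
		return kvm_write_guest_offset_cached(vcpu->kvm, ghc, &seq,
						     offsetof(struct my_rec, seq),
						     sizeof(seq));
	}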
3286	int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3289		return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3293	int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3299		gpa_t gpa = ghc->gpa + offset;
3301		if (WARN_ON_ONCE(len + offset > ghc->len))
3304		if (slots->generation != ghc->generation) {
3305			if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3309		if (kvm_is_error_hva(ghc->hva))
3312		if (unlikely(!ghc->memslot))
3315		r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3323	int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3326		return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
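The read side mirrors the writer line for line (revalidation at 3304, cross-page fallback to kvm_read_guest() at 3312) minus the dirty-page marking, since reads do not dirty guest memory. kvm_read_guest_cached() and kvm_write_guest_cached() are just the offset == 0 conveniences. Closing out the hypothetical example, a whole-record read:

	/* Read the entire cached record back out (illustrative). */
	static int my_read_rec(struct kvm_vcpu *vcpu,
			       struct gfn_to_hva_cache *ghc,
			       struct my_rec *rec)
	{
		return kvm_read_guest_cached(vcpu->kvm, ghc, rec, sizeof(*rec));
	}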