Lines Matching defs:addr (definition and use sites of addr; the leading numbers are source line numbers, apparently from KVM's virt/kvm/kvm_main.c)
2403 unsigned long addr, size;
2407 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2408 if (kvm_is_error_hva(addr))
2412 vma = find_vma(current->mm, addr);
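
The first cluster (file lines 2403-2412) pairs a gfn-to-hva translation with a VMA lookup; this is the shape of KVM's host-page-size probe (kvm_host_page_size() in kvm_main.c). A condensed sketch reconstructed around the matched lines; the mmap_read_lock()/vma_kernel_pagesize() parts are inferred from the usual pattern and are not among the matches:

    unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
    {
            struct vm_area_struct *vma;
            unsigned long addr, size;

            size = PAGE_SIZE;

            /* translate guest frame -> host virtual address via the memslot */
            addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
            if (kvm_is_error_hva(addr))
                    return PAGE_SIZE;

            /* the VMA backing that hva decides the host mapping granularity */
            mmap_read_lock(current->mm);
            vma = find_vma(current->mm, addr);
            if (vma)
                    size = vma_kernel_pagesize(vma);
            mmap_read_unlock(current->mm);

            return size;
    }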
2502 static inline int check_user_page_hwpoison(unsigned long addr)
2506 rc = get_user_pages(addr, 1, flags, NULL);
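
Lines 2502-2506 are the hwpoison probe used by the pfn lookup path. A short sketch of what check_user_page_hwpoison() likely looks like, assuming the 4-argument get_user_pages() seen in the match; the FOLL_* flags are inferred:

    static inline int check_user_page_hwpoison(unsigned long addr)
    {
            int rc, flags = FOLL_HWPOISON | FOLL_WRITE;

            /* single-page GUP that reports poisoned memory instead of mapping it */
            rc = get_user_pages(addr, 1, flags, NULL);
            return rc == -EHWPOISON;
    }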
2515 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2528 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
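
Lines 2515-2528 are the lock-free fast path: try to pin a writable page without taking mmap_lock, and report failure so the caller can fall back. A sketch reconstructed around the two matched lines; only the matched calls are certain, the surrounding control flow is inferred:

    static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
                                bool *writable, kvm_pfn_t *pfn)
    {
            struct page *page[1];

            /* only worth trying if a writable mapping is wanted or acceptable */
            if (!(write_fault || writable))
                    return false;

            if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
                    *pfn = page_to_pfn(page[0]);
                    if (writable)
                            *writable = true;
                    return true;
            }

            return false;
    }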
2543 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2573 npages = get_user_pages_unlocked(addr, 1, &page, flags);
2581 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
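
Lines 2543-2581 are the sleeping slow path: a full get_user_pages_unlocked(), followed by an opportunistic upgrade of a read-only pin to a writable one. A condensed sketch; the exact FOLL_* flag assembly is inferred from the parameters visible above, not shown in the matches:

    static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
                               bool interruptible, bool *writable, kvm_pfn_t *pfn)
    {
            unsigned int flags = FOLL_HWPOISON;
            struct page *page;
            int npages;

            if (writable)
                    *writable = write_fault;
            if (write_fault)
                    flags |= FOLL_WRITE;
            if (async)
                    flags |= FOLL_NOWAIT;
            if (interruptible)
                    flags |= FOLL_INTERRUPTIBLE;

            npages = get_user_pages_unlocked(addr, 1, &page, flags);
            if (npages != 1)
                    return npages;

            /* read fault: still map it writable if the fast path can pin it that way */
            if (!write_fault && writable) {
                    struct page *wpage;

                    if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
                            *writable = true;
                            put_page(page);
                            page = wpage;
                    }
            }
            *pfn = page_to_pfn(page);
            return npages;
    }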
2613 unsigned long addr, bool write_fault,
2622 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2629 r = fixup_user_fault(current->mm, addr,
2637 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
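
Lines 2613-2637 handle VM_IO/VM_PFNMAP VMAs, where get_user_pages() cannot be used: look up the PTE directly, and if it is not present, fault it in with fixup_user_fault() and retry. A condensed sketch around the matched lines; the error constants and the reference-counting tail of the real function are summarized rather than reproduced:

    static int hva_to_pfn_remapped(struct vm_area_struct *vma,
                                   unsigned long addr, bool write_fault,
                                   bool *writable, kvm_pfn_t *p_pfn)
    {
            spinlock_t *ptl;
            pte_t *ptep;
            pte_t pte;
            int r;

            r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
            if (r) {
                    bool unlocked = false;

                    /* GUP won't fault VM_IO/VM_PFNMAP mappings, so do it by hand */
                    r = fixup_user_fault(current->mm, addr,
                                         write_fault ? FAULT_FLAG_WRITE : 0,
                                         &unlocked);
                    if (unlocked)
                            return -EAGAIN;  /* mmap_lock was dropped; caller retries */
                    if (r)
                            return r;

                    r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
                    if (r)
                            return r;
            }

            pte = ptep_get(ptep);
            if (write_fault && !pte_write(pte)) {
                    pte_unmap_unlock(ptep, ptl);
                    *p_pfn = KVM_PFN_ERR_RO_FAULT;
                    return 0;
            }

            if (writable)
                    *writable = pte_write(pte);
            *p_pfn = pte_pfn(pte);
            /* the real function also tries to take a reference on the page here */
            pte_unmap_unlock(ptep, ptl);
            return 0;
    }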
2682 * @addr: host virtual address which maps memory to the guest
2695 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
2705 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2711 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
2720 (!async && check_user_page_hwpoison(addr))) {
2726 vma = vma_lookup(current->mm, addr);
2731 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
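
Lines 2682-2731 are hva_to_pfn() itself, which strings the previous helpers together: fast path, then slow path, then a VMA lookup to decide between the remapped path and an asynchronous retry. A condensed sketch; vma_is_valid() is assumed to be the kvm_main.c-local helper, and some error handling is trimmed:

    kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,
                         bool *async, bool write_fault, bool *writable)
    {
            struct vm_area_struct *vma;
            kvm_pfn_t pfn;
            int npages, r;

            if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
                    return pfn;
            if (atomic)
                    return KVM_PFN_ERR_FAULT;  /* may not sleep, give up */

            npages = hva_to_pfn_slow(addr, async, write_fault, interruptible,
                                     writable, &pfn);
            if (npages == 1)
                    return pfn;
            if (npages == -EINTR)
                    return KVM_PFN_ERR_SIGPENDING;

            mmap_read_lock(current->mm);
            if (npages == -EHWPOISON ||
                (!async && check_user_page_hwpoison(addr))) {
                    pfn = KVM_PFN_ERR_HWPOISON;
                    goto exit;
            }
    retry:
            vma = vma_lookup(current->mm, addr);
            if (!vma) {
                    pfn = KVM_PFN_ERR_FAULT;
            } else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
                    r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
                    if (r == -EAGAIN)
                            goto retry;
                    if (r < 0)
                            pfn = KVM_PFN_ERR_FAULT;
            } else {
                    /* normal VMA: let the caller retry the fault asynchronously */
                    if (async && vma_is_valid(vma, write_fault))
                            *async = true;
                    pfn = KVM_PFN_ERR_FAULT;
            }
    exit:
            mmap_read_unlock(current->mm);
            return pfn;
    }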
2750 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2753 *hva = addr;
2755 if (addr == KVM_HVA_ERR_RO_BAD) {
2761 if (kvm_is_error_hva(addr)) {
2773 return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
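
Lines 2750-2773 are the memslot-level wrapper (__gfn_to_pfn_memslot()): translate the gfn, classify the error hvas, and hand the rest to hva_to_pfn(). A sketch around the matched lines; memslot_is_readonly() is assumed from the usual kvm_main.c helper:

    kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
                                   bool atomic, bool interruptible, bool *async,
                                   bool write_fault, bool *writable, hva_t *hva)
    {
            unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

            if (hva)
                    *hva = addr;

            if (addr == KVM_HVA_ERR_RO_BAD) {
                    /* write fault against a read-only memslot */
                    if (writable)
                            *writable = false;
                    return KVM_PFN_ERR_RO_FAULT;
            }

            if (kvm_is_error_hva(addr)) {
                    if (writable)
                            *writable = false;
                    return KVM_PFN_NOSLOT;
            }

            /* never hand out a writable pfn for a read-only memslot */
            if (writable && memslot_is_readonly(slot)) {
                    *writable = false;
                    writable = NULL;
            }

            return hva_to_pfn(addr, atomic, interruptible, async, write_fault,
                              writable);
    }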
2821 unsigned long addr;
2824 addr = gfn_to_hva_many(slot, gfn, &entry);
2825 if (kvm_is_error_hva(addr))
2831 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
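
Lines 2821-2831 are the batched variant: gfn_to_hva_many() also reports how many pages remain in the memslot, so the fast-only GUP never crosses a slot boundary. A sketch of gfn_to_page_many_atomic() reconstructed around the matched lines:

    int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
                                struct page **pages, int nr_pages)
    {
            unsigned long addr;
            gfn_t entry = 0;

            addr = gfn_to_hva_many(slot, gfn, &entry);
            if (kvm_is_error_hva(addr))
                    return -1;

            if (entry < nr_pages)
                    return 0;  /* not enough pages left in this memslot */

            /* atomic context: only the lock-free GUP fast path is allowed */
            return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
    }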
3032 unsigned long addr;
3034 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3035 if (kvm_is_error_hva(addr))
3037 r = __copy_from_user(data, (void __user *)addr + offset, len);
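
Lines 3032-3037 show the basic guest-read primitive: resolve the gfn to an hva and copy through the user mapping. A sketch of __kvm_read_guest_page() around the matched lines; the function name and error returns are inferred:

    static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
                                     void *data, int offset, int len)
    {
            unsigned long addr;
            int r;

            addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
            if (kvm_is_error_hva(addr))
                    return -EFAULT;
            r = __copy_from_user(data, (void __user *)addr + offset, len);
            if (r)
                    return -EFAULT;
            return 0;
    }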
3105 unsigned long addr;
3107 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3108 if (kvm_is_error_hva(addr))
3111 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3134 unsigned long addr;
3136 addr = gfn_to_hva_memslot(memslot, gfn);
3137 if (kvm_is_error_hva(addr))
3139 r = __copy_to_user((void __user *)addr + offset, data, len);
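
Lines 3105-3139 are two variants of the same pattern: an atomic read that wraps __copy_from_user_inatomic() in pagefault_disable()/pagefault_enable(), and the write side, which additionally marks the page dirty for dirty logging. A sketch of the write side (__kvm_write_guest_page()) around the matched lines:

    static int __kvm_write_guest_page(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot, gfn_t gfn,
                                      const void *data, int offset, int len)
    {
            unsigned long addr;
            int r;

            addr = gfn_to_hva_memslot(memslot, gfn);
            if (kvm_is_error_hva(addr))
                    return -EFAULT;
            r = __copy_to_user((void __user *)addr + offset, data, len);
            if (r)
                    return -EFAULT;
            /* dirty logging needs to know this gfn was written */
            mark_page_dirty_in_slot(kvm, memslot, gfn);
            return 0;
    }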
5386 gpa_t addr1 = r1->addr;
5387 gpa_t addr2 = r2->addr;
5414 gpa_t addr, int len)
5420 .addr = addr,
5442 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5448 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
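
From line 5386 onward, addr is a guest physical address in the kvm_io_bus code: each access builds a kvm_io_range and walks the sorted device array starting at kvm_io_bus_get_first_dev(). A sketch of the write walk (__kvm_io_bus_write()) around lines 5442-5448:

    static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
                                  struct kvm_io_range *range, const void *val)
    {
            int idx;

            idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
            if (idx < 0)
                    return -EOPNOTSUPP;

            /* try every device whose registered range overlaps [addr, addr + len) */
            while (idx < bus->dev_count &&
                   kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
                    if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
                                            range->len, val))
                            return idx;
                    idx++;
            }

            return -EOPNOTSUPP;
    }

The kvm_io_bus_write()/kvm_io_bus_read() wrappers at lines 5458 and 5529 just package addr and len into that kvm_io_range and dereference the bus under SRCU before calling the walk; the cookie variant at line 5480 first retries the device recorded in a previous lookup.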
5458 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5466 .addr = addr,
5480 gpa_t addr, int len, const void *val, long cookie)
5486 .addr = addr,
5497 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5513 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5519 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5529 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5537 .addr = addr,
5549 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5570 .addr = addr,
5640 gpa_t addr)
5652 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
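
Lines 5640-5652 look up a single device by address (kvm_io_bus_get_dev()): a one-byte range query against the same sorted array, done under the kvm->srcu read lock. A sketch around the matched lines:

    struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                                              gpa_t addr)
    {
            struct kvm_io_device *iodev = NULL;
            struct kvm_io_bus *bus;
            int dev_idx, srcu_idx;

            srcu_idx = srcu_read_lock(&kvm->srcu);

            bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
            if (!bus)
                    goto out_unlock;

            /* len == 1: match the single byte at addr */
            dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
            if (dev_idx < 0)
                    goto out_unlock;

            iodev = bus->range[dev_idx].dev;

    out_unlock:
            srcu_read_unlock(&kvm->srcu, srcu_idx);
            return iodev;
    }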