Lines Matching refs:entry
425 struct iommu_table *tbl, unsigned long entry)
429 unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
440 struct iommu_table *tbl, unsigned long entry)
444 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
461 struct iommu_table *tbl, unsigned long entry)
467 if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
474 ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
476 iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
483 unsigned long entry)
487 unsigned long io_entry = entry * subpages;
501 unsigned long entry, unsigned long ua,
506 __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
524 ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
531 kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
540 unsigned long entry, unsigned long ua,
545 unsigned long io_entry = entry * subpages;
567 unsigned long entry, ua = 0;
594 entry = ioba >> stt->page_shift;
599 stit->tbl, entry);
602 entry, ua, dir);
606 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
611 kvmppc_tce_put(stt, entry, tce);
626 unsigned long entry, ua = 0;
635 entry = ioba >> stt->page_shift;
693 stit->tbl, entry + i, ua,
698 entry + i);
703 kvmppc_tce_put(stt, entry + i, tce);
734 unsigned long entry = ioba >> stt->page_shift;
738 stit->tbl, entry + i);
747 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
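The matched lines share one piece of index arithmetic: a guest bus offset (ioba) is shifted down by the guest table's page_shift to get a guest TCE-table entry (source lines 594, 635, 734 above), and that entry is scaled by the difference between the guest and host page shifts to get the first host IOMMU-table entry it covers (source lines 429, 487, 545). The following is a minimal userspace sketch of just that arithmetic; the two structs are simplified stand-ins for the kernel's kvmppc_spapr_tce_table and iommu_table, keeping only the page-shift fields, and are not the real definitions.

/*
 * Illustrative sketch of the ioba -> entry -> io_entry arithmetic seen in
 * the matches above. Not kernel code: the structs below are assumed,
 * simplified stand-ins that carry only the page-shift fields.
 */
#include <stdio.h>

struct guest_tce_table {                /* stand-in for kvmppc_spapr_tce_table */
	unsigned int page_shift;        /* guest TCE page size, e.g. 16 for 64K */
};

struct host_iommu_table {               /* stand-in for iommu_table */
	unsigned int it_page_shift;     /* host IOMMU page size, e.g. 12 for 4K */
};

int main(void)
{
	struct guest_tce_table stt = { .page_shift = 16 };      /* 64K guest pages */
	struct host_iommu_table tbl = { .it_page_shift = 12 };  /* 4K host pages */
	unsigned long ioba = 0x30000;                           /* example bus offset */

	/* entry = ioba >> stt->page_shift, as on source lines 594/635/734 */
	unsigned long entry = ioba >> stt.page_shift;

	/* one guest TCE spans this many host IOMMU entries */
	unsigned long subpages = 1UL << (stt.page_shift - tbl.it_page_shift);

	/* io_entry = entry << (shift delta), equivalently entry * subpages,
	 * as on source lines 429 and 487/545 */
	unsigned long io_entry = entry << (stt.page_shift - tbl.it_page_shift);

	printf("entry=%lu subpages=%lu io_entry=%lu\n", entry, subpages, io_entry);
	return 0;
}

With the example values (64K guest pages over a 4K host IOMMU window), ioba 0x30000 lands in guest entry 3, and that entry maps to host IOMMU entries starting at io_entry 48, sixteen host entries per guest TCE.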