Lines matching defs:stt — uses of struct kvmppc_spapr_tce_table *stt in the KVM sPAPR TCE table code (the number on each matched line below is the source file's own line number; the identifiers suggest arch/powerpc/kvm/book3s_64_vio.c).
38 struct kvmppc_spapr_tce_table *stt;
40 list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
41 if (stt->liobn == liobn)
42 return stt;
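The three matches above come from the LIOBN lookup helper. A hedged reconstruction of the whole function, with the non-matching lines (the declarations and the NULL fall-through) inferred from context:

	static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
			unsigned long liobn)
	{
		struct kvmppc_spapr_tce_table *stt;

		/* lockless walk of the RCU-managed list, usable from the
		 * hcall fast path; NULL means no table owns this LIOBN */
		list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
			if (stt->liobn == liobn)
				return stt;

		return NULL;
	}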
84 struct kvmppc_spapr_tce_table *stt;
89 list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
95 list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
111 struct kvmppc_spapr_tce_table *stt = NULL;
124 list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
125 if (stt == f.file->private_data) {
147 if ((tbltmp->it_page_shift <= stt->page_shift) &&
149 stt->offset << stt->page_shift) &&
151 stt->size << stt->page_shift)) {
164 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
192 list_add_rcu(&stit->next, &stt->iommu_tables);
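Lines 147-151 above match only the stt halves of a multi-line condition. A sketch of the full compatibility test with the tbltmp halves restored from context: the hardware IOMMU table may not use pages larger than the guest window's, and must cover exactly the same bus-address range.

	if ((tbltmp->it_page_shift <= stt->page_shift) &&
	    (tbltmp->it_offset << tbltmp->it_page_shift ==
	     stt->offset << stt->page_shift) &&
	    (tbltmp->it_size << tbltmp->it_page_shift ==
	     stt->size << stt->page_shift)) {
		/* tbltmp can back this guest window */
	}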
199 struct kvmppc_spapr_tce_table *stt = container_of(head,
201 unsigned long i, npages = kvmppc_tce_pages(stt->size);
204 if (stt->pages[i])
205 __free_page(stt->pages[i]);
207 kfree(stt);
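Lines 199-207 are the RCU deferred-free callback queued by call_rcu() at line 281. A hedged reconstruction (the loop header at line 203 is inferred); running the free only after a grace period guarantees no lockless reader from kvmppc_find_table() still holds the pointer:

	static void release_spapr_tce_table(struct rcu_head *head)
	{
		struct kvmppc_spapr_tce_table *stt = container_of(head,
				struct kvmppc_spapr_tce_table, rcu);
		unsigned long i, npages = kvmppc_tce_pages(stt->size);

		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}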
210 static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
213 struct page *page = stt->pages[sttpage];
218 mutex_lock(&stt->alloc_lock);
219 page = stt->pages[sttpage];
224 stt->pages[sttpage] = page;
226 mutex_unlock(&stt->alloc_lock);
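Lines 210-226 are a double-checked on-demand allocation: the unlocked read at line 213 is the fast path, and the re-read under alloc_lock at line 219 handles a racing allocator. A hedged reconstruction, with the allocation branch between lines 219 and 224 inferred:

	static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
			unsigned long sttpage)
	{
		struct page *page = stt->pages[sttpage];

		if (page)			/* fast path, no lock taken */
			return page;

		mutex_lock(&stt->alloc_lock);
		page = stt->pages[sttpage];	/* re-check under the lock */
		if (!page) {
			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (page)
				stt->pages[sttpage] = page;
		}
		mutex_unlock(&stt->alloc_lock);

		return page;
	}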
233 struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
236 if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
239 page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
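Lines 233-239 are the vm_fault handler that lets userspace mmap() the table fd and read the guest-visible TCEs, allocating backing pages on demand through the helper above. A hedged reconstruction of the remainder:

	static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
	{
		struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
		struct page *page;

		if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
			return VM_FAULT_SIGBUS;	/* offset beyond the table */

		page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
		if (!page)
			return VM_FAULT_OOM;

		get_page(page);
		vmf->page = page;
		return 0;
	}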
260 struct kvmppc_spapr_tce_table *stt = filp->private_data;
262 struct kvm *kvm = stt->kvm;
265 list_del_rcu(&stt->list);
268 list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
277 kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
279 kvm_put_kvm(stt->kvm);
281 call_rcu(&stt->rcu, release_spapr_tce_table);
294 struct kvmppc_spapr_tce_table *stt = NULL;
310 stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
311 if (!stt)
314 stt->liobn = args->liobn;
315 stt->page_shift = args->page_shift;
316 stt->offset = args->offset;
317 stt->size = args->size;
318 stt->kvm = kvm;
319 mutex_init(&stt->alloc_lock);
320 INIT_LIST_HEAD_RCU(&stt->iommu_tables);
336 stt, O_RDWR | O_CLOEXEC);
339 list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
348 kfree(stt);
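Line 310 sizes one allocation for the struct plus its pages[] flexible tail with struct_size(), where npages is derived from the requested window size. A standalone, compilable model of that arithmetic; the 64K PAGE_SIZE and the 2GB/64K window are illustrative assumptions, not values from the matched lines:

	#include <stdio.h>

	#define PAGE_SIZE	65536UL
	#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long long))

	/* pages needed to hold `tces` 8-byte TCE entries */
	static unsigned long tce_pages(unsigned long tces)
	{
		return (tces * sizeof(unsigned long long) + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	int main(void)
	{
		unsigned long window = 2UL << 30;	/* hypothetical 2GB window */
		unsigned long page_shift = 16;		/* 64K IOMMU pages */
		unsigned long tces = window >> page_shift;

		/* prints: 32768 TCEs -> 4 backing page(s), 8192 TCEs per page */
		printf("%lu TCEs -> %lu backing page(s), %lu TCEs per page\n",
		       tces, tce_pages(tces), (unsigned long)TCES_PER_PAGE);
		return 0;
	}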
370 static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
382 if (iommu_tce_check_gpa(stt->page_shift, gpa))
385 if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
389 list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
394 mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
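Lines 370-394 validate a TCE before anything is committed: check the guest physical address against the window's page shift (line 382), translate it to a userspace address (line 385), then confirm every attached hardware table has that memory preregistered at its own page size (lines 389-394). A condensed, hedged sketch; the exact return codes and the rcu_read_lock() bracketing are trimmed:

	static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
			unsigned long tce)
	{
		unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
		struct kvmppc_spapr_tce_iommu_table *stit;
		unsigned long ua = 0;

		if (iommu_tce_check_gpa(stt->page_shift, gpa))
			return H_TOO_HARD;

		if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
			return H_TOO_HARD;

		list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
			long shift = stit->tbl->it_page_shift;

			/* memory must be preregistered at this page size */
			if (!mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift))
				return H_TOO_HARD;
		}

		return H_SUCCESS;
	}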
410 static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
417 idx -= stt->offset;
419 page = stt->pages[sttpage];
426 page = kvm_spapr_get_tce_page(stt, sttpage);
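Lines 410-426 write one shadow entry: the index is made window-relative (line 417) and split into a backing page plus a slot within it, and a missing page is only allocated when storing a non-zero TCE. A hedged reconstruction:

	static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
			unsigned long idx, unsigned long tce)
	{
		struct page *page;
		u64 *tbl;
		unsigned long sttpage;

		idx -= stt->offset;		/* window-relative entry number */
		sttpage = idx / TCES_PER_PAGE;
		page = stt->pages[sttpage];

		if (!page) {
			/* writing zero into a never-allocated page is a no-op */
			if (!tce)
				return;
			page = kvm_spapr_get_tce_page(stt, sttpage);
			if (!page)
				return;
		}
		tbl = page_address(page);
		tbl[idx % TCES_PER_PAGE] = tce;
	}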
435 static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
439 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
440 unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
493 struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
497 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
550 struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
555 unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
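The repeated expression at lines 439-440, 497 and 555 handles guest windows whose pages are larger than the hardware IOMMU table's: one guest TCE fans out over several consecutive hardware entries. A standalone, compilable model; the 64K-over-4K shifts and entry 5 are hypothetical:

	#include <stdio.h>

	int main(void)
	{
		unsigned long stt_page_shift = 16;	/* 64K guest window pages */
		unsigned long it_page_shift = 12;	/* 4K hardware IOMMU pages */
		unsigned long entry = 5;		/* hypothetical guest TCE index */
		unsigned long subpages = 1UL << (stt_page_shift - it_page_shift);
		unsigned long io_entry = entry << (stt_page_shift - it_page_shift);

		/* prints: guest entry 5 -> hw entries 80..95 (16 subpages) */
		printf("guest entry %lu -> hw entries %lu..%lu (%lu subpages)\n",
		       entry, io_entry, io_entry + subpages - 1, subpages);
		return 0;
	}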
575 struct kvmppc_spapr_tce_table *stt;
584 stt = kvmppc_find_table(vcpu->kvm, liobn);
585 if (!stt)
588 ret = kvmppc_ioba_validate(stt, ioba, 1);
594 ret = kvmppc_tce_validate(stt, tce);
605 entry = ioba >> stt->page_shift;
607 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
609 ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
612 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
617 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
622 kvmppc_tce_put(stt, entry, tce);
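Lines 575-622 are the H_PUT_TCE handler: resolve the LIOBN, validate the I/O bus address for one entry, validate the TCE itself, mirror the update into every attached hardware table (unmapping for a no-permission TCE, mapping otherwise, clearing the entry if mirroring fails), then store the guest-visible shadow copy. A condensed, hedged sketch with error paths and the SRCU bracketing trimmed:

	long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			      unsigned long ioba, unsigned long tce)
	{
		struct kvmppc_spapr_tce_table *stt;
		struct kvmppc_spapr_tce_iommu_table *stit;
		unsigned long entry;
		long ret;

		stt = kvmppc_find_table(vcpu->kvm, liobn);	/* line 584 */
		if (!stt)
			return H_TOO_HARD;

		ret = kvmppc_ioba_validate(stt, ioba, 1);	/* line 588 */
		if (ret != H_SUCCESS)
			return ret;

		ret = kvmppc_tce_validate(stt, tce);		/* line 594 */
		if (ret != H_SUCCESS)
			return ret;

		entry = ioba >> stt->page_shift;		/* line 605 */
		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			/* map or unmap in the hardware table; on failure
			 * kvmppc_clear_tce() wipes the entry (line 617) */
		}

		kvmppc_tce_put(stt, entry, tce);		/* line 622 */
		return H_SUCCESS;
	}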
635 struct kvmppc_spapr_tce_table *stt;
642 stt = kvmppc_find_table(vcpu->kvm, liobn);
643 if (!stt)
646 entry = ioba >> stt->page_shift;
657 ret = kvmppc_ioba_validate(stt, ioba, npages);
675 ret = kvmppc_tce_validate(stt, tce);
702 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
703 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
708 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
714 kvmppc_tce_put(stt, entry + i, tce);
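Lines 635-714 are H_PUT_TCE_INDIRECT, which installs a whole list of TCEs (at most 512, one guest page's worth) per hypercall in two passes: validate everything first (line 675), then map and store (lines 702-714). A condensed, hedged sketch of the two loops; tces stands for the mapped guest list and the error paths are trimmed:

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);		/* line 675 */
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		/* mirror into each attached hardware table (line 703),
		 * clearing the entry on failure (line 708) */
		kvmppc_tce_put(stt, entry + i, tce);		/* line 714 */
	}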
728 struct kvmppc_spapr_tce_table *stt;
732 stt = kvmppc_find_table(vcpu->kvm, liobn);
733 if (!stt)
736 ret = kvmppc_ioba_validate(stt, ioba, npages);
744 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
745 unsigned long entry = ioba >> stt->page_shift;
748 ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
758 kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
762 for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
763 kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
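Lines 728-763 are H_STUFF_TCE, which replicates one value across npages entries, unmapping any hardware-table mappings first (lines 744-758) before rewriting the shadow (lines 762-763). One detail taken from the surrounding code rather than the matched lines, sketched here: the value may not carry permission bits, since the call exists to clear or poison ranges.

	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	/* ... per-hardware-table unmap loop elided ... */

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);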
772 struct kvmppc_spapr_tce_table *stt;
778 stt = kvmppc_find_table(vcpu->kvm, liobn);
779 if (!stt)
782 ret = kvmppc_ioba_validate(stt, ioba, 1);
786 idx = (ioba >> stt->page_shift) - stt->offset;
787 page = stt->pages[idx / TCES_PER_PAGE];
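Lines 772-787 are H_GET_TCE, the read-back path: after validating the address, the entry index is made window-relative (line 786) and looked up in its backing page (line 787). A hedged sketch of the tail: a never-allocated page reads back as an empty TCE, and the result is returned to the guest in GPR4 (kvmppc_set_gpr() stands in for whatever register accessor the file actually uses):

	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		/* never written: the entry reads back as zero */
		kvmppc_set_gpr(vcpu, 4, 0);
		return H_SUCCESS;
	}
	tbl = page_address(page);
	kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);
	return H_SUCCESS;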