Lines Matching refs:pages
69 * allocation can hold about 26M of 4k pages and 13G of 2M pages in an
163 static void iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
167 rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
169 WARN_ON(rc || pages->npinned > pages->npages);
172 static void iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
176 rc = check_sub_overflow(pages->npinned, npages, &pages->npinned);
178 WARN_ON(rc || pages->npinned > pages->npages);
181 static void iopt_pages_err_unpin(struct iopt_pages *pages,
189 iopt_pages_sub_npinned(pages, npages);
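
A minimal userspace sketch of the overflow-guarded counter pattern used by iopt_pages_add_npinned()/iopt_pages_sub_npinned() above, built on the __builtin_*_overflow() intrinsics that back the kernel's check_add_overflow()/check_sub_overflow(); assert() stands in for WARN_ON(), and struct pin_tracker and the helper names are hypothetical:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the npages/npinned pair in struct iopt_pages. */
    struct pin_tracker {
        size_t npages;   /* total pages covered by the mapping */
        size_t npinned;  /* pages currently pinned, must stay <= npages */
    };

    static void tracker_add_npinned(struct pin_tracker *t, size_t npages)
    {
        size_t sum;
        bool overflow = __builtin_add_overflow(t->npinned, npages, &sum);

        /* overflowing or exceeding npages would indicate an accounting bug */
        assert(!overflow && sum <= t->npages);
        t->npinned = sum;
    }

    static void tracker_sub_npinned(struct pin_tracker *t, size_t npages)
    {
        size_t diff;
        bool overflow = __builtin_sub_overflow(t->npinned, npages, &diff);

        assert(!overflow && diff <= t->npages);
        t->npinned = diff;
    }

    int main(void)
    {
        struct pin_tracker t = { .npages = 8, .npinned = 0 };

        tracker_add_npinned(&t, 5);
        tracker_sub_npinned(&t, 2);
        printf("pinned %zu of %zu pages\n", t.npinned, t.npages);
        return 0;
    }
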
195 * covers a portion of the first and last pages in the range.
249 static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
254 node = interval_tree_iter_first(&pages->domains_itree, index, index);
576 unsigned long last_index, struct page **pages)
578 struct page **end_pages = pages + (last_index - start_index) + 1;
579 struct page **half_pages = pages + (end_pages - pages) / 2;
586 while (pages != end_pages) {
588 if (pages == half_pages && iommufd_should_fail()) {
596 old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages)));
600 pages++;
615 static void batch_from_pages(struct pfn_batch *batch, struct page **pages,
618 struct page **end = pages + npages;
620 for (; pages != end; pages++)
621 if (!batch_add_pfn(batch, page_to_pfn(*pages)))
625 static void batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages,
643 to_unpin, pages->writable);
644 iopt_pages_sub_npinned(pages, to_unpin);
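
batch_from_pages()/batch_unpin() above feed a pfn_batch, the structure the line-69 comment sizes by how much memory a single allocation can describe. A self-contained sketch of the underlying idea, coalescing contiguous pfns into (first pfn, run length) pairs so long runs stay cheap; the layout and names are illustrative, not the kernel's struct pfn_batch:

    #include <stdbool.h>
    #include <stdio.h>

    #define BATCH_SLOTS 4

    struct pfn_run_batch {
        unsigned long pfns[BATCH_SLOTS];   /* first pfn of each run */
        unsigned int npfns[BATCH_SLOTS];   /* length of each run */
        unsigned int end;                  /* number of runs in use */
    };

    /* Returns false when the batch is full and must be flushed first. */
    static bool batch_add_pfn_sketch(struct pfn_run_batch *b, unsigned long pfn)
    {
        if (b->end && b->pfns[b->end - 1] + b->npfns[b->end - 1] == pfn) {
            b->npfns[b->end - 1]++;        /* extends the previous run */
            return true;
        }
        if (b->end == BATCH_SLOTS)
            return false;
        b->pfns[b->end] = pfn;
        b->npfns[b->end] = 1;
        b->end++;
        return true;
    }

    int main(void)
    {
        static const unsigned long pfns[] = { 100, 101, 102, 200, 201, 300 };
        struct pfn_run_batch b = { .end = 0 };

        for (unsigned int i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
            if (!batch_add_pfn_sketch(&b, pfns[i]))
                break;
        for (unsigned int i = 0; i != b.end; i++)
            printf("run %u: pfn %lu x%u\n", i, b.pfns[i], b.npfns[i]);
        return 0;
    }
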
709 struct iopt_pages *pages)
717 if (pages->writable)
722 struct iopt_pages *pages)
726 mmap_read_unlock(pages->source_mm);
727 if (pages->source_mm != current->mm)
728 mmput(pages->source_mm);
737 struct iopt_pages *pages,
741 bool remote_mm = pages->source_mm != current->mm;
762 * providing the pages, so we can optimize into
766 if (!mmget_not_zero(pages->source_mm))
779 uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
785 mmap_read_lock(pages->source_mm);
788 rc = pin_user_pages_remote(pages->source_mm, uptr, npages,
797 iopt_pages_add_npinned(pages, rc);
804 static int incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
810 lock_limit = task_rlimit(pages->source_task, RLIMIT_MEMLOCK) >>
813 cur_pages = atomic_long_read(&pages->source_user->locked_vm);
817 } while (atomic_long_cmpxchg(&pages->source_user->locked_vm, cur_pages,
822 static void decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
824 if (WARN_ON(atomic_long_read(&pages->source_user->locked_vm) < npages))
826 atomic_long_sub(npages, &pages->source_user->locked_vm);
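
incr_user_locked_vm()/decr_user_locked_vm() above charge pages against the user's RLIMIT_MEMLOCK with a lock-free compare-exchange retry loop. A userspace sketch of that pattern using C11 atomics, with the limit passed in rather than read from the task's rlimit; all names are hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_ulong locked_vm;    /* pages currently charged to the user */

    static bool charge_locked_vm(unsigned long npages, unsigned long limit)
    {
        unsigned long cur = atomic_load(&locked_vm);

        do {
            if (cur + npages > limit)
                return false;    /* would exceed the memlock limit */
            /* on failure, cur is reloaded and the limit is re-checked */
        } while (!atomic_compare_exchange_weak(&locked_vm, &cur,
                                               cur + npages));
        return true;
    }

    static void uncharge_locked_vm(unsigned long npages)
    {
        atomic_fetch_sub(&locked_vm, npages);
    }

    int main(void)
    {
        printf("charged: %d\n", charge_locked_vm(10, 16));    /* 1 */
        printf("charged: %d\n", charge_locked_vm(10, 16));    /* 0, over limit */
        uncharge_locked_vm(10);
        return 0;
    }
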
830 static int update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages,
837 mmap_read_unlock(pages->source_mm);
841 pages->source_mm != current->mm) {
842 if (!mmget_not_zero(pages->source_mm))
847 mmap_write_lock(pages->source_mm);
848 rc = __account_locked_vm(pages->source_mm, npages, inc,
849 pages->source_task, false);
850 mmap_write_unlock(pages->source_mm);
853 mmput(pages->source_mm);
857 static int do_update_pinned(struct iopt_pages *pages, unsigned long npages,
862 switch (pages->account_mode) {
867 rc = incr_user_locked_vm(pages, npages);
869 decr_user_locked_vm(pages, npages);
872 rc = update_mm_locked_vm(pages, npages, inc, user);
878 pages->last_npinned = pages->npinned;
880 atomic64_add(npages, &pages->source_mm->pinned_vm);
882 atomic64_sub(npages, &pages->source_mm->pinned_vm);
886 static void update_unpinned(struct iopt_pages *pages)
888 if (WARN_ON(pages->npinned > pages->last_npinned))
890 if (pages->npinned == pages->last_npinned)
892 do_update_pinned(pages, pages->last_npinned - pages->npinned, false,
897 * Changes in the number of pages pinned are done after the pages have been read
900 * how many pages we have already pinned within a range to generate an accurate
904 struct iopt_pages *pages)
909 lockdep_assert_held(&pages->mutex);
911 if (pages->npinned == pages->last_npinned)
914 if (pages->npinned < pages->last_npinned) {
915 npages = pages->last_npinned - pages->npinned;
920 npages = pages->npinned - pages->last_npinned;
923 return do_update_pinned(pages, npages, inc, user);
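
The comment at source lines 897-900 and iopt_pages_update_pinned() describe deferred accounting: pins are tallied in npinned as they happen, and only the difference against last_npinned is pushed to the accounting backend afterwards. A minimal sketch of that delta step, with account() as a hypothetical stand-in for do_update_pinned():

    #include <stdbool.h>
    #include <stdio.h>

    struct pin_counts {
        unsigned long npinned;        /* live count, updated as pins happen */
        unsigned long last_npinned;   /* value at the last accounting update */
    };

    /* hypothetical stand-in for do_update_pinned() */
    static void account(unsigned long npages, bool inc)
    {
        printf("%s %lu pages\n", inc ? "charge" : "uncharge", npages);
    }

    static void update_pinned(struct pin_counts *c)
    {
        if (c->npinned == c->last_npinned)
            return;                   /* nothing changed since last update */
        if (c->npinned < c->last_npinned)
            account(c->last_npinned - c->npinned, false);
        else
            account(c->npinned - c->last_npinned, true);
        c->last_npinned = c->npinned; /* counters are back in sync */
    }

    int main(void)
    {
        struct pin_counts c = { .npinned = 12, .last_npinned = 4 };

        update_pinned(&c);    /* charge 8 pages */
        c.npinned = 7;
        update_pinned(&c);    /* uncharge 5 pages */
        return 0;
    }
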
931 * - The original PFN source, ie pages->source_mm
937 struct iopt_pages *pages;
949 return pfn_reader_user_update_pinned(&pfns->user, pfns->pages);
953 * The batch can contain a mixture of pages that are still in use and pages that
954 * need to be unpinned. Unpin only pages that are not held anywhere else.
961 struct iopt_pages *pages = pfns->pages;
963 lockdep_assert_held(&pages->mutex);
965 interval_tree_for_each_double_span(&span, &pages->access_itree,
966 &pages->domains_itree, start, last) {
970 batch_unpin(&pfns->batch, pages, span.start_hole - start,
988 batch_from_xarray(&pfns->batch, &pfns->pages->pinned_pfns,
995 * Pull as many pages from the first domain we find in the
999 area = iopt_pages_find_domain_area(pfns->pages, start_index);
1003 /* The storage_domain cannot change without the pages mutex */
1011 rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index,
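
The pfn_reader fragments above suggest three possible PFN sources for each span: entries already pinned in the pinned_pfns xarray, an existing IOMMU domain located via iopt_pages_find_domain_area(), and, as the fallback, pinning fresh pages from pages->source_mm. A tiny sketch of that selection order under those assumptions; the enum and helper are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    enum pfn_source {
        PFN_FROM_XARRAY,    /* already pinned for an in-kernel access */
        PFN_FROM_DOMAIN,    /* read back from an IOMMU domain mapping */
        PFN_FROM_MM,        /* fall back to pinning from the source mm */
    };

    static enum pfn_source pick_source(bool covered_by_access,
                                       bool covered_by_domain)
    {
        if (covered_by_access)
            return PFN_FROM_XARRAY;
        if (covered_by_domain)
            return PFN_FROM_DOMAIN;
        return PFN_FROM_MM;
    }

    int main(void)
    {
        /* prints 0 1 2: xarray, then domain, then mm */
        printf("%d %d %d\n", pick_source(true, true),
               pick_source(false, true), pick_source(false, false));
        return 0;
    }
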
1062 static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages,
1067 lockdep_assert_held(&pages->mutex);
1069 pfns->pages = pages;
1073 pfn_reader_user_init(&pfns->user, pages);
1077 interval_tree_double_span_iter_first(&pfns->span, &pages->access_itree,
1078 &pages->domains_itree, start_index,
1084 * There are many assertions regarding the state of pages->npinned vs
1085 * pages->last_npinned, for instance something like unmapping a domain must only
1093 struct iopt_pages *pages = pfns->pages;
1098 /* Any pages not transferred to the batch are just unpinned */
1102 iopt_pages_sub_npinned(pages, npages);
1113 struct iopt_pages *pages = pfns->pages;
1116 pfn_reader_user_destroy(&pfns->user, pfns->pages);
1118 WARN_ON(pages->last_npinned != pages->npinned);
1121 static int pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages,
1130 rc = pfn_reader_init(pfns, pages, start_index, last_index);
1144 struct iopt_pages *pages;
1157 pages = kzalloc(sizeof(*pages), GFP_KERNEL_ACCOUNT);
1158 if (!pages)
1161 kref_init(&pages->kref);
1162 xa_init_flags(&pages->pinned_pfns, XA_FLAGS_ACCOUNT);
1163 mutex_init(&pages->mutex);
1164 pages->source_mm = current->mm;
1165 mmgrab(pages->source_mm);
1166 pages->uptr = (void __user *)ALIGN_DOWN((uintptr_t)uptr, PAGE_SIZE);
1167 pages->npages = DIV_ROUND_UP(length + (uptr - pages->uptr), PAGE_SIZE);
1168 pages->access_itree = RB_ROOT_CACHED;
1169 pages->domains_itree = RB_ROOT_CACHED;
1170 pages->writable = writable;
1172 pages->account_mode = IOPT_PAGES_ACCOUNT_NONE;
1174 pages->account_mode = IOPT_PAGES_ACCOUNT_USER;
1175 pages->source_task = current->group_leader;
1177 pages->source_user = get_uid(current_user());
1178 return pages;
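
iopt_alloc_pages() above rounds uptr down to a page boundary and sizes npages with DIV_ROUND_UP, so the mapping covers the partial first and last pages that the comment at source line 195 mentions. A worked example of that arithmetic as a standalone program, with illustrative constants:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        uintptr_t uptr = 0x1000A00;    /* not page aligned */
        size_t length = 0x3000;        /* 12KiB */

        uintptr_t base = uptr & ~(PAGE_SIZE - 1);    /* ALIGN_DOWN */
        /* round up so the partial first and last pages are both counted */
        size_t npages = (length + (uptr - base) + PAGE_SIZE - 1) / PAGE_SIZE;

        printf("base=0x%lx npages=%zu\n", (unsigned long)base, npages);    /* 4 */
        return 0;
    }
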
1183 struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref);
1185 WARN_ON(!RB_EMPTY_ROOT(&pages->access_itree.rb_root));
1186 WARN_ON(!RB_EMPTY_ROOT(&pages->domains_itree.rb_root));
1187 WARN_ON(pages->npinned);
1188 WARN_ON(!xa_empty(&pages->pinned_pfns));
1189 mmdrop(pages->source_mm);
1190 mutex_destroy(&pages->mutex);
1191 put_task_struct(pages->source_task);
1192 free_uid(pages->source_user);
1193 kfree(pages);
1198 struct iopt_pages *pages, struct iommu_domain *domain,
1228 * contiguous pages. Thus, if we have to stop unpinning in the
1248 batch_unpin(batch, pages, 0,
1258 struct iopt_pages *pages,
1268 lockdep_assert_held(&pages->mutex);
1272 * so this must unmap any IOVA before we go ahead and unpin the pages.
1273 * This creates a complexity where we need to skip over unpinning pages
1285 interval_tree_for_each_double_span(&span, &pages->domains_itree,
1286 &pages->access_itree, start_index,
1293 iopt_area_unpin_domain(&batch, area, pages, domain,
1306 update_unpinned(pages);
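
Several fragments iterate interval_tree_for_each_double_span() over the access_itree and domains_itree so that pages are unpinned only in the holes where neither tree still holds them. A brute-force userspace sketch of the double-span idea, splitting an index range into "used" and "hole" spans against two sets of ranges; it is illustrative only and not how the kernel iterator works:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long start, last; };

    /* true if idx falls inside any of the n ranges */
    static bool covered(const struct range *r, unsigned int n, unsigned long idx)
    {
        for (unsigned int i = 0; i < n; i++)
            if (idx >= r[i].start && idx <= r[i].last)
                return true;
        return false;
    }

    int main(void)
    {
        /* prints "hole span 0..1", "used span 2..7", "hole span 8..9" */
        static const struct range access[] = { { 2, 4 } };
        static const struct range domains[] = { { 4, 7 } };
        unsigned long start = 0, last = 9, span_start = start;
        bool used = covered(access, 1, start) || covered(domains, 1, start);

        /* walk one index past the end so the final span gets flushed */
        for (unsigned long i = start + 1; i <= last + 1; i++) {
            bool u = i <= last && (covered(access, 1, i) ||
                                   covered(domains, 1, i));

            if (i <= last && u == used)
                continue;
            printf("%s span %lu..%lu\n", used ? "used" : "hole",
                   span_start, i - 1);
            span_start = i;
            used = u;
        }
        return 0;
    }
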
1310 struct iopt_pages *pages,
1315 __iopt_area_unfill_domain(area, pages, domain, end_index - 1);
1335 * @pages: page supplier for the area (area->pages is NULL)
1342 void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
1345 __iopt_area_unfill_domain(area, pages, domain,
1363 lockdep_assert_held(&area->pages->mutex);
1365 rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area),
1390 iopt_area_unfill_partial_domain(area, area->pages, domain,
1400 * @pages: The pages associated with the area (area->pages is NULL)
1408 int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
1423 mutex_lock(&pages->mutex);
1424 rc = pfn_reader_first(&pfns, pages, iopt_area_index(area),
1449 interval_tree_insert(&area->pages_node, &pages->domains_itree);
1473 iopt_area_unfill_partial_domain(area, pages, domain,
1480 mutex_unlock(&pages->mutex);
1487 * @pages: The pages associated with the area (area->pages is NULL)
1492 void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
1500 mutex_lock(&pages->mutex);
1512 interval_tree_remove(&area->pages_node, &pages->domains_itree);
1513 iopt_area_unfill_domain(area, pages, area->storage_domain);
1516 mutex_unlock(&pages->mutex);
1520 struct iopt_pages *pages,
1525 batch_from_xarray_clear(batch, &pages->pinned_pfns, start_index,
1527 batch_unpin(batch, pages, 0, batch->total_pfns);
1535 * @pages: The pages to act on
1539 * Called when an iopt_pages_access is removed, removes pages from the itree.
1542 void iopt_pages_unfill_xarray(struct iopt_pages *pages,
1551 lockdep_assert_held(&pages->mutex);
1553 interval_tree_for_each_double_span(&span, &pages->access_itree,
1554 &pages->domains_itree, start_index,
1563 iopt_pages_unpin_xarray(&batch, pages, span.start_hole,
1567 clear_xarray(&pages->pinned_pfns, span.start_used,
1574 update_unpinned(pages);
1579 * @pages: The pages to act on
1582 * @out_pages: The output array to return the pages
1586 * the pages directly from the xarray.
1588 * This is part of the SW iommu interface to read pages for in-kernel use.
1590 void iopt_pages_fill_from_xarray(struct iopt_pages *pages,
1595 XA_STATE(xas, &pages->pinned_pfns, start_index);
1610 static int iopt_pages_fill_from_domain(struct iopt_pages *pages,
1619 area = iopt_pages_find_domain_area(pages, start_index);
1632 static int iopt_pages_fill_from_mm(struct iopt_pages *pages,
1643 rc = pfn_reader_user_pin(user, pages, cur_index, last_index);
1652 iopt_pages_err_unpin(pages, start_index, cur_index - 1,
1659 * @pages: The pages to act on
1662 * @out_pages: The output array to return the pages, may be NULL
1664 * This populates the xarray and returns the pages in out_pages. As the slow
1665 * path this is able to copy pages from other storage tiers into the xarray.
1669 * This is part of the SW iommu interface to read pages for in-kernel use.
1671 int iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index,
1679 lockdep_assert_held(&pages->mutex);
1681 pfn_reader_user_init(&user, pages);
1683 interval_tree_for_each_double_span(&span, &pages->access_itree,
1684 &pages->domains_itree, start_index,
1690 iopt_pages_fill_from_xarray(pages, span.start_used,
1697 iopt_pages_fill_from_domain(pages, span.start_used,
1699 rc = pages_to_xarray(&pages->pinned_pfns,
1710 rc = iopt_pages_fill_from_mm(pages, &user, span.start_hole,
1714 rc = pages_to_xarray(&pages->pinned_pfns, span.start_hole,
1717 iopt_pages_err_unpin(pages, span.start_hole,
1723 rc = pfn_reader_user_update_pinned(&user, pages);
1727 pfn_reader_user_destroy(&user, pages);
1732 iopt_pages_unfill_xarray(pages, start_index, xa_end - 1);
1734 pfn_reader_user_destroy(&user, pages);
1743 static int iopt_pages_rw_slow(struct iopt_pages *pages,
1752 mutex_lock(&pages->mutex);
1754 rc = pfn_reader_first(&pfns, pages, start_index, last_index);
1776 mutex_unlock(&pages->mutex);
1784 static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index,
1791 if (!mmget_not_zero(pages->source_mm))
1792 return iopt_pages_rw_slow(pages, index, index, offset, data,
1800 mmap_read_lock(pages->source_mm);
1802 pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
1805 mmap_read_unlock(pages->source_mm);
1816 mmput(pages->source_mm);
1821 * iopt_pages_rw_access - Copy to/from a linear slice of the pages
1822 * @pages: pages to act on
1823 * @start_byte: First byte of pages to copy to/from
1831 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
1836 bool change_mm = current->mm != pages->source_mm;
1843 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable)
1848 return iopt_pages_rw_page(pages, start_index,
1851 return iopt_pages_rw_slow(pages, start_index, last_index,
1861 if (!mmget_not_zero(pages->source_mm))
1862 return iopt_pages_rw_slow(pages, start_index,
1866 kthread_use_mm(pages->source_mm);
1870 if (copy_to_user(pages->uptr + start_byte, data, length))
1873 if (copy_from_user(data, pages->uptr + start_byte, length))
1878 kthread_unuse_mm(pages->source_mm);
1879 mmput(pages->source_mm);
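
iopt_pages_rw_access() above takes a byte offset and length and, judging by the fragments, converts them to page indexes before picking the single-page fast path or the pinned slow path. A small sketch of the assumed index math:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long start_byte = 5000, length = 200;

        unsigned long start_index = start_byte / PAGE_SIZE;               /* 1 */
        unsigned long last_index = (start_byte + length - 1) / PAGE_SIZE; /* 1 */
        unsigned long offset = start_byte % PAGE_SIZE;                    /* 904 */
        bool single_page = start_index == last_index;

        printf("index %lu..%lu offset %lu single=%d\n",
               start_index, last_index, offset, single_page);
        return 0;
    }
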
1886 iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
1891 lockdep_assert_held(&pages->mutex);
1894 for (node = interval_tree_iter_first(&pages->access_itree, index, last);
1910 * Record that an in-kernel access will be accessing the pages, ensure they are
1919 struct iopt_pages *pages = area->pages;
1923 if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable)
1926 mutex_lock(&pages->mutex);
1927 access = iopt_pages_get_exact_access(pages, start_index, last_index);
1931 iopt_pages_fill_from_xarray(pages, start_index, last_index,
1933 mutex_unlock(&pages->mutex);
1943 rc = iopt_pages_fill_xarray(pages, start_index, last_index, out_pages);
1951 interval_tree_insert(&access->node, &pages->access_itree);
1952 mutex_unlock(&pages->mutex);
1958 mutex_unlock(&pages->mutex);
1968 * Undo iopt_area_add_access() and unpin the pages if necessary. The caller
1974 struct iopt_pages *pages = area->pages;
1977 mutex_lock(&pages->mutex);
1978 access = iopt_pages_get_exact_access(pages, start_index, last_index);
1988 interval_tree_remove(&access->node, &pages->access_itree);
1989 iopt_pages_unfill_xarray(pages, start_index, last_index);
1992 mutex_unlock(&pages->mutex);
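
iopt_pages_get_exact_access() and the add/remove access fragments above suggest a share-or-create scheme: an exactly matching access record is reused and refcounted, only the first user fills the xarray and only the last user unfills it. A userspace sketch of that pattern under those assumptions; the flat table stands in for the access_itree and every name is hypothetical:

    #include <stdio.h>
    #include <string.h>

    #define ACCESS_SLOTS 8

    struct access_rec {
        unsigned long start, last;
        unsigned int users;
    };

    static struct access_rec table[ACCESS_SLOTS];

    static struct access_rec *add_access(unsigned long start, unsigned long last)
    {
        for (unsigned int i = 0; i < ACCESS_SLOTS; i++)
            if (table[i].users && table[i].start == start &&
                table[i].last == last) {
                table[i].users++;    /* share the existing pins */
                return &table[i];
            }
        for (unsigned int i = 0; i < ACCESS_SLOTS; i++)
            if (!table[i].users) {
                /* first user: this is where the xarray would be filled */
                table[i] = (struct access_rec){ start, last, 1 };
                return &table[i];
            }
        return NULL;
    }

    static void remove_access(struct access_rec *rec)
    {
        if (--rec->users)
            return;
        /* last user: this is where the xarray would be unfilled/unpinned */
        memset(rec, 0, sizeof(*rec));
    }

    int main(void)
    {
        struct access_rec *a = add_access(0, 15);
        struct access_rec *b = add_access(0, 15);    /* shares with a */

        printf("users=%u\n", a->users);    /* prints 2 */
        remove_access(b);
        remove_access(a);
        return 0;
    }
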