/kernel/linux/linux-6.6/drivers/iommu/iommufd/

pages.c
    262  * PFNs. This is used as a temporary holding memory for shuttling pfns from one
    264  * work on the largest possible grouping of pfns. eg fewer lock/unlock cycles,
    268  unsigned long *pfns;  member
    279  batch->pfns[0] = 0;  in batch_clear()
    297  batch->pfns[0] = batch->pfns[batch->end - 1] +  in batch_clear_carry()
    310  batch->pfns[0] += skip_pfns;  in batch_skip_carry()
    318  const size_t elmsz = sizeof(*batch->pfns) + sizeof(*batch->npfns);  in __batch_init()
    321  batch->pfns = temp_kmalloc(&size, backup, backup_len);  in __batch_init()
    322  if (!batch->pfns)  in __batch_init()
    947  pfn_reader_update_pinned(struct pfn_reader *pfns)  pfn_reader_update_pinned() argument
    956  pfn_reader_unpin(struct pfn_reader *pfns)  pfn_reader_unpin() argument
    976  pfn_reader_fill_span(struct pfn_reader *pfns)  pfn_reader_fill_span() argument
   1024  pfn_reader_done(struct pfn_reader *pfns)  pfn_reader_done() argument
   1029  pfn_reader_next(struct pfn_reader *pfns)  pfn_reader_next() argument
   1062  pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index)  pfn_reader_init() argument
   1091  pfn_reader_release_pins(struct pfn_reader *pfns)  pfn_reader_release_pins() argument
   1111  pfn_reader_destroy(struct pfn_reader *pfns)  pfn_reader_destroy() argument
   1121  pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index)  pfn_reader_first() argument
   1360  struct pfn_reader pfns;  iopt_area_fill_domain() local
   1414  struct pfn_reader pfns;  iopt_area_fill_domains() local
   1749  struct pfn_reader pfns;  iopt_pages_rw_slow() local
   [all...]
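The pages.c hits above all orbit one data structure: a batch that holds runs of physically contiguous PFNs in two parallel arrays, pfns[] for the first pfn of each run and npfns[] for its length, so pin/unpin and map operations work on the largest possible grouping. A minimal self-contained sketch of that batching idea follows; the names mirror the snippets, but the size-limit handling of the real code is omitted.

#include <stdbool.h>

struct pfn_batch {
	unsigned long *pfns;     /* first pfn of each contiguous run */
	unsigned int *npfns;     /* length of each run */
	unsigned int array_size; /* capacity of both arrays */
	unsigned int end;        /* runs currently in use */
	unsigned int total_pfns;
};

static void batch_clear(struct pfn_batch *batch)
{
	batch->total_pfns = 0;
	batch->end = 0;
	batch->pfns[0] = 0;
	batch->npfns[0] = 0;
}

/* Append one pfn, extending the last run when it is contiguous with it;
 * returns false when the batch is full and the caller must flush. */
static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
{
	if (batch->end &&
	    pfn == batch->pfns[batch->end - 1] + batch->npfns[batch->end - 1]) {
		batch->npfns[batch->end - 1]++;
	} else {
		if (batch->end == batch->array_size)
			return false;
		batch->pfns[batch->end] = pfn;
		batch->npfns[batch->end] = 1;
		batch->end++;
	}
	batch->total_pfns++;
	return true;
}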
selftest.c
     32  * the pfns shifted. The upper bits are used for metadata.
     90  struct xarray pfns;  member
    165  xa_init(&mock->pfns);  in mock_domain_alloc()
    174  WARN_ON(!xa_empty(&mock->pfns));  in mock_domain_free()
    205  old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,  in mock_domain_map_pages()
    212  xa_erase(&mock->pfns,  in mock_domain_map_pages()
    245  ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);  in mock_domain_unmap_pages()
    279  ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);  in mock_domain_iova_to_phys()
    551  ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);  in iommufd_test_md_check_pa()
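The mock domain used by the selftest is just a lookup table keyed by iova / MOCK_IO_PAGE_SIZE whose entries are pfns with metadata kept in spare high bits (the line-32 comment). A self-contained model of that map, with the xarray replaced by a plain table and made-up flag bits:

#include <assert.h>

#define MOCK_IO_PAGE_SIZE	4096UL
#define MOCK_FLAG_START		(1UL << 62)	/* illustrative metadata bits */
#define MOCK_FLAG_LAST		(1UL << 63)
#define MOCK_FLAG_MASK		(MOCK_FLAG_START | MOCK_FLAG_LAST)

#define MOCK_SLOTS		1024
static unsigned long mock_pfns[MOCK_SLOTS];	/* stand-in for the xarray */

static void mock_map_page(unsigned long iova, unsigned long pfn,
			  unsigned long flags)
{
	mock_pfns[iova / MOCK_IO_PAGE_SIZE] = pfn | flags;
}

static unsigned long mock_iova_to_phys(unsigned long iova)
{
	unsigned long ent = mock_pfns[iova / MOCK_IO_PAGE_SIZE];

	assert(ent);	/* translating an unmapped iova is a caller bug */
	return (ent & ~MOCK_FLAG_MASK) * MOCK_IO_PAGE_SIZE +
	       iova % MOCK_IO_PAGE_SIZE;
}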
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/

amdgpu_hmm.c
    165  unsigned long *pfns;  in amdgpu_hmm_range_get_pages() local
    172  pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);  in amdgpu_hmm_range_get_pages()
    173  if (unlikely(!pfns)) {  in amdgpu_hmm_range_get_pages()
    182  hmm_range->hmm_pfns = pfns;  in amdgpu_hmm_range_get_pages()
    219  hmm_range->hmm_pfns = pfns;  in amdgpu_hmm_range_get_pages()
    227  pages[i] = hmm_pfn_to_page(pfns[i]);  in amdgpu_hmm_range_get_pages()
    234  kvfree(pfns);  in amdgpu_hmm_range_get_pages()
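amdgpu_hmm_range_get_pages() follows the standard hmm_range_fault() recipe: allocate one unsigned long of hmm_pfns per page, fault the range while retrying on -EBUSY, then convert entries with hmm_pfn_to_page(). A compressed kernel-style sketch of that recipe; the real driver also re-validates the notifier sequence before trusting the result, which is trimmed here, and the function name is illustrative:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

static int sketch_get_user_pages(struct mm_struct *mm,
				 struct mmu_interval_notifier *notifier,
				 unsigned long start, unsigned long npages,
				 struct page **pages)
{
	unsigned long *pfns;
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = start + npages * PAGE_SIZE,
		.default_flags = HMM_PFN_REQ_FAULT,
	};
	unsigned long i;
	int ret;

	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return -ENOMEM;
	range.hmm_pfns = pfns;

	do {	/* retry while the address range is being invalidated */
		range.notifier_seq = mmu_interval_read_begin(notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
	} while (ret == -EBUSY);

	if (!ret)
		for (i = 0; i < npages; i++)
			pages[i] = hmm_pfn_to_page(pfns[i]);

	kvfree(pfns);
	return ret;
}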
/kernel/linux/linux-5.10/drivers/xen/

xlate_mmu.c
    193  xen_pfn_t *pfns;  member
    201  info->pfns[info->idx++] = gfn;  in setup_balloon_gfn()
    218  xen_pfn_t *pfns;  in xen_xlate_map_ballooned_pages() local
    230  pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);  in xen_xlate_map_ballooned_pages()
    231  if (!pfns) {  in xen_xlate_map_ballooned_pages()
    240  kfree(pfns);  in xen_xlate_map_ballooned_pages()
    244  data.pfns = pfns;  in xen_xlate_map_ballooned_pages()
    254  kfree(pfns);  in xen_xlate_map_ballooned_pages()
    [all...]
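xen_xlate_map_ballooned_pages() is the collect-then-map idiom: a per-page callback records each ballooned frame into a preallocated array, and the full array is handed to the mapping step in one go. A sketch of the two halves; walk_ballooned_pages() and map_frames() are hypothetical stand-ins for the real page walk and hypercall:

#include <linux/slab.h>

struct map_balloon_pages {
	unsigned long *pfns;
	unsigned int idx;
};

/* hypothetical helpers standing in for the real walk and mapping step */
extern void walk_ballooned_pages(unsigned int nr,
				 void (*fn)(unsigned long, void *), void *data);
extern int map_frames(unsigned long *pfns, unsigned int nr);

/* invoked once per ballooned page by the walk */
static void record_gfn(unsigned long gfn, void *data)
{
	struct map_balloon_pages *info = data;

	info->pfns[info->idx++] = gfn;
}

static int map_ballooned(unsigned int nr_frames)
{
	struct map_balloon_pages data = { .idx = 0 };
	int rc;

	data.pfns = kcalloc(nr_frames, sizeof(data.pfns[0]), GFP_KERNEL);
	if (!data.pfns)
		return -ENOMEM;

	walk_ballooned_pages(nr_frames, record_gfn, &data);
	rc = map_frames(data.pfns, nr_frames);

	kfree(data.pfns);
	return rc;
}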
privcmd.c
    414  /* Allocate pfns that are then mapped with gfns from foreign domid. Update
    429  pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,  in alloc_empty_pages()
    728  xen_pfn_t *pfns = NULL;  in privcmd_ioctl_mmap_resource() local
    763  pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);  in privcmd_ioctl_mmap_resource()
    764  if (!pfns) {  in privcmd_ioctl_mmap_resource()
    784  pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);  in privcmd_ioctl_mmap_resource()
    791  set_xen_guest_handle(xdata.frame_list, pfns);  in privcmd_ioctl_mmap_resource()
    807  int num, *errs = (int *)pfns;  in privcmd_ioctl_mmap_resource()
    809  BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));  in privcmd_ioctl_mmap_resource()
    [all...]
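One detail worth pausing on in the privcmd hits: once the hypercall has consumed the frame list, the same allocation is reused as an array of per-frame error codes, with BUILD_BUG_ON() proving at compile time that an int cannot overrun the reused slots. A userspace model of that guard; xen_pfn_t is a stand-in typedef, and the cast is idiomatic in the kernel because it builds with -fno-strict-aliasing:

#include <assert.h>
#include <stdint.h>

typedef uint64_t xen_pfn_t;	/* stand-in for the real Xen type */

/* Scan the reused buffer for the first per-frame error, if any. */
static int first_error(xen_pfn_t *pfns, unsigned int num)
{
	int *errs = (int *)pfns;
	unsigned int i;

	static_assert(sizeof(*errs) <= sizeof(*pfns),
		      "errs must fit in the reused pfns storage");

	for (i = 0; i < num; i++)
		if (errs[i])
			return errs[i];
	return 0;
}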
/kernel/linux/linux-6.6/drivers/xen/

xlate_mmu.c
    193  xen_pfn_t *pfns;  member
    201  info->pfns[info->idx++] = gfn;  in setup_balloon_gfn()
    218  xen_pfn_t *pfns;  in xen_xlate_map_ballooned_pages() local
    230  pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);  in xen_xlate_map_ballooned_pages()
    231  if (!pfns) {  in xen_xlate_map_ballooned_pages()
    240  kfree(pfns);  in xen_xlate_map_ballooned_pages()
    244  data.pfns = pfns;  in xen_xlate_map_ballooned_pages()
    254  kfree(pfns);  in xen_xlate_map_ballooned_pages()
    [all...]

privcmd.c
    419  /* Allocate pfns that are then mapped with gfns from foreign domid. Update
    434  pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,  in alloc_empty_pages()
    733  xen_pfn_t *pfns = NULL;  in privcmd_ioctl_mmap_resource() local
    768  pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);  in privcmd_ioctl_mmap_resource()
    769  if (!pfns) {  in privcmd_ioctl_mmap_resource()
    789  pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);  in privcmd_ioctl_mmap_resource()
    796  set_xen_guest_handle(xdata.frame_list, pfns);  in privcmd_ioctl_mmap_resource()
    812  int num, *errs = (int *)pfns;  in privcmd_ioctl_mmap_resource()
    814  BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));  in privcmd_ioctl_mmap_resource()
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/

i915_gem_pages.c
    308  unsigned long stack[32], *pfns = stack, i;  in i915_gem_object_map_pfn() local
    318  pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);  in i915_gem_object_map_pfn()
    319  if (!pfns)  in i915_gem_object_map_pfn()
    325  pfns[i++] = (iomap + addr) >> PAGE_SHIFT;  in i915_gem_object_map_pfn()
    326  vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));  in i915_gem_object_map_pfn()
    327  if (pfns != stack)  in i915_gem_object_map_pfn()
    328  kvfree(pfns);  in i915_gem_object_map_pfn()
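The i915 helper avoids a heap allocation for small objects: a 32-entry array lives on the stack and kvmalloc_array() is used only when n_pfn outgrows it, after which everything is handed to vmap_pfn() for a write-combined kernel mapping. A kernel-style sketch of the idiom; the function name and the offsets parameter are illustrative:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *map_pfn_wc(resource_size_t iomap, unsigned long *offsets,
			unsigned int n_pfn)
{
	unsigned long stack[32], *pfns = stack, i;
	void *vaddr;

	/* go to the heap only when the request outgrows the on-stack array */
	if (n_pfn > ARRAY_SIZE(stack)) {
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return NULL;
	}

	for (i = 0; i < n_pfn; i++)
		pfns[i] = (iomap + offsets[i]) >> PAGE_SHIFT;

	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);	/* vmap_pfn() no longer needs the array */
	return vaddr;
}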
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/

i915_gem_pages.c
    333  unsigned long stack[32], *pfns = stack, i;  in i915_gem_object_map_pfn() local
    342  pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);  in i915_gem_object_map_pfn()
    343  if (!pfns)  in i915_gem_object_map_pfn()
    349  pfns[i++] = (iomap + addr) >> PAGE_SHIFT;  in i915_gem_object_map_pfn()
    350  vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));  in i915_gem_object_map_pfn()
    351  if (pfns != stack)  in i915_gem_object_map_pfn()
    352  kvfree(pfns);  in i915_gem_object_map_pfn()
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/

nouveau_svm.h
     34  void nouveau_pfns_free(u64 *pfns);
     36  unsigned long addr, u64 *pfns, unsigned long npages);
nouveau_dmem.c
    618  dma_addr_t *dma_addrs, u64 *pfns)  in nouveau_dmem_migrate_chunk()
    625  args->src[i], dma_addrs + nr_dma, pfns + i);  in nouveau_dmem_migrate_chunk()
    634  nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);  in nouveau_dmem_migrate_chunk()
    660  u64 *pfns;  in nouveau_dmem_migrate_vma() local
    677  pfns = nouveau_pfns_alloc(max);  in nouveau_dmem_migrate_vma()
    678  if (!pfns)  in nouveau_dmem_migrate_vma()
    693  pfns);  in nouveau_dmem_migrate_vma()
    699  nouveau_pfns_free(pfns);  in nouveau_dmem_migrate_vma()
    616  nouveau_dmem_migrate_chunk(struct nouveau_drm *drm, struct nouveau_svmm *svmm, struct migrate_vma *args, dma_addr_t *dma_addrs, u64 *pfns)  nouveau_dmem_migrate_chunk() argument
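nouveau_dmem_migrate_vma() allocates the pfn array once, sized for the largest chunk, then migrates and maps the range chunk by chunk, reusing the array on every pass. A simplified sketch of that shape built on the driver helpers declared in nouveau_svm.h; CHUNK_PAGES and migrate_one_chunk() are illustrative placeholders, not the driver's names:

#include <linux/minmax.h>
#include <linux/mm.h>
#include "nouveau_drv.h"
#include "nouveau_svm.h"

#define CHUNK_PAGES 512UL	/* illustrative chunk size */

/* hypothetical stand-in for the per-chunk migrate_vma setup/copy/finalize */
extern int migrate_one_chunk(struct nouveau_drm *drm, struct nouveau_svmm *svmm,
			     struct vm_area_struct *vma, unsigned long start,
			     unsigned long npages, u64 *pfns);

static int migrate_range(struct nouveau_drm *drm, struct nouveau_svmm *svmm,
			 struct vm_area_struct *vma,
			 unsigned long start, unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	u64 *pfns;
	int ret = 0;

	pfns = nouveau_pfns_alloc(min(npages, CHUNK_PAGES));
	if (!pfns)
		return -ENOMEM;

	while (npages) {
		unsigned long chunk = min(npages, CHUNK_PAGES);

		ret = migrate_one_chunk(drm, svmm, vma, start, chunk, pfns);
		if (ret)
			break;
		nouveau_pfns_map(svmm, vma->vm_mm, start, pfns, chunk);
		start += chunk << PAGE_SHIFT;
		npages -= chunk;
	}

	nouveau_pfns_free(pfns);
	return ret;
}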
nouveau_svm.c
    784  nouveau_pfns_to_args(void *pfns)  in nouveau_pfns_to_args() argument
    786  return container_of(pfns, struct nouveau_pfnmap_args, p.phys);  in nouveau_pfns_to_args()
    806  nouveau_pfns_free(u64 *pfns)  in nouveau_pfns_free() argument
    808  struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  in nouveau_pfns_free()
    815  unsigned long addr, u64 *pfns, unsigned long npages)  in nouveau_pfns_map()
    817  struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  in nouveau_pfns_map()
    814  nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, unsigned long addr, u64 *pfns, unsigned long npages)  nouveau_pfns_map() argument
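nouveau_pfns_to_args() is the classic container_of() trick: callers are handed a bare u64 *pfns that really points at an array embedded in nouveau_pfnmap_args, and the wrapper is recovered on the way back in. A self-contained userspace illustration of the idiom, with a simplified stand-in struct:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pfnmap_args {		/* stand-in for nouveau_pfnmap_args */
	unsigned int npages;
	uint64_t phys[];	/* callers only ever see this member */
};

static uint64_t *pfns_alloc(unsigned int npages)
{
	struct pfnmap_args *args;

	args = calloc(1, sizeof(*args) + npages * sizeof(args->phys[0]));
	if (!args)
		return NULL;
	args->npages = npages;
	return args->phys;	/* hand out only the embedded array */
}

static void pfns_free(uint64_t *pfns)
{
	/* walk back from the array to the enclosing allocation */
	struct pfnmap_args *args = container_of(pfns, struct pfnmap_args, phys);

	free(args);
}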
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/

nouveau_svm.h
     34  void nouveau_pfns_free(u64 *pfns);
     36  unsigned long addr, u64 *pfns, unsigned long npages);

nouveau_dmem.c
    665  dma_addr_t *dma_addrs, u64 *pfns)  in nouveau_dmem_migrate_chunk()
    672  args->src[i], dma_addrs + nr_dma, pfns + i);  in nouveau_dmem_migrate_chunk()
    681  nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);  in nouveau_dmem_migrate_chunk()
    707  u64 *pfns;  in nouveau_dmem_migrate_vma() local
    724  pfns = nouveau_pfns_alloc(max);  in nouveau_dmem_migrate_vma()
    725  if (!pfns)  in nouveau_dmem_migrate_vma()
    740  pfns);  in nouveau_dmem_migrate_vma()
    746  nouveau_pfns_free(pfns);  in nouveau_dmem_migrate_vma()
    663  nouveau_dmem_migrate_chunk(struct nouveau_drm *drm, struct nouveau_svmm *svmm, struct migrate_vma *args, dma_addr_t *dma_addrs, u64 *pfns)  nouveau_dmem_migrate_chunk() argument

nouveau_svm.c
    895  nouveau_pfns_to_args(void *pfns)  in nouveau_pfns_to_args() argument
    897  return container_of(pfns, struct nouveau_pfnmap_args, p.phys);  in nouveau_pfns_to_args()
    917  nouveau_pfns_free(u64 *pfns)  in nouveau_pfns_free() argument
    919  struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  in nouveau_pfns_free()
    926  unsigned long addr, u64 *pfns, unsigned long npages)  in nouveau_pfns_map()
    928  struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);  in nouveau_pfns_map()
    925  nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm, unsigned long addr, u64 *pfns, unsigned long npages)  nouveau_pfns_map() argument
/kernel/linux/linux-5.10/drivers/virtio/

virtio_balloon.c
    113  /* The array of pfns we tell the Host about. */
    115  __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];  member
    157  sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);  in tell_host()
    196  __virtio32 pfns[], struct page *page)  in set_page_pfns()
    203  * Set balloon pfns pointing at this page.  in set_page_pfns()
    207  pfns[i] = cpu_to_virtio32(vb->vdev,  in set_page_pfns()
    219  num = min(num, ARRAY_SIZE(vb->pfns));  in fill_balloon()
    244  set_page_pfns(vb, vb->pfns + vb->num_pfns, page);  in fill_balloon()
    283  num = min(num, ARRAY_SIZE(vb->pfns));  in leak_balloon()
    195  set_page_pfns(struct virtio_balloon *vb, __virtio32 pfns[], struct page *page)  set_page_pfns() argument
    [all...]
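set_page_pfns() exists because a kernel page may be larger than the fixed 4 KiB balloon page, so one struct page expands into several consecutive balloon pfns, each byte-swapped for the device. A sketch staying close to the snippets; to keep it self-contained it takes the virtio_device directly instead of the driver-private struct virtio_balloon, and the macro spelling here is approximate:

#include <linux/mm.h>
#include <linux/virtio_balloon.h>	/* VIRTIO_BALLOON_PFN_SHIFT */
#include <linux/virtio_config.h>

#define PAGES_PER_BALLOON_PAGE \
	((unsigned int)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT))

/* balloon pfn of the first 4 KiB chunk covered by this kernel page */
static u32 page_to_balloon_pfn(struct page *page)
{
	return page_to_pfn(page) * PAGES_PER_BALLOON_PAGE;
}

static void set_page_pfns(struct virtio_device *vdev,
			  __virtio32 pfns[], struct page *page)
{
	unsigned int i;

	/* one entry per 4 KiB chunk; they are contiguous after the first */
	for (i = 0; i < PAGES_PER_BALLOON_PAGE; i++)
		pfns[i] = cpu_to_virtio32(vdev, page_to_balloon_pfn(page) + i);
}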
/kernel/linux/linux-6.6/drivers/virtio/

virtio_balloon.c
    106  /* The array of pfns we tell the Host about. */
    108  __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];  member
    150  sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);  in tell_host()
    189  __virtio32 pfns[], struct page *page)  in set_page_pfns()
    196  * Set balloon pfns pointing at this page.  in set_page_pfns()
    200  pfns[i] = cpu_to_virtio32(vb->vdev,  in set_page_pfns()
    212  num = min(num, ARRAY_SIZE(vb->pfns));  in fill_balloon()
    237  set_page_pfns(vb, vb->pfns + vb->num_pfns, page);  in fill_balloon()
    276  num = min(num, ARRAY_SIZE(vb->pfns));  in leak_balloon()
    188  set_page_pfns(struct virtio_balloon *vb, __virtio32 pfns[], struct page *page)  set_page_pfns() argument
    [all...]
/kernel/linux/linux-5.10/arch/powerpc/kvm/

book3s_hv_uvmem.c
    228  unsigned long *pfns;  member
    253  p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));  in kvmppc_uvmem_slot_init()
    254  if (!p->pfns) {  in kvmppc_uvmem_slot_init()
    278  vfree(p->pfns);  in kvmppc_uvmem_slot_free()
    297  p->pfns[index] = uvmem_pfn | flag;  in kvmppc_mark_gfn()
    299  p->pfns[index] = flag;  in kvmppc_mark_gfn()
    340  if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {  in kvmppc_gfn_is_uvmem_pfn()
    342  *uvmem_pfn = p->pfns[index] &  in kvmppc_gfn_is_uvmem_pfn()
    380  if (!(p->pfns[inde  in kvmppc_next_nontransitioned_gfn()
    [all...]
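Each uvmem slot keeps one unsigned long per guest frame: state flags occupy a few high bits, and when the page lives in secure (uvmem) memory the device pfn sits in the low bits, as the kvmppc_mark_gfn() and kvmppc_gfn_is_uvmem_pfn() lines show. A self-contained model of that encoding; the flag names and values here are illustrative, not the kernel's:

#include <stdbool.h>

#define GFN_UVMEM_PFN	(1UL << 63)	/* low bits hold a uvmem pfn */
#define GFN_SECURE	(1UL << 62)	/* illustrative state flags */
#define GFN_SHARED	(1UL << 61)
#define GFN_FLAG_MASK	(GFN_UVMEM_PFN | GFN_SECURE | GFN_SHARED)

static void mark_gfn(unsigned long *pfns, unsigned long index,
		     unsigned long uvmem_pfn, unsigned long flag)
{
	if (flag & GFN_UVMEM_PFN)
		pfns[index] = uvmem_pfn | flag;	/* pfn plus state */
	else
		pfns[index] = flag;		/* state only */
}

static bool gfn_is_uvmem_pfn(const unsigned long *pfns, unsigned long index,
			     unsigned long *uvmem_pfn)
{
	if (pfns[index] & GFN_UVMEM_PFN) {
		*uvmem_pfn = pfns[index] & ~GFN_FLAG_MASK;
		return true;
	}
	return false;
}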
/kernel/linux/linux-6.6/arch/powerpc/kvm/

book3s_hv_uvmem.c
    230  unsigned long *pfns;  member
    255  p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));  in kvmppc_uvmem_slot_init()
    256  if (!p->pfns) {  in kvmppc_uvmem_slot_init()
    280  vfree(p->pfns);  in kvmppc_uvmem_slot_free()
    299  p->pfns[index] = uvmem_pfn | flag;  in kvmppc_mark_gfn()
    301  p->pfns[index] = flag;  in kvmppc_mark_gfn()
    342  if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {  in kvmppc_gfn_is_uvmem_pfn()
    344  *uvmem_pfn = p->pfns[index] &  in kvmppc_gfn_is_uvmem_pfn()
    382  if (!(p->pfns[inde  in kvmppc_next_nontransitioned_gfn()
    [all...]
/kernel/linux/linux-5.10/mm/

sparse.c
    161  * in larger pfns than the maximum scope of sparsemem:  in mminit_validate_memmodel_limits()
    230  unsigned long pfns;  in subsection_map_init() local
    232  pfns = min(nr_pages, PAGES_PER_SECTION  in subsection_map_init()
    235  subsection_mask_set(ms->usage->subsection_map, pfn, pfns);  in subsection_map_init()
    237  pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,  in subsection_map_init()
    238  pfns, subsection_map_index(pfn),  in subsection_map_init()
    239  subsection_map_index(pfn + pfns - 1));  in subsection_map_init()
    241  pfn += pfns;  in subsection_map_init()
    242  nr_pages -= pfns;  in subsection_map_init()
    892  * @nr_pages: number of pfns t
    [all...]
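subsection_map_init() walks [pfn, pfn + nr_pages) one memory section at a time, clamping each step so it never crosses a section boundary; that is what the truncated min(nr_pages, PAGES_PER_SECTION ...) line computes. A runnable model of the loop under an assumed section size:

#include <stdio.h>

#define PAGES_PER_SECTION	32768UL	/* assumed: 128 MiB sections of 4 KiB pages */
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static void walk_sections(unsigned long pfn, unsigned long nr_pages)
{
	while (nr_pages) {
		/* pages left in the current section, capped by the request */
		unsigned long pfns = min_ul(nr_pages,
			PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

		printf("sec %lu: %lu pfns from pfn %lu\n",
		       pfn / PAGES_PER_SECTION, pfns, pfn);

		pfn += pfns;
		nr_pages -= pfns;
	}
}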
/kernel/linux/linux-6.6/mm/

sparse.c
    136  * in larger pfns than the maximum scope of sparsemem:  in mminit_validate_memmodel_limits()
    203  unsigned long pfns;  in subsection_map_init() local
    205  pfns = min(nr_pages, PAGES_PER_SECTION  in subsection_map_init()
    208  subsection_mask_set(ms->usage->subsection_map, pfn, pfns);  in subsection_map_init()
    210  pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,  in subsection_map_init()
    211  pfns, subsection_map_index(pfn),  in subsection_map_init()
    212  subsection_map_index(pfn + pfns - 1));  in subsection_map_init()
    214  pfn += pfns;  in subsection_map_init()
    215  nr_pages -= pfns;  in subsection_map_init()
    875  * @nr_pages: number of pfns t
    [all...]
/kernel/linux/linux-5.10/lib/

test_hmm.c
    162  unsigned long *pfns = range->hmm_pfns;  in dmirror_do_fault() local
    167  pfn++, pfns++) {  in dmirror_do_fault()
    175  WARN_ON(*pfns & HMM_PFN_ERROR);  in dmirror_do_fault()
    176  WARN_ON(!(*pfns & HMM_PFN_VALID));  in dmirror_do_fault()
    178  page = hmm_pfn_to_page(*pfns);  in dmirror_do_fault()
    182  if (*pfns & HMM_PFN_WRITE)  in dmirror_do_fault()
    285  unsigned long pfns[64];  in dmirror_fault() local
    288  .hmm_pfns = pfns,  in dmirror_fault()
    302  range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);  in dmirror_fault()
    867  unsigned long pfns[6  in dmirror_snapshot() local
    [all...]
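dmirror_fault() services an arbitrarily long range through a fixed 64-entry window: each pass faults at most ARRAY_SIZE(pfns) pages and the next pass resumes at the previous range.end. A kernel-style sketch of the loop; struct dmirror and dmirror_range_fault(), which wraps hmm_range_fault() plus the notifier retry, belong to the test itself:

#include <linux/hmm.h>
#include <linux/kernel.h>
#include <linux/minmax.h>

static int fault_in_window(struct dmirror *dmirror, unsigned long start,
			   unsigned long end, bool write)
{
	unsigned long pfns[64];
	struct hmm_range range = {
		.notifier = &dmirror->notifier,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT |
				 (write ? HMM_PFN_REQ_WRITE : 0),
	};
	unsigned long addr;
	int ret;

	for (addr = start; addr < end; addr = range.end) {
		range.start = addr;
		/* clamp each pass to the 64-pfn window */
		range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);

		ret = dmirror_range_fault(dmirror, &range);
		if (ret)
			return ret;
	}
	return 0;
}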
/kernel/linux/linux-6.6/lib/

test_hmm.c
    209  unsigned long *pfns = range->hmm_pfns;  in dmirror_do_fault() local
    214  pfn++, pfns++) {  in dmirror_do_fault()
    222  WARN_ON(*pfns & HMM_PFN_ERROR);  in dmirror_do_fault()
    223  WARN_ON(!(*pfns & HMM_PFN_VALID));  in dmirror_do_fault()
    225  page = hmm_pfn_to_page(*pfns);  in dmirror_do_fault()
    229  if (*pfns & HMM_PFN_WRITE)  in dmirror_do_fault()
    332  unsigned long pfns[64];  in dmirror_fault() local
    335  .hmm_pfns = pfns,  in dmirror_fault()
    349  range.end = min(addr + (ARRAY_SIZE(pfns) << PAGE_SHIFT), end);  in dmirror_fault()
   1172  unsigned long pfns[6  in dmirror_snapshot() local
    [all...]
/kernel/linux/linux-5.10/drivers/iommu/

iova.c
    669  /* Don't allow nonsensical pfns */  in reserve_iova()
    781  unsigned long pfns[IOVA_MAG_SIZE];  member
    812  struct iova *iova = private_find_iova(iovad, mag->pfns[i]);  in iova_magazine_free_pfns()
    843  /* Only fall back to the rbtree if we have no suitable pfns at all */  in iova_magazine_pop()
    844  for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)  in iova_magazine_pop()
    849  pfn = mag->pfns[i];  in iova_magazine_pop()
    850  mag->pfns[i] = mag->pfns[--mag->size];  in iova_magazine_pop()
    859  mag->pfns[mag->size++] = pfn;  in iova_magazine_push()
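The iova magazine is a small stack of recently freed pfns kept per CPU. Pop must honour limit_pfn, so it scans down from the top for a usable entry and back-fills the hole with the last element; order inside a magazine does not matter, so removal stays O(1). A self-contained model, where the kernel version falls back to the rbtree in the cases this one returns false:

#include <stdbool.h>

#define IOVA_MAG_SIZE 127

struct iova_magazine {
	unsigned long size;		/* entries in use */
	unsigned long pfns[IOVA_MAG_SIZE];
};

static bool magazine_pop(struct iova_magazine *mag, unsigned long limit_pfn,
			 unsigned long *pfn)
{
	long i;

	/* scan from the top for an entry that satisfies the limit */
	for (i = (long)mag->size - 1; i >= 0; i--)
		if (mag->pfns[i] <= limit_pfn)
			break;
	if (i < 0)
		return false;	/* no suitable pfns at all */

	*pfn = mag->pfns[i];
	mag->pfns[i] = mag->pfns[--mag->size];	/* swap-remove: O(1) */
	return true;
}

static bool magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	if (mag->size == IOVA_MAG_SIZE)
		return false;	/* full; caller rotates magazines */
	mag->pfns[mag->size++] = pfn;
	return true;
}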
/kernel/linux/linux-6.6/drivers/iommu/

iova.c
    585  /* Don't allow nonsensical pfns */  in reserve_iova()
    632  unsigned long pfns[IOVA_MAG_SIZE];  member
    673  struct iova *iova = private_find_iova(iovad, mag->pfns[i]);  in iova_magazine_free_pfns()
    703  /* Only fall back to the rbtree if we have no suitable pfns at all */  in iova_magazine_pop()
    704  for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)  in iova_magazine_pop()
    709  pfn = mag->pfns[i];  in iova_magazine_pop()
    710  mag->pfns[i] = mag->pfns[--mag->size];  in iova_magazine_pop()
    717  mag->pfns[mag->size++] = pfn;  in iova_magazine_push()