Lines Matching defs:size
95 * after making sure the size is an order of a 4KiB page and that the
498 size_t size = 256 * sizeof(struct dmar_domain *);
499 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
969 void *addr, int size)
972 clflush_cache_range(addr, size);
1687 * the size is too big.
1803 size_t size;
1819 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1820 iommu->domains = kzalloc(size, GFP_KERNEL);
1823 size = 256 * sizeof(struct dmar_domain *);
1824 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
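
The allocations above (lines 498-499 and 1819-1824) build a two-level table of domain pointers: a top-level array with one slot per block of 256 domain IDs, plus 256-entry chunks hung beneath it on demand. A minimal user-space sketch of that layout, assuming a 16-bit domain ID split into a high chunk index and a low slot index; the type and function names here are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct dmar_domain;			/* opaque for this sketch */

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Top level: one pointer per 256-ID chunk; chunks hold the real pointers. */
struct domain_table {
	struct dmar_domain ***chunks;
	unsigned int nchunks;
};

static int domain_table_init(struct domain_table *t, unsigned int ndomains)
{
	t->nchunks = ALIGN(ndomains, 256) >> 8;
	t->chunks = calloc(t->nchunks, sizeof(*t->chunks));
	if (!t->chunks)
		return -1;
	/* Chunk 0 is populated up front, as iommu->domains[0] is above. */
	t->chunks[0] = calloc(256, sizeof(struct dmar_domain *));
	return t->chunks[0] ? 0 : -1;
}

/* High byte of the domain ID picks the chunk, low byte picks the slot. */
static struct dmar_domain *domain_table_get(struct domain_table *t,
					    unsigned short did)
{
	struct dmar_domain **chunk = t->chunks[did >> 8];

	return chunk ? chunk[did & 0xff] : NULL;
}

int main(void)
{
	struct domain_table t;

	if (domain_table_init(&t, 65536))
		return 1;
	printf("%u chunks, did 0x1234 -> %p\n", t.nchunks,
	       (void *)domain_table_get(&t, 0x1234));
	return 0;
}

Splitting the table keeps the always-present part small (one pointer per 256 IDs) while a 16-bit domain ID still indexes a chunk directly.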
2092 * Get the PASID directory size for scalable mode context entry.
2137 /* Convert value to context PASID directory size field coding. */
2355 /* Returns the number of VTD pages, but aligned to MM page size */
2357 size_t size)
2360 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
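
The helper excerpted above (line 2360) converts a host buffer into a count of 4KiB VT-d pages, rounded out to MM page boundaries. A standalone sketch of the same arithmetic, assuming 4KiB for both the MM and VT-d page sizes; it also keeps only the in-page offset of the host address, which the full helper does on a line the search did not match:

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12			/* 4KiB VT-d page (assumed) */
#define PAGE_SIZE	4096UL			/* host MM page size (assumed) */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Number of VT-d pages needed to cover [host_addr, host_addr + size),
 * rounded out to MM page boundaries. */
static unsigned long nr_vtd_pages(uint64_t host_addr, size_t size)
{
	host_addr &= PAGE_SIZE - 1;	/* keep only the offset within the MM page */
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

int main(void)
{
	/* A 256-byte buffer that straddles a page boundary needs two pages. */
	printf("%lu\n", nr_vtd_pages(0x1F80, 0x100));	/* prints 2 */
	/* The same length contained in one page needs only one. */
	printf("%lu\n", nr_vtd_pages(0x1010, 0x100));	/* prints 1 */
	return 0;
}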
2501 end of the mapping, if the trailing size is not enough to
3367 * Find the max PASID size of all IOMMUs in the system.
3542 /* Ensure we reserve the whole size-aligned region */
3568 size_t size, int dir, u64 dma_mask)
3588 size = aligned_nrpages(paddr, size);
3590 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3604 * paddr - (paddr + size) might be partial page, we should map the whole
3610 mm_to_dma_pfn(paddr_pfn), size, prot);
3617 trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
3623 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3625 size, (unsigned long long)paddr, dir);
3630 unsigned long offset, size_t size,
3635 size, dir, *dev->dma_mask);
3639 size_t size, enum dma_data_direction dir,
3642 return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
3645 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3662 nrpages = aligned_nrpages(dev_addr, size);
3686 trace_unmap_single(dev, dev_addr, size);
3690 size_t size, enum dma_data_direction dir,
3693 intel_unmap(dev, dev_addr, size);
3697 size_t size, enum dma_data_direction dir, unsigned long attrs)
3699 intel_unmap(dev, dev_addr, size);
3702 static void *intel_alloc_coherent(struct device *dev, size_t size,
3712 size = PAGE_ALIGN(size);
3713 order = get_order(size);
3716 unsigned int count = size >> PAGE_SHIFT;
3726 memset(page_address(page), 0, size);
3728 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3733 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3739 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3745 size = PAGE_ALIGN(size);
3746 order = get_order(size);
3748 intel_unmap(dev, dma_handle, size);
3749 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
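
intel_alloc_coherent and intel_free_coherent above normalize the request the same way: page-align the byte count, derive a power-of-two allocation order from it, and compute the page count handed to the contiguous allocator. A small sketch of that bookkeeping, assuming 4KiB pages; get_order_sketch is a simplified stand-in for the kernel's get_order():

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Smallest order such that 2^order pages hold the aligned size. */
static int get_order_sketch(size_t size)
{
	size_t pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	size_t size = 6000;			/* requested bytes */
	size_t aligned = PAGE_ALIGN(size);	/* 8192: two 4KiB pages */
	int order = get_order_sketch(size);	/* 1: allocate 2^1 pages */
	size_t count = aligned >> PAGE_SHIFT;	/* 2: pages to (de)allocate */

	printf("aligned=%zu order=%d count=%zu\n", aligned, order, count);
	return 0;
}

The free path repeats the PAGE_ALIGN/get_order pair so it releases exactly the pages the allocation path obtained.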
3776 size_t size = 0;
3796 size += aligned_nrpages(sg->offset, sg->length);
3798 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3817 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3820 start_vpfn + size - 1,
3822 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3855 bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
3867 swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
3871 bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
3875 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3896 nrpages = aligned_nrpages(0, size);
3913 * If both the physical buffer start address and size are
3916 if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
3917 tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
3929 padding_start += size;
3930 padding_size -= size;
3944 trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
3950 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3955 size, (unsigned long long)paddr, dir);
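
bounce_map_single only bounces when the buffer cannot be mapped with page granularity as-is: the single IS_ALIGNED test on (paddr | size) works because the OR has a low bit set exactly when either the start address or the length does. A sketch of that decision plus the padding arithmetic from lines 3929-3930, assuming 4KiB VT-d pages; the zeroing target here is a plain local buffer standing in for the swiotlb slot:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define VTD_PAGE_SIZE	4096UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t paddr = 0x1000;		/* page-aligned start */
	size_t size = 0x600;			/* 1536 bytes: not a page multiple */
	size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);

	/* Bounce only if either the start or the length is unaligned. */
	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
		static char bounce[4096];	/* stand-in for the swiotlb slot */
		char *padding_start = bounce;
		size_t padding_size = aligned_size;

		/* For DMA_TO_DEVICE the payload is copied into the slot,
		 * so only the tail padding after it needs clearing. */
		padding_start += size;
		padding_size -= size;
		memset(padding_start, 0, padding_size);

		printf("bounced: cleared %zu padding bytes after the payload\n",
		       padding_size);
	} else {
		printf("aligned: mapped directly, no bounce buffer\n");
	}
	return 0;
}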
3961 bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
3964 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3976 intel_unmap(dev, dev_addr, size);
3978 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3981 trace_bounce_unmap_single(dev, dev_addr, size);
3986 size_t size, enum dma_data_direction dir, unsigned long attrs)
3989 size, dir, attrs, *dev->dma_mask);
3993 bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3996 return bounce_map_single(dev, phys_addr, size,
4001 bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
4004 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
4008 bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
4011 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
4054 size_t size, enum dma_data_direction dir)
4056 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
4061 size_t size, enum dma_data_direction dir)
4063 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
5521 /* VT-d size is encoded as 2^size of 4K pages, 0 for 4K, 9 for 2MB, etc.
5523 * granu size in contiguous memory.
5540 u64 size = 0;
5567 size = to_vtd_size(inv_info->granu.addr_info.granule_size,
5599 size &&
5600 (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
5601 pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
5602 inv_info->granu.addr_info.addr, size);
5611 (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
5627 * The equivalent of that is we set the size to be the
5632 size = 64 - VTD_PAGE_SHIFT;
5642 size);
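
The invalidation path above encodes every range as an order of 4KiB pages: 2MiB is order 9, and "invalidate everything" becomes order 64 - VTD_PAGE_SHIFT. A sketch of that encoding and of the alignment check applied at lines 5599-5600, assuming 4KiB VT-d pages; the helper name is illustrative:

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT	12		/* 4KiB base page (assumed) */

/* Order (base 2) of the number of 4KiB pages covered by nr_granules
 * granules of granu_size bytes each: 0 -> 4KiB, 9 -> 2MiB, ... */
static uint64_t vtd_size_order(uint64_t granu_size, uint64_t nr_granules)
{
	uint64_t nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
	uint64_t order = 0;

	while ((1ULL << order) < nr_pages)
		order++;
	return order;
}

int main(void)
{
	uint64_t size = vtd_size_order(1ULL << 21, 1);	/* one 2MiB granule */
	uint64_t addr = 0x40201000ULL;	/* 4KiB-aligned, but not 2MiB-aligned */

	printf("size order %llu\n", (unsigned long long)size);	/* prints 9 */

	/* The address must be naturally aligned to 2^order pages, i.e. its
	 * low (VTD_PAGE_SHIFT + order) bits must be clear. */
	if (size && (addr & ((1ULL << (VTD_PAGE_SHIFT + size)) - 1)))
		printf("0x%llx is not aligned for this order\n",
		       (unsigned long long)addr);

	/* Whole-address-space invalidation uses the maximum order. */
	printf("full-range order %d\n", 64 - VTD_PAGE_SHIFT);	/* prints 52 */
	return 0;
}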
5662 size_t size, int iommu_prot, gfp_t gfp)
5676 max_addr = iova + size;
5690 /* Round up size to next multiple of PAGE_SIZE, if it and
5692 size = aligned_nrpages(hpa, size);
5694 hpa >> VTD_PAGE_SHIFT, size, prot);
5699 unsigned long iova, size_t size,
5709 size argument if it happens to be a large-page mapping. */
5712 if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5713 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5716 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5728 if (dmar_domain->max_addr == iova + size)
5731 return size;
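
The intel_iommu_unmap fragments close the section: before computing the last pfn, the requested size is rounded up to whatever the leaf PTE actually covers, so tearing down 4KiB inside a 2MiB superpage unmaps the whole superpage and the function returns the size it really removed. A standalone sketch of that rounding, assuming 4KiB base pages and 9 address bits per table level; the page walk that finds the real leaf level is omitted:

#include <stdio.h>
#include <stddef.h>

#define VTD_PAGE_SHIFT	12
#define VTD_PAGE_SIZE	(1UL << VTD_PAGE_SHIFT)
#define LEVEL_STRIDE	9	/* address bits resolved per table level (assumed) */

/* Offset bits covered below a leaf at the given level:
 * level 1 -> 0 (4KiB page), level 2 -> 9 (2MiB), level 3 -> 18 (1GiB). */
static int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

int main(void)
{
	unsigned long iova = 0x200000;	/* start of a 2MiB superpage */
	size_t size = VTD_PAGE_SIZE;	/* caller asks to unmap only 4KiB */
	int level = 2;			/* assume the walk found a 2MiB leaf */

	/* Round the request up to the full extent of the leaf PTE. */
	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	unsigned long start_pfn = iova >> VTD_PAGE_SHIFT;
	unsigned long last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	printf("unmapping pfns %lu..%lu, returning %zu bytes\n",
	       start_pfn, last_pfn, size);	/* 512..1023, 2097152 bytes */
	return 0;
}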