Lines Matching refs:size

2042 * minimum size is the offset to the union.
2050 /* Fields before the variable size union are mandatory */
2065 * size, we shall support the existing flags within the current
2066 * size. Copy the remaining user data _after_ minsz but not more
2067 * than the current kernel supported size.
2115 * minimum size is the offset to the union.
2123 /* Fields before the variable size union are mandatory */
2128 * driver decide on what size it needs. Since the guest PASID bind data
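The matches at 2042-2128 appear to come from the UAPI entry points (iommu_uapi_cache_invalidate() and iommu_uapi_sva_bind_gpasid()), which share one pattern for variable-sized user structs: copy the mandatory fields up to minsz (the offset of the trailing union), validate argsz, then copy the rest capped at the size this kernel was built with. A minimal userspace sketch of that pattern, with a hypothetical demo_info struct standing in for the real UAPI structs and memcpy() standing in for copy_from_user():

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a UAPI struct ending in a variable-size union. */
struct demo_info {
	uint32_t argsz;            /* user-reported size of the whole struct */
	uint32_t flags;
	union {
		uint64_t pasid_data;
		uint64_t addr_data[2];
	};
};

static int copy_bounded(struct demo_info *dst, const void *user)
{
	/* Minimum size is the offset to the union; earlier fields are mandatory. */
	uint32_t minsz = offsetof(struct demo_info, pasid_data);
	uint32_t copysz;

	memcpy(dst, user, minsz);          /* the kernel uses copy_from_user() */
	if (dst->argsz < minsz)
		return -1;                 /* mandatory fields missing */

	/*
	 * The user may be built against a newer, larger struct; copy the data
	 * _after_ minsz, but never more than the size this build knows about.
	 */
	copysz = dst->argsz < sizeof(*dst) ? dst->argsz : (uint32_t)sizeof(*dst);
	memcpy((char *)dst + minsz, (const char *)user + minsz, copysz - minsz);
	return 0;
}

int main(void)
{
	struct demo_info src = { .argsz = sizeof(src), .flags = 1,
				 .pasid_data = 0xabcd };
	struct demo_info dst = { 0 };

	if (copy_bounded(&dst, &src) == 0)
		printf("flags=%u data=%#llx\n", dst.flags,
		       (unsigned long long)dst.pasid_data);
	return 0;
}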
2341 unsigned long addr_merge, size_t size)
2346 /* Max page size that still fits into 'size' */
2347 pgsize_idx = __fls(size);
2351 /* Max page size allowed by address */
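The fragment at 2341-2351 is iommu_pgsize(), which picks the largest page size that both fits in the bytes left to map ('size') and is permitted by the alignment of the merged address (iova | paddr). A runnable userspace re-creation of that selection logic; the pgsize_bitmap here (4K | 2M | 1G) is just an assumed example:

#include <stdio.h>
#include <stdlib.h>

/* Index of the highest/lowest set bit, like the kernel's __fls()/__ffs(). */
static unsigned int fls_idx(unsigned long x)
{
	return (unsigned int)(sizeof(long) * 8 - 1) - __builtin_clzl(x);
}
static unsigned int ffs_idx(unsigned long x) { return __builtin_ctzl(x); }

static size_t pick_pgsize(unsigned long pgsize_bitmap,
			  unsigned long addr_merge, size_t size)
{
	/* Max page size that still fits into 'size' */
	unsigned int pgsize_idx = fls_idx(size);
	unsigned long pgsize;

	/* Max page size allowed by address alignment */
	if (addr_merge) {
		unsigned int align_idx = ffs_idx(addr_merge);
		if (align_idx < pgsize_idx)
			pgsize_idx = align_idx;
	}

	/* Mask of all candidate sizes, minus those the hardware can't do */
	pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;
	if (!pgsize)
		abort();                  /* the kernel BUG()s here */

	return 1UL << fls_idx(pgsize);    /* pick the biggest remaining page */
}

int main(void)
{
	unsigned long bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);

	/* 2M-aligned address, 3M left to map -> a 2M page */
	printf("%#zx\n", pick_pgsize(bitmap, 0x40200000UL, 0x300000));
	/* only 4K-aligned -> falls back to a 4K page */
	printf("%#zx\n", pick_pgsize(bitmap, 0x40201000UL, 0x300000));
	return 0;
}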
2373 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2378 size_t orig_size = size;
2389 /* find out the minimum page size supported */
2394 * the size of the mapping, must be aligned (at least) to the
2395 * size of the smallest page supported by the hardware
2397 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2398 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2399 iova, &paddr, size, min_pagesz);
2403 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2405 while (size) {
2406 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
2417 size -= pgsize;
2422 iommu_unmap(domain, orig_iova, orig_size - size);
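Lines 2373-2422 give the skeleton of __iommu_map(): reject anything not aligned to the smallest supported page, then carve the range into the largest pages that fit, and unwind with iommu_unmap() if a chunk fails partway through. A userspace sketch of that control flow (MIN_PAGESZ, map_one() and unmap_range() are hypothetical stand-ins for the domain ops; the real loop asks iommu_pgsize() for each chunk):

#include <stdint.h>
#include <stdio.h>

#define MIN_PAGESZ 0x1000UL           /* smallest supported page, assumed 4K */

/* Hypothetical per-chunk callback; a real driver installs PTEs here. */
static int map_one(unsigned long iova, uint64_t paddr, size_t pgsize)
{
	printf("map   iova %#lx pa %#llx size %#zx\n",
	       iova, (unsigned long long)paddr, pgsize);
	return 0;
}

static void unmap_range(unsigned long iova, size_t size)
{
	printf("unmap iova %#lx size %#zx\n", iova, size);
}

static int demo_map(unsigned long iova, uint64_t paddr, size_t size)
{
	unsigned long orig_iova = iova;
	size_t orig_size = size;

	/* iova, paddr and size must all be aligned to the minimum page size */
	if ((iova | paddr | size) & (MIN_PAGESZ - 1)) {
		fprintf(stderr, "unaligned: iova %#lx pa %#llx size %#zx\n",
			iova, (unsigned long long)paddr, size);
		return -1;
	}

	while (size) {
		size_t pgsize = MIN_PAGESZ;   /* kernel: iommu_pgsize(...) */

		if (map_one(iova, paddr, pgsize)) {
			/* undo everything mapped so far */
			unmap_range(orig_iova, orig_size - size);
			return -1;
		}
		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}
	return 0;
}

int main(void)
{
	return demo_map(0x10000000UL, 0x80000000ULL, 0x3000);
}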
2430 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2435 ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2443 phys_addr_t paddr, size_t size, int prot)
2446 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2451 phys_addr_t paddr, size_t size, int prot)
2453 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
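The matches at 2430-2453 show that the exported entry points are thin wrappers around __iommu_map() differing only in the GFP flags used for page-table allocations: iommu_map() may sleep (GFP_KERNEL), iommu_map_atomic() may not (GFP_ATOMIC). Caller-side usage, sketched for a kernel context (assumes an already-attached domain and illustrative addresses; not compilable outside a kernel tree):

#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_callers(struct iommu_domain *domain)
{
	int ret;

	/* Process context: allocations for page tables may sleep. */
	ret = iommu_map(domain, 0x10000000, 0x80000000, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* Atomic context (e.g. under a spinlock): must not sleep. */
	return iommu_map_atomic(domain, 0x10001000, 0x80001000, SZ_4K,
				IOMMU_READ);
}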
2458 unsigned long iova, size_t size,
2473 /* find out the minimum page size supported */
2477 * The virtual address, as well as the size of the mapping, must be
2478 * aligned (at least) to the size of the smallest page supported
2481 if (!IS_ALIGNED(iova | size, min_pagesz)) {
2482 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2483 iova, size, min_pagesz);
2487 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2490 * Keep iterating until we either unmap 'size' bytes (or more)
2493 while (unmapped < size) {
2494 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2500 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2507 trace_unmap(orig_iova, size, unmapped);
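Lines 2458-2507 are the unmap-side mirror: the same min_pagesz alignment check, then a loop that keeps asking the driver to unmap the largest page it can until at least 'size' bytes are gone. A userspace sketch of that loop; unmap_one() is a hypothetical driver callback that reports how many bytes it actually removed (possibly more than one page at a time):

#include <stdio.h>

#define MIN_PAGESZ 0x1000UL    /* smallest supported page, assumed 4K */

/* Hypothetical driver callback: returns bytes unmapped, 0 on failure. */
static size_t unmap_one(unsigned long iova, size_t pgsize)
{
	(void)iova;
	(void)pgsize;
	return MIN_PAGESZ;
}

static size_t demo_unmap(unsigned long iova, size_t size)
{
	size_t unmapped = 0;

	if ((iova | size) & (MIN_PAGESZ - 1)) {
		fprintf(stderr, "unaligned: iova %#lx size %#zx\n", iova, size);
		return 0;
	}

	/* Keep iterating until we either unmap 'size' bytes (or more) */
	while (unmapped < size) {
		size_t done = unmap_one(iova, size - unmapped);

		if (!done)
			break;
		printf("unmapped: iova %#lx size %#zx\n", iova, done);
		iova += done;
		unmapped += done;
	}
	return unmapped;
}

int main(void)
{
	printf("total %#zx\n", demo_unmap(0x10000000UL, 0x3000));
	return 0;
}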
2512 unsigned long iova, size_t size)
2518 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2526 unsigned long iova, size_t size,
2529 return __iommu_unmap(domain, iova, size, iotlb_gather);
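The pair at 2512-2529 explains why two unmap entry points exist: iommu_unmap() initializes an iommu_iotlb_gather on its own stack and syncs the IOTLB before returning, while iommu_unmap_fast() takes a caller-owned gather so several unmaps can share a single flush. A sketch of the batched pattern in kernel context (assumes an attached domain with mappings at the addresses shown; not compilable outside a kernel tree):

#include <linux/iommu.h>
#include <linux/sizes.h>

static void demo_batched_unmap(struct iommu_domain *domain)
{
	struct iommu_iotlb_gather gather;

	iommu_iotlb_gather_init(&gather);

	/* Defer the IOTLB flush across several unmaps... */
	iommu_unmap_fast(domain, 0x10000000, SZ_4K, &gather);
	iommu_unmap_fast(domain, 0x10001000, SZ_4K, &gather);

	/* ...then pay for one sync at the end. */
	iommu_iotlb_sync(domain, &gather);
}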
2596 phys_addr_t paddr, u64 size, int prot)
2601 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
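The final match (2596-2601) is iommu_domain_window_enable(), a guard-and-dispatch wrapper around an optional driver op that only a few drivers (e.g. fsl_pamu) ever implemented; the API was removed in later kernels. Its likely full shape in kernels of this vintage, reconstructed as a sketch around the two matched lines:

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}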