Lines Matching defs:range
110 * to call back into the driver in order to unmap a range of GPU VA space. The
112 * enclosed by the given range, unmap operations are created. For mappings which
113 * are only partially located within the given range, remap operations are
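
Read in isolation these fragments are dense, so here is a minimal sketch of the behaviour they describe: unmapping the middle of an existent mapping. It assumes kernel context with <drm/drm_gpuva_mgr.h> included, a driver-owned mgr, and the drm_gpuva_sm_unmap_ops_create() helper documented further below; all addresses are hypothetical.

	/* Existent mapping: addr=0x0000, range=0x3000.
	 * Unmap request:    addr=0x1000, range=0x1000.
	 *
	 * The mapping is only partially located within the given range,
	 * so a single remap operation is created: it unmaps the whole
	 * entry and re-creates the outer parts as prev/next. */
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuva_sm_unmap_ops_create(mgr, 0x1000, 0x1000);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* op->op == DRM_GPUVA_OP_REMAP here:
		 * op->remap.prev covers [0x0000, 0x1000),
		 * op->remap.next covers [0x2000, 0x3000). */
	}

	drm_gpuva_ops_free(mgr, ops);
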
202 * 5) Requested mapping's range is a left-aligned subset of the existent one,
237 * 7) Requested mapping's range is a right-aligned subset of the existent one,
300 * 11) Requested mapping's range is a centered subset of the existent one
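
Case 11), where the request is a centered subset of an existent mapping with different backing, produces one remap plus one map operation. A hedged sketch, with hypothetical addresses and mgr/obj assumed to exist:

	/* Existent: addr=0x0000, range=0x3000.
	 * Request:  addr=0x1000, range=0x1000, backed by obj at offset 0.
	 *
	 * Expected operations, in order:
	 *   DRM_GPUVA_OP_REMAP: prev keeps [0x0000, 0x1000),
	 *                       next keeps [0x2000, 0x3000)
	 *   DRM_GPUVA_OP_MAP:   the new mapping at [0x1000, 0x2000) */
	struct drm_gpuva_ops *ops;

	ops = drm_gpuva_sm_map_ops_create(mgr, 0x1000, 0x1000, obj, 0);
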
428 * u64 addr, u64 range,
435 * ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
524 * u64 addr, u64 range,
543 * ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
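
Pieced together from the two documented flavours, a minimal sketch of the ops-list variant; the driver_commit_*_op() helpers are hypothetical stand-ins for the driver's page-table code:

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			ret = driver_commit_map_op(&op->map);
			break;
		case DRM_GPUVA_OP_REMAP:
			ret = driver_commit_remap_op(&op->remap);
			break;
		case DRM_GPUVA_OP_UNMAP:
			ret = driver_commit_unmap_op(&op->unmap);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	}

	drm_gpuva_ops_free(mgr, ops);
	return ret;
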
603 #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1)
617 drm_gpuva_check_overflow(u64 addr, u64 range)
621 return WARN(check_add_overflow(addr, range, &end),
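
Taken together, the two fragments above say: GPUVA_LAST() is the inclusive last byte of a node (the interval tree operates on closed intervals), and drm_gpuva_check_overflow() rejects any range whose exclusive end would wrap. A small worked example with hypothetical values:

	u64 addr = 0x1000, range = 0x2000;
	u64 end;

	if (check_add_overflow(addr, range, &end))
		return -EINVAL;	/* addr + range wrapped past U64_MAX */

	/* end == 0x3000 (exclusive); GPUVA_LAST() on a node with these
	 * values evaluates to addr + range - 1 == 0x2fff (inclusive). */
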
626 drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
628 u64 end = addr + range;
636 drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
638 u64 end = addr + range;
640 u64 krange = mgr->kernel_alloc_node.va.range;
648 u64 addr, u64 range)
650 return !drm_gpuva_check_overflow(addr, range) &&
651 drm_gpuva_in_mm_range(mgr, addr, range) &&
652 !drm_gpuva_in_kernel_node(mgr, addr, range);
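
Reassembled from the fragments above, the enclosing predicate (drm_gpuva_range_valid() in the source) reads roughly as follows; this is a sketch aligned with the visible lines, not a verbatim copy of the elided ones:

	static bool
	drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
			      u64 addr, u64 range)
	{
		/* Valid iff the range does not overflow, lies within the
		 * managed VA space and does not intersect the reserved
		 * kernel node. */
		return !drm_gpuva_check_overflow(addr, range) &&
		       drm_gpuva_in_mm_range(mgr, addr, range) &&
		       !drm_gpuva_in_kernel_node(mgr, addr, range);
	}
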
660 * @range: the size of the GPU VA space
673 u64 start_offset, u64 range,
680 drm_gpuva_check_overflow(start_offset, range);
682 mgr->mm_range = range;
691 mgr->kernel_alloc_node.va.range = reserve_range;
712 if (mgr->kernel_alloc_node.va.range)
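
Assuming these fragments belong to drm_gpuva_manager_init() and drm_gpuva_manager_destroy(), a usage sketch; the name, sizes and the driver_gpuva_fn_ops table are hypothetical:

	struct drm_gpuva_manager mgr;

	/* Manage a 48-bit VA space and reserve the first page for
	 * kernel-internal allocations; this populates mgr.mm_range and
	 * mgr.kernel_alloc_node.va.range as seen above. */
	drm_gpuva_manager_init(&mgr, "example-vm",
			       0, 1ULL << 48,	/* start_offset, range */
			       0, PAGE_SIZE,	/* reserve_offset, reserve_range */
			       &driver_gpuva_fn_ops);

	/* ...use the manager... */

	/* The destroy path only removes the reserved node if one was
	 * set up, hence the va.range check on line 712 above. */
	drm_gpuva_manager_destroy(&mgr);
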
752 * Insert a &drm_gpuva with a given address and range into a
766 u64 range = va->va.range;
768 if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
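
A hedged sketch of inserting a mapping, matching the validity check above; allocation and error policy are the hypothetical caller's:

	struct drm_gpuva *va;
	int ret;

	va = kzalloc(sizeof(*va), GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;

	/* Fails for ranges that overflow, fall outside the managed
	 * space, hit the kernel node or are already occupied. */
	ret = drm_gpuva_insert(mgr, va);
	if (ret)
		kfree(va);
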
855 * drm_gpuva_find_first() - find the first &drm_gpuva in the given range
858 * @range: the &drm_gpuva's range
860 * Returns: the first &drm_gpuva within the given range
864 u64 addr, u64 range)
866 u64 last = addr + range - 1;
876 * @range: the &drm_gpuva's range
878 * Returns: the &drm_gpuva at a given @addr and with a given @range
882 u64 addr, u64 range)
886 va = drm_gpuva_find_first(mgr, addr, range);
891 va->va.range != range)
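
The two lookups differ exactly as the fragments suggest: drm_gpuva_find_first() returns any mapping overlapping the interval, while drm_gpuva_find() additionally insists on an exact address and range match. A short sketch:

	/* Any mapping overlapping [addr, addr + range)? */
	struct drm_gpuva *first = drm_gpuva_find_first(mgr, addr, range);

	/* Only a mapping with exactly this addr and this range: */
	struct drm_gpuva *exact = drm_gpuva_find(mgr, addr, range);
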
948 * @mgr: the &drm_gpuva_manager to check the range for
949 * @addr: the start address of the range
950 * @range: the range of the interval
955 drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
957 return !drm_gpuva_find_first(mgr, addr, range);
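
Caller-side this is a convenience predicate; a sketch, where the -EBUSY policy is a hypothetical choice:

	/* True iff no mapping overlaps [addr, addr + range). */
	if (!drm_gpuva_interval_empty(mgr, addr, range))
		return -EBUSY;
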
1028 u64 addr, u64 range,
1035 op.map.va.range = range;
1090 u64 range = va->va.range;
1091 u64 end = addr + range;
1115 .va.range = range - req_range,
1133 .va.range = ls_range,
1160 .va.range = end - req_end,
1193 .va.range = end - req_end,
1233 u64 range = va->va.range;
1234 u64 end = addr + range;
1238 prev.va.range = req_addr - addr;
1247 next.va.range = end - req_end;
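
Plugging hypothetical numbers into the split arithmetic above:

	u64 addr = 0x0000, range = 0x3000;		/* existent mapping */
	u64 req_addr = 0x1000, req_range = 0x1000;	/* unmap request */
	u64 end = addr + range;				/* 0x3000 */
	u64 req_end = req_addr + req_range;		/* 0x2000 */

	u64 prev_range = req_addr - addr;	/* 0x1000: prev keeps [0x0000, 0x1000) */
	u64 next_range = end - req_end;		/* 0x1000: next keeps [0x2000, 0x3000) */
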
1277 * @req_range: the range of the new mapping
1282 * This function iterates the given range of the GPU VA space. It utilizes the
1327 * @req_addr: the start address of the range to unmap
1328 * @req_range: the range of the mappings to unmap
1330 * This function iterates the given range of the GPU VA space. It utilizes the
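
For the callback flavour these two functions implement, a minimal sketch of the hooks a driver registers; the driver_* names are hypothetical:

	static int driver_sm_step_map(struct drm_gpuva_op *op, void *priv)
	{
		/* priv is the cookie passed to drm_gpuva_sm_map();
		 * program the page tables for op->map here. */
		return 0;
	}

	/* sm_step_remap and sm_step_unmap are analogous. */
	static const struct drm_gpuva_fn_ops driver_gpuva_fn_ops = {
		.sm_step_map	= driver_sm_step_map,
		.sm_step_remap	= driver_sm_step_remap,
		.sm_step_unmap	= driver_sm_step_unmap,
	};

	/* Later, in the bind path: */
	ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
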
1460 * @req_range: the range of the new mapping
1527 * @req_addr: the start address of the range to unmap
1528 * @req_range: the range of the mappings to unmap
1531 * required, splitting of the mappings overlapping the unmap range.
1540 * Note that before calling this function again with another range to unmap it
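
On the caller side, the note above amounts to freeing one ops list before requesting the next, roughly:

	ops = drm_gpuva_sm_unmap_ops_create(mgr, req_addr, req_range);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* ...process the unmap/remap operations... */

	/* Destroy this list before creating ops for another range. */
	drm_gpuva_ops_free(mgr, ops);
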
1588 * @addr: the start address of the range to prefetch
1589 * @range: the range of the mappings to prefetch
1605 u64 addr, u64 range)
1610 u64 end = addr + range;
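
Finally, a hedged sketch of the prefetch variant, which creates one operation per mapping found within [addr, addr + range):

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuva_prefetch_ops_create(mgr, addr, range);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		/* op->op == DRM_GPUVA_OP_PREFETCH; op->prefetch.va
		 * points at the mapping to prefetch. */
	}

	drm_gpuva_ops_free(mgr, ops);
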