Lines Matching defs:range
69 * - some range on the mm_struct is being invalidated
75 * - some range on the mm_struct is being invalidated
95 const struct mmu_notifier_range *range,
103 node = interval_tree_iter_first(&subscriptions->itree, range->start,
104 range->end - 1);
118 const struct mmu_notifier_range *range)
123 range->start, range->end - 1);
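The two helpers hit above (lines 95-123) walk the interval tree of subscriptions that overlap the invalidated range; the "range->end - 1" is needed because the generic interval_tree API takes an inclusive last address while mmu_notifier ranges are half-open. A minimal sketch of that iteration pattern (the helper name walk_overlaps and the empty loop body are illustrative only, not code from mmu_notifier.c):

#include <linux/interval_tree.h>

/*
 * Sketch: visit every interval-tree node overlapping the half-open
 * range [start, end).  interval_tree_iter_first/next take an
 * inclusive 'last' address, hence "end - 1".
 */
static void walk_overlaps(struct rb_root_cached *itree,
			  unsigned long start, unsigned long end)
{
	struct interval_tree_node *node;

	for (node = interval_tree_iter_first(itree, start, end - 1); node;
	     node = interval_tree_iter_next(node, start, end - 1)) {
		/* node->start .. node->last intersects [start, end) */
	}
}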
168 * range
172 * collision-retry scheme similar to seqcount for the VA range under
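The comment excerpted at lines 168-172 is the kernel-doc for mmu_interval_read_begin(), which implements a collision-retry scheme similar to a seqcount: readers sample a sequence number, do their work, and retry if an invalidation of the VA range raced with them. A hedged sketch of how a driver typically consumes this, assuming a hypothetical driver mutex my_mirror_lock and helper my_install_sptes() (neither is part of the mmu_notifier API):

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mirror_lock);	/* hypothetical driver lock */

/* Hypothetical helper: program device page tables for [start, end). */
static void my_install_sptes(unsigned long start, unsigned long end)
{
}

static int my_fault_range(struct mmu_interval_notifier *interval_sub,
			  unsigned long start, unsigned long end)
{
	unsigned long seq;

	do {
		/* Sample the invalidation sequence for this VA range. */
		seq = mmu_interval_read_begin(interval_sub);

		/* ... fault/walk the CPU page tables outside the lock ... */

		mutex_lock(&my_mirror_lock);
		if (mmu_interval_read_retry(interval_sub, seq)) {
			/* An invalidation collided with us; start over. */
			mutex_unlock(&my_mirror_lock);
			continue;
		}
		/* No collision: safe to establish SPTEs under the lock. */
		my_install_sptes(start, end);
		mutex_unlock(&my_mirror_lock);
		return 0;
	} while (true);
}

The same my_mirror_lock must be held by the driver's invalidate callback around mmu_interval_set_seq(); that pairing is what makes the retry check meaningful.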
264 struct mmu_notifier_range range = {
276 mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
278 interval_sub = mn_itree_inv_next(interval_sub, &range)) {
279 ret = interval_sub->ops->invalidate(interval_sub, &range,
445 const struct mmu_notifier_range *range)
451 mn_itree_inv_start_range(subscriptions, range, &cur_seq);
453 interval_sub = mn_itree_inv_next(interval_sub, range)) {
456 ret = interval_sub->ops->invalidate(interval_sub, range,
459 if (WARN_ON(mmu_notifier_range_blockable(range)))
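Lines 445-459 show the core side of an interval invalidation: each overlapping subscription's ->invalidate() callback is called, and the WARN_ON fires if a callback refuses a blockable range, so returning false is only allowed when the range is non-blockable. A sketch of what such a callback commonly looks like on the driver side, reusing the hypothetical my_mirror_lock mutex from the previous sketch:

static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	/*
	 * A non-blockable range may not sleep on the lock; failing with
	 * 'false' is legal only in that case (see the WARN_ON above).
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&my_mirror_lock);
	else if (!mutex_trylock(&my_mirror_lock))
		return false;

	/*
	 * Publish the new sequence under the driver lock so concurrent
	 * mmu_interval_read_retry() callers notice the collision, then
	 * tear down device mappings for [range->start, range->end).
	 */
	mmu_interval_set_seq(interval_sub, cur_seq);
	/* ... invalidate SPTEs ... */

	mutex_unlock(&my_mirror_lock);
	return true;
}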
477 struct mmu_notifier_range *range)
491 if (!mmu_notifier_range_blockable(range))
493 _ret = ops->invalidate_range_start(subscription, range);
494 if (!mmu_notifier_range_blockable(range))
499 !mmu_notifier_range_blockable(range) ?
502 WARN_ON(mmu_notifier_range_blockable(range) ||
528 range);
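Lines 477-528 are the hlist (classic mmu_notifier) path: every registered struct mmu_notifier has its ->invalidate_range_start() called, and a callback may only fail when the range is non-blockable, which is what the repeated mmu_notifier_range_blockable() checks and the WARN_ON enforce. A minimal sketch of such a callback (the my_* names are hypothetical):

static int my_invalidate_range_start(struct mmu_notifier *subscription,
				     const struct mmu_notifier_range *range)
{
	/* Only a non-blockable invalidation may be refused with -EAGAIN. */
	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;

	/* ... shoot down secondary TLB entries for [start, end) ... */
	return 0;
}

static const struct mmu_notifier_ops my_notifier_ops = {
	.invalidate_range_start	= my_invalidate_range_start,
	/* .invalidate_range_end, .release, ... as needed */
};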
536 int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
539 range->mm->notifier_subscriptions;
543 ret = mn_itree_invalidate(subscriptions, range);
548 return mn_hlist_invalidate_range_start(subscriptions, range);
554 struct mmu_notifier_range *range)
563 if (!mmu_notifier_range_blockable(range))
566 range);
567 if (!mmu_notifier_range_blockable(range))
574 void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
577 range->mm->notifier_subscriptions;
584 mn_hlist_invalidate_end(subscriptions, range);
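Lines 536-584 are the entry points that core mm code uses to bracket a PTE-modifying operation. A hedged sketch of the calling convention, building the range with designated initializers like the hit at line 264; in-tree callers normally go through mmu_notifier_range_init(), whose argument list has changed between kernel versions, so the my_zap_range() helper below is illustrative only:

#include <linux/mmu_notifier.h>

static void my_zap_range(struct mm_struct *mm,
			 unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range = {
		.event	= MMU_NOTIFY_CLEAR,
		.mm	= mm,
		.start	= start,
		.end	= end,
		.flags	= MMU_NOTIFIER_RANGE_BLOCKABLE,
	};

	mmu_notifier_invalidate_range_start(&range);
	/* ... clear or replace the PTEs in [start, end) ... */
	mmu_notifier_invalidate_range_end(&range);
}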
933 * new range is included.
975 * @length: Length of the range to monitor
981 * whenever an event that intersects with the given range occurs.
985 * mmu_interval_read_begin() to establish SPTEs for this range.
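The kernel-doc excerpted at lines 975-985 belongs to mmu_interval_notifier_insert(), which registers a subscription for a single VA range. A sketch of registering the my_invalidate() callback from the earlier sketch (the my_mirror_register() wrapper is hypothetical):

static const struct mmu_interval_notifier_ops my_interval_ops = {
	.invalidate = my_invalidate,
};

static int my_mirror_register(struct mmu_interval_notifier *interval_sub,
			      struct mm_struct *mm, unsigned long start,
			      unsigned long length)
{
	int ret;

	ret = mmu_interval_notifier_insert(interval_sub, mm, start, length,
					   &my_interval_ops);
	if (ret)
		return ret;

	/*
	 * From here on my_invalidate() runs for any event intersecting
	 * [start, start + length), and faults must go through the
	 * mmu_interval_read_begin()/mmu_interval_read_retry() loop before
	 * establishing SPTEs.
	 */
	return 0;
}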