Lines Matching defs:subscriptions (mm/mmu_notifier.c)
68 * - subscriptions->invalidate_seq & 1 == True (odd)
74 * - subscriptions->invalidate_seq & 1 == False (even)
87 mn_itree_is_invalidating(struct mmu_notifier_subscriptions *subscriptions)
89 lockdep_assert_held(&subscriptions->lock);
90 return subscriptions->invalidate_seq & 1;
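
Lines 68/74 together with mn_itree_is_invalidating() spell out the low-bit convention: invalidate_seq is odd while some overlapping invalidation is in flight and even when idle, with three operations on it (seq |= 1 to begin, seq++ to release, seq & 1 to test). Below is a minimal userspace model of that parity protocol; the variable names mirror the kernel's but nothing here is kernel API, and in the kernel every transition happens under subscriptions->lock.

/* Userspace model of the invalidate_seq parity protocol; illustrative only. */
#include <assert.h>
#include <stdbool.h>

static unsigned long invalidate_seq = 2;	/* even: idle (the kernel's init value) */

static bool is_invalidating(void)		/* models mn_itree_is_invalidating() */
{
	return invalidate_seq & 1;
}

int main(void)
{
	assert(!is_invalidating());	/* even: no collision area is being invalidated */
	invalidate_seq |= 1;		/* first overlapping range begins: go odd */
	assert(is_invalidating());
	invalidate_seq++;		/* last range ends: back to even, new generation */
	assert(!is_invalidating());
	return 0;
}
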
94 mn_itree_inv_start_range(struct mmu_notifier_subscriptions *subscriptions,
101 spin_lock(&subscriptions->lock);
102 subscriptions->active_invalidate_ranges++;
103 node = interval_tree_iter_first(&subscriptions->itree, range->start,
106 subscriptions->invalidate_seq |= 1;
111 *seq = subscriptions->invalidate_seq;
112 spin_unlock(&subscriptions->lock);
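
Under the spinlock, mn_itree_inv_start_range() bumps the writer count, probes the interval tree for a first overlap, sets the odd bit only if a subscription actually collides with the range, and snapshots the sequence for the caller. A condensed userspace model of that control flow follows; first_overlap() is an invented stand-in for interval_tree_iter_first(), not a real helper.

/* Userspace model of mn_itree_inv_start_range()'s locking pattern. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long invalidate_seq = 2;
static unsigned long active_invalidate_ranges;

static bool first_overlap(unsigned long start, unsigned long last)
{
	(void)start; (void)last;
	return true;		/* pretend some subscription overlaps the range */
}

static void inv_start_range(unsigned long start, unsigned long last,
			    unsigned long *seq)
{
	pthread_mutex_lock(&lock);
	active_invalidate_ranges++;		/* one more active writer */
	if (first_overlap(start, last))
		invalidate_seq |= 1;		/* collision: go odd */
	*seq = invalidate_seq;			/* snapshot handed to the caller */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	unsigned long seq;

	inv_start_range(0, ~0UL, &seq);
	return !(seq & 1);	/* expect odd: invalidating */
}
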
129 static void mn_itree_inv_end(struct mmu_notifier_subscriptions *subscriptions)
134 spin_lock(&subscriptions->lock);
135 if (--subscriptions->active_invalidate_ranges ||
136 !mn_itree_is_invalidating(subscriptions)) {
137 spin_unlock(&subscriptions->lock);
142 subscriptions->invalidate_seq++;
151 &subscriptions->deferred_list,
155 &subscriptions->itree);
158 &subscriptions->itree);
161 spin_unlock(&subscriptions->lock);
163 wake_up_all(&subscriptions->wq);
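
mn_itree_inv_end() is the release side: only the last active range that finds the odd bit set publishes a new even generation and wakes sleepers on subscriptions->wq (the kernel also splices deferred_list insertions/removals into the itree at this point, per lines 151-158). A userspace model with a condition variable standing in for the wait queue:

/* Userspace model of mn_itree_inv_end(); deferred_list handling elided. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static unsigned long invalidate_seq = 3;	/* odd: mid-invalidation */
static unsigned long active_invalidate_ranges = 1;

static void inv_end(void)
{
	pthread_mutex_lock(&lock);
	if (--active_invalidate_ranges || !(invalidate_seq & 1)) {
		pthread_mutex_unlock(&lock);
		return;				/* other writers still active */
	}
	invalidate_seq++;			/* last writer: publish an even generation */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&wq);		/* kernel: wake_up_all(&subscriptions->wq) */
}

int main(void)
{
	inv_end();
	return invalidate_seq & 1;		/* expect even now */
}
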
188 struct mmu_notifier_subscriptions *subscriptions =
198 * subscriptions seq, then it is currently between
208 * seq = ++subscriptions->invalidate_seq
220 * seq = ++subscriptions->invalidate_seq
232 spin_lock(&subscriptions->lock);
235 is_invalidating = seq == subscriptions->invalidate_seq;
236 spin_unlock(&subscriptions->lock);
243 * subscriptions->invalidate_seq is even in the idle state.
248 wait_event(subscriptions->wq,
249 READ_ONCE(subscriptions->invalidate_seq) != seq);
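
The read side (mmu_interval_read_begin(), per these fragments) compares the subscription's stamped seq with the live one under the lock and, on collision, sleeps on subscriptions->wq until invalidate_seq moves past it. A userspace model of that wait/wake handshake, with the writer thread playing the part of mn_itree_inv_end():

/* Userspace model of mmu_interval_read_begin()'s collision wait. */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static unsigned long invalidate_seq = 3;	/* odd: invalidation running */
static unsigned long sub_snapshot = 3;		/* the writer stamped this reader */

static void *writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	invalidate_seq++;			/* invalidation finished: 3 -> 4 */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&wq);		/* kernel: wake_up_all() */
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long seq;

	pthread_mutex_lock(&lock);
	seq = sub_snapshot;			/* READ_ONCE(interval_sub->invalidate_seq) */
	pthread_create(&t, NULL, writer, NULL);
	while (invalidate_seq == seq)		/* kernel: wait_event(...) */
		pthread_cond_wait(&wq, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}

Drivers consume this through the begin/retry pair: sample the seq with mmu_interval_read_begin(), refetch the values, and redo the work if mmu_interval_read_retry() reports that the seq moved underneath them.
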
261 static void mn_itree_release(struct mmu_notifier_subscriptions *subscriptions,
276 mn_itree_inv_start_range(subscriptions, &range, &cur_seq);
284 mn_itree_inv_end(subscriptions);
299 static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
310 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
321 spin_lock(&subscriptions->lock);
322 while (unlikely(!hlist_empty(&subscriptions->list))) {
323 subscription = hlist_entry(subscriptions->list.first,
333 spin_unlock(&subscriptions->lock);
350 struct mmu_notifier_subscriptions *subscriptions =
353 if (subscriptions->has_itree)
354 mn_itree_release(subscriptions, mm);
356 if (!hlist_empty(&subscriptions->list))
357 mn_hlist_release(subscriptions, mm);
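
On teardown, __mmu_notifier_release() fans out to both containers (lines 353-357); mn_hlist_release()'s second pass (lines 321-333) simply unhashes every remaining entry under the lock. A userspace model of that drain loop, with a singly linked list standing in for the hlist:

/* Userspace model of the mn_hlist_release() unhash loop. */
#include <stddef.h>
#include <pthread.h>

struct subscription {
	struct subscription *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct subscription *list;

static void release_all(void)
{
	pthread_mutex_lock(&lock);
	while (list) {				/* while (!hlist_empty(&subscriptions->list)) */
		struct subscription *sub = list;	/* hlist_entry(list.first, ...) */
		list = sub->next;		/* hlist_del_init_rcu(&subscription->hlist) */
		sub->next = NULL;		/* unhashed; the kernel frees entries later,
						 * after an SRCU grace period */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct subscription a = { 0 }, b = { &a };

	list = &b;
	release_all();
	return list != NULL;			/* expect empty */
}
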
444 static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions,
451 mn_itree_inv_start_range(subscriptions, range, &cur_seq);
471 mn_itree_inv_end(subscriptions);
476 struct mmu_notifier_subscriptions *subscriptions,
484 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
522 hlist_for_each_entry_rcu(subscription, &subscriptions->list,
538 struct mmu_notifier_subscriptions *subscriptions =
542 if (subscriptions->has_itree) {
543 ret = mn_itree_invalidate(subscriptions, range);
547 if (!hlist_empty(&subscriptions->list))
548 return mn_hlist_invalidate_range_start(subscriptions, range);
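
__mmu_notifier_invalidate_range_start() dispatches in a fixed order: the interval-tree side runs first and can fail (e.g. a nonblocking invalidation), and the hlist side is notified only if that succeeded. A small model of the early-out shape; itree_invalidate() and hlist_range_start() are invented stand-ins for the two sides.

/* Model of __mmu_notifier_invalidate_range_start()'s two-phase dispatch. */
#include <stdbool.h>

static bool has_itree = true;
static bool hlist_nonempty = true;

static int itree_invalidate(void)  { return 0; }
static int hlist_range_start(void) { return 0; }

static int invalidate_range_start(void)
{
	int ret;

	if (has_itree) {
		ret = itree_invalidate();
		if (ret)
			return ret;	/* itree side failed: abort the whole start */
	}
	if (hlist_nonempty)
		return hlist_range_start();
	return 0;
}

int main(void) { return invalidate_range_start(); }
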
553 mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
560 hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
595 struct mmu_notifier_subscriptions *subscriptions =
599 if (subscriptions->has_itree)
600 mn_itree_inv_end(subscriptions);
602 if (!hlist_empty(&subscriptions->list))
603 mn_hlist_invalidate_end(subscriptions, range, only_end);
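
For reference, the callbacks these dispatchers ultimately invoke live in a driver's mmu_notifier_ops. A compile-only kernel-style sketch wiring invalidate_range_start/end; the demo_* names are invented, the bodies are placeholders, and a real driver would pass demo_ops to mmu_notifier_register().

/* Hedged sketch of the driver side these dispatchers call into. */
#include <linux/mmu_notifier.h>

static int demo_range_start(struct mmu_notifier *subscription,
			    const struct mmu_notifier_range *range)
{
	/* Stop using [range->start, range->end) and drop secondary TLB
	 * entries; may sleep unless !mmu_notifier_range_blockable(range). */
	return 0;
}

static void demo_range_end(struct mmu_notifier *subscription,
			   const struct mmu_notifier_range *range)
{
	/* The PTEs are stable again; reads may repopulate mappings. */
}

static const struct mmu_notifier_ops demo_ops = {
	.invalidate_range_start	= demo_range_start,
	.invalidate_range_end	= demo_range_end,
};
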
632 struct mmu_notifier_subscriptions *subscriptions = NULL;
651 subscriptions = kzalloc(
653 if (!subscriptions)
656 INIT_HLIST_HEAD(&subscriptions->list);
657 spin_lock_init(&subscriptions->lock);
658 subscriptions->invalidate_seq = 2;
659 subscriptions->itree = RB_ROOT_CACHED;
660 init_waitqueue_head(&subscriptions->wq);
661 INIT_HLIST_HEAD(&subscriptions->deferred_list);
684 if (subscriptions)
685 smp_store_release(&mm->notifier_subscriptions, subscriptions);
705 kfree(subscriptions);
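
Registration (lines 651-685) initializes the structure fully, seeds invalidate_seq with 2 (even, i.e. idle, and chosen so the odd invalidate_seq - 1 stamp used at insert time is valid from the start), and only then publishes the pointer with smp_store_release(), pairing with the smp_load_acquire() at line 1012. A userspace model of that publish/lookup pairing using C11 atomics; names are illustrative.

/* Userspace model of the release/acquire pointer publication. */
#include <stdatomic.h>
#include <stdlib.h>

struct subscriptions {
	unsigned long invalidate_seq;
};

static _Atomic(struct subscriptions *) notifier_subscriptions;

static int publish(void)
{
	struct subscriptions *subs = calloc(1, sizeof(*subs));

	if (!subs)
		return -1;
	subs->invalidate_seq = 2;	/* fully initialize before publishing */
	/* kernel: smp_store_release(&mm->notifier_subscriptions, subscriptions) */
	atomic_store_explicit(&notifier_subscriptions, subs, memory_order_release);
	return 0;
}

static struct subscriptions *lookup(void)
{
	/* kernel: smp_load_acquire(&mm->notifier_subscriptions); the acquire
	 * guarantees a non-NULL result has visible, initialized fields */
	return atomic_load_explicit(&notifier_subscriptions, memory_order_acquire);
}

int main(void)
{
	if (publish())
		return 1;
	return lookup()->invalidate_seq != 2;
}
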
922 struct mmu_notifier_subscriptions *subscriptions, unsigned long start,
958 spin_lock(&subscriptions->lock);
959 if (subscriptions->active_invalidate_ranges) {
960 if (mn_itree_is_invalidating(subscriptions))
962 &subscriptions->deferred_list);
964 subscriptions->invalidate_seq |= 1;
966 &subscriptions->itree);
968 interval_sub->invalidate_seq = subscriptions->invalidate_seq;
970 WARN_ON(mn_itree_is_invalidating(subscriptions));
978 subscriptions->invalidate_seq - 1;
980 &subscriptions->itree);
982 spin_unlock(&subscriptions->lock);
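
__mmu_interval_notifier_insert() picks the new subscription's starting seq under the lock: if an overlapping invalidation holds the itree (odd seq) the insert is queued on deferred_list; if ranges are active but the tree is still mutable, the odd bit is set first, apparently so readers of the new node wait out invalidations already in flight; when idle the node is stamped invalidate_seq - 1, an odd value that can never equal the live even seq. A userspace model of just that stamping decision, with the tree operations elided:

/* Userspace model of the insert-time seq stamping. */
#include <assert.h>
#include <stdbool.h>

static unsigned long invalidate_seq = 2;	/* even: idle */
static unsigned long active_invalidate_ranges;
static bool deferred;

static unsigned long stamp_on_insert(void)
{
	/* kernel: runs under subscriptions->lock */
	if (active_invalidate_ranges) {
		if (invalidate_seq & 1)		/* mn_itree_is_invalidating() */
			deferred = true;	/* itree frozen: queue on deferred_list */
		else
			invalidate_seq |= 1;	/* interval_tree_insert() happens here */
		return invalidate_seq;		/* == live seq: readers see "invalidating" */
	}
	/* idle: interval_tree_insert() with an odd stamp that cannot equal
	 * the live even seq, so readers never wait spuriously */
	return invalidate_seq - 1;
}

int main(void)
{
	unsigned long s;

	s = stamp_on_insert();			/* idle path */
	assert(s == 1 && !deferred);
	active_invalidate_ranges = 1;
	s = stamp_on_insert();			/* active, even: sets the odd bit */
	assert(s == 3 && !deferred);
	s = stamp_on_insert();			/* active, odd: insert is deferred */
	assert(s == 3 && deferred);
	return 0;
}
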
1007 struct mmu_notifier_subscriptions *subscriptions;
1012 subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
1013 if (!subscriptions || !subscriptions->has_itree) {
1017 subscriptions = mm->notifier_subscriptions;
1019 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1029 struct mmu_notifier_subscriptions *subscriptions =
1035 if (!subscriptions || !subscriptions->has_itree) {
1039 subscriptions = mm->notifier_subscriptions;
1041 return __mmu_interval_notifier_insert(interval_sub, mm, subscriptions,
1047 mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
1052 spin_lock(&subscriptions->lock);
1053 ret = subscriptions->invalidate_seq != seq;
1054 spin_unlock(&subscriptions->lock);
1071 struct mmu_notifier_subscriptions *subscriptions =
1077 spin_lock(&subscriptions->lock);
1078 if (mn_itree_is_invalidating(subscriptions)) {
1087 &subscriptions->deferred_list);
1088 seq = subscriptions->invalidate_seq;
1093 &subscriptions->itree);
1095 spin_unlock(&subscriptions->lock);
1104 wait_event(subscriptions->wq,
1105 mmu_interval_seq_released(subscriptions, seq));
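
Removal mirrors insertion: when idle the node leaves the tree immediately; mid-invalidation it is queued on deferred_list and the caller wait_event()s until mmu_interval_seq_released() sees invalidate_seq move past the recorded value (lines 1104-1105). A single-threaded userspace model of the two paths, with mn_itree_inv_end()'s generation bump simulated inline:

/* Userspace model of mmu_interval_notifier_remove()'s deferred path. */
#include <assert.h>
#include <stdbool.h>

static unsigned long invalidate_seq = 3;	/* odd: invalidation running */
static bool in_tree = true, deferred;

static unsigned long remove_begin(void)
{
	/* kernel: under subscriptions->lock */
	if (invalidate_seq & 1) {		/* mn_itree_is_invalidating() */
		deferred = true;		/* itree frozen: defer the erase */
		return invalidate_seq;		/* caller must wait this seq out */
	}
	in_tree = false;			/* idle: erase from the tree now */
	return 0;
}

static bool seq_released(unsigned long seq)	/* mmu_interval_seq_released() */
{
	return invalidate_seq != seq;
}

int main(void)
{
	unsigned long seq = remove_begin();

	assert(deferred && seq == 3);
	/* ... later, mn_itree_inv_end() sweeps deferred_list ... */
	in_tree = false;
	invalidate_seq++;			/* 3 -> 4: generation released */
	assert(seq_released(seq));		/* kernel: wait_event() returns */
	return 0;
}
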