Lines Matching defs:handler (drivers/infiniband/hw/hfi1/mmu_rb.c)
86 struct mmu_rb_handler **handler)
112 *handler = h;
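
Lines 86 and 112 are the tail of hfi1_mmu_rb_register(): the handler is allocated inside the driver and handed back through the double pointer. The sketch below shows what a caller-side registration could look like; the struct mmu_rb_ops name, the bool return type of the filter op, and the exact parameter order of hfi1_mmu_rb_register() are assumptions taken from the driver header rather than from this listing.

#include <linux/types.h>
#include <linux/workqueue.h>
#include "mmu_rb.h"		/* hfi1 driver-local header */

/* Illustrative filter: accept only an exact match on the cached range. */
static bool my_filter(struct mmu_rb_node *node, unsigned long addr,
		      unsigned long len)
{
	return node->addr == addr && node->len == len;
}

/* Illustrative remove: unpin pages / free the caller's container of mnode. */
static void my_remove(void *ops_arg, struct mmu_rb_node *mnode)
{
}

static struct mmu_rb_ops my_ops = {
	.filter = my_filter,	/* may be left NULL, see line 205 */
	.remove = my_remove,
	/* .evict sketched further below */
};

static int my_setup(void *my_ctx, struct workqueue_struct *wq,
		    struct mmu_rb_handler **handler)
{
	/* On success, *handler stays valid until hfi1_mmu_rb_unregister(). */
	return hfi1_mmu_rb_register(my_ctx, &my_ops, wq, handler);
}
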
116 void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
124 mmgrab(handler->mn.mm);
127 mmu_notifier_unregister(&handler->mn, handler->mn.mm);
130 * Make sure the wq delete handler is finished running. It will not
133 flush_work(&handler->del_work);
137 spin_lock_irqsave(&handler->lock, flags);
138 while ((node = rb_first_cached(&handler->root))) {
140 rb_erase_cached(node, &handler->root);
144 spin_unlock_irqrestore(&handler->lock, flags);
153 mmdrop(handler->mn.mm);
155 kfree(handler);
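
Lines 116-155 are the teardown path: the mmu notifier is unregistered first, the deferred-delete work is flushed, and the tree is then drained under handler->lock, with the node removal callbacks running only after the lock is dropped (they may sleep, per the comment at lines 222-223). A caller-side sketch of pairing this with the workqueue the caller supplied; the caller owning that workqueue is an assumption.

#include <linux/workqueue.h>
#include "mmu_rb.h"

static void my_teardown(struct mmu_rb_handler *handler,
			struct workqueue_struct *wq)
{
	/* Runs ops->remove() on every remaining node and frees the
	 * handler itself (line 155), so only caller-owned state is
	 * left to tear down afterwards. */
	hfi1_mmu_rb_unregister(handler);
	destroy_workqueue(wq);
}
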
158 int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
167 if (current->mm != handler->mn.mm)
170 spin_lock_irqsave(&handler->lock, flags);
171 node = __mmu_rb_search(handler, mnode->addr, mnode->len);
176 __mmu_int_rb_insert(mnode, &handler->root);
177 list_add_tail(&mnode->list, &handler->lru_list);
178 mnode->handler = handler;
180 spin_unlock_irqrestore(&handler->lock, flags);
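
hfi1_mmu_rb_insert() rejects calls from a different mm than the one captured at registration (line 167) and rejects ranges that overlap an existing node (the search on line 171); on success the node joins both the interval tree and the LRU list. A caller-side sketch; embedding struct mmu_rb_node in a larger cache entry is an assumption about how callers typically use it.

#include "mmu_rb.h"

/* Hypothetical cache entry wrapping the rb node. */
struct my_cache_entry {
	struct mmu_rb_node rb;	/* addr, len, list, handler live in here */
	struct page **pages;	/* whatever the caller is actually caching */
};

static int my_cache_insert(struct mmu_rb_handler *handler,
			   struct my_cache_entry *entry,
			   unsigned long addr, unsigned long len)
{
	entry->rb.addr = addr;
	entry->rb.len = len;

	/* Non-zero return: wrong mm, or the range is already cached. */
	return hfi1_mmu_rb_insert(handler, &entry->rb);
}
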
184 /* Caller must hold handler lock */
185 struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
191 node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
193 list_move_tail(&node->list, &handler->lru_list);
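
As the comment on line 184 says, hfi1_mmu_rb_get_first() expects handler->lock to be held; it returns the first node overlapping [addr, addr + len) and refreshes that node's LRU position. A lookup sketch; taking a reference before dropping the lock, and the refcount field name, are assumptions (the listing itself never shows the node's kref).

#include <linux/kref.h>
#include <linux/spinlock.h>
#include "mmu_rb.h"

static struct mmu_rb_node *my_lookup(struct mmu_rb_handler *handler,
				     unsigned long addr, unsigned long len)
{
	struct mmu_rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&handler->lock, flags);
	node = hfi1_mmu_rb_get_first(handler, addr, len);
	if (node)
		kref_get(&node->refcount);	/* assumed kref member */
	spin_unlock_irqrestore(&handler->lock, flags);
	return node;
}
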
197 /* Caller must hold handler lock */
198 static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
205 if (!handler->ops->filter) {
206 node = __mmu_int_rb_iter_first(&handler->root, addr,
209 for (node = __mmu_int_rb_iter_first(&handler->root, addr,
214 if (handler->ops->filter(node, addr, len))
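
__mmu_rb_search() takes the first interval-tree hit when no filter op was registered (line 205); otherwise it walks every node overlapping the range until the filter accepts one (line 214). A sketch of an alternative, containment-style filter (the exact-match variant appears in the registration sketch above; both are illustrations, not the driver's own policies).

#include <linux/types.h>
#include "mmu_rb.h"

/* Illustrative filter: accept a node only if it fully covers the
 * requested [addr, addr + len) range. */
static bool my_contains_filter(struct mmu_rb_node *node, unsigned long addr,
			       unsigned long len)
{
	return node->addr <= addr &&
	       node->addr + node->len >= addr + len;
}
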
222 * Must NOT call while holding mnode->handler->lock.
223 * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
230 mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
233 /* Caller must hold mnode->handler->lock */
238 list_move(&mnode->list, &mnode->handler->del_list);
239 queue_work(mnode->handler->wq, &mnode->handler->del_work);
244 * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
245 * handler->del_work on handler->wq.
246 * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
247 * Acquires mmu_rb_node->handler->lock; do not call while already holding
248 * handler->lock.
254 struct mmu_rb_handler *handler = mnode->handler;
257 spin_lock_irqsave(&handler->lock, flags);
258 list_move(&mnode->list, &mnode->handler->del_list);
259 spin_unlock_irqrestore(&handler->lock, flags);
260 queue_work(handler->wq, &handler->del_work);
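
Lines 222-260 are the reference-count release paths: one variant calls ops->remove() directly and therefore must not run under handler->lock (lines 222-230), one is for callers that already hold the lock and only moves the node to del_list and queues the work item (lines 233-239), and the documented one at lines 244-260 takes the lock itself. A sketch of a caller dropping its reference outside the lock; the kref field name and the release-function name hfi1_mmu_rb_release are assumptions based on the comment block, not names visible in this listing.

#include <linux/kref.h>
#include "mmu_rb.h"

static void my_put_node(struct mmu_rb_node *node)
{
	/* Not holding handler->lock here, so the lock-acquiring release
	 * (lines 254-260) is the right callback; it defers the actual
	 * ops->remove() to handler->wq because remove may sleep while
	 * handler->lock is a spinlock (comment at lines 222-223). */
	kref_put(&node->refcount, hfi1_mmu_rb_release);
}
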
263 void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
270 if (current->mm != handler->mn.mm)
275 spin_lock_irqsave(&handler->lock, flags);
276 list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
281 if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
283 __mmu_int_rb_remove(rbnode, &handler->root);
290 spin_unlock_irqrestore(&handler->lock, flags);
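
hfi1_mmu_rb_evict() walks the LRU list under handler->lock and asks the evict op whether each node can go; a non-zero return removes the node from the tree (line 283). In the upstream driver the callback also gets a bool *stop it can set to end the walk early; line 281 is truncated before that argument, so treat the full signature as an assumption. A sketch of an evict callback that frees space up to a target:

#include <linux/types.h>
#include "mmu_rb.h"

/* Hypothetical per-call eviction bookkeeping passed as evict_arg. */
struct my_evict_data {
	unsigned long target;	/* bytes the caller wants released */
	unsigned long freed;
};

/* Runs under handler->lock (lines 275-290), so it must not sleep. */
static int my_evict(void *ops_arg, struct mmu_rb_node *mnode,
		    void *evict_arg, bool *stop)
{
	struct my_evict_data *data = evict_arg;

	data->freed += mnode->len;
	if (data->freed >= data->target)
		*stop = true;		/* enough reclaimed, end the walk */
	return 1;			/* non-zero: evict this node */
}
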
300 struct mmu_rb_handler *handler =
302 struct rb_root_cached *root = &handler->root;
306 spin_lock_irqsave(&handler->lock, flags);
318 spin_unlock_irqrestore(&handler->lock, flags);
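
Lines 300-318 are the invalidate_range_start notifier callback: under handler->lock it walks the interval tree for the invalidated range and detaches the nodes it finds. The callback reaches the handler through the struct mmu_notifier embedded in it (handler->mn, also visible at lines 124-127). A sketch of how such a callback gets wired to the mm; the ops-table name, the stub callback name, and doing this inside hfi1_mmu_rb_register() are assumptions, while mmu_notifier_register()/mmu_notifier_unregister() are the standard kernel interfaces.

#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include "mmu_rb.h"

/* Stands in for the callback at lines 300-318 (its real name is not
 * shown in this listing). */
static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	/* Walk container_of(mn, struct mmu_rb_handler, mn)->root for
	 * [range->start, range->end) here, as lines 300-318 do. */
	return 0;
}

static const struct mmu_notifier_ops mmu_rb_notifier_ops = {
	.invalidate_range_start = my_invalidate_range_start,
};

static int wire_up_notifier(struct mmu_rb_handler *h)
{
	h->mn.ops = &mmu_rb_notifier_ops;
	/* Registers against current->mm; undone later with
	 * mmu_notifier_unregister(), as on line 127. */
	return mmu_notifier_register(&h->mn, current->mm);
}
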
330 struct mmu_rb_handler *handler = container_of(work,
338 spin_lock_irqsave(&handler->lock, flags);
339 list_replace_init(&handler->del_list, &del_list);
340 spin_unlock_irqrestore(&handler->lock, flags);
345 handler->ops->remove(handler->ops_arg, node);
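
The deferred-delete work item drains handler->del_list in one step: list_replace_init() moves the whole list onto a local head while the spinlock is held, and ops->remove() then runs on each node with the lock dropped, which is what allows the callback to sleep. A condensed sketch of the same drain pattern (the real work function gets the handler via container_of() on line 330):

#include <linux/list.h>
#include <linux/spinlock.h>
#include "mmu_rb.h"

static void drain_del_list(struct mmu_rb_handler *handler)
{
	struct mmu_rb_node *node, *tmp;
	struct list_head del_list;
	unsigned long flags;

	/* Detach everything queued for deletion while holding the lock... */
	spin_lock_irqsave(&handler->lock, flags);
	list_replace_init(&handler->del_list, &del_list);
	spin_unlock_irqrestore(&handler->lock, flags);

	/* ...then run the potentially sleeping remove callback outside it. */
	list_for_each_entry_safe(node, tmp, &del_list, list)
		handler->ops->remove(handler->ops_arg, node);
}
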