Lines Matching refs:pool

198 struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
204 spin_lock_irqsave(&pool->clean_lock, flags);
205 ret = llist_del_first(&pool->clean_list);
206 spin_unlock_irqrestore(&pool->clean_lock, flags);
209 if (pool->pool_type == RDS_IB_MR_8K_POOL)
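
The reuse path above (lines 198-209) pops an entry off clean_list with llist_del_first() while holding clean_lock, so pushes can stay lock-free while removal is funneled through a single consumer at a time. Below is a minimal userspace sketch of that shape, with C11 atomics and a pthread mutex standing in for the kernel llist API and the spinlock; the node layout and names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) clean_list;                      /* pushed lock-free   */
static pthread_mutex_t clean_lock = PTHREAD_MUTEX_INITIALIZER; /* serializes removal */

/* Producers push with a compare-and-swap loop; no lock is needed here. */
static void push_clean(struct node *n)
{
        struct node *head = atomic_load(&clean_list);

        do {
                n->next = head;
        } while (!atomic_compare_exchange_weak(&clean_list, &head, n));
}

/* Removal holds clean_lock so only one thread pops at a time; with a single
 * remover the CAS below cannot hit an ABA problem from concurrent pushes. */
static struct node *pop_clean(void)
{
        struct node *head;

        pthread_mutex_lock(&clean_lock);
        head = atomic_load(&clean_list);
        while (head && !atomic_compare_exchange_weak(&clean_list, &head, head->next))
                ;
        pthread_mutex_unlock(&clean_lock);
        return head;
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        push_clean(&a);
        push_clean(&b);
        for (struct node *n = pop_clean(); n; n = pop_clean())
                printf("reused node %d\n", n->id);
        return 0;
}

Serializing only the removal side keeps the hot push path cheap while avoiding the hazard of multiple concurrent removers, which is the documented constraint on llist_del_first().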
275 struct rds_ib_mr_pool *pool = ibmr->pool;
277 atomic_sub(pinned, &pool->free_pinned);
281 static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
285 item_count = atomic_read(&pool->item_count);
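
Lines 275-285 show the bookkeeping that drives flushing: free_pinned tracks pages pinned by MRs parked on the pool's lists, and rds_ib_flush_goal() reads item_count to decide how much a flush must release. A small sketch of that accounting follows; the struct is hypothetical and the flush-goal policy (drain everything only when free_all is set) is a guess, since the listing only shows the item_count read.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical pool accounting mirroring the atomic counters referenced above. */
struct mr_pool {
        atomic_uint item_count;   /* MRs currently allocated from this pool   */
        atomic_uint free_pinned;  /* pages pinned by MRs parked on free lists */
};

/* Guessed flush-goal policy: when tearing the pool down (free_all) every
 * item must go; a routine flush has no fixed quota. */
static unsigned int flush_goal(struct mr_pool *pool, int free_all)
{
        unsigned int items = atomic_load(&pool->item_count);

        return free_all ? items : 0;
}

int main(void)
{
        struct mr_pool pool;

        atomic_init(&pool.item_count, 8);
        atomic_init(&pool.free_pinned, 2048);

        /* Tearing down one MR that had 256 pages pinned. */
        atomic_fetch_sub(&pool.free_pinned, 256);

        printf("routine flush goal:  %u\n", flush_goal(&pool, 0));
        printf("teardown flush goal: %u\n", flush_goal(&pool, 1));
        return 0;
}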
337 * Flush our pool of MRs.
342 int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
352 if (pool->pool_type == RDS_IB_MR_8K_POOL)
359 while (!mutex_trylock(&pool->flush_lock)) {
360 ibmr = rds_ib_reuse_mr(pool);
363 finish_wait(&pool->flush_wait, &wait);
367 prepare_to_wait(&pool->flush_wait, &wait,
369 if (llist_empty(&pool->clean_list))
372 ibmr = rds_ib_reuse_mr(pool);
375 finish_wait(&pool->flush_wait, &wait);
379 finish_wait(&pool->flush_wait, &wait);
381 mutex_lock(&pool->flush_lock);
384 ibmr = rds_ib_reuse_mr(pool);
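
Lines 342-384 are the entry to the flush: a caller that also wants an MR spins on mutex_trylock(), opportunistically reusing anything a concurrent flusher has already published, otherwise sleeping on flush_wait until clean_list is repopulated; once it finally owns flush_lock it re-checks for a reusable MR before doing the expensive work itself. The following is a userspace analogue of that trylock-or-reuse-or-wait shape, using a pthread mutex and condition variable in place of the kernel mutex and wait queue; the counts and names are made up for the demo.

#include <pthread.h>
#include <stdio.h>

/* Toy pool: a plain counter of clean MRs plus the two synchronization
 * objects standing in for flush_lock and flush_wait in the listing above. */
static int clean_mrs;                          /* protected by wait_lock */
static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wait_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  flush_wait = PTHREAD_COND_INITIALIZER;

/* Grab a clean MR if one is available; returns 1 on success. */
static int reuse_mr(void)
{
        int got = 0;

        pthread_mutex_lock(&wait_lock);
        if (clean_mrs > 0) {
                clean_mrs--;
                got = 1;
        }
        pthread_mutex_unlock(&wait_lock);
        return got;
}

/* One caller runs the expensive flush; everyone else either reuses an MR
 * that a concurrent flush just produced or sleeps until woken. */
static void get_mr(void)
{
        if (reuse_mr())
                return;                        /* fast path, no locks taken   */

        while (pthread_mutex_trylock(&flush_lock) != 0) {
                if (reuse_mr())
                        return;                /* fed by someone else's flush */

                /* Nothing clean yet: wait for a flusher to publish MRs. */
                pthread_mutex_lock(&wait_lock);
                while (clean_mrs == 0)
                        pthread_cond_wait(&flush_wait, &wait_lock);
                pthread_mutex_unlock(&wait_lock);

                if (reuse_mr())
                        return;
        }

        /* We own flush_lock: re-check before doing the real work. */
        if (!reuse_mr()) {
                pthread_mutex_lock(&wait_lock);
                clean_mrs += 3;                /* pretend the flush freed 4, keep 1 */
                pthread_mutex_unlock(&wait_lock);
                pthread_cond_broadcast(&flush_wait);
        }
        pthread_mutex_unlock(&flush_lock);
}

int main(void)
{
        get_mr();
        printf("clean MRs left after one allocation: %d\n", clean_mrs);
        return 0;
}

In the listing itself, the periodic worker and the free path call rds_ib_flush_mr_pool(pool, 0, NULL) and never need an MR for themselves, while rds_ib_try_reuse_ibmr() passes &ibmr and does, which is what the wait loop above is for.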
394 dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
395 dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
399 spin_lock_irqsave(&pool->clean_lock, flags);
400 llist_append_to_list(&pool->clean_list, &unmap_list);
401 spin_unlock_irqrestore(&pool->clean_lock, flags);
404 free_goal = rds_ib_flush_goal(pool, free_all);
421 spin_lock_irqsave(&pool->clean_lock, flags);
423 &pool->clean_list);
424 spin_unlock_irqrestore(&pool->clean_lock, flags);
428 atomic_sub(unpinned, &pool->free_pinned);
429 atomic_sub(dirty_to_clean, &pool->dirty_count);
430 atomic_sub(nfreed, &pool->item_count);
433 mutex_unlock(&pool->flush_lock);
434 if (waitqueue_active(&pool->flush_wait))
435 wake_up(&pool->flush_wait);
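
Lines 394-435 are the body of the flush: the dirty drop_list and free_list are detached wholesale into a private unmap_list, the survivors are spliced back onto clean_list under clean_lock, and the atomic counters are corrected before any waiter on flush_wait is woken. The sketch below shows the detach-then-process-then-splice idea with a single atomic exchange playing the role of llist_append_to_list(); the "unmap" step and the keep/free decision are stand-ins, not the real MR teardown.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) drop_list;   /* dirty MRs, pushed lock-free */
static struct node *clean_list;            /* private to the flusher here */

/* Producers add dirty nodes with a CAS loop, as in the reuse sketch above. */
static void push_dirty(struct node *n)
{
        struct node *head = atomic_load(&drop_list);

        do {
                n->next = head;
        } while (!atomic_compare_exchange_weak(&drop_list, &head, n));
}

/* Detach the whole dirty list in one atomic exchange; afterwards the
 * producer-visible head is empty and the flusher owns every node privately. */
static struct node *grab_all(_Atomic(struct node *) *head)
{
        return atomic_exchange(head, NULL);
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->id = i;
                push_dirty(n);
        }

        struct node *unmap_list = grab_all(&drop_list);
        int dirty_to_clean = 0, nfreed = 0;

        /* "Unmap" each node: free the odd ones, splice the rest back for reuse. */
        while (unmap_list) {
                struct node *n = unmap_list;

                unmap_list = n->next;
                dirty_to_clean++;
                if (n->id & 1) {
                        free(n);
                        nfreed++;
                } else {
                        n->next = clean_list;
                        clean_list = n;
                }
        }

        printf("drained %d dirty MRs, freed %d, kept %d clean for reuse\n",
               dirty_to_clean, nfreed, dirty_to_clean - nfreed);
        return 0;
}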
440 struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
446 ibmr = rds_ib_reuse_mr(pool);
450 if (atomic_inc_return(&pool->item_count) <= pool->max_items)
453 atomic_dec(&pool->item_count);
456 if (pool->pool_type == RDS_IB_MR_8K_POOL)
464 if (pool->pool_type == RDS_IB_MR_8K_POOL)
469 rds_ib_flush_mr_pool(pool, 0, &ibmr);
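
rds_ib_try_reuse_ibmr() (lines 440-469) reserves a pool slot optimistically: atomic_inc_return() bumps item_count first, and if the result exceeds max_items the increment is rolled back, a depletion statistic is bumped, and the caller falls back to a synchronous rds_ib_flush_mr_pool() before trying again. A compact sketch of that reserve-and-roll-back pattern, with a hypothetical cap and counters:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical cap standing in for pool->max_items. */
#define MAX_ITEMS 4

static atomic_uint item_count;
static atomic_uint depleted_stat;    /* like the *_pool_depleted statistics */

/* Reserve a slot: bump first, check afterwards, and undo the bump if we
 * overshot, mirroring the atomic_inc_return()/atomic_dec() pair above. */
static bool reserve_item(void)
{
        if (atomic_fetch_add(&item_count, 1) + 1 <= MAX_ITEMS)
                return true;

        atomic_fetch_sub(&item_count, 1);
        atomic_fetch_add(&depleted_stat, 1);
        return false;
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("allocation %d: %s\n", i,
                       reserve_item() ? "granted"
                                      : "depleted, would flush and retry");

        printf("items in use: %u, depletions: %u\n",
               atomic_load(&item_count), atomic_load(&depleted_stat));
        return 0;
}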
479 struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
481 rds_ib_flush_mr_pool(pool, 0, NULL);
487 struct rds_ib_mr_pool *pool = ibmr->pool;
502 /* Return it to the pool's free list */
505 atomic_add(ibmr->sg_len, &pool->free_pinned);
506 atomic_inc(&pool->dirty_count);
509 if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
510 atomic_read(&pool->dirty_count) >= pool->max_items / 5)
511 queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
515 rds_ib_flush_mr_pool(pool, 0, NULL);
521 &pool->flush_worker, 10);
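
The free path (lines 487-521) batches work instead of flushing immediately: a released MR goes back on the pool's free list, its pinned pages are added to free_pinned, dirty_count is bumped, and only when free_pinned crosses max_free_pinned or dirty_count reaches a fifth of max_items is the delayed flush worker queued. A sketch of that threshold policy with made-up numbers:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical pool limits; the real ones come from the pool setup
 * further down in this listing. */
struct pool_state {
        unsigned int free_pinned, max_free_pinned;
        unsigned int dirty_count, max_items;
};

/* Decide whether returning one more MR should kick the background flush:
 * either too many pages are pinned by idle MRs, or more than a fifth of
 * the pool is sitting dirty, mirroring the two-condition test above. */
static bool flush_needed(const struct pool_state *p)
{
        return p->free_pinned >= p->max_free_pinned ||
               p->dirty_count >= p->max_items / 5;
}

int main(void)
{
        struct pool_state p = {
                .free_pinned = 900, .max_free_pinned = 1024,
                .dirty_count = 70,  .max_items = 256,
        };

        /* Returning an MR that had 200 pages pinned. */
        p.free_pinned += 200;
        p.dirty_count += 1;

        if (flush_needed(&p))
                printf("queue the delayed flush worker\n");
        else
                printf("keep batching; no flush yet\n");
        return 0;
}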
634 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
636 cancel_delayed_work_sync(&pool->flush_worker);
637 rds_ib_flush_mr_pool(pool, 1, NULL);
638 WARN_ON(atomic_read(&pool->item_count));
639 WARN_ON(atomic_read(&pool->free_pinned));
640 kfree(pool);
646 struct rds_ib_mr_pool *pool;
648 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
649 if (!pool)
652 pool->pool_type = pool_type;
653 init_llist_head(&pool->free_list);
654 init_llist_head(&pool->drop_list);
655 init_llist_head(&pool->clean_list);
656 spin_lock_init(&pool->clean_lock);
657 mutex_init(&pool->flush_lock);
658 init_waitqueue_head(&pool->flush_wait);
659 INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
663 pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
664 pool->max_items = rds_ibdev->max_1m_mrs;
667 pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
668 pool->max_items = rds_ibdev->max_8k_mrs;
671 pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
672 pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
674 return pool;
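
Pool setup (lines 646-674) sizes the limits from the device caps: max_pages is the page budget for the pool's message size plus one, max_free_pinned lets a quarter of the worst-case pinned pages sit idle on free lists before a flush is forced, and max_items_soft is three quarters of the device-wide MR limit. A worked example of that arithmetic, with illustrative device numbers rather than values from a real HCA:

#include <stdio.h>

int main(void)
{
        unsigned int max_pages = 256 + 1;   /* e.g. a 1 MB message in 4 KB pages, plus one */
        unsigned int max_items = 4096;      /* hypothetical max_1m_mrs from the device     */
        unsigned int max_mrs   = 65536;     /* hypothetical device-wide MR limit           */

        unsigned long max_free_pinned = (unsigned long)max_items * max_pages / 4;
        unsigned int  max_items_soft  = max_mrs * 3 / 4;

        printf("max_free_pinned = %lu pages (a quarter of worst-case pinning)\n",
               max_free_pinned);
        printf("max_items_soft  = %u MRs (three quarters of the device limit)\n",
               max_items_soft);
        return 0;
}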
686 * had their pools freed. As each pool is freed its work struct is waited on,
687 * so the pool flushing work queue should be idle by the time we get here.