Lines matching defs:cache (the left-hand column is the line number in the source file)

156 	WRITE_ONCE(dev->cache.last_add, jiffies);
190 /* Asynchronously schedule new MRs to be populated in the cache. */
238 /* Synchronously create an MR in the cache */
263 WRITE_ONCE(ent->dev->cache.last_add, jiffies);
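
Both the asynchronous and the synchronous creation paths above stamp cache.last_add with the current jiffies once an MR lands in the cache, and later code reads that stamp to judge whether the cache is still being actively grown. A minimal sketch of that pattern, assuming the hypothetical helper names note_mr_added() and cache_recently_grew(); only the WRITE_ONCE()/READ_ONCE() pairing and the 300-second window come from the listing:

/* Sketch: record when the cache last grew, then test that freshness later. */
static void note_mr_added(struct mlx5_ib_dev *dev)
{
	/* Plain timestamp store; readers pair it with READ_ONCE() and only
	 * use it as a heuristic, so no lock is taken. */
	WRITE_ONCE(dev->cache.last_add, jiffies);
}

static bool cache_recently_grew(struct mlx5_mr_cache *cache)
{
	/* True if an MR was added within the last 300 seconds. */
	return time_before(jiffies, READ_ONCE(cache->last_add) + 300 * HZ);
}
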
395 * Upon set we immediately fill the cache to high water mark implied by
428 static bool someone_adding(struct mlx5_mr_cache *cache)
433 struct mlx5_cache_ent *ent = &cache->ent[i];
447 * update. The cache refill has hysteresis, once the low water mark is hit it is
458 queue_work(ent->dev->cache.wq, &ent->work);
465 queue_work(ent->dev->cache.wq, &ent->work);
472 queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
475 queue_work(ent->dev->cache.wq, &ent->work);
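
The queue_work()/queue_delayed_work() calls above implement the hysteresis described in the comment at line 447: an entry that drops to its low water mark is refilled eagerly, while an over-full entry is trimmed later by delayed work. A sketch of that decision, where the field names available and limit, the 2x high water mark, and the one-second delay are assumptions (the work items ent->work and ent->dwork are taken from the listing):

/* Sketch: (re)schedule the per-entry maintenance work with hysteresis. */
static void sketch_queue_adjust(struct mlx5_cache_ent *ent)
{
	if (ent->available < ent->limit)
		/* Below the low water mark: refill as soon as possible. */
		queue_work(ent->dev->cache.wq, &ent->work);
	else if (ent->available > 2 * ent->limit)
		/* Well above it: let the delayed worker trim the pool. */
		queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
				   msecs_to_jiffies(1000));
}
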
482 struct mlx5_mr_cache *cache = &dev->cache;
508 queue_delayed_work(cache->wq, &ent->dwork,
528 need_delay = need_resched() || someone_adding(cache) ||
530 READ_ONCE(cache->last_add) + 300 * HZ);
535 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
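
The fragment around lines 508-535 belongs to the background worker: it backs off when the CPU is wanted elsewhere (need_resched()), when another entry is already being populated (someone_adding()), or based on how long ago cache.last_add was updated, and in the back-off case it re-arms itself as delayed work with a 300-second delay instead of looping. A condensed sketch; the middle line of the condition does not contain "cache" and so is not shown above, which makes the direction of the time_after() comparison an assumption:

/* Sketch: back the maintenance worker off instead of spinning. */
static void sketch_cache_work(struct mlx5_cache_ent *ent)
{
	struct mlx5_mr_cache *cache = &ent->dev->cache;
	bool need_delay;

	need_delay = need_resched() || someone_adding(cache) ||
		     time_after(jiffies,
				READ_ONCE(cache->last_add) + 300 * HZ);
	if (need_delay) {
		/* Try again in five minutes rather than competing now. */
		queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		return;
	}
	/* ... otherwise perform one unit of cache maintenance ... */
}
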
561 /* Allocate a special entry from the cache */
565 struct mlx5_mr_cache *cache = &dev->cache;
570 entry >= ARRAY_SIZE(cache->ent)))
577 ent = &cache->ent[entry];
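
The special-entry allocator at lines 561-577 validates the caller-supplied index against ARRAY_SIZE(cache->ent) before indexing the entry array. A sketch of that lookup; the listing shows only the upper-bound half of the check, and the WARN_ON() wrapper plus the -EINVAL return are assumptions modelled on common kernel practice:

/* Sketch: resolve a special (non-standard-order) cache entry by index. */
static struct mlx5_cache_ent *sketch_get_special_ent(struct mlx5_ib_dev *dev,
						     unsigned int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	/* Reject out-of-range indices before touching the array. */
	if (WARN_ON(entry >= ARRAY_SIZE(cache->ent)))
		return ERR_PTR(-EINVAL);
	return &cache->ent[entry];
}
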
597 /* Return an MR already available in the cache */
604 /* Try larger MR pools from the cache to satisfy the allocation */
605 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
606 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
607 ent - dev->cache.ent);
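
When the pool for the requested size is empty, the allocator at lines 597-607 walks upward through the larger standard pools (up to MR_CACHE_LAST_STD_ENTRY), logging each attempt with mlx5_ib_dbg(). A sketch of that walk; the free-count field available_mrs and the helper pop_cached_mr() are stand-ins, not names taken from the listing:

/* Sketch: satisfy a request from the smallest non-empty pool whose MRs are
 * at least as large as requested. */
static struct mlx5_ib_mr *sketch_get_cache_mr(struct mlx5_ib_dev *dev,
					      struct mlx5_cache_ent *req_ent)
{
	struct mlx5_cache_ent *ent = req_ent;

	for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
		mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
			    ent - dev->cache.ent);
		if (ent->available_mrs)
			/* pop_cached_mr(): take one MR off this entry's
			 * free list under its lock (hypothetical helper). */
			return pop_cached_mr(ent);
	}
	return NULL;
}
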
662 struct mlx5_mr_cache *cache = &dev->cache;
663 struct mlx5_cache_ent *ent = &cache->ent[c];
694 debugfs_remove_recursive(dev->cache.root);
695 dev->cache.root = NULL;
700 struct mlx5_mr_cache *cache = &dev->cache;
708 cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
711 ent = &cache->ent[i];
713 dir = debugfs_create_dir(ent->name, cache->root);
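
Lines 694-713 show the debugfs plumbing: a top-level mr_cache directory is created under the device's dbg_root, each cache entry gets a subdirectory named after ent->name, and the whole tree is removed in one call by debugfs_remove_recursive(). A sketch of the create/cleanup pair; the per-entry statistics file and the available_mrs counter field are assumptions, the rest mirrors the listing:

/* Sketch: build the mr_cache debugfs tree, one directory per cache entry. */
static void sketch_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct dentry *dir;
	unsigned int i;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < ARRAY_SIZE(cache->ent); i++) {
		struct mlx5_cache_ent *ent = &cache->ent[i];

		dir = debugfs_create_dir(ent->name, cache->root);
		/* One example statistics file; 'available_mrs' is an
		 * assumed u32 counter field on the entry. */
		debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
	}
}

static void sketch_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	/* Tears down the directory and everything beneath it. */
	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}
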
730 struct mlx5_mr_cache *cache = &dev->cache;
735 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
736 if (!cache->wq) {
744 ent = &cache->ent[i];
786 if (!dev->cache.wq)
790 struct mlx5_cache_ent *ent = &dev->cache.ent[i];
805 destroy_workqueue(dev->cache.wq);
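
Lines 730-805 cover the cache's workqueue lifecycle: init allocates a dedicated ordered workqueue with WQ_MEM_RECLAIM (MR replenishment may be needed while memory is being reclaimed) and initializes each entry, while cleanup bails out if the queue was never created and otherwise destroys it after quiescing the entries. A sketch of that pairing, with the per-entry setup reduced to a comment:

/* Sketch: bring up and tear down the mkey cache workqueue. */
static int sketch_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	unsigned int i;

	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	for (i = 0; i < ARRAY_SIZE(cache->ent); i++) {
		struct mlx5_cache_ent *ent = &cache->ent[i];

		ent->dev = dev;
		/* ... set ent->order, INIT_WORK(&ent->work, ...),
		 * INIT_DELAYED_WORK(&ent->dwork, ...) ... */
	}
	return 0;
}

static void sketch_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	if (!dev->cache.wq)
		return;		/* init failed early or never ran */

	/* ... disable and drain every entry before this point ... */
	destroy_workqueue(dev->cache.wq);
}
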
972 struct mlx5_mr_cache *cache = &dev->cache;
974 if (order < cache->ent[0].order)
975 return &cache->ent[0];
976 order = order - cache->ent[0].order;
979 return &cache->ent[order];
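
The last fragment (lines 972-979) maps a requested page order onto a cache entry: orders smaller than the first entry's order fall back to entry 0, otherwise the order is rebased against cache->ent[0].order and used directly as the array index. A sketch of the mapping; the lines between 976 and 979 are not shown, so the oversized-order guard here is an assumption:

/* Sketch: translate an MR page order into a cache entry pointer. */
static struct mlx5_cache_ent *sketch_ent_from_order(struct mlx5_ib_dev *dev,
						    unsigned int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return &cache->ent[0];	/* small requests share entry 0 */

	order = order - cache->ent[0].order;
	if (order > MR_CACHE_LAST_STD_ENTRY)	/* assumed upper bound */
		return NULL;
	return &cache->ent[order];
}
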