Lines matching defs:cache (the mlx5 mkey cache; the leading number on each line is the source-file line number):
179 WRITE_ONCE(ent->dev->cache.last_add,
273 WRITE_ONCE(dev->cache.last_add, jiffies);
318 /* Asynchronously schedule new MRs to be populated in the cache. */
358 /* Synchronously create a MR in the cache */
376 WRITE_ONCE(ent->dev->cache.last_add, jiffies);
498 * Upon set we immediately fill the cache to high water mark implied by
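
The WRITE_ONCE() stores at lines 179, 273 and 376 stamp the cache with the time of the most recent fill, which is later read back with READ_ONCE() (line 636). A minimal sketch of that lockless timestamp pattern, using hypothetical demo_* names rather than the driver's types:

#include <linux/compiler.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_cache {
        unsigned long last_add;         /* jiffies of the most recent cache fill */
};

/* Writer side: stamp the cache locklessly after adding an mkey. */
static void demo_note_add(struct demo_cache *cache)
{
        WRITE_ONCE(cache->last_add, jiffies);
}

/* Reader side: was anything added within roughly the last 300 seconds? */
static bool demo_recently_added(struct demo_cache *cache)
{
        return time_before(jiffies, READ_ONCE(cache->last_add) + 300 * HZ);
}
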
531 static bool someone_adding(struct mlx5_mkey_cache *cache)
537 mutex_lock(&cache->rb_lock);
538 for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
544 mutex_unlock(&cache->rb_lock);
548 mutex_unlock(&cache->rb_lock);
554 * update. The cache refill has hysteresis, once the low water mark is hit it is
565 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
572 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
579 queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
582 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
589 struct mlx5_mkey_cache *cache = &dev->cache;
614 queue_delayed_work(cache->wq, &ent->dwork,
634 need_delay = need_resched() || someone_adding(cache) ||
636 READ_ONCE(cache->last_add) + 300 * HZ);
641 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
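
Lines 554-641 schedule the per-entry delayed work with hysteresis: dropping below the low water mark kicks the work immediately (delay 0), while other paths requeue it with a delay (300 * HZ at line 641). A rough sketch of that scheduling pattern; the demo_* structure and the refill loop are placeholders, not the driver's logic:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_ent {
        spinlock_t lock;
        unsigned int stored;            /* mkeys currently queued in this entry */
        unsigned int limit;             /* low water mark */
        struct delayed_work dwork;
        struct workqueue_struct *wq;
};

/* Called with ent->lock held after handing out an mkey: only refill once
 * the count dips below the low water mark, so small fluctuations do not
 * keep rescheduling the work. */
static void demo_queue_adjust(struct demo_ent *ent)
{
        if (ent->stored < ent->limit)
                mod_delayed_work(ent->wq, &ent->dwork, 0);
}

static void demo_refill_work(struct work_struct *work)
{
        struct demo_ent *ent = container_of(work, struct demo_ent, dwork.work);

        spin_lock_irq(&ent->lock);
        while (ent->stored < 2 * ent->limit)    /* fill to the high water mark */
                ent->stored++;                  /* stand-in for creating an mkey */
        spin_unlock_irq(&ent->lock);

        /* A later pass can be pushed out instead of run immediately, e.g.:
         *      queue_delayed_work(ent->wq, &ent->dwork, 300 * HZ);
         */
}
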
684 static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
687 struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL;
701 mutex_unlock(&cache->rb_lock);
708 rb_insert_color(&ent->node, &cache->rb_root);
717 struct rb_node *node = dev->cache.rb_root.rb_node;
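
mlx5_cache_ent_insert() (lines 684-708) and the lookup starting at line 717 are standard rbtree walks. A self-contained sketch of the insert side, keyed by a plain integer instead of the driver's richer key:

#include <linux/errno.h>
#include <linux/rbtree.h>

struct demo_rb_ent {
        struct rb_node node;
        unsigned long key;              /* the driver compares a richer key */
};

/* Insert @ent ordered by key; the caller holds the tree's mutex. */
static int demo_ent_insert(struct rb_root *root, struct demo_rb_ent *ent)
{
        struct rb_node **new = &root->rb_node, *parent = NULL;

        while (*new) {
                struct demo_rb_ent *cur = rb_entry(*new, struct demo_rb_ent, node);

                parent = *new;
                if (ent->key < cur->key)
                        new = &(*new)->rb_left;
                else if (ent->key > cur->key)
                        new = &(*new)->rb_right;
                else
                        return -EEXIST; /* duplicate key */
        }
        rb_link_node(&ent->node, parent, new);
        rb_insert_color(&ent->node, root);
        return 0;
}
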
843 debugfs_remove_recursive(dev->cache.fs_root);
844 dev->cache.fs_root = NULL;
860 dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
870 struct mlx5_mkey_cache *cache = &dev->cache;
875 cache->fs_root = debugfs_create_dir("mr_cache", dbg_root);
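
Lines 843-875 build an mr_cache debugfs directory with one subdirectory per cache entry, and tear the whole tree down with debugfs_remove_recursive(). A hypothetical minimal version of that layout (the "stored" attribute is illustrative only, not one of the driver's files):

#include <linux/debugfs.h>

static struct dentry *demo_fs_root;

/* Parent "mr_cache" directory plus one subdirectory per cache entry;
 * everything below the parent is removed in a single recursive call. */
static void demo_debugfs_init(struct dentry *dbg_root, const char *ent_name,
                              u32 *stored)
{
        struct dentry *dir;

        demo_fs_root = debugfs_create_dir("mr_cache", dbg_root);
        dir = debugfs_create_dir(ent_name, demo_fs_root);
        debugfs_create_u32("stored", 0400, dir, stored);
}

static void demo_debugfs_cleanup(void)
{
        debugfs_remove_recursive(demo_fs_root);
        demo_fs_root = NULL;
}
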
905 ret = mlx5_cache_ent_insert(&dev->cache, ent);
926 mod_delayed_work(ent->dev->cache.wq,
927 &ent->dev->cache.remove_ent_dwork,
936 struct mlx5_mkey_cache *cache;
940 cache = container_of(work, struct mlx5_mkey_cache,
942 mutex_lock(&cache->rb_lock);
943 cur = rb_last(&cache->rb_root);
947 mutex_unlock(&cache->rb_lock);
952 mutex_lock(&cache->rb_lock);
958 mutex_lock(&cache->rb_lock);
960 mutex_unlock(&cache->rb_lock);
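
remove_ent_work_func() (lines 936-960) recovers the cache from its delayed_work via container_of() and walks the rbtree from rb_last() backwards, dropping rb_lock around each entry's slow cleanup. A sketch of that shape only, with the per-entry cleanup left as a comment:

#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>

struct demo_mkey_cache {
        struct rb_root rb_root;
        struct mutex rb_lock;
        struct delayed_work remove_ent_dwork;
};

/* Walk the tree from the highest key downwards, releasing rb_lock while
 * each entry is cleaned so the mutex is not held across the slow path. */
static void demo_remove_ent_work(struct work_struct *work)
{
        struct demo_mkey_cache *cache = container_of(work, struct demo_mkey_cache,
                                                     remove_ent_dwork.work);
        struct rb_node *cur;

        mutex_lock(&cache->rb_lock);
        cur = rb_last(&cache->rb_root);
        while (cur) {
                struct rb_node *prev = rb_prev(cur);

                mutex_unlock(&cache->rb_lock);
                /* destroy this entry's cached mkeys here (may sleep) */
                mutex_lock(&cache->rb_lock);
                cur = prev;
        }
        mutex_unlock(&cache->rb_lock);
}
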
965 struct mlx5_mkey_cache *cache = &dev->cache;
966 struct rb_root *root = &dev->cache.rb_root;
976 mutex_init(&dev->cache.rb_lock);
977 dev->cache.rb_root = RB_ROOT;
978 INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func);
979 cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
980 if (!cache->wq) {
988 mutex_lock(&cache->rb_lock);
1002 mutex_unlock(&cache->rb_lock);
1013 mutex_unlock(&cache->rb_lock);
1015 mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
1021 struct rb_root *root = &dev->cache.rb_root;
1025 if (!dev->cache.wq)
1028 mutex_lock(&dev->cache.rb_lock);
1029 cancel_delayed_work(&dev->cache.remove_ent_dwork);
1037 mutex_unlock(&dev->cache.rb_lock);
1043 flush_workqueue(dev->cache.wq);
1049 mutex_lock(&dev->cache.rb_lock);
1058 mutex_unlock(&dev->cache.rb_lock);
1060 destroy_workqueue(dev->cache.wq);
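
Lines 965-1060 cover the workqueue lifecycle: an ordered WQ_MEM_RECLAIM workqueue is allocated at init, and cleanup cancels the delayed work, flushes the queue and destroys it. A stripped-down skeleton of that lifecycle; the demo_* names and the 30-second period are made up for the sketch:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_dwork;

static void demo_work_fn(struct work_struct *work)
{
        /* periodic cache maintenance would run here */
}

static int demo_cache_init(void)
{
        INIT_DELAYED_WORK(&demo_dwork, demo_work_fn);
        demo_wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
        if (!demo_wq)
                return -ENOMEM;
        /* the 30-second period is arbitrary for this sketch */
        queue_delayed_work(demo_wq, &demo_dwork, 30 * HZ);
        return 0;
}

static void demo_cache_cleanup(void)
{
        cancel_delayed_work(&demo_dwork);
        flush_workqueue(demo_wq);       /* let any in-flight work finish */
        destroy_workqueue(demo_wq);
}
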
1176 * If the MR can't come from the cache then synchronously create an uncached
1610 /* We only track the allocated sizes of MRs from the cache */
1823 struct mlx5_mkey_cache *cache = &dev->cache;
1833 mutex_lock(&cache->rb_lock);
1838 mutex_unlock(&cache->rb_lock);
1843 mutex_unlock(&cache->rb_lock);
1849 mutex_unlock(&cache->rb_lock);
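
The last hits (lines 1823-1849) take rb_lock around an rbtree lookup. A self-contained sketch of a locked lookup by key, again with hypothetical demo_* types:

#include <linux/mutex.h>
#include <linux/rbtree.h>

struct demo_rb_ent {
        struct rb_node node;
        unsigned long key;
};

struct demo_rb_cache {
        struct rb_root rb_root;
        struct mutex rb_lock;
};

/* Find the entry matching @key, holding rb_lock for the walk as the
 * lookups above do; returns NULL when nothing matches. In this sketch
 * nothing frees entries concurrently, so the pointer stays valid. */
static struct demo_rb_ent *demo_ent_find(struct demo_rb_cache *cache,
                                         unsigned long key)
{
        struct demo_rb_ent *found = NULL;
        struct rb_node *node;

        mutex_lock(&cache->rb_lock);
        node = cache->rb_root.rb_node;
        while (node) {
                struct demo_rb_ent *cur = rb_entry(node, struct demo_rb_ent, node);

                if (key < cur->key) {
                        node = node->rb_left;
                } else if (key > cur->key) {
                        node = node->rb_right;
                } else {
                        found = cur;
                        break;
                }
        }
        mutex_unlock(&cache->rb_lock);
        return found;
}
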