Lines Matching refs:ent
111 struct mlx5_ib_dev *dev = async_create->ent->dev;
124 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
146 static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
149 XA_STATE(xas, &ent->mkeys, 0);
153 (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
162 xas_set(&xas, ent->reserved);
165 if (to_store && ent->stored == ent->reserved)
170 ent->reserved++;
172 if (ent->stored != ent->reserved)
173 __xa_store(&ent->mkeys,
174 ent->stored,
177 ent->stored++;
178 queue_adjust_cache_locked(ent);
179 WRITE_ONCE(ent->dev->cache.last_add,
184 xa_unlock_irq(&ent->mkeys);
192 xa_lock_irq(&ent->mkeys);
194 xa_lock_irq(&ent->mkeys);
202 static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
207 xa_lock_irq(&ent->mkeys);
208 ret = push_mkey_locked(ent, limit_pendings, to_store);
209 xa_unlock_irq(&ent->mkeys);
213 static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
217 ent->reserved--;
218 old = __xa_erase(&ent->mkeys, ent->reserved);
222 static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
226 old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
228 ent->stored++;
231 static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
235 ent->stored--;
236 ent->reserved--;
238 if (ent->stored == ent->reserved) {
239 xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
244 xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
247 old = __xa_erase(&ent->mkeys, ent->reserved);
256 struct mlx5_cache_ent *ent = mkey_out->ent;
257 struct mlx5_ib_dev *dev = ent->dev;
263 xa_lock_irqsave(&ent->mkeys, flags);
264 undo_push_reserve_mkey(ent);
266 xa_unlock_irqrestore(&ent->mkeys, flags);
275 xa_lock_irqsave(&ent->mkeys, flags);
276 push_to_reserved(ent, mkey_out->mkey);
278 queue_adjust_cache_locked(ent);
279 xa_unlock_irqrestore(&ent->mkeys, flags);
302 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
304 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0,
305 ent->dev->umrc.pd);
308 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
310 (ent->rb_key.access_mode >> 2) & 0x7);
313 get_mkc_octo_size(ent->rb_key.access_mode,
314 ent->rb_key.ndescs));
319 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
333 set_cache_mkc(ent, mkc);
334 async_create->ent = ent;
336 err = push_mkey(ent, true, NULL);
342 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
350 xa_lock_irq(&ent->mkeys);
351 undo_push_reserve_mkey(ent);
352 xa_unlock_irq(&ent->mkeys);
359 static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
370 set_cache_mkc(ent, mkc);
372 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
376 WRITE_ONCE(ent->dev->cache.last_add, jiffies);
382 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
386 lockdep_assert_held(&ent->mkeys.xa_lock);
387 if (!ent->stored)
389 mkey = pop_stored_mkey(ent);
390 xa_unlock_irq(&ent->mkeys);
391 mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
392 xa_lock_irq(&ent->mkeys);
395 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
397 __acquires(&ent->mkeys) __releases(&ent->mkeys)
401 lockdep_assert_held(&ent->mkeys.xa_lock);
405 target = ent->limit * 2;
406 if (target == ent->reserved)
408 if (target > ent->reserved) {
409 u32 todo = target - ent->reserved;
411 xa_unlock_irq(&ent->mkeys);
412 err = add_keys(ent, todo);
415 xa_lock_irq(&ent->mkeys);
422 remove_cache_mr_locked(ent);
430 struct mlx5_cache_ent *ent = filp->private_data;
443 xa_lock_irq(&ent->mkeys);
444 if (target < ent->in_use) {
448 target = target - ent->in_use;
449 if (target < ent->limit || target > ent->limit*2) {
453 err = resize_available_mrs(ent, target, false);
456 xa_unlock_irq(&ent->mkeys);
461 xa_unlock_irq(&ent->mkeys);
468 struct mlx5_cache_ent *ent = filp->private_data;
472 err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
489 struct mlx5_cache_ent *ent = filp->private_data;
501 xa_lock_irq(&ent->mkeys);
502 ent->limit = var;
503 err = resize_available_mrs(ent, 0, true);
504 xa_unlock_irq(&ent->mkeys);
513 struct mlx5_cache_ent *ent = filp->private_data;
517 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
533 struct mlx5_cache_ent *ent;
539 ent = rb_entry(node, struct mlx5_cache_ent, node);
540 xa_lock_irq(&ent->mkeys);
541 ret = ent->stored < ent->limit;
542 xa_unlock_irq(&ent->mkeys);
557 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
559 lockdep_assert_held(&ent->mkeys.xa_lock);
561 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
563 if (ent->stored < ent->limit) {
564 ent->fill_to_high_water = true;
565 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
566 } else if (ent->fill_to_high_water &&
567 ent->reserved < 2 * ent->limit) {
572 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
573 } else if (ent->stored == 2 * ent->limit) {
574 ent->fill_to_high_water = false;
575 } else if (ent->stored > 2 * ent->limit) {
577 ent->fill_to_high_water = false;
578 if (ent->stored != ent->reserved)
579 queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
582 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
586 static void __cache_work_func(struct mlx5_cache_ent *ent)
588 struct mlx5_ib_dev *dev = ent->dev;
592 xa_lock_irq(&ent->mkeys);
593 if (ent->disabled)
596 if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
598 xa_unlock_irq(&ent->mkeys);
599 err = add_keys(ent, 1);
600 xa_lock_irq(&ent->mkeys);
601 if (ent->disabled)
614 queue_delayed_work(cache->wq, &ent->dwork,
618 } else if (ent->stored > 2 * ent->limit) {
633 xa_unlock_irq(&ent->mkeys);
637 xa_lock_irq(&ent->mkeys);
638 if (ent->disabled)
641 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
644 remove_cache_mr_locked(ent);
645 queue_adjust_cache_locked(ent);
648 xa_unlock_irq(&ent->mkeys);
653 struct mlx5_cache_ent *ent;
655 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
656 __cache_work_func(ent);
685 struct mlx5_cache_ent *ent)
695 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key);
707 rb_link_node(&ent->node, parent, new);
708 rb_insert_color(&ent->node, &cache->rb_root);
722 * Find the smallest ent with order >= requested_order.
746 struct mlx5_cache_ent *ent,
756 xa_lock_irq(&ent->mkeys);
757 ent->in_use++;
759 if (!ent->stored) {
760 queue_adjust_cache_locked(ent);
761 ent->miss++;
762 xa_unlock_irq(&ent->mkeys);
763 err = create_cache_mkey(ent, &mr->mmkey.key);
765 xa_lock_irq(&ent->mkeys);
766 ent->in_use--;
767 xa_unlock_irq(&ent->mkeys);
772 mr->mmkey.key = pop_stored_mkey(ent);
773 queue_adjust_cache_locked(ent);
774 xa_unlock_irq(&ent->mkeys);
776 mr->mmkey.cache_ent = ent;
815 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);
817 if (!ent)
820 return _mlx5_mr_cache_alloc(dev, ent, access_flags);
823 static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
827 cancel_delayed_work(&ent->dwork);
828 xa_lock_irq(&ent->mkeys);
829 while (ent->stored) {
830 mkey = pop_stored_mkey(ent);
831 xa_unlock_irq(&ent->mkeys);
833 xa_lock_irq(&ent->mkeys);
835 xa_unlock_irq(&ent->mkeys);
848 struct mlx5_cache_ent *ent)
850 int order = order_base_2(ent->rb_key.ndescs);
856 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
859 sprintf(ent->name, "%d", order);
860 dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
861 debugfs_create_file("size", 0600, dir, ent, &size_fops);
862 debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
863 debugfs_create_ulong("cur", 0400, dir, &ent->stored);
864 debugfs_create_u32("miss", 0600, dir, &ent->miss);
890 struct mlx5_cache_ent *ent;
894 ent = kzalloc(sizeof(*ent), GFP_KERNEL);
895 if (!ent)
898 xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
899 ent->rb_key = rb_key;
900 ent->dev = dev;
901 ent->is_tmp = !persistent_entry;
903 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
905 ret = mlx5_cache_ent_insert(&dev->cache, ent);
907 kfree(ent);
920 ent->limit = dev->mdev->profile.mr_cache[order].limit;
922 ent->limit = 0;
924 mlx5_mkey_cache_debugfs_add_ent(dev, ent);
926 mod_delayed_work(ent->dev->cache.wq,
927 &ent->dev->cache.remove_ent_dwork,
931 return ent;
937 struct mlx5_cache_ent *ent;
945 ent = rb_entry(cur, struct mlx5_cache_ent, node);
949 xa_lock_irq(&ent->mkeys);
950 if (!ent->is_tmp) {
951 xa_unlock_irq(&ent->mkeys);
955 xa_unlock_irq(&ent->mkeys);
957 clean_keys(ent->dev, ent);
970 struct mlx5_cache_ent *ent;
991 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
992 if (IS_ERR(ent)) {
993 ret = PTR_ERR(ent);
1004 ent = rb_entry(node, struct mlx5_cache_ent, node);
1005 xa_lock_irq(&ent->mkeys);
1006 queue_adjust_cache_locked(ent);
1007 xa_unlock_irq(&ent->mkeys);
1022 struct mlx5_cache_ent *ent;
1031 ent = rb_entry(node, struct mlx5_cache_ent, node);
1032 xa_lock_irq(&ent->mkeys);
1033 ent->disabled = true;
1034 xa_unlock_irq(&ent->mkeys);
1035 cancel_delayed_work(&ent->dwork);
1052 ent = rb_entry(node, struct mlx5_cache_ent, node);
1054 clean_keys(dev, ent);
1055 rb_erase(&ent->node, root);
1056 kfree(ent);
1159 struct mlx5_cache_ent *ent;
1174 ent = mkey_cache_ent_from_rb_key(dev, rb_key);
1179 if (!ent) {
1189 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
1824 struct mlx5_cache_ent *ent;
1834 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
1835 if (ent) {
1836 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
1837 if (ent->disabled) {
1841 mr->mmkey.cache_ent = ent;
1848 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
1850 if (IS_ERR(ent))
1851 return PTR_ERR(ent);
1853 mr->mmkey.cache_ent = ent;
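
The matches above trace the mkey cache's two counters: slots below ent->stored hold completed mkeys, while slots between ent->stored and ent->reserved are placeholders for asynchronous creations still in flight (push_mkey_locked, push_to_reserved, undo_push_reserve_mkey, pop_stored_mkey). As a rough user-space analogue of that bookkeeping, here is a minimal sketch using a plain array in place of the kernel xarray and no locking; the names mkey_stack, push_reserved, commit_stored, undo_reserved and pop_stored are hypothetical and not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_CAP 64

/* Hypothetical user-space analogue of ent->stored / ent->reserved: slots
 * in [0, stored) hold completed mkeys, slots in [stored, reserved) stand
 * for asynchronous creations that were issued but have not finished. */
struct mkey_stack {
	uint32_t keys[CACHE_CAP];
	unsigned int stored;	/* completed mkeys available to consumers */
	unsigned int reserved;	/* stored plus pending async creations    */
};

/* Reserve a slot before issuing an async create (cf. push_mkey_locked()). */
static int push_reserved(struct mkey_stack *s)
{
	if (s->reserved == CACHE_CAP)
		return -1;
	s->reserved++;
	return 0;
}

/* A create completed: record its mkey at the stored index
 * (cf. push_to_reserved()). */
static void commit_stored(struct mkey_stack *s, uint32_t mkey)
{
	assert(s->stored < s->reserved);
	s->keys[s->stored++] = mkey;
}

/* A create failed: hand the reserved slot back (cf. undo_push_reserve_mkey()). */
static void undo_reserved(struct mkey_stack *s)
{
	assert(s->reserved > s->stored);
	s->reserved--;
}

/* Hand a completed mkey to a consumer; both counters drop by one
 * (cf. pop_stored_mkey()). */
static uint32_t pop_stored(struct mkey_stack *s)
{
	assert(s->stored > 0);
	s->stored--;
	s->reserved--;
	return s->keys[s->stored];
}

int main(void)
{
	struct mkey_stack s = { 0 };

	push_reserved(&s);		/* first create issued              */
	push_reserved(&s);		/* second create issued             */
	commit_stored(&s, 0x1234);	/* first create completed           */
	undo_reserved(&s);		/* second create failed, slot freed */
	printf("popped mkey 0x%x\n", pop_stored(&s));
	return 0;
}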
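
The queue_adjust_cache_locked() matches show the fill/shrink hysteresis: dropping below ent->limit starts filling toward 2 * ent->limit, and only reaching or passing that high-water mark turns filling off again. The following pure-function restatement is a simplified sketch under that reading; the enum and the adjust() helper are hypothetical, and the kernel's distinction between immediate and delayed shrink work is folded into a single CACHE_SHRINK action.

#include <stdbool.h>
#include <stdio.h>

enum cache_action { CACHE_IDLE, CACHE_FILL, CACHE_SHRINK };

/* Hypothetical restatement of the thresholds in queue_adjust_cache_locked():
 * start filling below limit, keep filling until reserved reaches 2 * limit,
 * shrink once stored exceeds 2 * limit. */
static enum cache_action adjust(unsigned int stored, unsigned int reserved,
                                unsigned int limit, bool *fill_to_high_water)
{
	if (stored < limit) {
		*fill_to_high_water = true;
		return CACHE_FILL;
	}
	if (*fill_to_high_water && reserved < 2 * limit)
		return CACHE_FILL;
	if (stored >= 2 * limit)
		*fill_to_high_water = false;
	return stored > 2 * limit ? CACHE_SHRINK : CACHE_IDLE;
}

int main(void)
{
	bool high = false;

	/* With limit = 8: 4 stored triggers a fill, 20 stored triggers a shrink. */
	printf("%d\n", adjust(4, 4, 8, &high));   /* prints 1 (CACHE_FILL)   */
	printf("%d\n", adjust(20, 20, 8, &high)); /* prints 2 (CACHE_SHRINK) */
	return 0;
}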