Lines matching references to ent (the mlx5 MR-cache entry, struct mlx5_cache_ent)

112 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
138 struct mlx5_cache_ent *ent = mr->cache_ent;
144 spin_lock_irqsave(&ent->lock, flags);
145 ent->pending--;
147 spin_unlock_irqrestore(&ent->lock, flags);
158 spin_lock_irqsave(&ent->lock, flags);
159 list_add_tail(&mr->list, &ent->head);
160 ent->available_mrs++;
161 ent->total_mrs++;
163 queue_adjust_cache_locked(ent);
164 ent->pending--;
165 spin_unlock_irqrestore(&ent->lock, flags);
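
Lines 138-165 above come from the asynchronous mkey-creation completion path: under ent->lock the finished MR is appended to the entry's free list, available_mrs and total_mrs are bumped, the refill logic is re-evaluated, and one pending request is retired. A minimal user-space sketch of that bookkeeping, using a pthread mutex in place of the spinlock and hypothetical cache_ent/mr_node types, might look like this:

#include <pthread.h>
#include <stdio.h>

struct mr_node { struct mr_node *next; };

struct cache_ent {
    pthread_mutex_t lock;      /* stands in for the ent->lock spinlock */
    struct mr_node *head;      /* free list of cached MRs */
    unsigned int available_mrs;
    unsigned int total_mrs;
    unsigned int pending;      /* async creations still in flight */
};

/* Completion handler: on success the new MR joins the free list and the
 * counters are bumped; either way one pending request is retired
 * (compare lines 144-165 above; the original uses list_add_tail()). */
static void creation_done(struct cache_ent *ent, struct mr_node *mr, int err)
{
    pthread_mutex_lock(&ent->lock);
    if (!err) {
        mr->next = ent->head;
        ent->head = mr;
        ent->available_mrs++;
        ent->total_mrs++;
    }
    ent->pending--;
    pthread_mutex_unlock(&ent->lock);
}

int main(void)
{
    struct cache_ent ent = { .lock = PTHREAD_MUTEX_INITIALIZER, .pending = 1 };
    struct mr_node mr = { 0 };

    creation_done(&ent, &mr, 0);
    printf("available=%u total=%u pending=%u\n",
           ent.available_mrs, ent.total_mrs, ent.pending);
    return 0;
}
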
168 static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
175 mr->order = ent->order;
176 mr->cache_ent = ent;
177 mr->dev = ent->dev;
179 set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
182 MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
183 MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
185 MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
186 MLX5_SET(mkc, mkc, log_page_size, ent->page);
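
Lines 168-186 (alloc_cache_mr) seed the mkey context from the cache entry: the 5-bit access mode is split across the access_mode_1_0 and access_mode_4_2 fields, and the translation size and page shift come from ent->xlt and ent->page. A small sketch of just that bit split, with hypothetical field and function names:

#include <assert.h>
#include <stdint.h>

/* Split a 5-bit access mode the way lines 182-183 do: the low two bits
 * go into access_mode_1_0, the high three into access_mode_4_2. */
struct mkc_bits {
    uint8_t access_mode_1_0;   /* bits [1:0] */
    uint8_t access_mode_4_2;   /* bits [4:2] */
};

static struct mkc_bits split_access_mode(uint8_t access_mode)
{
    struct mkc_bits b = {
        .access_mode_1_0 = access_mode & 0x3,
        .access_mode_4_2 = (access_mode >> 2) & 0x7,
    };
    return b;
}

int main(void)
{
    /* MTT access mode (0x1 in the mlx5 interface headers). */
    struct mkc_bits b = split_access_mode(0x1);

    assert(b.access_mode_1_0 == 0x1 && b.access_mode_4_2 == 0x0);
    return 0;
}
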
191 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
206 mr = alloc_cache_mr(ent, mkc);
211 spin_lock_irq(&ent->lock);
212 if (ent->pending >= MAX_PENDING_REG_MR) {
214 spin_unlock_irq(&ent->lock);
218 ent->pending++;
219 spin_unlock_irq(&ent->lock);
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
221 &ent->dev->async_ctx, in, inlen,
225 spin_lock_irq(&ent->lock);
226 ent->pending--;
227 spin_unlock_irq(&ent->lock);
228 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
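
Lines 191-228 (add_keys) throttle asynchronous mkey creation: the entry's pending counter is checked against MAX_PENDING_REG_MR under the lock before each request, and is rolled back if the submission fails. A user-space sketch of that throttle, with a pthread mutex in place of the spinlock and a hypothetical issue_async_create() stub (the cap value here is illustrative):

#include <errno.h>
#include <pthread.h>

#define MAX_PENDING_REG_MR 8   /* in-flight cap; same name as the driver's define */

struct cache_ent {
    pthread_mutex_t lock;
    unsigned int pending;       /* async creations in flight */
};

/* Stand-in for mlx5_ib_create_mkey_cb(). */
static int issue_async_create(struct cache_ent *ent)
{
    (void)ent;
    return 0;
}

/* Mirror lines 211-228: refuse to queue more than MAX_PENDING_REG_MR
 * requests, and undo the pending++ if the submission fails. */
static int add_one_key(struct cache_ent *ent)
{
    int err;

    pthread_mutex_lock(&ent->lock);
    if (ent->pending >= MAX_PENDING_REG_MR) {
        pthread_mutex_unlock(&ent->lock);
        return -EAGAIN;
    }
    ent->pending++;
    pthread_mutex_unlock(&ent->lock);

    err = issue_async_create(ent);
    if (err) {
        pthread_mutex_lock(&ent->lock);
        ent->pending--;
        pthread_mutex_unlock(&ent->lock);
    }
    return err;
}

int main(void)
{
    struct cache_ent ent = { .lock = PTHREAD_MUTEX_INITIALIZER };

    return add_one_key(&ent);
}
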
239 static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
252 mr = alloc_cache_mr(ent, mkc);
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
263 WRITE_ONCE(ent->dev->cache.last_add, jiffies);
264 spin_lock_irq(&ent->lock);
265 ent->total_mrs++;
266 spin_unlock_irq(&ent->lock);
276 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
280 lockdep_assert_held(&ent->lock);
281 if (list_empty(&ent->head))
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
285 ent->available_mrs--;
286 ent->total_mrs--;
287 spin_unlock_irq(&ent->lock);
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
290 spin_lock_irq(&ent->lock);
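
Lines 276-290 (remove_cache_mr_locked) show the usual pattern of unlinking the MR and decrementing the counters under the lock, then dropping the lock around the blocking destroy call and re-taking it before returning. A sketch of that shape, assuming a pthread mutex and a hypothetical destroy_mkey() stand-in:

#include <pthread.h>
#include <stdlib.h>

struct mr_node { struct mr_node *next; };

struct cache_ent {
    pthread_mutex_t lock;
    struct mr_node *head;
    unsigned int available_mrs;
    unsigned int total_mrs;
};

/* Stand-in for the blocking mlx5_core_destroy_mkey() call. */
static void destroy_mkey(struct mr_node *mr) { free(mr); }

/* Called with ent->lock held; returns with it held again, like the
 * original remove_cache_mr_locked(). */
static void remove_one_locked(struct cache_ent *ent)
{
    struct mr_node *mr;

    if (!ent->head)
        return;
    mr = ent->head;                      /* list_first_entry() + list_del() */
    ent->head = mr->next;
    ent->available_mrs--;
    ent->total_mrs--;
    pthread_mutex_unlock(&ent->lock);    /* must not block under the lock */
    destroy_mkey(mr);
    pthread_mutex_lock(&ent->lock);
}

int main(void)
{
    struct cache_ent ent = { .lock = PTHREAD_MUTEX_INITIALIZER };

    ent.head = calloc(1, sizeof(*ent.head));
    ent.available_mrs = ent.total_mrs = 1;
    pthread_mutex_lock(&ent.lock);
    remove_one_locked(&ent);
    pthread_mutex_unlock(&ent.lock);
    return 0;
}
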
293 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
298 lockdep_assert_held(&ent->lock);
302 target = ent->limit * 2;
303 if (target == ent->available_mrs + ent->pending)
305 if (target > ent->available_mrs + ent->pending) {
306 u32 todo = target - (ent->available_mrs + ent->pending);
308 spin_unlock_irq(&ent->lock);
309 err = add_keys(ent, todo);
312 spin_lock_irq(&ent->lock);
319 remove_cache_mr_locked(ent);
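
Lines 293-319 (resize_available_mrs) grow or shrink the entry toward a target: when the limit_fill flag is set the target becomes 2 * limit, any shortfall over available + pending is created with add_keys(), and any excess is destroyed one MR at a time. A pure-arithmetic sketch of that decision (hypothetical helper name):

#include <stdio.h>

/* Decide how far the entry is from the target, following the arithmetic
 * in lines 298-319. Positive means "create this many", negative means
 * "shrink by this many". */
static long resize_delta(unsigned int available, unsigned int pending,
                         unsigned int limit, unsigned int target,
                         int limit_fill)
{
    if (limit_fill)
        target = 2 * limit;              /* fill to the high-water mark */
    return (long)target - (long)(available + pending);
}

int main(void)
{
    printf("%ld\n", resize_delta(3, 1, 8, 0, 1));   /* 2*8 - 4 = 12 to add */
    printf("%ld\n", resize_delta(20, 0, 8, 10, 0)); /* -10: shrink by 10 */
    return 0;
}
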
327 struct mlx5_cache_ent *ent = filp->private_data;
340 spin_lock_irq(&ent->lock);
341 if (target < ent->total_mrs - ent->available_mrs) {
345 target = target - (ent->total_mrs - ent->available_mrs);
346 if (target < ent->limit || target > ent->limit*2) {
350 err = resize_available_mrs(ent, target, false);
353 spin_unlock_irq(&ent->lock);
358 spin_unlock_irq(&ent->lock);
365 struct mlx5_cache_ent *ent = filp->private_data;
369 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
386 struct mlx5_cache_ent *ent = filp->private_data;
398 spin_lock_irq(&ent->lock);
399 ent->limit = var;
400 err = resize_available_mrs(ent, 0, true);
401 spin_unlock_irq(&ent->lock);
410 struct mlx5_cache_ent *ent = filp->private_data;
414 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
433 struct mlx5_cache_ent *ent = &cache->ent[i];
436 spin_lock_irq(&ent->lock);
437 ret = ent->available_mrs < ent->limit;
438 spin_unlock_irq(&ent->lock);
450 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
452 lockdep_assert_held(&ent->lock);
454 if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
456 if (ent->available_mrs < ent->limit) {
457 ent->fill_to_high_water = true;
458 queue_work(ent->dev->cache.wq, &ent->work);
459 } else if (ent->fill_to_high_water &&
460 ent->available_mrs + ent->pending < 2 * ent->limit) {
465 queue_work(ent->dev->cache.wq, &ent->work);
466 } else if (ent->available_mrs == 2 * ent->limit) {
467 ent->fill_to_high_water = false;
468 } else if (ent->available_mrs > 2 * ent->limit) {
470 ent->fill_to_high_water = false;
471 if (ent->pending)
472 queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
475 queue_work(ent->dev->cache.wq, &ent->work);
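
Lines 450-475 (queue_adjust_cache_locked) implement watermark hysteresis: filling starts when available_mrs drops below limit, continues while fill_to_high_water holds and available + pending is below 2 * limit, stops at exactly 2 * limit, and shrinking is queued (delayed if requests are still pending) above 2 * limit. A pure state-machine sketch of those branches, with hypothetical names and the dev->fill_delay check omitted:

#include <stdio.h>

enum cache_action { DO_NOTHING, QUEUE_FILL, QUEUE_SHRINK, QUEUE_SHRINK_DELAYED };

struct ent_state {
    unsigned int available, pending, limit;
    int fill_to_high_water;
    int disabled;
};

/* Mirror the branch structure of lines 450-475: hysteresis between the
 * low-water mark (limit) and the high-water mark (2 * limit). */
static enum cache_action queue_adjust(struct ent_state *e)
{
    if (e->disabled)
        return DO_NOTHING;
    if (e->available < e->limit) {
        e->fill_to_high_water = 1;
        return QUEUE_FILL;
    }
    if (e->fill_to_high_water && e->available + e->pending < 2 * e->limit)
        return QUEUE_FILL;               /* keep filling past the low mark */
    if (e->available == 2 * e->limit) {
        e->fill_to_high_water = 0;
        return DO_NOTHING;
    }
    if (e->available > 2 * e->limit) {
        e->fill_to_high_water = 0;
        return e->pending ? QUEUE_SHRINK_DELAYED : QUEUE_SHRINK;
    }
    return DO_NOTHING;
}

int main(void)
{
    struct ent_state e = { .available = 3, .limit = 8 };

    printf("%d\n", queue_adjust(&e));    /* QUEUE_FILL: below the low mark */
    return 0;
}
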
479 static void __cache_work_func(struct mlx5_cache_ent *ent)
481 struct mlx5_ib_dev *dev = ent->dev;
485 spin_lock_irq(&ent->lock);
486 if (ent->disabled)
489 if (ent->fill_to_high_water &&
490 ent->available_mrs + ent->pending < 2 * ent->limit &&
492 spin_unlock_irq(&ent->lock);
493 err = add_keys(ent, 1);
494 spin_lock_irq(&ent->lock);
495 if (ent->disabled)
507 ent->order, err);
508 queue_delayed_work(cache->wq, &ent->dwork,
512 } else if (ent->available_mrs > 2 * ent->limit) {
527 spin_unlock_irq(&ent->lock);
531 spin_lock_irq(&ent->lock);
532 if (ent->disabled)
535 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
538 remove_cache_mr_locked(ent);
539 queue_adjust_cache_locked(ent);
542 spin_unlock_irq(&ent->lock);
547 struct mlx5_cache_ent *ent;
549 ent = container_of(work, struct mlx5_cache_ent, dwork.work);
550 __cache_work_func(ent);
555 struct mlx5_cache_ent *ent;
557 ent = container_of(work, struct mlx5_cache_ent, work);
558 __cache_work_func(ent);
566 struct mlx5_cache_ent *ent;
570 entry >= ARRAY_SIZE(cache->ent)))
577 ent = &cache->ent[entry];
578 spin_lock_irq(&ent->lock);
579 if (list_empty(&ent->head)) {
580 queue_adjust_cache_locked(ent);
581 ent->miss++;
582 spin_unlock_irq(&ent->lock);
583 mr = create_cache_mr(ent);
587 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
589 ent->available_mrs--;
590 queue_adjust_cache_locked(ent);
591 spin_unlock_irq(&ent->lock);
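
Lines 566-591 show the cache-allocation fast path: if the requested entry's free list is empty, the miss counter is bumped and an MR is created synchronously; otherwise the head of the list is popped, available_mrs is decremented, and the background refill is re-evaluated. A sketch of that pop-or-miss decision, with a pthread mutex and a stub in place of create_cache_mr():

#include <pthread.h>
#include <stdio.h>

struct mr_node { struct mr_node *next; };

struct cache_ent {
    pthread_mutex_t lock;
    struct mr_node *head;
    unsigned int available_mrs;
    unsigned int miss;
};

/* Stand-in for create_cache_mr(): the driver registers a new mkey here. */
static struct mr_node *create_mr_synchronously(struct cache_ent *ent)
{
    (void)ent;
    return NULL;
}

/* Pop a cached MR if one is available; otherwise count a miss and fall
 * back to synchronous creation (compare lines 578-591). */
static struct mr_node *cache_alloc(struct cache_ent *ent)
{
    struct mr_node *mr;

    pthread_mutex_lock(&ent->lock);
    if (!ent->head) {
        ent->miss++;
        pthread_mutex_unlock(&ent->lock);
        return create_mr_synchronously(ent);
    }
    mr = ent->head;
    ent->head = mr->next;
    ent->available_mrs--;
    /* the driver also re-runs queue_adjust_cache_locked() here */
    pthread_mutex_unlock(&ent->lock);
    return mr;
}

int main(void)
{
    struct cache_ent ent = { .lock = PTHREAD_MUTEX_INITIALIZER };

    cache_alloc(&ent);                   /* empty list: counts a miss */
    printf("misses=%u\n", ent.miss);
    return 0;
}
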
602 struct mlx5_cache_ent *ent = req_ent;
605 for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
606 mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
607 ent - dev->cache.ent);
609 spin_lock_irq(&ent->lock);
610 if (!list_empty(&ent->head)) {
611 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
614 ent->available_mrs--;
615 queue_adjust_cache_locked(ent);
616 spin_unlock_irq(&ent->lock);
619 queue_adjust_cache_locked(ent);
620 spin_unlock_irq(&ent->lock);
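
Lines 602-620 (the get_cache_mr fallback) scan upward from the requested entry through the standard entries; the first non-empty list supplies the MR, and each entry visited gets a queue_adjust so empty ones start refilling. A sketch of the scan as a loop over an array of entries, with locking omitted and a hypothetical bound in place of MR_CACHE_LAST_STD_ENTRY:

#include <stddef.h>

struct mr_node { struct mr_node *next; };

struct cache_ent {
    struct mr_node *head;
    unsigned int available_mrs;
};

#define LAST_STD_ENTRY 15   /* hypothetical; MR_CACHE_LAST_STD_ENTRY in the driver */

/* Walk from the requested entry up to the last standard entry and take
 * the first cached MR found, as in lines 605-620. */
static struct mr_node *get_cache_mr(struct cache_ent *cache, size_t req)
{
    for (size_t i = req; i <= LAST_STD_ENTRY; i++) {
        struct cache_ent *ent = &cache[i];

        if (ent->head) {
            struct mr_node *mr = ent->head;

            ent->head = mr->next;
            ent->available_mrs--;
            return mr;
        }
        /* empty entry: the driver queues a refill before moving on */
    }
    return NULL;
}

int main(void)
{
    struct cache_ent cache[LAST_STD_ENTRY + 1] = { 0 };
    struct mr_node mr = { 0 };

    cache[5].head = &mr;                 /* only entry 5 has a cached MR */
    return get_cache_mr(cache, 2) == &mr ? 0 : 1;
}
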
631 struct mlx5_cache_ent *ent = mr->cache_ent;
634 spin_lock_irq(&ent->lock);
635 ent->total_mrs--;
636 spin_unlock_irq(&ent->lock);
641 struct mlx5_cache_ent *ent = mr->cache_ent;
643 if (!ent)
653 spin_lock_irq(&ent->lock);
654 list_add_tail(&mr->list, &ent->head);
655 ent->available_mrs++;
656 queue_adjust_cache_locked(ent);
657 spin_unlock_irq(&ent->lock);
663 struct mlx5_cache_ent *ent = &cache->ent[c];
668 cancel_delayed_work(&ent->dwork);
670 spin_lock_irq(&ent->lock);
671 if (list_empty(&ent->head)) {
672 spin_unlock_irq(&ent->lock);
675 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
677 ent->available_mrs--;
678 ent->total_mrs--;
679 spin_unlock_irq(&ent->lock);
701 struct mlx5_cache_ent *ent;
711 ent = &cache->ent[i];
712 sprintf(ent->name, "%d", ent->order);
713 dir = debugfs_create_dir(ent->name, cache->root);
714 debugfs_create_file("size", 0600, dir, ent, &size_fops);
715 debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
716 debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
717 debugfs_create_u32("miss", 0600, dir, &ent->miss);
731 struct mlx5_cache_ent *ent;
744 ent = &cache->ent[i];
745 INIT_LIST_HEAD(&ent->head);
746 spin_lock_init(&ent->lock);
747 ent->order = i + 2;
748 ent->dev = dev;
749 ent->limit = 0;
751 INIT_WORK(&ent->work, cache_work_func);
752 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
755 mlx5_odp_init_mr_cache_entry(ent);
759 if (ent->order > mr_cache_max_order(dev))
762 ent->page = PAGE_SHIFT;
763 ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
765 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
769 ent->limit = dev->mdev->profile->mr_cache[i].limit;
771 ent->limit = 0;
772 spin_lock_irq(&ent->lock);
773 queue_adjust_cache_locked(ent);
774 spin_unlock_irq(&ent->lock);
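
Lines 731-774 (cache initialization) size each standard entry: entry i caches MRs of order i + 2, and ent->xlt is the number of translation octowords needed for 2^order MTT entries. A sketch of that sizing arithmetic, assuming the usual 8-byte MTT entry and 16-byte UMR octoword:

#include <stdio.h>

#define MTT_SIZE      8u    /* sizeof(struct mlx5_mtt) */
#define UMR_OCTOWORD 16u    /* MLX5_IB_UMR_OCTOWORD */

/* Entry i caches MRs of order i + 2 (line 747). */
static unsigned int ent_order(unsigned int i) { return i + 2; }

/* Translation table size in octowords for 2^order page entries (line 763). */
static unsigned int ent_xlt(unsigned int order)
{
    return (1u << order) * MTT_SIZE / UMR_OCTOWORD;
}

int main(void)
{
    for (unsigned int i = 0; i < 4; i++)
        printf("entry %u: order %u, xlt %u octowords\n",
               i, ent_order(i), ent_xlt(ent_order(i)));
    return 0;
}
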
790 struct mlx5_cache_ent *ent = &dev->cache.ent[i];
792 spin_lock_irq(&ent->lock);
793 ent->disabled = true;
794 spin_unlock_irq(&ent->lock);
795 cancel_work_sync(&ent->work);
796 cancel_delayed_work_sync(&ent->dwork);
974 if (order < cache->ent[0].order)
975 return &cache->ent[0];
976 order = order - cache->ent[0].order;
979 return &cache->ent[order];
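
Lines 974-979 map a page-list order onto a cache entry: anything below the smallest cached order uses entry 0, otherwise the index is the difference from that base order (the driver also bounds-checks against the last standard entry, which the matched lines do not show). A small sketch of that mapping, with illustrative constants:

#include <stdio.h>

#define BASE_ORDER  2      /* cache->ent[0].order: smallest cached order */
#define NUM_ENTRIES 16     /* hypothetical number of standard entries */

/* Map an MR order to a cache-entry index as lines 974-979 do;
 * returns -1 when the order is too large to be served from the cache. */
static int ent_index_from_order(unsigned int order)
{
    if (order < BASE_ORDER)
        return 0;
    order -= BASE_ORDER;
    if (order >= NUM_ENTRIES)
        return -1;
    return (int)order;
}

int main(void)
{
    printf("%d %d %d\n", ent_index_from_order(1),
           ent_index_from_order(5), ent_index_from_order(40));
    return 0;
}
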
988 struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
991 if (!ent)
998 mr = get_cache_mr(ent);
1000 mr = create_cache_mr(ent);