
Searched refs:qp_table (Results 1 - 25 of 55) sorted by relevance

/kernel/linux/linux-5.10/drivers/gpu/drm/amd/display/dc/dsc/
qp_tables.h
28 const qp_table qp_table_422_10bpc_min = {
61 const qp_table qp_table_444_8bpc_max = {
102 const qp_table qp_table_420_12bpc_max = {
135 const qp_table qp_table_444_10bpc_min = {
188 const qp_table qp_table_420_8bpc_max = {
209 const qp_table qp_table_444_8bpc_min = {
250 const qp_table qp_table_444_12bpc_min = {
315 const qp_table qp_table_420_12bpc_min = {
348 const qp_table qp_table_422_12bpc_min = {
389 const qp_table qp_table_422_12bpc_ma
[all...]
rc_calc.h
78 typedef struct qp_entry qp_table[]; typedef
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/display/dc/dml/dsc/
qp_tables.h
28 static const qp_table qp_table_422_10bpc_min = {
61 static const qp_table qp_table_444_8bpc_max = {
102 static const qp_table qp_table_420_12bpc_max = {
135 static const qp_table qp_table_444_10bpc_min = {
188 static const qp_table qp_table_420_8bpc_max = {
209 static const qp_table qp_table_444_8bpc_min = {
250 static const qp_table qp_table_444_12bpc_min = {
315 static const qp_table qp_table_420_12bpc_min = {
348 static const qp_table qp_table_422_12bpc_min = {
389 static const qp_table qp_table_422_12bpc_ma
[all...]
rc_calc_fpu.h
79 typedef struct qp_entry qp_table[]; typedef
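
Note: the two rc_calc headers above declare qp_table as an unsized array type of struct qp_entry, which is why the qp_tables.h hits can define objects such as qp_table_444_8bpc_min as plain const arrays. A minimal, self-contained sketch of that usage follows; the qp_entry field layout and the example values are assumptions for illustration, not copied from the kernel headers.

/* Illustration only: how a "typedef struct qp_entry qp_table[]" type is used.
 * The qp_entry fields below are assumed for this sketch. */
#include <stdio.h>

typedef unsigned char qp_set[15];          /* assumed: one QP per RC range     */

struct qp_entry {
    float  bpp;                            /* assumed: target bits per pixel   */
    qp_set qps;                            /* assumed: per-range QP values     */
};

/* The typedef from rc_calc.h / rc_calc_fpu.h: an unsized array type, so each
 * table object gets its length from its initializer. */
typedef struct qp_entry qp_table[];

static const qp_table example_min = {
    { 6.0f, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 9 } },
    { 6.5f, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 8 } },
};

int main(void)
{
    /* the row count comes from sizeof on the initialized array object */
    size_t rows = sizeof(example_min) / sizeof(example_min[0]);

    printf("rows=%zu, first bpp=%.1f, first qp=%d\n",
           rows, example_min[0].bpp, example_min[0].qps[0]);
    return 0;
}
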
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/
qp.c
51 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_event() local
54 spin_lock(&qp_table->lock); in mlx4_qp_event()
60 spin_unlock(&qp_table->lock); in mlx4_qp_event()
226 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_reserve_range() local
239 *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align, in __mlx4_qp_reserve_range()
278 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_release_range() local
282 mlx4_zone_free_entries_unique(qp_table in __mlx4_qp_release_range()
311 struct mlx4_qp_table *qp_table = &priv->qp_table; __mlx4_qp_alloc_icm() local
368 struct mlx4_qp_table *qp_table = &priv->qp_table; __mlx4_qp_free_icm() local
393 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_qp_lookup() local
407 struct mlx4_qp_table *qp_table = &priv->qp_table; mlx4_qp_alloc() local
515 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_qp_remove() local
551 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_create_zones() local
737 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_cleanup_qp_zones() local
764 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_init_qp_table() local
[all...]
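
Aside: the qp.c hits above share one pattern: each function resolves the per-device table via &mlx4_priv(dev)->qp_table (or &priv->qp_table) and brackets QPN lookups with the table's lock. Below is a minimal user-space sketch of that lock-then-mask-then-lookup pattern; the mutex, the flat slot array, and all names are illustrative stand-ins, not the mlx4 driver's actual spinlock/radix-tree code.

/* Illustration only: lock-protected QPN -> QP lookup, as in the mlx4 hits.
 * A pthread mutex and a fixed array stand in for the kernel spinlock and
 * radix tree; all names here are hypothetical. */
#include <pthread.h>
#include <stdio.h>

#define NUM_QPS 256                          /* stand-in for dev->caps.num_qps */

struct qp {
    int qpn;
    int refcount;
};

struct qp_table {
    pthread_mutex_t lock;                    /* cf. spin_lock(&qp_table->lock) */
    struct qp *slots[NUM_QPS];               /* cf. the driver's radix tree    */
};

static struct qp *qp_lookup(struct qp_table *tbl, int qpn)
{
    struct qp *qp;

    pthread_mutex_lock(&tbl->lock);
    qp = tbl->slots[qpn & (NUM_QPS - 1)];    /* mask the QPN into the table     */
    if (qp)
        qp->refcount++;                      /* take a reference under the lock */
    pthread_mutex_unlock(&tbl->lock);
    return qp;
}

int main(void)
{
    struct qp_table tbl = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct qp a = { .qpn = 42 };

    tbl.slots[a.qpn & (NUM_QPS - 1)] = &a;

    struct qp *found = qp_lookup(&tbl, 42);
    printf("found qpn=%d, refcount=%d\n", found ? found->qpn : -1, a.refcount);
    return 0;
}
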
profile.c
189 for (priv->qp_table.rdmarc_shift = 0; in mlx4_make_profile()
190 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; in mlx4_make_profile()
191 ++priv->qp_table.rdmarc_shift) in mlx4_make_profile()
193 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; in mlx4_make_profile()
194 priv->qp_table.rdmarc_base = (u32) profile[i].start; in mlx4_make_profile()
196 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; in mlx4_make_profile()
main.c
1590 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, in mlx4_init_cmpt_table()
1636 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_init_cmpt_table()
1720 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, in mlx4_init_icm()
1731 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, in mlx4_init_icm()
1742 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, in mlx4_init_icm()
1753 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, in mlx4_init_icm()
1755 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, in mlx4_init_icm()
1811 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); in mlx4_init_icm()
1814 mlx4_cleanup_icm_table(dev, &priv->qp_table in mlx4_init_icm()
[all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx4/
qp.c
58 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_event() local
61 spin_lock(&qp_table->lock); in mlx4_qp_event()
67 spin_unlock(&qp_table->lock); in mlx4_qp_event()
231 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_reserve_range() local
244 *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align, in __mlx4_qp_reserve_range()
283 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_release_range() local
287 mlx4_zone_free_entries_unique(qp_table in __mlx4_qp_release_range()
316 struct mlx4_qp_table *qp_table = &priv->qp_table; __mlx4_qp_alloc_icm() local
373 struct mlx4_qp_table *qp_table = &priv->qp_table; __mlx4_qp_free_icm() local
398 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_qp_lookup() local
412 struct mlx4_qp_table *qp_table = &priv->qp_table; mlx4_qp_alloc() local
520 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_qp_remove() local
555 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_create_zones() local
741 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_cleanup_qp_zones() local
768 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; mlx4_init_qp_table() local
[all...]
profile.c
189 for (priv->qp_table.rdmarc_shift = 0; in mlx4_make_profile()
190 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; in mlx4_make_profile()
191 ++priv->qp_table.rdmarc_shift) in mlx4_make_profile()
193 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; in mlx4_make_profile()
194 priv->qp_table.rdmarc_base = (u32) profile[i].start; in mlx4_make_profile()
196 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; in mlx4_make_profile()
main.c
1615 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, in mlx4_init_cmpt_table()
1661 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_init_cmpt_table()
1745 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, in mlx4_init_icm()
1756 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, in mlx4_init_icm()
1767 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, in mlx4_init_icm()
1778 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, in mlx4_init_icm()
1780 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, in mlx4_init_icm()
1836 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); in mlx4_init_icm()
1839 mlx4_cleanup_icm_table(dev, &priv->qp_table in mlx4_init_icm()
[all...]
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/
mthca_qp.c
198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
244 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
248 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
265 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
268 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
770 cpu_to_be32(dev->qp_table in __mthca_modify_qp()
[all...]
mthca_main.c
443 mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, in mthca_init_icm()
448 if (!mdev->qp_table.qp_table) { in mthca_init_icm()
454 mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, in mthca_init_icm()
459 if (!mdev->qp_table.eqp_table) { in mthca_init_icm()
465 mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, in mthca_init_icm()
468 mdev->qp_table.rdb_shift, 0, in mthca_init_icm()
470 if (!mdev->qp_table.rdb_table) { in mthca_init_icm()
530 mthca_free_icm_table(mdev, mdev->qp_table in mthca_init_icm()
[all...]
mthca_profile.c
206 for (dev->qp_table.rdb_shift = 0; in mthca_make_profile()
207 request->num_qp << dev->qp_table.rdb_shift < profile[i].num; in mthca_make_profile()
208 ++dev->qp_table.rdb_shift) in mthca_make_profile()
210 dev->qp_table.rdb_base = (u32) profile[i].start; in mthca_make_profile()
mthca_dev.h
259 struct mthca_icm_table *qp_table; member
344 struct mthca_qp_table qp_table; member
/kernel/linux/linux-6.6/drivers/infiniband/hw/mthca/
mthca_qp.c
198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
244 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
248 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
265 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
268 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
770 cpu_to_be32(dev->qp_table in __mthca_modify_qp()
[all...]
mthca_main.c
443 mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, in mthca_init_icm()
448 if (!mdev->qp_table.qp_table) { in mthca_init_icm()
454 mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, in mthca_init_icm()
459 if (!mdev->qp_table.eqp_table) { in mthca_init_icm()
465 mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, in mthca_init_icm()
468 mdev->qp_table.rdb_shift, 0, in mthca_init_icm()
470 if (!mdev->qp_table.rdb_table) { in mthca_init_icm()
530 mthca_free_icm_table(mdev, mdev->qp_table in mthca_init_icm()
[all...]
mthca_profile.c
204 for (dev->qp_table.rdb_shift = 0; in mthca_make_profile()
205 request->num_qp << dev->qp_table.rdb_shift < profile[i].num; in mthca_make_profile()
206 ++dev->qp_table.rdb_shift) in mthca_make_profile()
208 dev->qp_table.rdb_base = (u32) profile[i].start; in mthca_make_profile()
/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/
hns_roce_qp.c
230 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpn() local
238 mutex_lock(&qp_table->bank_mutex); in alloc_qpn()
239 bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank); in alloc_qpn()
241 ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, in alloc_qpn()
246 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
250 qp_table->bank[bankid].inuse++; in alloc_qpn()
251 mutex_unlock(&qp_table->bank_mutex); in alloc_qpn()
305 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc() local
388 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; free_qpc() local
1462 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; hns_roce_init_qp_table() local
[all...]
hns_roce_main.c
775 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, in hns_roce_init_hem()
783 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table, in hns_roce_init_hem()
795 &hr_dev->qp_table.trrl_table, in hns_roce_init_hem()
829 &hr_dev->qp_table.sccc_table, in hns_roce_init_hem()
890 &hr_dev->qp_table.sccc_table); in hns_roce_init_hem()
901 &hr_dev->qp_table.trrl_table); in hns_roce_init_hem()
904 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); in hns_roce_init_hem()
907 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); in hns_roce_init_hem()
[all...]
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
hns_roce_qp.c
172 ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap, in alloc_qpn()
253 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc() local
266 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
273 ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn); in alloc_qpc()
281 ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table, in alloc_qpc()
291 ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, in alloc_qpc()
303 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); in alloc_qpc()
306 hns_roce_table_put(hr_dev, &qp_table in alloc_qpc()
331 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; free_qpc() local
345 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; free_qpn() local
1295 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; hns_roce_init_qp_table() local
[all...]
hns_roce_main.c
596 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, in hns_roce_init_hem()
604 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table, in hns_roce_init_hem()
616 &hr_dev->qp_table.trrl_table, in hns_roce_init_hem()
650 &hr_dev->qp_table.sccc_table, in hns_roce_init_hem()
694 &hr_dev->qp_table.sccc_table); in hns_roce_init_hem()
705 &hr_dev->qp_table.trrl_table); in hns_roce_init_hem()
708 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); in hns_roce_init_hem()
711 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); in hns_roce_init_hem()
[all...]
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/
qpc.c
98 xa_lock_irqsave(&dev->qp_table.dct_xa, flags); in dct_event_notifier()
99 dct = xa_load(&dev->qp_table.dct_xa, qpn); in dct_event_notifier()
102 xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags); in dct_event_notifier()
110 container_of(nb, struct mlx5_ib_dev, qp_table.nb); in rsc_event_notifier()
135 common = mlx5_get_rsc(&dev->qp_table, rsn); in rsc_event_notifier()
162 struct mlx5_qp_table *table = &dev->qp_table; in create_resource_common()
184 struct mlx5_qp_table *table = &dev->qp_table; in destroy_resource_common()
222 err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL)); in mlx5_core_create_dct()
279 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_core_destroy_dct()
502 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_init_qp_table()
[all...]
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
qpc.c
156 struct mlx5_qp_table *table = &dev->qp_table; in create_resource_common()
178 struct mlx5_qp_table *table = &dev->qp_table; in destroy_resource_common()
479 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_init_qp_table()
493 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_cleanup_qp_table()
632 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_core_res_hold()
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/
qp.c
426 rdi->qp_dev->qp_table = in rvt_driver_qp_init()
428 sizeof(*rdi->qp_dev->qp_table), in rvt_driver_qp_init()
430 if (!rdi->qp_dev->qp_table) in rvt_driver_qp_init()
434 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL); in rvt_driver_qp_init()
447 kfree(rdi->qp_dev->qp_table); in rvt_driver_qp_init()
509 kfree(rdi->qp_dev->qp_table); in rvt_qp_exit()
789 qpp = &rdi->qp_dev->qp_table[n]; in rvt_remove_qp()
1441 qp->next = rdi->qp_dev->qp_table[n]; in rvt_insert_qp()
1442 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp); in rvt_insert_qp()
2768 rdi->qp_dev->qp_table[ in __must_hold()
[all...]
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/
qp.c
385 rdi->qp_dev->qp_table = in rvt_driver_qp_init()
387 sizeof(*rdi->qp_dev->qp_table), in rvt_driver_qp_init()
389 if (!rdi->qp_dev->qp_table) in rvt_driver_qp_init()
393 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL); in rvt_driver_qp_init()
406 kfree(rdi->qp_dev->qp_table); in rvt_driver_qp_init()
468 kfree(rdi->qp_dev->qp_table); in rvt_qp_exit()
749 qpp = &rdi->qp_dev->qp_table[n]; in rvt_remove_qp()
1385 qp->next = rdi->qp_dev->qp_table[n]; in rvt_insert_qp()
1386 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp); in rvt_insert_qp()
2717 rdi->qp_dev->qp_table[ in __must_hold()
[all...]
