/kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/ |
a5xx_preempt.c |
     61  for (i = 0; i < gpu->nr_rings; i++) {   in get_next_ring()
     98  if (gpu->nr_rings == 1)   in a5xx_preempt_trigger()
    206  if (gpu->nr_rings == 1)   in a5xx_preempt_hw_init()
    209  for (i = 0; i < gpu->nr_rings; i++) {   in a5xx_preempt_hw_init()
    276  for (i = 0; i < gpu->nr_rings; i++) {   in a5xx_preempt_fini()
    290  if (gpu->nr_rings <= 1)   in a5xx_preempt_init()
    293  for (i = 0; i < gpu->nr_rings; i++) {   in a5xx_preempt_init()
    297  * set nr_rings to 1 to force preemption off   in a5xx_preempt_init()
    300  gpu->nr_rings = 1;   in a5xx_preempt_init()
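The hits above show the a5xx pattern: preemption paths bail out when only one ring is configured, and a failure while setting up per-ring preemption state forces nr_rings back to 1. A minimal standalone sketch of that fallback, with invented names (fake_gpu, setup_ring_preempt) rather than the driver's own:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_RINGS 4

struct fake_gpu {
	unsigned int nr_rings;
	void *preempt_state[MAX_RINGS];
};

/* Illustrative stand-in for per-ring preemption setup; may fail. */
static int setup_ring_preempt(struct fake_gpu *gpu, unsigned int i)
{
	gpu->preempt_state[i] = malloc(64);
	return gpu->preempt_state[i] ? 0 : -1;
}

static void preempt_init(struct fake_gpu *gpu)
{
	unsigned int i;

	/* With a single ring there is nothing to preempt between. */
	if (gpu->nr_rings <= 1)
		return;

	for (i = 0; i < gpu->nr_rings; i++) {
		if (setup_ring_preempt(gpu, i)) {
			/*
			 * Same recovery strategy as the driver: instead of
			 * failing, drop back to one ring, which disables
			 * preemption everywhere else that tests nr_rings == 1.
			 */
			gpu->nr_rings = 1;
			return;
		}
	}
}

int main(void)
{
	struct fake_gpu gpu = { .nr_rings = 4 };

	preempt_init(&gpu);
	printf("rings in use: %u\n", gpu.nr_rings);
	return 0;
}
```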
|
adreno_gpu.c |
    255  *value = gpu->nr_rings;   in adreno_get_param()
    404  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_hw_init()
    498  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_gpu_state_get()
    679  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_show()
    733  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_dump_info()
    896  const struct adreno_gpu_funcs *funcs, int nr_rings)   in adreno_gpu_init()
    912  adreno_gpu_config.nr_rings = nr_rings;   in adreno_gpu_init()
    894  adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs, int nr_rings)   adreno_gpu_init() argument
|
a5xx_gpu.c |
    388  if (gpu->nr_rings == 1)   in a5xx_preempt_start()
    760  sizeof(u32) * gpu->nr_rings,   in a5xx_hw_init()
    771  } else if (gpu->nr_rings > 1) {   in a5xx_hw_init()
    774  gpu->nr_rings = 1;   in a5xx_hw_init()
   1235  for (i = 0; i < gpu->nr_rings; i++)   in a5xx_pm_suspend()
   1572  unsigned int nr_rings;   in a5xx_gpu_init() local
   1593  nr_rings = 4;   in a5xx_gpu_init()
   1596  nr_rings = 1;   in a5xx_gpu_init()
   1598  ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);   in a5xx_gpu_init()
|
adreno_gpu.h |
    256  int nr_rings);
|
a6xx_gpu.c |
    873  sizeof(u32) * gpu->nr_rings,   in a6xx_hw_init()
   1132  for (i = 0; i < gpu->nr_rings; i++)   in a6xx_pm_suspend()
|
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/adreno/ |
a5xx_preempt.c |
     61  for (i = 0; i < gpu->nr_rings; i++) {   in get_next_ring()
     97  if (gpu->nr_rings == 1)   in a5xx_preempt_trigger()
    203  if (gpu->nr_rings == 1)   in a5xx_preempt_hw_init()
    206  for (i = 0; i < gpu->nr_rings; i++) {   in a5xx_preempt_hw_init()
    272  for (i = 0; i < gpu->nr_rings; i++) {   in a5xx_preempt_fini()
    285  if (gpu->nr_rings <= 1)   in a5xx_preempt_init()
    288  for (i = 0; i < gpu->nr_rings; i++) {   in a5xx_preempt_init()
    292  * set nr_rings to 1 to force preemption off   in a5xx_preempt_init()
    295  gpu->nr_rings = 1;   in a5xx_preempt_init()
|
adreno_gpu.c |
    348  *value = gpu->nr_rings * NR_SCHED_PRIORITIES;   in adreno_get_param()
    561  for (int i = 0; i < gpu->nr_rings; i++) {   in adreno_hw_init()
    662  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_gpu_state_get()
    862  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_show()
    916  for (i = 0; i < gpu->nr_rings; i++) {   in adreno_dump_info()
   1059  const struct adreno_gpu_funcs *funcs, int nr_rings)   in adreno_gpu_init()
   1104  adreno_gpu_config.nr_rings = nr_rings;   in adreno_gpu_init()
   1057  adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs, int nr_rings)   adreno_gpu_init() argument
|
a5xx_gpu.c |
    513  if (gpu->nr_rings == 1)   in a5xx_preempt_start()
    611  sizeof(u32) * gpu->nr_rings,   in a5xx_ucode_load()
    621  } else if (gpu->nr_rings > 1) {   in a5xx_ucode_load()
    624  gpu->nr_rings = 1;   in a5xx_ucode_load()
   1420  for (i = 0; i < gpu->nr_rings; i++)   in a5xx_pm_suspend()
   1750  unsigned int nr_rings;   in a5xx_gpu_init() local
   1771  nr_rings = 4;   in a5xx_gpu_init()
   1774  nr_rings = 1;   in a5xx_gpu_init()
   1776  ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);   in a5xx_gpu_init()
|
adreno_device.c |
    811  for (i = 0; i < gpu->nr_rings; i++) {   in suspend_scheduler()
    821  for (i = 0; i < gpu->nr_rings; i++) {   in resume_scheduler()
|
adreno_gpu.h |
    432  int nr_rings);
|
a6xx_gpu.c |
   1155  sizeof(u32) * gpu->nr_rings,   in a6xx_ucode_load()
   1998  for (i = 0; i < gpu->nr_rings; i++)   in a6xx_gmu_pm_suspend()
   2034  for (i = 0; i < gpu->nr_rings; i++)   in a6xx_pm_suspend()
|
/kernel/linux/linux-6.6/drivers/gpu/drm/msm/ |
msm_gpu.c |
    406  for (i = 0; i < gpu->nr_rings; i++) {   in recover_worker()
    431  for (i = 0; i < gpu->nr_rings; i++) {   in recover_worker()
    699  for (i = 0; i < gpu->nr_rings; i++) {   in retire_submits()
    739  for (i = 0; i < gpu->nr_rings; i++)   in msm_gpu_retire()
    847  int i, ret, nr_rings = config->nr_rings;   in msm_gpu_init() local
    946  sizeof(struct msm_rbmemptrs) * nr_rings,   in msm_gpu_init()
    958  if (nr_rings > ARRAY_SIZE(gpu->rb)) {   in msm_gpu_init()
    961  nr_rings = ARRAY_SIZE(gpu->rb);   in msm_gpu_init()
    965  for (i = 0; i < nr_rings;   in msm_gpu_init()
  [all...]
msm_gpu.h |
     29  unsigned int nr_rings;   member
    187  int nr_rings;   member
    322  for (i = 0; i < gpu->nr_rings; i++) {   in msm_gpu_active()
    443  * number of available priority levels is (nr_rings * NR_SCHED_PRIORITIES).
    454  * This allows generations without preemption (nr_rings==1) to have some
    470  if (rn >= gpu->nr_rings)   in msm_gpu_convert_priority()
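The msm_gpu.h comments describe exposing nr_rings * NR_SCHED_PRIORITIES flat priority levels to userspace and folding each back into a (ring, scheduler priority) pair, with msm_gpu_convert_priority rejecting any ring index at or beyond nr_rings. A hedged standalone illustration of that div/mod split; the NR_SCHED_PRIORITIES value and the helper name convert_priority are assumptions for the sketch, not copied from the header:

```c
#include <stdio.h>

#define NR_SCHED_PRIORITIES 3	/* illustrative; the real constant lives in msm_gpu.h */

/*
 * Split a flat userspace priority into a ring number and a per-ring
 * scheduler priority, per the mapping the msm_gpu.h comment describes:
 * total levels = nr_rings * NR_SCHED_PRIORITIES.
 */
static int convert_priority(unsigned int nr_rings, unsigned int prio,
			    unsigned int *ring_nr, unsigned int *sched_prio)
{
	unsigned int rn = prio / NR_SCHED_PRIORITIES;
	unsigned int sp = prio % NR_SCHED_PRIORITIES;

	if (rn >= nr_rings)
		return -1;	/* out of range, as the real helper rejects it */

	*ring_nr = rn;
	*sched_prio = sp;
	return 0;
}

int main(void)
{
	unsigned int ring, sp;
	unsigned int nr_rings = 4;

	for (unsigned int prio = 0; prio < nr_rings * NR_SCHED_PRIORITIES; prio++) {
		convert_priority(nr_rings, prio, &ring, &sp);
		printf("prio %2u -> ring %u, sched prio %u\n", prio, ring, sp);
	}
	return 0;
}
```

This is also the shape behind msm_submitqueue.c's max_priority = (nr_rings * NR_SCHED_PRIORITIES) - 1 hit below: a GPU without preemption (nr_rings == 1) still offers NR_SCHED_PRIORITIES software levels.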
|
msm_submitqueue.c |
    225  max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;   in msm_submitqueue_init()
|
/kernel/linux/linux-6.6/drivers/i3c/master/mipi-i3c-hci/ |
dma.c |
    213  unsigned int i, nr_rings, xfers_sz, resps_sz;   in hci_dma_init() local
    218  nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);   in hci_dma_init()
    219  dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);   in hci_dma_init()
    220  if (unlikely(nr_rings > 8)) {   in hci_dma_init()
    222  nr_rings = 8;   in hci_dma_init()
    224  if (nr_rings > XFER_RINGS)   in hci_dma_init()
    225  nr_rings = XFER_RINGS;   in hci_dma_init()
    226  rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);   in hci_dma_init()
    230  rings->total = nr_rings;   in hci_dma_init()
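hci_dma_init reads the available ring count from a capability register, clamps it to a hard ceiling of 8 and then to the driver's XFER_RINGS, and allocates a rings structure sized with struct_size() for that many headers. A userspace sketch of the same clamp-then-allocate pattern with a flexible array member; the register read is replaced by a plain parameter and the struct fields are invented for the example:

```c
#include <stdio.h>
#include <stdlib.h>

#define XFER_RINGS 4	/* illustrative driver limit, standing in for the real XFER_RINGS */

struct ring_header {
	unsigned int enabled;
};

struct hci_rings_data {
	unsigned int total;
	struct ring_header headers[];	/* flexible array, one entry per ring */
};

static struct hci_rings_data *rings_alloc(unsigned int cap_rings)
{
	struct hci_rings_data *rings;
	unsigned int nr_rings = cap_rings;

	/* The capability field can never legitimately mean more than 8 rings. */
	if (nr_rings > 8)
		nr_rings = 8;
	/* ...and the driver only wires up XFER_RINGS of them. */
	if (nr_rings > XFER_RINGS)
		nr_rings = XFER_RINGS;

	/* Userspace equivalent of kzalloc(struct_size(rings, headers, nr_rings), ...) */
	rings = calloc(1, sizeof(*rings) + nr_rings * sizeof(rings->headers[0]));
	if (!rings)
		return NULL;

	rings->total = nr_rings;
	return rings;
}

int main(void)
{
	struct hci_rings_data *rings = rings_alloc(13);

	if (rings) {
		printf("using %u DMA rings\n", rings->total);
		free(rings);
	}
	return 0;
}
```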
|
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/ |
msm_gpu.c |
    496  for (i = 0; i < gpu->nr_rings; i++) {   in recover_worker()
    523  for (i = 0; i < gpu->nr_rings; i++) {   in recover_worker()
    721  for (i = 0; i < gpu->nr_rings; i++) {   in retire_submits()
    737  for (i = 0; i < gpu->nr_rings; i++)   in retire_worker()
    861  int i, ret, nr_rings = config->nr_rings;   in msm_gpu_init() local
    940  sizeof(struct msm_rbmemptrs) * nr_rings,   in msm_gpu_init()
    952  if (nr_rings > ARRAY_SIZE(gpu->rb)) {   in msm_gpu_init()
    955  nr_rings = ARRAY_SIZE(gpu->rb);   in msm_gpu_init()
    959  for (i = 0; i < nr_rings;   in msm_gpu_init()
  [all...]
msm_submitqueue.c |
     78  if (prio >= priv->gpu->nr_rings) {   in msm_submitqueue_create()
    110  * Select priority 2 as the "default priority" unless nr_rings is less   in msm_submitqueue_init()
    114  clamp_t(uint32_t, 2, 0, priv->gpu->nr_rings - 1) : 0;   in msm_submitqueue_init()
|
msm_gpu.h |
     27  unsigned int nr_rings;   member
     95  int nr_rings;   member
    170  for (i = 0; i < gpu->nr_rings; i++) {   in msm_gpu_active()
|
/kernel/linux/linux-5.10/drivers/block/xen-blkback/ |
xenbus.c |
    109  for (i = 0; i < blkif->nr_rings; i++) {   in xen_update_blkif_status()
    134  blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),   in xen_blkif_alloc_rings()
    139  for (r = 0; r < blkif->nr_rings; r++) {   in xen_blkif_alloc_rings()
    273  for (r = 0; r < blkif->nr_rings; r++) {   in xen_blkif_disconnect()
    340  blkif->nr_rings = 0;   in xen_blkif_disconnect()
    392  for (i = 0; i < blkif->nr_rings; i++) { \
   1113  blkif->nr_rings = requested_num_queues;   in connect_ring()
   1118  blkif->nr_rings, blkif->blk_protocol, protocol,   in connect_ring()
   1138  if (blkif->nr_rings == 1)   in connect_ring()
   1148  for (i = 0; i < blkif->nr_rings;   in connect_ring()
  [all...]
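In the backend, connect_ring stores the queue count requested by the frontend in blkif->nr_rings, xen_blkif_alloc_rings then kcallocs that many xen_blkif_ring structures and initializes each in a loop, and the disconnect path walks the same count before resetting it to 0. A hedged sketch of that allocate-one-struct-per-ring shape; plain calloc stands in for kcalloc and the demo_* types and fields are invented for the example:

```c
#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
	unsigned int id;
	int active;
};

struct demo_blkif {
	unsigned int nr_rings;
	struct demo_ring *rings;
};

/* Mirrors the shape of xen_blkif_alloc_rings(): one array entry per ring. */
static int blkif_alloc_rings(struct demo_blkif *blkif)
{
	unsigned int r;

	blkif->rings = calloc(blkif->nr_rings, sizeof(*blkif->rings));
	if (!blkif->rings)
		return -1;

	for (r = 0; r < blkif->nr_rings; r++) {
		blkif->rings[r].id = r;
		blkif->rings[r].active = 1;
	}
	return 0;
}

static void blkif_disconnect(struct demo_blkif *blkif)
{
	unsigned int r;

	for (r = 0; r < blkif->nr_rings; r++)
		blkif->rings[r].active = 0;

	free(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;	/* the real disconnect path resets the count too */
}

int main(void)
{
	struct demo_blkif blkif = { .nr_rings = 4 };	/* value negotiated in connect_ring() */

	if (blkif_alloc_rings(&blkif) == 0) {
		printf("allocated %u backend rings\n", blkif.nr_rings);
		blkif_disconnect(&blkif);
	}
	return 0;
}
```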
common.h |
    322  unsigned int nr_rings;   member
|
/kernel/linux/linux-6.6/drivers/block/xen-blkback/ |
xenbus.c |
    109  for (i = 0; i < blkif->nr_rings; i++) {   in xen_update_blkif_status()
    134  blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),   in xen_blkif_alloc_rings()
    139  for (r = 0; r < blkif->nr_rings; r++) {   in xen_blkif_alloc_rings()
    273  for (r = 0; r < blkif->nr_rings; r++) {   in xen_blkif_disconnect()
    340  blkif->nr_rings = 0;   in xen_blkif_disconnect()
    392  for (i = 0; i < blkif->nr_rings; i++) { \
   1107  blkif->nr_rings = requested_num_queues;   in connect_ring()
   1112  blkif->nr_rings, blkif->blk_protocol, protocol,   in connect_ring()
   1132  if (blkif->nr_rings == 1)   in connect_ring()
   1142  for (i = 0; i < blkif->nr_rings;   in connect_ring()
  [all...]
common.h |
    322  unsigned int nr_rings;   member
|
/kernel/linux/linux-5.10/drivers/block/ |
xen-blkfront.c |
    226  unsigned int nr_rings;   member
    276  (idx) < (info)->nr_rings; \
    282  BUG_ON(i >= info->nr_rings);   in get_rinfo()
    999  info->tag_set.nr_hw_queues = info->nr_rings;   in xlvbd_init_blk_queue()
   1391  info->nr_rings = 0;   in blkif_free()
   1913  if (info->nr_rings == 1) {   in talk_to_blkback()
   1922  info->nr_rings);   in talk_to_blkback()
   2001  BUG_ON(info->nr_rings);   in negotiate_mq()
   2006  info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);   in negotiate_mq()
   2008  if (!info->nr_rings)
  [all...]
/kernel/linux/linux-6.6/drivers/block/ |
xen-blkfront.c |
    228  unsigned int nr_rings;   member
    276  (idx) < (info)->nr_rings; \
    282  BUG_ON(i >= info->nr_rings);   in get_rinfo()
   1119  info->tag_set.nr_hw_queues = info->nr_rings;   in xlvbd_alloc_gendisk()
   1318  info->nr_rings = 0;   in blkif_free()
   1820  if (info->nr_rings == 1) {   in talk_to_blkback()
   1829  info->nr_rings);   in talk_to_blkback()
   1901  BUG_ON(info->nr_rings);   in negotiate_mq()
   1906  info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);   in negotiate_mq()
   1908  if (!info->nr_rings)
  [all...]
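On the frontend side, negotiate_mq takes the minimum of the queue count the backend advertises and the frontend's own xen_blkif_max_queues limit, falling back to a single ring when the result is zero; the negotiated count later becomes tag_set.nr_hw_queues. A minimal sketch of that negotiation, with the backend value passed in directly instead of being read from XenStore and FRONTEND_MAX_QUEUES standing in for the module parameter:

```c
#include <stdio.h>

#define FRONTEND_MAX_QUEUES 4	/* stand-in for the xen_blkif_max_queues module parameter */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Decide how many rings to use, the way negotiate_mq() does. */
static unsigned int negotiate_rings(unsigned int backend_max_queues)
{
	unsigned int nr_rings = min_u(backend_max_queues, FRONTEND_MAX_QUEUES);

	/* An old backend that advertises nothing still gets one ring. */
	if (!nr_rings)
		nr_rings = 1;

	return nr_rings;
}

int main(void)
{
	printf("backend=8  -> %u rings\n", negotiate_rings(8));
	printf("backend=2  -> %u rings\n", negotiate_rings(2));
	printf("backend=0  -> %u rings\n", negotiate_rings(0));
	return 0;
}
```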
/kernel/linux/linux-5.10/drivers/net/ethernet/broadcom/bnxt/ |
bnxt.c |
   5429  unsigned int nr_rings)   in bnxt_hwrm_vnic_alloc()
   5432  unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;   in bnxt_hwrm_vnic_alloc()
   5445  j, nr_rings);   in bnxt_hwrm_vnic_alloc()
   5427  bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, unsigned int start_rx_ring_idx, unsigned int nr_rings)   bnxt_hwrm_vnic_alloc() argument
|