
Searched refs:ctxs (Results 1 - 25 of 26) sorted by relevance


/kernel/linux/linux-5.10/drivers/video/fbdev/omap2/omapfb/
vrfb.c
67 static struct vrfb_ctx *ctxs; variable
88 omap2_sms_write_rot_control(ctxs[ctx].control, ctx); in restore_hw_context()
89 omap2_sms_write_rot_size(ctxs[ctx].size, ctx); in restore_hw_context()
90 omap2_sms_write_rot_physical_ba(ctxs[ctx].physical_ba, ctx); in restore_hw_context()
211 ctxs[ctx].physical_ba = paddr; in omap_vrfb_setup()
212 ctxs[ctx].size = size; in omap_vrfb_setup()
213 ctxs[ctx].control = control; in omap_vrfb_setup()
308 paddr = ctxs[ctx].base + SMS_ROT_VIRT_BASE(rot); in omap_vrfb_request_ctx()
348 ctxs = devm_kcalloc(&pdev->dev, in vrfb_probe()
352 if (!ctxs) in vrfb_probe()
[all...]
/kernel/linux/linux-6.6/drivers/video/fbdev/omap2/omapfb/
vrfb.c
67 static struct vrfb_ctx *ctxs; variable
88 omap2_sms_write_rot_control(ctxs[ctx].control, ctx); in restore_hw_context()
89 omap2_sms_write_rot_size(ctxs[ctx].size, ctx); in restore_hw_context()
90 omap2_sms_write_rot_physical_ba(ctxs[ctx].physical_ba, ctx); in restore_hw_context()
211 ctxs[ctx].physical_ba = paddr; in omap_vrfb_setup()
212 ctxs[ctx].size = size; in omap_vrfb_setup()
213 ctxs[ctx].control = control; in omap_vrfb_setup()
308 paddr = ctxs[ctx].base + SMS_ROT_VIRT_BASE(rot); in omap_vrfb_request_ctx()
348 ctxs = devm_kcalloc(&pdev->dev, in vrfb_probe()
352 if (!ctxs) in vrfb_probe()
[all...]
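
Both vrfb.c copies above show the same save/restore pattern: vrfb_probe() allocates one vrfb_ctx per hardware context with devm_kcalloc(), omap_vrfb_setup() caches the rotation register values in ctxs[ctx], and restore_hw_context() replays them into the hardware after a context loss. A minimal userspace sketch of that idea follows; the struct fields mirror the hits, but write_reg(), num_ctxs, and the demo values are illustrative stand-ins, not the driver's real API.

#include <stdio.h>
#include <stdlib.h>

/* Per-context shadow of the rotation registers (fields as in the hits). */
struct vrfb_ctx {
    unsigned long physical_ba;
    unsigned int control;
    unsigned int size;
};

static struct vrfb_ctx *ctxs;  /* one entry per hardware context */
static int num_ctxs = 12;      /* assumed context count */

/* Stand-in for the real omap2_sms_write_rot_*() register writes. */
static void write_reg(const char *name, int ctx, unsigned long val)
{
    printf("ctx %d: %s <= 0x%lx\n", ctx, name, val);
}

/* Cache the values so they survive a loss of hardware context. */
static void vrfb_setup(int ctx, unsigned long paddr,
                       unsigned int size, unsigned int control)
{
    ctxs[ctx].physical_ba = paddr;
    ctxs[ctx].size = size;
    ctxs[ctx].control = control;
}

/* Replay the cached values into the hardware. */
static void restore_hw_context(int ctx)
{
    write_reg("control", ctx, ctxs[ctx].control);
    write_reg("size", ctx, ctxs[ctx].size);
    write_reg("physical_ba", ctx, ctxs[ctx].physical_ba);
}

int main(void)
{
    ctxs = calloc(num_ctxs, sizeof(*ctxs));  /* devm_kcalloc() analogue */
    if (!ctxs)
        return 1;
    vrfb_setup(0, 0x80000000UL, 0x04000400, 0x1);
    restore_hw_context(0);
    free(ctxs);
    return 0;
}
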
/kernel/linux/linux-5.10/arch/x86/mm/
tlb.c
197 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); in clear_asid_other()
220 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
225 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
487 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != in switch_mm_irqs_off()
515 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) == in switch_mm_irqs_off()
559 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); in switch_mm_irqs_off()
560 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); in switch_mm_irqs_off()
642 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); in initialize_tlbstate_and_flush()
643 this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen); in initialize_tlbstate_and_flush()
646 this_cpu_write(cpu_tlbstate.ctxs[ in initialize_tlbstate_and_flush()
[all...]
/kernel/linux/linux-6.6/arch/x86/mm/
tlb.c
211 this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); in clear_asid_other()
234 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
239 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
564 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != in switch_mm_irqs_off()
597 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) == in switch_mm_irqs_off()
641 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); in switch_mm_irqs_off()
642 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); in switch_mm_irqs_off()
728 this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); in initialize_tlbstate_and_flush()
729 this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, tlb_gen); in initialize_tlbstate_and_flush()
733 this_cpu_write(cpu_tlbstate.ctxs[ in initialize_tlbstate_and_flush()
[all...]
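
In both trees, tlb.c keeps a per-CPU array cpu_tlbstate.ctxs[] with one slot per dynamic ASID; each slot caches the ctx_id of the mm last loaded under that ASID and the tlb_gen its TLB entries were current for. choose_new_asid() reuses a slot whose ctx_id matches and only requests a flush when its tlb_gen is stale. A simplified, compilable sketch of that decision; the plain array and the round-robin replacement stand in for the real per-CPU accessors, and TLB_NR_DYN_ASIDS is an assumed value here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_NR_DYN_ASIDS 6  /* assumed value for the sketch */

struct tlb_context {
    uint64_t ctx_id;   /* which mm last owned this ASID */
    uint64_t tlb_gen;  /* how current its TLB entries were */
};

/* Plain array standing in for this_cpu_read()/this_cpu_write()
 * on cpu_tlbstate.ctxs[]. */
static struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
static unsigned int next_asid;

/* Simplified choose_new_asid(): reuse a matching slot and flush only
 * if it fell behind; otherwise steal the next slot round-robin. */
static unsigned int choose_new_asid(uint64_t ctx_id, uint64_t next_tlb_gen,
                                    bool *need_flush)
{
    unsigned int asid;

    for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
        if (ctxs[asid].ctx_id != ctx_id)
            continue;
        *need_flush = ctxs[asid].tlb_gen < next_tlb_gen;
        return asid;
    }

    *need_flush = true;
    asid = next_asid;
    next_asid = (next_asid + 1) % TLB_NR_DYN_ASIDS;
    ctxs[asid].ctx_id = ctx_id;         /* the kernel updates these in */
    ctxs[asid].tlb_gen = next_tlb_gen;  /* switch_mm_irqs_off() instead */
    return asid;
}

int main(void)
{
    bool flush;
    unsigned int asid = choose_new_asid(42, 1, &flush);
    printf("asid=%u flush=%d\n", asid, flush);
    asid = choose_new_asid(42, 1, &flush);  /* cache hit: no flush */
    printf("asid=%u flush=%d\n", asid, flush);
    return 0;
}
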
/kernel/linux/linux-6.6/block/
blk-mq-sysfs.c
18 struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj); in blk_mq_sysfs_release() local
20 free_percpu(ctxs->queue_ctx); in blk_mq_sysfs_release()
21 kfree(ctxs); in blk_mq_sysfs_release()
28 /* ctx->ctxs won't be released until all ctx are freed */ in blk_mq_ctx_sysfs_release()
29 kobject_put(&ctx->ctxs->kobj); in blk_mq_ctx_sysfs_release()
40 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
blk-mq.c
1703 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx()
1738 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx()
3645 /* hctx->ctxs will be freed in queue's release handler */
3754 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), in blk_mq_alloc_hctx()
3756 if (!hctx->ctxs) in blk_mq_alloc_hctx()
3779 kfree(hctx->ctxs); in blk_mq_alloc_hctx()
3926 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
4048 struct blk_mq_ctxs *ctxs; in blk_mq_alloc_ctxs() local
4051 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); in blk_mq_alloc_ctxs()
[all...]
blk-mq.h
29 struct blk_mq_ctxs *ctxs; member
blk-mq-sched.c
204 return hctx->ctxs[idx]; in blk_mq_next_ctx()
/kernel/linux/linux-5.10/block/
blk-mq-sysfs.c
20 struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj); in blk_mq_sysfs_release() local
22 free_percpu(ctxs->queue_ctx); in blk_mq_sysfs_release()
23 kfree(ctxs); in blk_mq_sysfs_release()
30 /* ctx->ctxs won't be released until all ctx are freed */ in blk_mq_ctx_sysfs_release()
31 kobject_put(&ctx->ctxs->kobj); in blk_mq_ctx_sysfs_release()
44 kfree(hctx->ctxs); in blk_mq_hw_sysfs_release()
blk-mq.c
1027 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx()
1062 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx()
2675 /* hctx->ctxs will be freed in queue's release handler */
2790 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), in blk_mq_alloc_hctx()
2792 if (!hctx->ctxs) in blk_mq_alloc_hctx()
2817 kfree(hctx->ctxs); in blk_mq_alloc_hctx()
2942 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
3064 struct blk_mq_ctxs *ctxs; in blk_mq_alloc_ctxs() local
3067 ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL); in blk_mq_alloc_ctxs()
[all...]
blk-mq.h
36 struct blk_mq_ctxs *ctxs; member
blk-mq-sched.c
220 return hctx->ctxs[idx]; in blk_mq_next_ctx()
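
The block-layer hits in both trees outline an ownership rule: blk_mq_alloc_ctxs() allocates a shared blk_mq_ctxs container holding the per-CPU software queues, every ctx keeps a back-pointer to it, and blk_mq_ctx_sysfs_release() drops a kobject reference so the container is only freed once the last ctx is gone. A userspace sketch of that lifetime rule, with a plain counter standing in for the kobject refcount and ordinary allocations standing in for kzalloc()/alloc_percpu():

#include <stdlib.h>

struct blk_mq_ctxs;

struct blk_mq_ctx {
    struct blk_mq_ctxs *ctxs;  /* back-pointer, as in blk-mq.h */
};

struct blk_mq_ctxs {
    int refs;                      /* kobject refcount analogue */
    struct blk_mq_ctx *queue_ctx;  /* really a percpu allocation */
};

static void ctxs_put(struct blk_mq_ctxs *ctxs)
{
    if (--ctxs->refs == 0) {
        free(ctxs->queue_ctx);  /* free_percpu() analogue */
        free(ctxs);             /* blk_mq_sysfs_release() analogue */
    }
}

/* "ctx->ctxs won't be released until all ctx are freed": each ctx
 * drops one reference when it goes away. */
static void ctx_release(struct blk_mq_ctx *ctx)
{
    ctxs_put(ctx->ctxs);
}

int main(void)
{
    int ncpu = 4, i;
    struct blk_mq_ctxs *ctxs = calloc(1, sizeof(*ctxs));

    if (!ctxs)
        return 1;
    ctxs->queue_ctx = calloc(ncpu, sizeof(*ctxs->queue_ctx));
    if (!ctxs->queue_ctx)
        return 1;
    ctxs->refs = ncpu + 1;  /* one per ctx plus the queue's own */

    for (i = 0; i < ncpu; i++)
        ctxs->queue_ctx[i].ctxs = ctxs;
    for (i = 0; i < ncpu; i++)
        ctx_release(&ctxs->queue_ctx[i]);  /* ctx kobjects released */
    ctxs_put(ctxs);  /* queue teardown drops the final reference */
    return 0;
}
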
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
nv04.h
9 unsigned ctxs:5; member
dmanv04.c
101 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; in nv04_fifo_dma_fini()
104 nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); in nv04_fifo_dma_fini()
/kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu/
qcom_iommu.c
54 struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */ member
97 return qcom_iommu->ctxs[asid]; in to_ctx()
546 WARN_ON(qcom_iommu->ctxs[asid] == NULL)) { in qcom_iommu_of_xlate()
705 qcom_iommu->ctxs[ctx->asid] = ctx; in qcom_iommu_ctx_probe()
717 qcom_iommu->ctxs[ctx->asid] = NULL; in qcom_iommu_ctx_remove()
767 qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1), in qcom_iommu_device_probe()
/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu/
qcom_iommu.c
57 struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */ member
99 return qcom_iommu->ctxs[asid - 1]; in to_ctx()
591 * index into qcom_iommu->ctxs: in qcom_iommu_of_xlate()
740 qcom_iommu->ctxs[ctx->asid - 1] = ctx; in qcom_iommu_ctx_probe()
752 qcom_iommu->ctxs[ctx->asid - 1] = NULL; in qcom_iommu_ctx_remove()
801 qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid), in qcom_iommu_device_probe()
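
The two qcom_iommu.c versions differ only in indexing: 5.10 stores contexts at ctxs[asid - 1] and sizes the flexible array for max_asid entries, while 6.6 indexes by asid directly and allocates max_asid + 1 slots. A small sketch of the 6.6 scheme, using a plain calloc() in place of devm_kzalloc()/struct_size(); the struct members shown are reduced to what the hits reveal.

#include <stdio.h>
#include <stdlib.h>

struct qcom_iommu_ctx {
    int asid;
};

struct qcom_iommu_dev {
    int max_asid;
    struct qcom_iommu_ctx *ctxs[];  /* flexible array, indexed by asid */
};

static struct qcom_iommu_ctx *to_ctx(struct qcom_iommu_dev *dev, int asid)
{
    return dev->ctxs[asid];  /* 5.10 would use asid - 1 here */
}

int main(void)
{
    int max_asid = 3;
    /* struct_size(qcom_iommu, ctxs, max_asid + 1) analogue: the header
     * plus (max_asid + 1) pointer slots in a single allocation. */
    struct qcom_iommu_dev *dev =
        calloc(1, sizeof(*dev) + (max_asid + 1) * sizeof(dev->ctxs[0]));
    if (!dev)
        return 1;
    dev->max_asid = max_asid;

    struct qcom_iommu_ctx ctx = { .asid = 2 };
    dev->ctxs[ctx.asid] = &ctx;   /* qcom_iommu_ctx_probe() analogue */

    printf("asid 2 -> ctx with asid %d\n", to_ctx(dev, 2)->asid);

    dev->ctxs[ctx.asid] = NULL;   /* qcom_iommu_ctx_remove() analogue */
    free(dev);
    return 0;
}
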
/kernel/linux/linux-5.10/include/linux/
blk-mq.h
96 /** @ctxs: Array of software queues. */
97 struct blk_mq_ctx **ctxs; member
585 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
/kernel/linux/linux-6.6/include/linux/
damon.h
665 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
666 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
blk-mq.h
367 /** @ctxs: Array of software queues. */
368 struct blk_mq_ctx **ctxs; member
947 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
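
The fragment at blk-mq.h line 947 (line 585 in 5.10) is the tail of the hctx_for_each_ctx() iterator: a GCC/Clang statement expression assigns the current software queue and evaluates to 1 so it can sit inside the for-loop condition. A self-contained sketch of the same construct, with stand-in types; it requires the GNU statement-expression extension the kernel itself relies on.

#include <stdio.h>

struct blk_mq_ctx { int cpu; };

struct blk_mq_hw_ctx {
    struct blk_mq_ctx **ctxs;  /* array of software queues */
    unsigned int nr_ctx;
};

/* Same shape as the kernel macro: the statement expression assigns
 * the current entry and yields 1, keeping the loop condition true. */
#define hctx_for_each_ctx(hctx, ctx, i)                      \
    for ((i) = 0; (i) < (hctx)->nr_ctx &&                    \
         ({ (ctx) = (hctx)->ctxs[(i)]; 1; }); (i)++)

int main(void)
{
    struct blk_mq_ctx a = { .cpu = 0 }, b = { .cpu = 1 };
    struct blk_mq_ctx *arr[] = { &a, &b };
    struct blk_mq_hw_ctx hctx = { .ctxs = arr, .nr_ctx = 2 };
    struct blk_mq_ctx *ctx;
    unsigned int i;

    hctx_for_each_ctx(&hctx, ctx, i)
        printf("ctx %u maps cpu %d\n", i, ctx->cpu);
    return 0;
}
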
/kernel/linux/linux-5.10/arch/x86/include/asm/
tlbflush.h
112 * This tells us to go invalidate all the non-loaded ctxs[]
152 struct tlb_context ctxs[TLB_NR_DYN_ASIDS]; member
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
chan.h
36 unsigned ctxs:5; member
nv04.c
66 u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; in nv04_chan_stop()
69 nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); in nv04_chan_stop()
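
In the nouveau hits, ctxs is a 5-bit field giving the bit offset of a channel's state inside a RAMFC context word, and cm = ((1ULL << c->bits) - 1) << c->ctxs builds the mask covering it. A compilable sketch of that mask-and-splice step; the descriptor values and the surrounding read-modify-write are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Descriptor of one field inside a RAMFC context word: 'ctxs' is the
 * bit offset, 'bits' the width. Names follow the hits above; the
 * values in main() are made up for the demo. */
struct ramfc_desc {
    unsigned bits:6;
    unsigned ctxs:5;
};

int main(void)
{
    struct ramfc_desc c = { .bits = 8, .ctxs = 16 };
    uint32_t cv = 0xab;          /* value to place in the field */
    uint32_t word = 0xdeadbeef;  /* current context word */

    /* Mask covering the field, exactly as in the hits above. */
    uint32_t cm = ((1ULL << c.bits) - 1) << c.ctxs;

    /* Read-modify-write: clear the field, then splice in cv. */
    word = (word & ~cm) | ((cv << c.ctxs) & cm);

    printf("mask=0x%08x word=0x%08x\n", (unsigned)cm, (unsigned)word);
    return 0;
}
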
/kernel/linux/linux-6.6/arch/x86/include/asm/
tlbflush.h
100 * This tells us to go invalidate all the non-loaded ctxs[]
150 struct tlb_context ctxs[TLB_NR_DYN_ASIDS]; member
/kernel/linux/linux-6.6/mm/damon/
core.c
658 * @ctxs: an array of the pointers for contexts to start monitoring
659 * @nr_ctxs: size of @ctxs
671 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive) in damon_start() argument
684 err = __damon_start(ctxs[i]); in damon_start()
722 * @ctxs: an array of the pointers for contexts to stop monitoring
723 * @nr_ctxs: size of @ctxs
727 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) in damon_stop() argument
733 err = __damon_stop(ctxs[i]); in damon_stop()
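
damon_start() and damon_stop() share a shape: walk the caller's array of context pointers, delegate to a per-context helper, and stop at the first error. A reduced sketch of that loop; __start_one()/__stop_one() are illustrative stand-ins for __damon_start()/__damon_stop(), and the exclusive flag from the real signature is omitted here.

#include <stdbool.h>
#include <stdio.h>

struct damon_ctx { int id; bool running; };

/* Illustrative stand-ins for __damon_start()/__damon_stop(). */
static int __start_one(struct damon_ctx *ctx)
{
    ctx->running = true;
    printf("ctx %d started\n", ctx->id);
    return 0;
}

static int __stop_one(struct damon_ctx *ctx)
{
    ctx->running = false;
    printf("ctx %d stopped\n", ctx->id);
    return 0;
}

static int damon_start(struct damon_ctx **ctxs, int nr_ctxs)
{
    int i, err = 0;

    for (i = 0; i < nr_ctxs && !err; i++)
        err = __start_one(ctxs[i]);
    return err;
}

static int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
{
    int i, err = 0;

    for (i = 0; i < nr_ctxs && !err; i++)
        err = __stop_one(ctxs[i]);
    return err;
}

int main(void)
{
    struct damon_ctx a = { .id = 0 }, b = { .id = 1 };
    struct damon_ctx *ctxs[] = { &a, &b };

    damon_start(ctxs, 2);
    damon_stop(ctxs, 2);
    return 0;
}
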
/kernel/linux/linux-5.10/kernel/
workqueue.c
5302 LIST_HEAD(ctxs); in workqueue_apply_unbound_cpumask()
5326 list_add_tail(&ctx->list, &ctxs); in workqueue_apply_unbound_cpumask()
5329 list_for_each_entry_safe(ctx, n, &ctxs, list) { in workqueue_apply_unbound_cpumask()
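
Here workqueue_apply_unbound_cpumask() uses ctxs as a local staging list: an apply context is prepared for every workqueue first, and only if all preparations succeed is each one committed. A userspace sketch of that prepare-then-commit batching, with a minimal singly-linked list standing in for the kernel's list_head; the payload and function names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct apply_ctx {
    int wq_id;              /* illustrative payload */
    struct apply_ctx *next;
};

static struct apply_ctx *prepare(int wq_id)
{
    struct apply_ctx *ctx = malloc(sizeof(*ctx));
    if (ctx)
        ctx->wq_id = wq_id;
    return ctx;
}

static void commit(struct apply_ctx *ctx)
{
    printf("applying new attrs to wq %d\n", ctx->wq_id);
}

int main(void)
{
    struct apply_ctx *ctxs = NULL, *ctx;  /* LIST_HEAD(ctxs) analogue */
    int id, ret = 0;

    /* Phase 1: prepare a ctx per workqueue; bail out on failure. */
    for (id = 0; id < 3; id++) {
        ctx = prepare(id);
        if (!ctx) {
            ret = -1;
            break;
        }
        ctx->next = ctxs;
        ctxs = ctx;
    }

    /* Phase 2: commit (or, on failure, just free) every prepared ctx. */
    while ((ctx = ctxs)) {
        ctxs = ctx->next;
        if (!ret)
            commit(ctx);
        free(ctx);
    }
    return ret;
}
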
