Lines Matching defs:workload

63 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
66 struct intel_context *ctx = workload->req->context;
68 if (WARN_ON(!workload->shadow_mm))
71 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
76 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
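
The update_shadow_pdps() matches above (lines 63-76) show the pattern: refuse to touch the context image unless the shadow MM exists and is pinned, then hand its shadow PDP entries to the context. A minimal user-space sketch of that check-then-copy step, with hypothetical simplified types (fake_shadow_mm, fake_ring_context) standing in for the GVT structures:

    #include <stdint.h>
    #include <string.h>

    #define NR_PDPS 4                          /* assumption: 4 PDP slots in the ring context */

    struct fake_shadow_mm    { int pincount; uint64_t shadow_pdps[NR_PDPS]; };
    struct fake_ring_context { uint64_t pdps[NR_PDPS]; };

    /* Copy shadow page-directory pointers into the context image, but only
     * if the shadow MM is present and currently pinned (as checked above). */
    static int update_shadow_pdps_like(struct fake_ring_context *ctx,
                                       const struct fake_shadow_mm *mm)
    {
            if (!mm || mm->pincount <= 0)
                    return -1;                 /* shadow mm not ready */
            memcpy(ctx->pdps, mm->shadow_pdps, sizeof(ctx->pdps));
            return 0;
    }
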
84 static void sr_oa_regs(struct intel_vgpu_workload *workload,
87 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
101 if (workload->engine->id != RCS0)
105 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
107 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
110 workload->flex_mmio[i] = reg_state[state_offset + 1];
115 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
117 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
122 reg_state[state_offset + 1] = workload->flex_mmio[i];
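
sr_oa_regs() is called twice from populate_shadow_context() (lines 151 and 188), first to save and then to restore, so the host's OA control and flex-EU register values survive the guest-context copy that happens in between. A small sketch of that save/restore-around-overwrite idea; the register count and offsets here are illustrative assumptions, not i915's layout:

    #include <stdint.h>
    #include <stddef.h>

    #define N_FLEX 7                           /* assumption: number of flex-EU registers */

    struct saved_oa { uint32_t oactxctrl; uint32_t flex_mmio[N_FLEX]; };

    /* save != 0: stash the current register-state values; save == 0: put them back. */
    static void sr_oa_regs_like(uint32_t *reg_state, size_t ctrl_off,
                                const size_t flex_off[N_FLEX],
                                struct saved_oa *s, int save)
    {
            if (save)
                    s->oactxctrl = reg_state[ctrl_off];
            else
                    reg_state[ctrl_off] = s->oactxctrl;

            for (size_t i = 0; i < N_FLEX; i++) {
                    if (save)
                            s->flex_mmio[i] = reg_state[flex_off[i]];
                    else
                            reg_state[flex_off[i]] = s->flex_mmio[i];
            }
    }
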
127 static int populate_shadow_context(struct intel_vgpu_workload *workload)
129 struct intel_vgpu *vgpu = workload->vgpu;
131 struct intel_context *ctx = workload->req->context;
141 int ring_id = workload->engine->id;
151 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
153 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
156 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
165 if (workload->engine->id == RCS0) {
169 } else if (workload->engine->id == BCS0)
171 workload->ring_context_gpa +
182 workload->ring_context_gpa +
188 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
190 gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
191 workload->engine->name, workload->ctx_desc.lrca,
192 workload->ctx_desc.context_id,
193 workload->ring_context_gpa);
203 workload->ctx_desc.lrca) &&
205 workload->ring_context_gpa))
208 s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
209 s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
215 context_page_num = workload->engine->context_size;
218 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
227 (u32)((workload->ctx_desc.lrca + i) <<
253 ret = intel_gvt_scan_engine_context(workload);
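
Lines 203-209 implement a small per-ring cache: when the incoming workload's LRCA and ring-context GPA match the last context shadowed on that ring, the bulk of the guest-context copy can be skipped, and the new identity is recorded otherwise. A sketch of that skip-if-unchanged check with simplified, hypothetical fields (the real code tracks more state than this):

    #include <stdint.h>
    #include <stdbool.h>

    struct last_ctx_cache { bool valid; uint32_t lrca; uint64_t ring_context_gpa; };

    /* Return true when the expensive guest->shadow context copy may be skipped
     * because the same context image was shadowed last time on this ring;
     * otherwise remember the new identity for the next comparison. */
    static bool same_ctx_as_last(struct last_ctx_cache *c,
                                 uint32_t lrca, uint64_t gpa)
    {
            if (c->valid && c->lrca == lrca && c->ring_context_gpa == gpa)
                    return true;

            c->lrca = lrca;
            c->ring_context_gpa = gpa;
            c->valid = true;
            return false;
    }
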
294 struct intel_vgpu_workload *workload;
311 workload = scheduler->current_workload[ring_id];
312 if (unlikely(!workload))
318 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
321 workload->vgpu, rq->engine);
322 scheduler->engine_owner[ring_id] = workload->vgpu;
325 ring_id, workload->vgpu->id);
327 atomic_set(&workload->shadow_ctx_active, 1);
330 save_ring_hw_state(workload->vgpu, rq->engine);
331 atomic_set(&workload->shadow_ctx_active, 0);
334 save_ring_hw_state(workload->vgpu, rq->engine);
340 wake_up(&workload->shadow_ctx_status_wq);
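
Lines 327-340 flip shadow_ctx_active and wake shadow_ctx_status_wq on each context-status notification; the completion path further down (lines 1084-1085) sleeps on the same queue until the flag drops back to zero. A user-space analogue of that flag-plus-waitqueue handshake, using a pthread condition variable in place of the kernel's waitqueue:

    #include <pthread.h>
    #include <stdbool.h>

    struct ctx_status {
            pthread_mutex_t lock;
            pthread_cond_t  wq;                /* stands in for shadow_ctx_status_wq */
            bool            active;            /* stands in for shadow_ctx_active */
    };

    static void ctx_set_active(struct ctx_status *s, bool active)
    {
            pthread_mutex_lock(&s->lock);
            s->active = active;
            pthread_cond_broadcast(&s->wq);    /* wake_up(): tell waiters the flag changed */
            pthread_mutex_unlock(&s->lock);
    }

    static void ctx_wait_idle(struct ctx_status *s)
    {
            pthread_mutex_lock(&s->lock);
            while (s->active)                  /* wait_event(..., !active) analogue */
                    pthread_cond_wait(&s->wq, &s->lock);
            pthread_mutex_unlock(&s->lock);
    }
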
346 struct intel_vgpu_workload *workload)
355 desc |= (u64)workload->ctx_desc.addressing_mode <<
361 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
363 struct intel_vgpu *vgpu = workload->vgpu;
364 struct i915_request *req = workload->req;
391 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
394 workload->rb_len);
398 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
401 workload->shadow_ring_buffer_va = cs;
404 workload->rb_len);
406 cs += workload->rb_len / sizeof(u32);
407 intel_ring_advance(workload->req, cs);
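
copy_workload_to_ring_buffer() (lines 361-407) reserves rb_len bytes in the shadow ring with intel_ring_begin(), memcpy()s the already-scanned guest ring contents into it, and advances the ring. A stripped-down sketch of that reserve/copy/advance sequence on a flat buffer; real rings wrap, which this hypothetical fake_ring does not:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    struct fake_ring { uint32_t *buf; size_t size_dw; size_t tail_dw; };

    /* Reserve len bytes, copy the shadow ring-buffer contents, bump the tail. */
    static int copy_to_ring_like(struct fake_ring *ring,
                                 const uint32_t *shadow_va, size_t len)
    {
            size_t len_dw = len / sizeof(uint32_t);

            if (ring->tail_dw + len_dw > ring->size_dw)
                    return -1;                 /* intel_ring_begin() would wait/wrap here */

            memcpy(ring->buf + ring->tail_dw, shadow_va, len);
            ring->tail_dw += len_dw;           /* intel_ring_advance() analogue */
            return 0;
    }
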
434 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
437 struct intel_vgpu_mm *mm = workload->shadow_mm;
459 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
461 struct intel_vgpu *vgpu = workload->vgpu;
465 if (workload->req)
468 rq = i915_request_create(s->shadow[workload->engine->id]);
474 workload->req = i915_request_get(rq);
479 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
481 * @workload: an abstract entity for each execlist submission.
483 * This function is called before the workload is submitted to i915, to make
484 * sure the content of the workload is valid.
486 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
488 struct intel_vgpu *vgpu = workload->vgpu;
494 if (workload->shadow)
497 if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
498 shadow_context_descriptor_update(s->shadow[workload->engine->id],
499 workload);
501 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
505 if (workload->engine->id == RCS0 &&
506 workload->wa_ctx.indirect_ctx.size) {
507 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
512 workload->shadow = true;
516 release_shadow_wa_ctx(&workload->wa_ctx);
520 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
522 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
524 struct intel_gvt *gvt = workload->vgpu->gvt;
530 list_for_each_entry(bb, &workload->shadow_bb, list) {
539 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
573 ret = i915_vma_move_to_active(bb->vma, workload->req,
586 release_shadow_batch_buffer(workload);
592 struct intel_vgpu_workload *workload =
594 struct i915_request *rq = workload->req;
650 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
652 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
653 workload->rb_start;
656 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
660 if (list_empty(&workload->shadow_bb))
663 bb = list_first_entry(&workload->shadow_bb,
666 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
684 intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
686 struct intel_vgpu *vgpu = workload->vgpu;
690 ret = intel_vgpu_pin_mm(workload->shadow_mm);
696 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
697 !workload->shadow_mm->ppgtt_mm.shadowed) {
698 intel_vgpu_unpin_mm(workload->shadow_mm);
699 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
703 if (!list_empty(&workload->lri_shadow_mm)) {
704 list_for_each_entry(m, &workload->lri_shadow_mm,
709 &workload->lri_shadow_mm,
719 intel_vgpu_unpin_mm(workload->shadow_mm);
725 intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
729 if (!list_empty(&workload->lri_shadow_mm)) {
730 list_for_each_entry(m, &workload->lri_shadow_mm,
734 intel_vgpu_unpin_mm(workload->shadow_mm);
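
intel_vgpu_shadow_mm_pin() (lines 684-719) pins the main shadow MM, checks that its PPGTT is actually shadowed, then appears to pin every MM hanging off lri_shadow_mm, unpinning what was already taken if a later step fails; intel_vgpu_shadow_mm_unpin() (725-734) releases the same set. A sketch of that pin-all-or-unwind shape over an array, with hypothetical types and a trivially succeeding pin just to show the structure:

    #include <stddef.h>

    struct fake_mm { int pincount; };

    static int  fake_pin(struct fake_mm *mm)   { mm->pincount++; return 0; }
    static void fake_unpin(struct fake_mm *mm) { if (mm->pincount) mm->pincount--; }

    /* Pin the primary mm plus every LRI-referenced mm; on failure, drop
     * whatever was already pinned so the caller sees no partial state. */
    static int pin_all_or_unwind(struct fake_mm *primary,
                                 struct fake_mm **lri, size_t n)
    {
            size_t i;

            if (fake_pin(primary))
                    return -1;

            for (i = 0; i < n; i++) {
                    if (fake_pin(lri[i]))
                            goto unwind;
            }
            return 0;

    unwind:
            while (i--)
                    fake_unpin(lri[i]);
            fake_unpin(primary);
            return -1;
    }
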
737 static int prepare_workload(struct intel_vgpu_workload *workload)
739 struct intel_vgpu *vgpu = workload->vgpu;
743 ret = intel_vgpu_shadow_mm_pin(workload);
749 update_shadow_pdps(workload);
751 set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
753 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
759 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
765 ret = copy_workload_to_ring_buffer(workload);
771 ret = prepare_shadow_batch_buffer(workload);
777 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
783 if (workload->prepare) {
784 ret = workload->prepare(workload);
791 release_shadow_wa_ctx(&workload->wa_ctx);
793 release_shadow_batch_buffer(workload);
795 intel_vgpu_shadow_mm_unpin(workload);
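
prepare_workload() (lines 737-795) acquires its resources in a fixed order (pin the shadow MM, copy the ring buffer, prepare batch buffers and the wa_ctx, run the per-workload prepare hook), and the error labels at lines 791-795 release them in reverse order. That is the usual kernel goto-unwind idiom; a self-contained sketch with hypothetical acquire/release steps:

    #include <stdio.h>

    static int  step_pin(void)        { puts("pin shadow mm");         return 0; }
    static int  step_copy_ring(void)  { puts("copy ring buffer");      return 0; }
    static int  step_prepare_bb(void) { puts("prepare batch buffers"); return -1; }  /* simulate failure */
    static void undo_copy_ring(void)  { puts("release ring copy"); }
    static void undo_pin(void)        { puts("unpin shadow mm"); }

    /* A failure at step N releases steps N-1..1 in reverse order and
     * propagates the error, mirroring the shape of prepare_workload(). */
    static int prepare_like(void)
    {
            int ret;

            ret = step_pin();
            if (ret)
                    return ret;
            ret = step_copy_ring();
            if (ret)
                    goto err_unpin;
            ret = step_prepare_bb();
            if (ret)
                    goto err_ring;
            return 0;

    err_ring:
            undo_copy_ring();
    err_unpin:
            undo_pin();
            return ret;
    }
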
799 static int dispatch_workload(struct intel_vgpu_workload *workload)
801 struct intel_vgpu *vgpu = workload->vgpu;
805 gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
806 workload->engine->name, workload);
810 ret = intel_gvt_workload_req_alloc(workload);
814 ret = intel_gvt_scan_and_shadow_workload(workload);
818 ret = populate_shadow_context(workload);
820 release_shadow_wa_ctx(&workload->wa_ctx);
824 ret = prepare_workload(workload);
830 rq = fetch_and_zero(&workload->req);
834 if (!IS_ERR_OR_NULL(workload->req)) {
835 gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
836 workload->engine->name, workload->req);
837 i915_request_add(workload->req);
838 workload->dispatched = true;
842 workload->status = ret;
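
In dispatch_workload() (lines 799-842), the error path takes the request out of workload->req with fetch_and_zero() before putting it, so the later "submit if non-NULL" test at line 834 sees NULL and the request cannot be released or submitted twice. A sketch of that take-and-clear ownership transfer; take_and_clear() and fake_req are hypothetical stand-ins, and the macro uses a GCC statement expression, as kernel code commonly does:

    #include <stddef.h>
    #include <stdlib.h>

    /* Take the pointer out of a slot and clear the slot, so exactly one path
     * is left owning the reference (the idea behind the kernel's fetch_and_zero()). */
    #define take_and_clear(slot) ({                    \
            __typeof__(*(slot)) __v = *(slot);         \
            *(slot) = NULL;                            \
            __v;                                       \
    })

    struct fake_req { int id; };

    static void error_path(struct fake_req **reqp)
    {
            struct fake_req *rq = take_and_clear(reqp);

            free(rq);                          /* i915_request_put() analogue; *reqp is now
                                                * NULL, so later submission code skips it */
    }
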
851 struct intel_vgpu_workload *workload = NULL;
856 * no current vgpu / will be scheduled out / no workload
875 * still have current workload, maybe the workload dispatcher
879 workload = scheduler->current_workload[engine->id];
880 gvt_dbg_sched("ring %s still have current workload %p\n",
881 engine->name, workload);
886 * pick a workload as current workload
887 * once current workload is set, schedule policy routines
888 * will wait until the current workload is finished when trying to
896 workload = scheduler->current_workload[engine->id];
898 gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
900 atomic_inc(&workload->vgpu->submission.running_workload_num);
903 return workload;
936 static void update_guest_context(struct intel_vgpu_workload *workload)
938 struct i915_request *rq = workload->req;
939 struct intel_vgpu *vgpu = workload->vgpu;
941 struct intel_context *ctx = workload->req->context;
952 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
953 workload->ctx_desc.lrca);
957 head = workload->rb_head;
958 tail = workload->rb_tail;
959 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
989 (u32)((workload->ctx_desc.lrca + i) <<
1016 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
1017 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
1021 if (!list_empty(&workload->lri_shadow_mm)) {
1022 struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
1026 update_guest_pdps(vgpu, workload->ring_context_gpa,
1031 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
1040 workload->ring_context_gpa +
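
update_guest_context() (lines 936 onward) reads the workload's recorded head and tail, splits the guest ring head into a wrap counter and an offset (line 959), and eventually writes the new ring header back into the guest's ring context (lines 1016-1017). A sketch of packing and advancing such a wrap-counted head value; RB_HEAD_WRAP_CNT_OFF is taken from the listing, while the mask width and overflow handling here are assumptions:

    #include <stdint.h>

    #define RB_HEAD_WRAP_CNT_OFF  21u          /* assumed bit position of the wrap counter */
    #define RB_HEAD_OFF_MASK      ((1u << RB_HEAD_WRAP_CNT_OFF) - 1)

    /* Move the guest-visible head to new_off, bumping the wrap counter
     * whenever the offset moved backwards (i.e. the ring wrapped around). */
    static uint32_t advance_guest_head(uint32_t old_head, uint32_t new_off)
    {
            uint32_t wrap = old_head >> RB_HEAD_WRAP_CNT_OFF;
            uint32_t off  = old_head &  RB_HEAD_OFF_MASK;

            if (new_off < off)
                    wrap++;                    /* the real code also caps this at a maximum */

            return (wrap << RB_HEAD_WRAP_CNT_OFF) | (new_off & RB_HEAD_OFF_MASK);
    }
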
1069 struct intel_vgpu_workload *workload =
1071 struct intel_vgpu *vgpu = workload->vgpu;
1073 struct i915_request *rq = workload->req;
1079 /* For the workload w/ request, we need to wait for the context
1081 * For the workload w/o request, directly complete the workload.
1084 wait_event(workload->shadow_ctx_status_wq,
1085 !atomic_read(&workload->shadow_ctx_active));
1088 * be set to -EIO. Use -EIO to set workload status so
1092 if (likely(workload->status == -EINPROGRESS)) {
1093 if (workload->req->fence.error == -EIO)
1094 workload->status = -EIO;
1096 workload->status = 0;
1099 if (!workload->status &&
1101 update_guest_context(workload);
1103 for_each_set_bit(event, workload->pending_events,
1108 i915_request_put(fetch_and_zero(&workload->req));
1111 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
1112 ring_id, workload, workload->status);
1116 list_del_init(&workload->list);
1118 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
1119 /* if workload->status is not successful, it means the HW GPU
1130 * the workload clean up here doesn't have any impact.
1135 workload->complete(workload);
1137 intel_vgpu_shadow_mm_unpin(workload);
1138 intel_vgpu_destroy_workload(workload);
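
The completion path first waits for the shadow context to go idle, then turns the provisional -EINPROGRESS status into a final one: an -EIO fence error becomes -EIO, anything else becomes success, and a status that already carried an error is kept (lines 1092-1096). A one-function sketch of that status folding:

    #include <errno.h>

    /* Fold a workload's provisional status and its request's fence error
     * into the final status reported to the rest of GVT. */
    static int finalize_status(int status, int fence_error)
    {
            if (status == -EINPROGRESS)
                    return (fence_error == -EIO) ? -EIO : 0;

            return status;                     /* already failed earlier: keep that error */
    }
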
1156 struct intel_vgpu_workload *workload = NULL;
1161 gvt_dbg_core("workload thread for ring %s started\n", engine->name);
1168 workload = pick_next_workload(gvt, engine);
1169 if (workload)
1176 if (!workload)
1179 gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
1180 engine->name, workload,
1181 workload->vgpu->id);
1185 gvt_dbg_sched("ring %s will dispatch workload %p\n",
1186 engine->name, workload);
1193 * workload. The vGPU may use these registers for checking
1195 * in this workload.
1197 update_vreg_in_ctx(workload);
1199 ret = dispatch_workload(workload);
1202 vgpu = workload->vgpu;
1203 gvt_vgpu_err("fail to dispatch workload, skip\n");
1207 gvt_dbg_sched("ring %s wait workload %p\n",
1208 engine->name, workload);
1209 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1212 gvt_dbg_sched("will complete workload %p, status: %d\n",
1213 workload, workload->status);
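
The workload thread (lines 1156-1213) loops: sleep until pick_next_workload() returns something, update the vregs, dispatch, wait for the request to retire, then complete the workload; a failed dispatch is logged and the wait is skipped, but completion still runs. One illustrative iteration of that life cycle, with the individual stages passed in as hypothetical callbacks:

    #include <stddef.h>

    struct fake_workload { int status; };

    /* pick -> dispatch -> wait -> complete; completion always runs so the
     * workload is torn down even when dispatching failed. */
    static void scheduler_iteration(struct fake_workload *(*pick)(void),
                                    int  (*dispatch)(struct fake_workload *),
                                    void (*wait_done)(struct fake_workload *),
                                    void (*complete)(struct fake_workload *))
    {
            struct fake_workload *w = pick();

            if (!w)
                    return;                    /* nothing queued: caller goes back to sleep */

            if (dispatch(w))
                    w->status = -1;            /* record the dispatch error, skip the wait */
            else
                    wait_done(w);              /* i915_request_wait() analogue */

            complete(w);
    }
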
1248 gvt_dbg_core("clean workload scheduler\n");
1265 gvt_dbg_core("init workload scheduler\n");
1275 gvt_err("fail to create workload thread\n");
1511 * intel_vgpu_destroy_workload - destroy a vGPU workload
1512 * @workload: workload to destroy
1514 * This function is called when destroying a vGPU workload.
1517 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1519 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1521 intel_context_unpin(s->shadow[workload->engine->id]);
1522 release_shadow_batch_buffer(workload);
1523 release_shadow_wa_ctx(&workload->wa_ctx);
1525 if (!list_empty(&workload->lri_shadow_mm)) {
1527 list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
1534 GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
1535 if (workload->shadow_mm)
1536 intel_vgpu_mm_put(workload->shadow_mm);
1538 kmem_cache_free(s->workloads, workload);
1545 struct intel_vgpu_workload *workload;
1547 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1548 if (!workload)
1551 INIT_LIST_HEAD(&workload->list);
1552 INIT_LIST_HEAD(&workload->shadow_bb);
1553 INIT_LIST_HEAD(&workload->lri_shadow_mm);
1555 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1556 atomic_set(&workload->shadow_ctx_active, 0);
1558 workload->status = -EINPROGRESS;
1559 workload->vgpu = vgpu;
1561 return workload;
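
alloc_workload() (lines 1545-1561) allocates a zeroed workload from the kmem cache, initialises its three list heads so emptiness checks are valid before anything is queued on them, sets up the status wait queue, and marks the status as -EINPROGRESS. A user-space sketch of the same initialisation, with a trivial list head in place of the kernel's:

    #include <stdlib.h>
    #include <errno.h>

    struct fake_list_head { struct fake_list_head *next, *prev; };

    static void fake_list_init(struct fake_list_head *h) { h->next = h->prev = h; }

    struct sketch_workload {
            struct fake_list_head list, shadow_bb, lri_shadow_mm;
            int status;
    };

    /* Zeroed allocation plus self-pointing list heads, so list_empty()-style
     * checks work from the moment of creation. */
    static struct sketch_workload *alloc_workload_like(void)
    {
            struct sketch_workload *w = calloc(1, sizeof(*w));

            if (!w)
                    return NULL;

            fake_list_init(&w->list);
            fake_list_init(&w->shadow_bb);
            fake_list_init(&w->lri_shadow_mm);
            w->status = -EINPROGRESS;          /* "still running" sentinel used throughout */
            return w;
    }
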
1580 static int prepare_mm(struct intel_vgpu_workload *workload)
1582 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1584 struct intel_vgpu *vgpu = workload->vgpu;
1600 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1602 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1606 workload->shadow_mm = mm;
1614 * intel_vgpu_create_workload - create a vGPU workload
1619 * This function is called when creating a vGPU workload.
1634 struct intel_vgpu_workload *workload = NULL;
1661 gvt_dbg_el("ring %s cur workload == last\n",
1674 gvt_dbg_el("ring %s begin a new workload\n", engine->name);
1690 workload = alloc_workload(vgpu);
1691 if (IS_ERR(workload))
1692 return workload;
1694 workload->engine = engine;
1695 workload->ctx_desc = *desc;
1696 workload->ring_context_gpa = ring_context_gpa;
1697 workload->rb_head = head;
1698 workload->guest_rb_head = guest_head;
1699 workload->rb_tail = tail;
1700 workload->rb_start = start;
1701 workload->rb_ctl = ctl;
1709 workload->wa_ctx.indirect_ctx.guest_gma =
1711 workload->wa_ctx.indirect_ctx.size =
1715 if (workload->wa_ctx.indirect_ctx.size != 0) {
1717 workload->wa_ctx.indirect_ctx.guest_gma,
1718 workload->wa_ctx.indirect_ctx.size)) {
1720 workload->wa_ctx.indirect_ctx.guest_gma);
1721 kmem_cache_free(s->workloads, workload);
1726 workload->wa_ctx.per_ctx.guest_gma =
1728 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1729 if (workload->wa_ctx.per_ctx.valid) {
1731 workload->wa_ctx.per_ctx.guest_gma,
1734 workload->wa_ctx.per_ctx.guest_gma);
1735 kmem_cache_free(s->workloads, workload);
1741 gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
1742 workload, engine->name, head, tail, start, ctl);
1744 ret = prepare_mm(workload);
1746 kmem_cache_free(s->workloads, workload);
1750 /* Only scan and shadow the first workload in the queue
1757 ret = intel_gvt_scan_and_shadow_workload(workload);
1763 intel_vgpu_destroy_workload(workload);
1769 intel_vgpu_destroy_workload(workload);
1773 return workload;
1777 * intel_vgpu_queue_workload - Queue a vGPU workload
1778 * @workload: the workload to queue
1780 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1782 list_add_tail(&workload->list,
1783 workload_q_head(workload->vgpu, workload->engine));
1784 intel_gvt_kick_schedule(workload->vgpu->gvt);
1785 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
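
intel_vgpu_queue_workload() (lines 1780-1785) appends the workload to its vGPU's per-engine queue, kicks the scheduler, and wakes the thread sleeping on that engine's wait queue. A user-space analogue of that append-then-wake step, again using a pthread condition variable for the wait queue; fake_queue and fake_node are hypothetical:

    #include <pthread.h>
    #include <stddef.h>

    struct fake_node  { struct fake_node *next; };
    struct fake_queue {
            pthread_mutex_t  lock;
            pthread_cond_t   waitq;            /* scheduler.waitq[engine->id] analogue */
            struct fake_node *head, *tail;
    };

    /* Append to the per-engine queue, then wake whoever is waiting on it. */
    static void queue_workload_like(struct fake_queue *q, struct fake_node *n)
    {
            pthread_mutex_lock(&q->lock);
            n->next = NULL;
            if (q->tail)
                    q->tail->next = n;
            else
                    q->head = n;
            q->tail = n;
            pthread_mutex_unlock(&q->lock);

            pthread_cond_signal(&q->waitq);    /* wake_up() analogue */
    }
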