Lines Matching defs:workload
59 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
62 struct intel_context *ctx = workload->req->context;
64 if (WARN_ON(!workload->shadow_mm))
67 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
72 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
80 static void sr_oa_regs(struct intel_vgpu_workload *workload,
83 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
97 if (workload->engine->id != RCS0)
101 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
103 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
106 workload->flex_mmio[i] = reg_state[state_offset + 1];
111 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
113 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
118 reg_state[state_offset + 1] = workload->flex_mmio[i];
123 static int populate_shadow_context(struct intel_vgpu_workload *workload)
125 struct intel_vgpu *vgpu = workload->vgpu;
127 struct intel_context *ctx = workload->req->context;
137 int ring_id = workload->engine->id;
146 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
148 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
151 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
160 if (workload->engine->id == RCS0) {
169 workload->ring_context_gpa +
175 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
177 gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
178 workload->engine->name, workload->ctx_desc.lrca,
179 workload->ctx_desc.context_id,
180 workload->ring_context_gpa);
190 workload->ctx_desc.lrca) &&
192 workload->ring_context_gpa))
195 s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
196 s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
202 context_page_num = workload->engine->context_size;
205 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
214 (u32)((workload->ctx_desc.lrca + i) <<
276 struct intel_vgpu_workload *workload;
293 workload = scheduler->current_workload[ring_id];
294 if (unlikely(!workload))
300 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
303 workload->vgpu, rq->engine);
304 scheduler->engine_owner[ring_id] = workload->vgpu;
307 ring_id, workload->vgpu->id);
309 atomic_set(&workload->shadow_ctx_active, 1);
312 save_ring_hw_state(workload->vgpu, rq->engine);
313 atomic_set(&workload->shadow_ctx_active, 0);
316 save_ring_hw_state(workload->vgpu, rq->engine);
322 wake_up(&workload->shadow_ctx_status_wq);
328 struct intel_vgpu_workload *workload)
337 desc |= (u64)workload->ctx_desc.addressing_mode <<
343 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
345 struct intel_vgpu *vgpu = workload->vgpu;
346 struct i915_request *req = workload->req;
373 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
376 workload->rb_len);
380 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
383 workload->shadow_ring_buffer_va = cs;
386 workload->rb_len);
388 cs += workload->rb_len / sizeof(u32);
389 intel_ring_advance(workload->req, cs);
414 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
417 struct intel_vgpu_mm *mm = workload->shadow_mm;
439 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
441 struct intel_vgpu *vgpu = workload->vgpu;
445 if (workload->req)
448 rq = i915_request_create(s->shadow[workload->engine->id]);
454 workload->req = i915_request_get(rq);
459 * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
461 * @workload: an abstract entity for each execlist submission.
463 * This function is called before the workload is submitted to i915, to make
464 * sure the content of the workload is valid.
466 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
468 struct intel_vgpu *vgpu = workload->vgpu;
474 if (workload->shadow)
477 if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
478 shadow_context_descriptor_update(s->shadow[workload->engine->id],
479 workload);
481 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
485 if (workload->engine->id == RCS0 &&
486 workload->wa_ctx.indirect_ctx.size) {
487 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
492 workload->shadow = true;
496 release_shadow_wa_ctx(&workload->wa_ctx);
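The kernel-doc at 459-464 describes intel_gvt_scan_and_shadow_workload() as the pre-submission audit step. Judging from the matched lines (the early return on workload->shadow at 474 and the flag set at 492), the call is idempotent, so a caller-side sketch like the one below is safe. This is an illustration, not code from the file:

static int shadow_before_dispatch(struct intel_vgpu_workload *workload)
{
	int ret;

	/* scans the ring buffer and, on RCS0, the indirect wa_ctx */
	ret = intel_gvt_scan_and_shadow_workload(workload);
	if (ret)
		return ret;

	/* workload->shadow is now set; repeated calls return 0 at once */
	return 0;
}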
500 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
502 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
504 struct intel_gvt *gvt = workload->vgpu->gvt;
509 list_for_each_entry(bb, &workload->shadow_bb, list) {
518 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
544 workload->req,
555 release_shadow_batch_buffer(workload);
561 struct intel_vgpu_workload *workload =
563 struct i915_request *rq = workload->req;
604 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
606 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
607 workload->rb_start;
610 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
614 if (list_empty(&workload->shadow_bb))
617 bb = list_first_entry(&workload->shadow_bb,
620 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
636 intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
638 struct intel_vgpu *vgpu = workload->vgpu;
642 ret = intel_vgpu_pin_mm(workload->shadow_mm);
648 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
649 !workload->shadow_mm->ppgtt_mm.shadowed) {
650 intel_vgpu_unpin_mm(workload->shadow_mm);
651 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
655 if (!list_empty(&workload->lri_shadow_mm)) {
656 list_for_each_entry(m, &workload->lri_shadow_mm,
661 &workload->lri_shadow_mm,
671 intel_vgpu_unpin_mm(workload->shadow_mm);
677 intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
681 if (!list_empty(&workload->lri_shadow_mm)) {
682 list_for_each_entry(m, &workload->lri_shadow_mm,
686 intel_vgpu_unpin_mm(workload->shadow_mm);
689 static int prepare_workload(struct intel_vgpu_workload *workload)
691 struct intel_vgpu *vgpu = workload->vgpu;
695 ret = intel_vgpu_shadow_mm_pin(workload);
701 update_shadow_pdps(workload);
703 set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
705 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
711 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
717 ret = copy_workload_to_ring_buffer(workload);
723 ret = prepare_shadow_batch_buffer(workload);
729 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
735 if (workload->prepare) {
736 ret = workload->prepare(workload);
743 release_shadow_wa_ctx(&workload->wa_ctx);
745 release_shadow_batch_buffer(workload);
747 intel_vgpu_shadow_mm_unpin(workload);
751 static int dispatch_workload(struct intel_vgpu_workload *workload)
753 struct intel_vgpu *vgpu = workload->vgpu;
757 gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
758 workload->engine->name, workload);
762 ret = intel_gvt_workload_req_alloc(workload);
766 ret = intel_gvt_scan_and_shadow_workload(workload);
770 ret = populate_shadow_context(workload);
772 release_shadow_wa_ctx(&workload->wa_ctx);
776 ret = prepare_workload(workload);
782 rq = fetch_and_zero(&workload->req);
786 if (!IS_ERR_OR_NULL(workload->req)) {
787 gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
788 workload->engine->name, workload->req);
789 i915_request_add(workload->req);
790 workload->dispatched = true;
794 workload->status = ret;
803 struct intel_vgpu_workload *workload = NULL;
808 * no current vgpu / will be scheduled out / no workload
826 * still have current workload, maybe the workload dispatcher
830 workload = scheduler->current_workload[engine->id];
831 gvt_dbg_sched("ring %s still have current workload %p\n",
832 engine->name, workload);
837 * pick a workload as current workload
838 * once current workload is set, schedule policy routines
839 * will wait until the current workload is finished when trying to
847 workload = scheduler->current_workload[engine->id];
849 gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
851 atomic_inc(&workload->vgpu->submission.running_workload_num);
854 return workload;
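The two comment blocks above (808 and 826-839) describe the selection policy: bail out when there is no schedulable vGPU, reuse an in-flight current workload, otherwise promote the queue head. A condensed sketch, with the locking and the actual queue pop (elided by the search) assumed:

static struct intel_vgpu_workload *
pick_next_workload_sketch(struct intel_gvt *gvt,
			  const struct intel_engine_cs *engine)
{
	struct intel_vgpu_workload *workload;

	/* the dispatcher may still own a workload on this ring */
	workload = gvt->scheduler.current_workload[engine->id];
	if (workload)
		return workload;

	/* assumed: the elided lines dequeue the head of the vGPU's
	 * per-engine queue into current_workload[] here */
	workload = gvt->scheduler.current_workload[engine->id];
	if (workload)
		atomic_inc(&workload->vgpu->submission.running_workload_num);
	return workload;
}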
888 static void update_guest_context(struct intel_vgpu_workload *workload)
890 struct i915_request *rq = workload->req;
891 struct intel_vgpu *vgpu = workload->vgpu;
893 struct intel_context *ctx = workload->req->context;
904 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
905 workload->ctx_desc.lrca);
909 head = workload->rb_head;
910 tail = workload->rb_tail;
911 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
941 (u32)((workload->ctx_desc.lrca + i) <<
968 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
969 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
973 if (!list_empty(&workload->lri_shadow_mm)) {
974 struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
978 update_guest_pdps(vgpu, workload->ring_context_gpa,
983 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
992 workload->ring_context_gpa +
1022 struct intel_vgpu_workload *workload =
1024 struct intel_vgpu *vgpu = workload->vgpu;
1026 struct i915_request *rq = workload->req;
1032 /* For the workload w/ request, we need to wait for the context
1034 * For the workload w/o request, directly complete the workload.
1037 wait_event(workload->shadow_ctx_status_wq,
1038 !atomic_read(&workload->shadow_ctx_active));
1041 * be set to -EIO. Use -EIO to set workload status so
1045 if (likely(workload->status == -EINPROGRESS)) {
1046 if (workload->req->fence.error == -EIO)
1047 workload->status = -EIO;
1049 workload->status = 0;
1052 if (!workload->status &&
1054 update_guest_context(workload);
1056 for_each_set_bit(event, workload->pending_events,
1061 i915_request_put(fetch_and_zero(&workload->req));
1064 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
1065 ring_id, workload, workload->status);
1069 list_del_init(&workload->list);
1071 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
1072 /* if workload->status is not successful, it means the HW GPU
1083 * the workload clean up here doesn't have any impact.
1088 workload->complete(workload);
1090 intel_vgpu_shadow_mm_unpin(workload);
1091 intel_vgpu_destroy_workload(workload);
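Reassembling the matched lines 1037-1061 gives the request-backed completion path that the comment at 1032 describes. A trimmed sketch (engine-reset and event handling omitted, as they are in the search output):

	wait_event(workload->shadow_ctx_status_wq,
		   !atomic_read(&workload->shadow_ctx_active));

	if (likely(workload->status == -EINPROGRESS)) {
		/* propagate a wedged-GPU fence error into the status */
		if (workload->req->fence.error == -EIO)
			workload->status = -EIO;
		else
			workload->status = 0;
	}

	if (!workload->status)
		update_guest_context(workload);

	i915_request_put(fetch_and_zero(&workload->req));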
1109 struct intel_vgpu_workload *workload = NULL;
1114 gvt_dbg_core("workload thread for ring %s started\n", engine->name);
1121 workload = pick_next_workload(gvt, engine);
1122 if (workload)
1129 if (!workload)
1132 gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
1133 engine->name, workload,
1134 workload->vgpu->id);
1138 gvt_dbg_sched("ring %s will dispatch workload %p\n",
1139 engine->name, workload);
1146 * workload. The vGPU may use these registers for checking
1148 * in this workload.
1150 update_vreg_in_ctx(workload);
1152 ret = dispatch_workload(workload);
1155 vgpu = workload->vgpu;
1156 gvt_vgpu_err("fail to dispatch workload, skip\n");
1160 gvt_dbg_sched("ring %s wait workload %p\n",
1161 engine->name, workload);
1162 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1165 gvt_dbg_sched("will complete workload %p, status: %d\n",
1166 workload, workload->status);
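Taken together, lines 1121-1166 imply the per-engine worker loop sketched below. Names not present in the listing are assumptions: in the upstream driver the completion helper is complete_current_workload(), but treat this as illustrative rather than the file's exact body:

	while (!kthread_should_stop()) {
		struct intel_vgpu_workload *workload;
		int ret;

		workload = pick_next_workload(gvt, engine);
		if (!workload)
			continue;	/* re-woken via scheduler.waitq */

		/* refresh ring vregs (e.g. RING_START) so the vGPU can
		 * inspect consistent values while this workload runs */
		update_vreg_in_ctx(workload);

		ret = dispatch_workload(workload);
		if (!ret)
			i915_request_wait(workload->req, 0,
					  MAX_SCHEDULE_TIMEOUT);

		/* completion runs regardless of dispatch status */
		complete_current_workload(gvt, engine->id);
	}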
1201 gvt_dbg_core("clean workload scheduler\n");
1218 gvt_dbg_core("init workload scheduler\n");
1228 gvt_err("fail to create workload thread\n");
1467 * intel_vgpu_destroy_workload - destroy a vGPU workload
1468 * @workload: workload to destroy
1470 * This function is called when destroying a vGPU workload.
1473 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1475 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1477 intel_context_unpin(s->shadow[workload->engine->id]);
1478 release_shadow_batch_buffer(workload);
1479 release_shadow_wa_ctx(&workload->wa_ctx);
1481 if (!list_empty(&workload->lri_shadow_mm)) {
1483 list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
1490 GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
1491 if (workload->shadow_mm)
1492 intel_vgpu_mm_put(workload->shadow_mm);
1494 kmem_cache_free(s->workloads, workload);
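Lines 1481-1494 show the tail of the teardown: any PPGTT mms attached by LRI scanning are released before the GEM_BUG_ON() sanity check, and shadow_mm may legitimately be NULL for a workload destroyed before prepare_mm() succeeded. A sketch, assuming the mms are linked through ppgtt_mm.link (the link field is elided by the search):

	struct intel_vgpu_mm *m, *mm;

	list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
				 ppgtt_mm.link) {
		list_del(&m->ppgtt_mm.link);
		intel_vgpu_mm_put(m);
	}

	GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
	if (workload->shadow_mm)
		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);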
1501 struct intel_vgpu_workload *workload;
1503 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1504 if (!workload)
1507 INIT_LIST_HEAD(&workload->list);
1508 INIT_LIST_HEAD(&workload->shadow_bb);
1509 INIT_LIST_HEAD(&workload->lri_shadow_mm);
1511 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1512 atomic_set(&workload->shadow_ctx_active, 0);
1514 workload->status = -EINPROGRESS;
1515 workload->vgpu = vgpu;
1517 return workload;
1536 static int prepare_mm(struct intel_vgpu_workload *workload)
1538 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1540 struct intel_vgpu *vgpu = workload->vgpu;
1556 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1558 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1562 workload->shadow_mm = mm;
1570 * intel_vgpu_create_workload - create a vGPU workload
1575 * This function is called when creating a vGPU workload.
1590 struct intel_vgpu_workload *workload = NULL;
1617 gvt_dbg_el("ring %s cur workload == last\n",
1630 gvt_dbg_el("ring %s begin a new workload\n", engine->name);
1646 workload = alloc_workload(vgpu);
1647 if (IS_ERR(workload))
1648 return workload;
1650 workload->engine = engine;
1651 workload->ctx_desc = *desc;
1652 workload->ring_context_gpa = ring_context_gpa;
1653 workload->rb_head = head;
1654 workload->guest_rb_head = guest_head;
1655 workload->rb_tail = tail;
1656 workload->rb_start = start;
1657 workload->rb_ctl = ctl;
1665 workload->wa_ctx.indirect_ctx.guest_gma =
1667 workload->wa_ctx.indirect_ctx.size =
1671 if (workload->wa_ctx.indirect_ctx.size != 0) {
1673 workload->wa_ctx.indirect_ctx.guest_gma,
1674 workload->wa_ctx.indirect_ctx.size)) {
1676 workload->wa_ctx.indirect_ctx.guest_gma);
1677 kmem_cache_free(s->workloads, workload);
1682 workload->wa_ctx.per_ctx.guest_gma =
1684 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1685 if (workload->wa_ctx.per_ctx.valid) {
1687 workload->wa_ctx.per_ctx.guest_gma,
1690 workload->wa_ctx.per_ctx.guest_gma);
1691 kmem_cache_free(s->workloads, workload);
1697 gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
1698 workload, engine->name, head, tail, start, ctl);
1700 ret = prepare_mm(workload);
1702 kmem_cache_free(s->workloads, workload);
1706 /* Only scan and shadow the first workload in the queue
1713 ret = intel_gvt_scan_and_shadow_workload(workload);
1719 intel_vgpu_destroy_workload(workload);
1725 intel_vgpu_destroy_workload(workload);
1729 return workload;
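The comment at 1706 ("Only scan and shadow the first workload in the queue") plus the matches at 1713-1725 suggest the lazy-shadow step below: only the workload that will run next is shadowed eagerly at create time, the rest wait for dispatch_workload(). A sketch, with the runtime-PM wrapper and the exact error labels elided:

	if (list_empty(workload_q_head(vgpu, engine)))
		ret = intel_gvt_scan_and_shadow_workload(workload);

	if (ret) {
		/* undo everything through the common destroy path */
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;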
1733 * intel_vgpu_queue_workload - Queue a vGPU workload
1734 * @workload: the workload to queue in
1736 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1738 list_add_tail(&workload->list,
1739 workload_q_head(workload->vgpu, workload->engine));
1740 intel_gvt_kick_schedule(workload->vgpu->gvt);
1741 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
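Putting the two public entry points together, a hypothetical producer (for example, an execlist submission path) would look like the sketch below; the create signature follows the upstream driver and is an assumption from this listing alone. The wake_up() at 1740 is what unblocks the per-engine worker loop sketched earlier:

	struct intel_vgpu_workload *workload;

	workload = intel_vgpu_create_workload(vgpu, engine, desc);
	if (IS_ERR(workload))
		return PTR_ERR(workload);

	/* appends to the per-vGPU, per-engine queue, kicks the
	 * scheduler and wakes scheduler.waitq[engine->id] */
	intel_vgpu_queue_workload(workload);
	return 0;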