Lines Matching refs:exec
157 struct vc4_exec_info *exec[2];
169 exec[0] = vc4_first_bin_job(vc4);
170 exec[1] = vc4_first_render_job(vc4);
171 if (!exec[0] && !exec[1]) {
179 if (!exec[i])
183 list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
185 state->bo_count += exec[i]->bo_count + unref_list_count;
198 if (!exec[i])
201 for (j = 0; j < exec[i]->bo_count; j++) {
202 bo = to_vc4_bo(exec[i]->bo[j]);
210 drm_gem_object_get(exec[i]->bo[j]);
211 kernel_state->bo[k++] = exec[i]->bo[j];
214 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
225 if (exec[0])
226 state->start_bin = exec[0]->ct0ca;
227 if (exec[1])
228 state->start_render = exec[1]->ct1ca;
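
The matches above and below appear to come from the VC4 job-submission code (drivers/gpu/drm/vc4/vc4_gem.c). This first group, lines 157-228, is the GPU hang-state capture: it snapshots whichever bin and render jobs were in flight and counts every BO they reference, including the driver-private BOs parked on unref_list. A minimal sketch of that two-job walk, assuming the job lists are guarded by the driver's job_lock spinlock (function name hypothetical):

#include "vc4_drv.h"    /* driver-private structs and helpers */

/* Sketch: count the BOs referenced by the active bin and render jobs so
 * the hang-state dump can allocate one flat array for both.  Only the
 * helpers and fields visible in the matches above are relied on. */
static unsigned int sketch_count_hang_state_bos(struct vc4_dev *vc4)
{
        struct vc4_exec_info *exec[2];
        unsigned long irqflags;
        unsigned int i, bo_count = 0;

        spin_lock_irqsave(&vc4->job_lock, irqflags);    /* assumed job-list lock */
        exec[0] = vc4_first_bin_job(vc4);
        exec[1] = vc4_first_render_job(vc4);
        if (!exec[0] && !exec[1]) {
                /* Nothing was on the hardware; no state worth capturing. */
                spin_unlock_irqrestore(&vc4->job_lock, irqflags);
                return 0;
        }

        for (i = 0; i < 2; i++) {
                struct vc4_bo *bo;
                unsigned int unref_count = 0;

                if (!exec[i])
                        continue;       /* only one of the two queues may be busy */

                list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
                        unref_count++;

                bo_count += exec[i]->bo_count + unref_count;
        }

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        return bo_count;
}

The start_bin/start_render addresses saved at lines 226/228 are simply the CT0CA/CT1CA control-list addresses the hardware was executing when it hung.
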
475 struct vc4_exec_info *exec;
481 exec = vc4_first_bin_job(vc4);
482 if (!exec)
490 if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
491 vc4_perfmon_start(vc4, exec->perfmon);
496 if (exec->ct0ca != exec->ct0ea) {
497 trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,
498 exec->ct0ea);
499 submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
503 vc4_move_job_to_render(dev, exec);
511 if (next && next->perfmon == exec->perfmon)
520 struct vc4_exec_info *exec = vc4_first_render_job(vc4);
522 if (!exec)
536 trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea);
537 submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
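
Lines 475-537 are the two job-kick helpers. The bin kick only submits control-list thread 0 when the binner CL is non-empty (ct0ca != ct0ea); a render-only job is moved straight onto the render queue instead. A condensed sketch of that decision, reusing only the calls matched above (wrapper name hypothetical):

#include "vc4_drv.h"

/* Sketch: kick the next bin job.  Start its perfmon if a different one
 * (or none) is currently active, then either submit the binner control
 * list or hand the job directly to the render queue. */
static void sketch_submit_next_bin_job(struct drm_device *dev)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

        if (!exec)
                return;

        if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
                vc4_perfmon_start(vc4, exec->perfmon);

        if (exec->ct0ca != exec->ct0ea) {
                /* Non-empty binner CL: launch it on thread 0. */
                submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
        } else {
                /* Nothing to bin: the job goes straight to rendering. */
                vc4_move_job_to_render(dev, exec);
        }
}

The render-side helper at lines 520-537 has the same shape for thread 1, using ct1ca/ct1ea.
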
541 vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
549 list_move_tail(&exec->head, &vc4->render_job_list);
555 vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
560 for (i = 0; i < exec->bo_count; i++) {
561 bo = to_vc4_bo(exec->bo[i]);
564 dma_resv_add_fence(bo->base.base.resv, exec->fence,
568 list_for_each_entry(bo, &exec->unref_list, unref_head) {
572 for (i = 0; i < exec->rcl_write_bo_count; i++) {
573 bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
576 dma_resv_add_fence(bo->base.base.resv, exec->fence,
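
Lines 555-576 (vc4_update_bo_seqnos) publish the job's fence on every reservation object the job touches; the rcl_write_bo[] entries are the BOs the render CL actually writes. A filled-in sketch of what those loops look like, assuming the READ/WRITE dma_resv usage split implied by the two loops; the per-BO seqno fields are the driver's own bookkeeping and the function name is hypothetical:

#include <linux/dma-resv.h>
#include "vc4_drv.h"

/* Sketch: attach the job fence to each BO's reservation object so later
 * users wait on this job.  Read-only BOs get a READ entry, the RCL
 * write targets get WRITE. */
static void sketch_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
        struct vc4_bo *bo;
        unsigned int i;

        for (i = 0; i < exec->bo_count; i++) {
                bo = to_vc4_bo(exec->bo[i]);
                bo->seqno = seqno;      /* assumed per-BO bookkeeping field */
                dma_resv_add_fence(bo->base.base.resv, exec->fence,
                                   DMA_RESV_USAGE_READ);
        }

        list_for_each_entry(bo, &exec->unref_list, unref_head)
                bo->seqno = seqno;      /* driver-private BOs get seqnos too */

        for (i = 0; i < exec->rcl_write_bo_count; i++) {
                bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
                bo->write_seqno = seqno;        /* assumed, mirrors bo->seqno */
                dma_resv_add_fence(bo->base.base.resv, exec->fence,
                                   DMA_RESV_USAGE_WRITE);
        }
}

Note that dma_resv_add_fence() requires a reserved fence slot, which is why the locking path would typically call dma_resv_reserve_fences() while it still holds each reservation.
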
583 struct vc4_exec_info *exec,
588 for (i = 0; i < exec->bo_count; i++)
589 dma_resv_unlock(exec->bo[i]->resv);
598 * (all of which are on exec->unref_list). They're entirely private
603 struct vc4_exec_info *exec,
614 bo = exec->bo[contended_lock];
622 for (i = 0; i < exec->bo_count; i++) {
626 bo = exec->bo[i];
633 bo = exec->bo[j];
638 bo = exec->bo[contended_lock];
658 for (i = 0; i < exec->bo_count; i++) {
659 bo = exec->bo[i];
663 vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
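
Lines 583-663 are the reservation locking helpers: unlock is a straight loop over exec->bo[], while lock is the usual ww-acquire slow path, backing off and retrying when a contended BO returns -EDEADLK. A condensed sketch of that loop using the generic dma-resv wrappers (function name hypothetical):

#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>
#include "vc4_drv.h"

/* Sketch: lock every BO's reservation with deadlock avoidance.  On
 * -EDEADLK, drop everything held so far, take the contended lock with
 * the "slow" variant, and retry the whole list. */
static int sketch_lock_bo_reservations(struct vc4_exec_info *exec,
                                       struct ww_acquire_ctx *acquire_ctx)
{
        int contended_lock = -1;
        int i, j, ret;

        ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
        if (contended_lock != -1)
                dma_resv_lock_slow(exec->bo[contended_lock]->resv, acquire_ctx);

        for (i = 0; i < exec->bo_count; i++) {
                if (i == contended_lock)
                        continue;       /* already held from the slow path */

                ret = dma_resv_lock_interruptible(exec->bo[i]->resv, acquire_ctx);
                if (ret) {
                        /* Back out every lock taken so far. */
                        for (j = 0; j < i; j++)
                                dma_resv_unlock(exec->bo[j]->resv);
                        if (contended_lock >= i)
                                dma_resv_unlock(exec->bo[contended_lock]->resv);

                        if (ret == -EDEADLK) {
                                contended_lock = i;
                                goto retry;
                        }

                        ww_acquire_fini(acquire_ctx);
                        return ret;
                }
        }

        ww_acquire_done(acquire_ctx);
        return 0;
}

Newer DRM code often uses drm_gem_lock_reservations() or drm_exec for this, but the open-coded loop above matches the structure of the lines at 614-659.
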
681 vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
699 exec->seqno = seqno;
702 vc4->dma_fence_context, exec->seqno);
703 fence->seqno = exec->seqno;
704 exec->fence = &fence->base;
707 drm_syncobj_replace_fence(out_sync, exec->fence);
709 vc4_update_bo_seqnos(exec, seqno);
711 vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
713 list_add_tail(&exec->head, &vc4->bin_job_list);
721 if (vc4_first_bin_job(vc4) == exec &&
722 (!renderjob || renderjob->perfmon == exec->perfmon)) {
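
Lines 681-722 (vc4_queue_submit) mint the job's dma_fence on the device's fence context, optionally install it in the caller's syncobj, publish it on the BOs, drop the reservations, and queue the job, kicking the hardware only if the job is now at the head of the bin queue. A hedged sketch of that setup; the vc4_fence wrapper, the emit_seqno counter, the use of job_lock as the fence lock and vc4_fence_ops are assumptions about driver internals not shown in the matches:

#include <linux/dma-fence.h>
#include <drm/drm_syncobj.h>
#include "vc4_drv.h"

/* Sketch: create and publish the job fence, then queue the job. */
static int sketch_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
                               struct ww_acquire_ctx *acquire_ctx,
                               struct drm_syncobj *out_sync)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_fence *fence;        /* assumed: wraps a struct dma_fence base */
        struct vc4_exec_info *renderjob;
        uint64_t seqno;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;

        seqno = ++vc4->emit_seqno;      /* assumed per-device seqno counter */
        exec->seqno = seqno;

        dma_fence_init(&fence->base, &vc4_fence_ops,    /* assumed fence ops */
                       &vc4->job_lock, vc4->dma_fence_context, exec->seqno);
        fence->seqno = exec->seqno;
        exec->fence = &fence->base;

        if (out_sync)
                drm_syncobj_replace_fence(out_sync, exec->fence);

        vc4_update_bo_seqnos(exec, seqno);      /* publish on every BO */
        vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

        list_add_tail(&exec->head, &vc4->bin_job_list);

        /* Kick the hardware only if this job is now first in the bin queue
         * and doesn't conflict with the running render job's perfmon. */
        renderjob = vc4_first_render_job(vc4);
        if (vc4_first_bin_job(vc4) == exec &&
            (!renderjob || renderjob->perfmon == exec->perfmon))
                vc4_submit_next_bin_job(dev);

        return 0;
}

The sketch omits the locking that would normally serialize the seqno/queue manipulation against the job-done interrupt path.
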
733 * vc4_cl_lookup_bos() - Sets up exec->bo[] with the GEM objects
737 * @exec: V3D job being set up
746 struct vc4_exec_info *exec)
748 struct drm_vc4_submit_cl *args = exec->args;
752 exec->bo_count = args->bo_handle_count;
754 if (!exec->bo_count) {
763 exec->bo_count, &exec->bo);
768 for (i = 0; i < exec->bo_count; i++) {
769 ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
782 * exec->bo to NULL so that vc4_complete_exec() skips the 'BO release'
786 vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));
790 for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
791 drm_gem_object_put(exec->bo[i]);
793 kvfree(exec->bo);
794 exec->bo = NULL;
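
Lines 733-794 (vc4_cl_lookup_bos) resolve the userspace handle array into exec->bo[] and take a usecnt reference on every BO, unwinding everything on failure; clearing exec->bo at the end is what lets vc4_complete_exec() skip its own BO release. A sketch of that shape, assuming the generic drm_gem_objects_lookup() helper that the &exec->bo call at line 763 suggests (function name and debug message hypothetical):

#include <drm/drm_gem.h>
#include "vc4_drv.h"

/* Sketch: look up the submit's BO handles and pin their use counts. */
static int sketch_lookup_bos(struct drm_device *dev, struct drm_file *file_priv,
                             struct vc4_exec_info *exec)
{
        struct drm_vc4_submit_cl *args = exec->args;
        int i, ret;

        exec->bo_count = args->bo_handle_count;
        if (!exec->bo_count) {
                DRM_DEBUG("Rendering requires BOs\n");
                return -EINVAL;
        }

        ret = drm_gem_objects_lookup(file_priv,
                                     u64_to_user_ptr(args->bo_handles),
                                     exec->bo_count, &exec->bo);
        if (ret)
                goto fail_put;

        for (i = 0; i < exec->bo_count; i++) {
                ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
                if (ret)
                        goto fail_dec_usecnt;
        }

        return 0;

fail_dec_usecnt:
        /* Undo only the usecnt refs taken so far. */
        while (i--)
                vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));
fail_put:
        /* Drop whatever GEM references the lookup managed to take, then
         * clear exec->bo so vc4_complete_exec() skips its BO release. */
        if (exec->bo) {
                for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
                        drm_gem_object_put(exec->bo[i]);
                kvfree(exec->bo);
        }
        exec->bo = NULL;
        return ret;
}
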
799 vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
801 struct drm_vc4_submit_cl *args = exec->args;
821 DRM_DEBUG("overflow in exec arguments\n");
841 exec->shader_rec_u = temp + shader_rec_offset;
842 exec->uniforms_u = temp + uniforms_offset;
843 exec->shader_state = temp + exec_size;
844 exec->shader_state_size = args->shader_rec_count;
853 if (copy_from_user(exec->shader_rec_u,
860 if (copy_from_user(exec->uniforms_u,
873 exec->exec_bo = &bo->base;
875 list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
876 &exec->unref_list);
878 exec->ct0ca = exec->exec_bo->dma_addr + bin_offset;
880 exec->bin_u = bin;
882 exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
883 exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset;
884 exec->shader_rec_size = args->shader_rec_size;
886 exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
887 exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset;
888 exec->uniforms_size = args->uniforms_size;
891 exec->exec_bo->vaddr + bin_offset,
893 exec);
897 ret = vc4_validate_shader_recs(dev, exec);
901 if (exec->found_tile_binning_mode_config_packet) {
902 ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
911 ret = vc4_wait_for_seqno(dev, exec->bin_dep_seqno, ~0ull, true);
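
Lines 799-911 (vc4_get_bcl) stage the bin control list, shader records and uniforms from userspace into one temporary buffer, then validate them into a single exec BO; the "overflow in exec arguments" check guards the offset arithmetic before anything is allocated or copied. A sketch of that layout check, assuming the sizes come straight from the submit args and that vc4_shader_state is the per-record bookkeeping appended after the copied data (function name hypothetical):

#include "vc4_drv.h"

/* Sketch: validate the staging-buffer layout
 *   [ bin CL | shader records | uniforms | shader_state array ]
 * and reject any size combination that wraps around. */
static int sketch_check_bcl_layout(const struct drm_vc4_submit_cl *args)
{
        u32 bin_offset = 0;
        u32 shader_rec_offset = roundup(bin_offset + args->bin_cl_size, 16);
        u32 uniforms_offset = shader_rec_offset + args->shader_rec_size;
        u32 exec_size = uniforms_offset + args->uniforms_size;
        u32 temp_size = exec_size + (sizeof(struct vc4_shader_state) *
                                     args->shader_rec_count);

        if (shader_rec_offset < args->bin_cl_size ||
            uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                       sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_DEBUG("overflow in exec arguments\n");
                return -EINVAL;
        }

        return 0;
}

Once the sections are copied in and validated, the hardware pointers are just offsets from the exec BO's DMA address, which is exactly what lines 878-887 set up (ct0ca, shader_rec_p, uniforms_p).
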
919 vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
928 if (exec->fence) {
929 dma_fence_signal(exec->fence);
930 dma_fence_put(exec->fence);
933 if (exec->bo) {
934 for (i = 0; i < exec->bo_count; i++) {
935 struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);
938 drm_gem_object_put(exec->bo[i]);
940 kvfree(exec->bo);
943 while (!list_empty(&exec->unref_list)) {
944 struct vc4_bo *bo = list_first_entry(&exec->unref_list,
952 vc4->bin_alloc_used &= ~exec->bin_slots;
956 if (exec->bin_bo_used)
960 vc4_perfmon_put(exec->perfmon);
964 kfree(exec);
978 struct vc4_exec_info *exec =
981 list_del(&exec->head);
984 vc4_complete_exec(&vc4->base, exec);
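
Lines 919-984 (vc4_complete_exec and the job-done worker) tear a job down in a fixed order: signal and drop the fence, release the user BOs, free the driver-private BOs on unref_list, return the binner memory slots and bin BO reference, drop the perfmon, and finally free the exec itself. A condensed sketch of that unwind; vc4_v3d_bin_bo_put() is my assumed counterpart to the vc4_v3d_bin_bo_get() seen at line 902, and the per-BO usecnt drop mirrors vc4_cl_lookup_bos():

#include "vc4_drv.h"

/* Sketch of the teardown order visible in the matches above. */
static void sketch_complete_exec(struct vc4_dev *vc4, struct vc4_exec_info *exec)
{
        unsigned int i;

        if (exec->fence) {
                dma_fence_signal(exec->fence);
                dma_fence_put(exec->fence);
        }

        if (exec->bo) {
                for (i = 0; i < exec->bo_count; i++) {
                        struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);

                        vc4_bo_dec_usecnt(bo);          /* undo vc4_cl_lookup_bos() */
                        drm_gem_object_put(exec->bo[i]);
                }
                kvfree(exec->bo);
        }

        while (!list_empty(&exec->unref_list)) {
                struct vc4_bo *bo = list_first_entry(&exec->unref_list,
                                                     struct vc4_bo, unref_head);

                list_del(&bo->unref_head);
                drm_gem_object_put(&bo->base.base);
        }

        vc4->bin_alloc_used &= ~exec->bin_slots;        /* free the binner slots */

        if (exec->bin_bo_used)
                vc4_v3d_bin_bo_put(vc4);                /* assumed counterpart */

        vc4_perfmon_put(exec->perfmon);
        kfree(exec);
}
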
1031 * jobs that had completed and unrefs their BOs and frees their exec
1125 struct vc4_exec_info *exec;
1155 exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
1156 if (!exec) {
1157 DRM_ERROR("malloc failure on exec struct\n");
1160 exec->dev = vc4;
1164 kfree(exec);
1168 exec->args = args;
1169 INIT_LIST_HEAD(&exec->unref_list);
1171 ret = vc4_cl_lookup_bos(dev, file_priv, exec);
1176 exec->perfmon = vc4_perfmon_find(vc4file,
1178 if (!exec->perfmon) {
1207 if (exec->args->bin_cl_size != 0) {
1208 ret = vc4_get_bcl(dev, exec);
1212 exec->ct0ca = 0;
1213 exec->ct0ea = 0;
1216 ret = vc4_get_rcl(dev, exec);
1220 ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
1241 exec->args = NULL;
1243 ret = vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);
1245 /* The syncobj isn't part of the exec data and we need to free our
1260 vc4_complete_exec(&vc4->base, exec);
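
Lines 1125-1260 are the submit ioctl itself, which strings the pieces above together; every setup step funnels its error into vc4_complete_exec() so partial state is unwound in one place. A compressed sketch of the body shape (not a compilable unit; the perfmonid field name and the -ENOMEM/-ENOENT codes are inferred from the surrounding matches, and variables are as in the ioctl):

/* Sketch: the submit-ioctl flow traced by the matches above. */
exec = kcalloc(1, sizeof(*exec), GFP_KERNEL);
if (!exec)
        return -ENOMEM;

exec->dev = vc4;
exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);

ret = vc4_cl_lookup_bos(dev, file_priv, exec);
if (ret)
        goto fail;

if (args->perfmonid) {                  /* assumed field name */
        exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid);
        if (!exec->perfmon) {
                ret = -ENOENT;
                goto fail;
        }
}

if (exec->args->bin_cl_size != 0) {
        ret = vc4_get_bcl(dev, exec);
        if (ret)
                goto fail;
} else {
        exec->ct0ca = 0;                /* render-only job: empty bin CL */
        exec->ct0ea = 0;
}

ret = vc4_get_rcl(dev, exec);
if (ret)
        goto fail;

ret = vc4_lock_bo_reservations(dev, exec, &acquire_ctx);
if (ret)
        goto fail;

/* args points at the ioctl's stack copy; don't keep it past this call. */
exec->args = NULL;

return vc4_queue_submit(dev, exec, &acquire_ctx, out_sync);

fail:
        vc4_complete_exec(&vc4->base, exec);
        return ret;
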
1304 /* Waiting for exec to finish would need to be done before