Lines matching refs: gpu (references to the identifier 'gpu' in drivers/gpu/drm/etnaviv/etnaviv_buffer.c; the number on each line is the source line in that file)

89 static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
94 lockdep_assert_held(&gpu->lock);
102 if (gpu->exec_state == ETNA_PIPE_2D)
104 else if (gpu->exec_state == ETNA_PIPE_3D)
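The two matches above are the flush selection in etnaviv_cmd_select_pipe(): before the FE is pointed at a different pipe, the caches of the pipe being left are flushed. A sketch of the logic around lines 102-104, assuming the usual generated etnaviv register headers for the VIVS_GL_FLUSH_CACHE_* constants:

    u32 flush = 0;

    /* leaving 2D: flush the 2D PE cache; leaving 3D: depth and color */
    if (gpu->exec_state == ETNA_PIPE_2D)
            flush = VIVS_GL_FLUSH_CACHE_PE2D;
    else if (gpu->exec_state == ETNA_PIPE_3D)
            flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
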
115 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
121 dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
123 &gpu->mmu_context->cmdbuf_mapping) +
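Lines 121-123 are one dev_info() call in etnaviv_buffer_dump(): it prints the kernel virtual address of the dump window, its address inside the GPU's cmdbuf mapping, and the space left after the dumped region. A sketch of the full statement; the locals (ptr, buf, off, len, size) are inferred from context, not confirmed by the matches:

    dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
             ptr,
             etnaviv_cmdbuf_get_va(buf, &gpu->mmu_context->cmdbuf_mapping) + off,
             size - len * 4 - off);
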
151 static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
158 &gpu->mmu_context->cmdbuf_mapping) +
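etnaviv_buffer_reserve() hands back the GPU address where the next commands will land, wrapping the write offset back to the start of the ring when the requested dwords no longer fit at the tail. A sketch of how the body plausibly reads, given the matched signature and the line-158 continuation:

    static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
            struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
    {
            /* wrap to the buffer start if the tail can't hold the request */
            if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                    buffer->user_size = 0;

            return etnaviv_cmdbuf_get_va(buffer,
                                         &gpu->mmu_context->cmdbuf_mapping) +
                   buffer->user_size;
    }
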
162 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
164 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
166 lockdep_assert_held(&gpu->lock);
173 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
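etnaviv_buffer_init() arms the ring: it resets the write offset and emits the initial WAIT/LINK pair that keeps the front end spinning until a submit patches the WAIT; the line-173 match is the LINK target pointing back at that WAIT. A sketch, assuming the CMD_WAIT/CMD_LINK helpers defined earlier in this file (newer trees pass a wait-cycle count to CMD_WAIT, so treat the exact invocation as an assumption):

    buffer->user_size = 0;

    CMD_WAIT(buffer);
    CMD_LINK(buffer, 2,
             etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
             + buffer->user_size - 4);

    /* returned prefetch count is in 64-bit units */
    return buffer->user_size / 8;
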
179 u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
181 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
183 lockdep_assert_held(&gpu->lock);
187 if (gpu->identity.features & chipFeatures_PIPE_3D) {
197 if (gpu->identity.features & chipFeatures_PIPE_2D) {
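Lines 187 and 197 gate the MMUv2 setup on the per-pipe feature bits, so the MTLB address and safe address get programmed once for each pipe the chip actually has. A sketch of the 3D half, assuming the usual CMD_* helpers and MMUv2 register names from the generated headers:

    if (gpu->identity.features & chipFeatures_PIPE_3D) {
            CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                           mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
            CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
            /* semaphore + stall so the FE waits for the PE before going on */
            CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
            CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
    }
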
214 u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
216 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
218 lockdep_assert_held(&gpu->lock);
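The PTA variant is much shorter: in the kernel-secure mode the page-table array entry is selected by index instead of programming the MTLB address directly. A minimal sketch, assuming VIVS_MMUv2_PTA_CONFIG and its INDEX field from the generated headers:

    buffer->user_size = 0;

    CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
                   VIVS_MMUv2_PTA_CONFIG_INDEX(id));
    CMD_END(buffer);

    buffer->user_size = ALIGN(buffer->user_size, 8);
    return buffer->user_size / 8;
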
232 void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
234 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
237 bool has_blt = !!(gpu->identity.minor_features5 &
240 lockdep_assert_held(&gpu->lock);
242 if (gpu->exec_state == ETNA_PIPE_2D)
244 else if (gpu->exec_state == ETNA_PIPE_3D)
257 link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
268 if (gpu->exec_state == ETNA_PIPE_3D) {
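etnaviv_buffer_end() repeats the exec_state pattern from etnaviv_cmd_select_pipe() (lines 242/244), but with the fuller 3D cache set, and the line-268 branch additionally flushes the tile status cache (through the BLT engine when minor_features5 advertises one, per line 237). A sketch of the flush selection, again assuming the VIVS_GL_FLUSH_CACHE_* names:

    if (gpu->exec_state == ETNA_PIPE_2D)
            flush = VIVS_GL_FLUSH_CACHE_PE2D;
    else if (gpu->exec_state == ETNA_PIPE_3D)
            flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                    VIVS_GL_FLUSH_CACHE_COLOR |
                    VIVS_GL_FLUSH_CACHE_TEXTURE |
                    VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                    VIVS_GL_FLUSH_CACHE_SHADER_L2;
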
300 void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
302 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
306 lockdep_assert_held(&gpu->lock);
313 target = etnaviv_buffer_reserve(gpu, buffer, dwords);
325 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
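A sync point pauses the GPU: an event is signalled from the PE, the stream ENDs so the FE stops, and a fresh WAIT/LINK pair (the line-325 match is its LINK target) is appended for the event handler to resume into later. A sketch of the sequence after the reservation at line 313, assuming the VIVS_GL_EVENT encoding from the generated headers:

    /* 1 event + 1 end + 1 wait + 1 link = 4 dwords */
    target = etnaviv_buffer_reserve(gpu, buffer, dwords);

    /* signal the sync point event from the pixel engine */
    CMD_LOAD_STATE(buffer, VIVS_GL_EVENT,
                   VIVS_GL_EVENT_EVENT_ID(event) | VIVS_GL_EVENT_FROM_PE);

    /* stop the FE to 'pause' the GPU */
    CMD_END(buffer);

    /* append the WAIT/LINK used by the resume path */
    CMD_WAIT(buffer);
    CMD_LINK(buffer, 2,
             etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
             + buffer->user_size - 4);
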
339 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
343 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
347 bool switch_context = gpu->exec_state != exec_state;
348 bool switch_mmu_context = gpu->mmu_context != mmu_context;
349 unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
350 bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
351 bool has_blt = !!(gpu->identity.minor_features5 &
354 lockdep_assert_held(&gpu->lock);
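Lines 347-350 derive the three conditions that shape the rest of the submit: a pipe switch, an MMU context switch, and a flush forced by a moved flush_seq. As the line-354 assertion shows, every entry point in this file runs under gpu->lock, so a caller looks roughly like the following (ctx and cmdbuf are hypothetical placeholders, not names taken from the driver):

    mutex_lock(&gpu->lock);
    etnaviv_buffer_queue(gpu, ETNA_PIPE_3D, ctx, cmdbuf);
    mutex_unlock(&gpu->lock);
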
357 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
360 &gpu->mmu_context->cmdbuf_mapping);
376 if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
387 if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
390 target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
398 struct etnaviv_iommu_context *old_context = gpu->mmu_context;
400 gpu->mmu_context = etnaviv_iommu_context_get(mmu_context);
406 if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
418 gpu->sec_mode == ETNA_SEC_KERNEL) {
420 etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
426 if (gpu->sec_mode == ETNA_SEC_NONE)
427 flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);
437 gpu->flush_seq = new_flush_seq;
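Lines 349-350 and 437 are the MMU dirty-tracking handshake: whoever rewrites a context's page tables bumps mmu_context->flush_seq, the submit path snapshots it with READ_ONCE(), emits a TLB flush when it moved (or when the context itself changed), and stores the snapshot only once the flush commands are in the ring. Condensed to the bare pattern:

    unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
    bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;

    if (need_flush) {
            /* emit the flush for the active IOMMU flavour: a full reload
             * for v1 (line 406), an MTLB/PTA based flush for v2
             * (lines 418-427) */
            gpu->flush_seq = new_flush_seq;
    }
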
441 etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
442 gpu->exec_state = exec_state;
447 &gpu->mmu_context->cmdbuf_mapping);
471 return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
478 if (gpu->exec_state == ETNA_PIPE_2D) {
508 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
514 etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
537 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
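The tail of etnaviv_buffer_queue() is what actually kicks the hardware: after linking the ring to the user command buffer (the line-514 match), the WAIT left behind by the previous submit is back-patched into a LINK so the spinning FE jumps into the new commands. A sketch, assuming etnaviv_buffer_replace_wait() and the FE LINK header encodings; waitlink_offset, link_dwords and link_target stand for the function's locals:

    /* kick: turn the old WAIT into a LINK to the freshly queued commands */
    etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                VIV_FE_LINK_HEADER_OP_LINK |
                                VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                link_target);

    if (drm_debug_enabled(DRM_UT_DRIVER))
            etnaviv_buffer_dump(gpu, buffer, 0, 0x50);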