Lines matching refs:gpu in etnaviv_gpu.c (drivers/gpu/drm/etnaviv)
31 { .name = "etnaviv-gpu,2d" },
39 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
41 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
45 *value = gpu->identity.model;
49 *value = gpu->identity.revision;
53 *value = gpu->identity.features;
57 *value = gpu->identity.minor_features0;
61 *value = gpu->identity.minor_features1;
65 *value = gpu->identity.minor_features2;
69 *value = gpu->identity.minor_features3;
73 *value = gpu->identity.minor_features4;
77 *value = gpu->identity.minor_features5;
81 *value = gpu->identity.minor_features6;
85 *value = gpu->identity.minor_features7;
89 *value = gpu->identity.minor_features8;
93 *value = gpu->identity.minor_features9;
97 *value = gpu->identity.minor_features10;
101 *value = gpu->identity.minor_features11;
105 *value = gpu->identity.stream_count;
109 *value = gpu->identity.register_max;
113 *value = gpu->identity.thread_count;
117 *value = gpu->identity.vertex_cache_size;
121 *value = gpu->identity.shader_core_count;
125 *value = gpu->identity.pixel_pipes;
129 *value = gpu->identity.vertex_output_buffer_size;
133 *value = gpu->identity.buffer_size;
137 *value = gpu->identity.instruction_count;
141 *value = gpu->identity.num_constants;
145 *value = gpu->identity.varyings_count;
156 *value = gpu->identity.product_id;
160 *value = gpu->identity.customer_id;
164 *value = gpu->identity.eco_id;
168 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
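
The block above is the body of etnaviv_gpu_get_param(): a flat switch that copies exactly one gpu->identity field per userspace query, falling through to a debug log and an error for unknown parameters. No register reads happen on this path; the identity snapshot taken at probe time is the single source of truth. A minimal userspace-style sketch of the same pattern, with illustrative parameter IDs rather than the real ETNA_GPU_PARAM_* UAPI values:

#include <errno.h>
#include <stdint.h>

struct identity {
	uint32_t model, revision, stream_count;
};

/* Each parameter maps onto exactly one cached identity field. */
static int get_param(const struct identity *id, uint32_t param,
		     uint64_t *value)
{
	switch (param) {
	case 1: *value = id->model;        break;
	case 2: *value = id->revision;     break;
	case 3: *value = id->stream_count; break;
	default:
		return -EINVAL;	/* the driver logs "invalid param" here */
	}
	return 0;
}
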
176 #define etnaviv_is_model_rev(gpu, mod, rev) \
177 ((gpu)->identity.model == chipModel_##mod && \
178 (gpu)->identity.revision == rev)
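
This helper macro is the workhorse of the errata checks throughout the rest of the file: ## token-pasting lets call sites name the model symbolically. A self-contained illustration (the numeric chipModel_GC2000 value is inferred from the naming convention, so treat it as an assumption):

#include <stdbool.h>
#include <stdint.h>

struct etnaviv_gpu_id {
	struct { uint32_t model, revision; } identity;
};

#define chipModel_GC2000 0x2000

/* Same shape as the kernel macro: ## pastes the short name onto the
 * chipModel_ prefix, so callers write GC2000 instead of 0x2000. */
#define is_model_rev(gpu, mod, rev) \
	((gpu)->identity.model == chipModel_##mod && \
	 (gpu)->identity.revision == (rev))

static bool needs_bus_config_fixup(const struct etnaviv_gpu_id *g)
{
	return is_model_rev(g, GC2000, 0x5108); /* erratum seen above */
}
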
182 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
184 if (gpu->identity.minor_features0 &
189 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
190 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
191 specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
192 specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
194 gpu->identity.stream_count = etnaviv_field(specs[0],
196 gpu->identity.register_max = etnaviv_field(specs[0],
198 gpu->identity.thread_count = etnaviv_field(specs[0],
200 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
202 gpu->identity.shader_core_count = etnaviv_field(specs[0],
204 gpu->identity.pixel_pipes = etnaviv_field(specs[0],
206 gpu->identity.vertex_output_buffer_size =
210 gpu->identity.buffer_size = etnaviv_field(specs[1],
212 gpu->identity.instruction_count = etnaviv_field(specs[1],
214 gpu->identity.num_constants = etnaviv_field(specs[1],
217 gpu->identity.varyings_count = etnaviv_field(specs[2],
224 gpu->identity.stream_count = streams;
228 if (gpu->identity.stream_count == 0) {
229 if (gpu->identity.model >= 0x1000)
230 gpu->identity.stream_count = 4;
232 gpu->identity.stream_count = 1;
236 if (gpu->identity.register_max)
237 gpu->identity.register_max = 1 << gpu->identity.register_max;
238 else if (gpu->identity.model == chipModel_GC400)
239 gpu->identity.register_max = 32;
241 gpu->identity.register_max = 64;
244 if (gpu->identity.thread_count)
245 gpu->identity.thread_count = 1 << gpu->identity.thread_count;
246 else if (gpu->identity.model == chipModel_GC400)
247 gpu->identity.thread_count = 64;
248 else if (gpu->identity.model == chipModel_GC500 ||
249 gpu->identity.model == chipModel_GC530)
250 gpu->identity.thread_count = 128;
252 gpu->identity.thread_count = 256;
254 if (gpu->identity.vertex_cache_size == 0)
255 gpu->identity.vertex_cache_size = 8;
257 if (gpu->identity.shader_core_count == 0) {
258 if (gpu->identity.model >= 0x1000)
259 gpu->identity.shader_core_count = 2;
261 gpu->identity.shader_core_count = 1;
264 if (gpu->identity.pixel_pipes == 0)
265 gpu->identity.pixel_pipes = 1;
268 if (gpu->identity.vertex_output_buffer_size) {
269 gpu->identity.vertex_output_buffer_size =
270 1 << gpu->identity.vertex_output_buffer_size;
271 } else if (gpu->identity.model == chipModel_GC400) {
272 if (gpu->identity.revision < 0x4000)
273 gpu->identity.vertex_output_buffer_size = 512;
274 else if (gpu->identity.revision < 0x4200)
275 gpu->identity.vertex_output_buffer_size = 256;
277 gpu->identity.vertex_output_buffer_size = 128;
279 gpu->identity.vertex_output_buffer_size = 512;
282 switch (gpu->identity.instruction_count) {
284 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
285 gpu->identity.model == chipModel_GC880)
286 gpu->identity.instruction_count = 512;
288 gpu->identity.instruction_count = 256;
292 gpu->identity.instruction_count = 1024;
296 gpu->identity.instruction_count = 2048;
300 gpu->identity.instruction_count = 256;
304 if (gpu->identity.num_constants == 0)
305 gpu->identity.num_constants = 168;
307 if (gpu->identity.varyings_count == 0) {
308 if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
309 gpu->identity.varyings_count = 12;
311 gpu->identity.varyings_count = 8;
318 if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
319 etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
320 etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
321 etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
322 etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
323 etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
324 etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
325 etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
326 etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
327 etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
328 etnaviv_is_model_rev(gpu, GC880, 0x5106))
329 gpu->identity.varyings_count -= 1;
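
etnaviv_hw_specs() unpacks the four VIVS_HI_CHIP_SPECS* words with etnaviv_field(), then patches up every field that reads back zero with a per-model default; several fields (register_max, thread_count, vertex_output_buffer_size) are stored as log2 and expanded with a shift. A compilable sketch of both halves; the GC400 default of 32 versus 64 matches the lines above, and the mask-and-shift helper is an equivalent rewrite of etnaviv_field():

#include <stdint.h>

static uint32_t field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;	/* etnaviv_field() equivalent */
}

#define chipModel_GC400 0x400

/* register_max is encoded as log2; zero means "not reported", in
 * which case the driver falls back to a per-model constant. */
static uint32_t decode_register_max(uint32_t raw, uint32_t model)
{
	if (raw)
		return 1u << raw;
	return model == chipModel_GC400 ? 32 : 64;
}
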
332 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
336 chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
340 gpu->identity.model = chipModel_GC500;
341 gpu->identity.revision = etnaviv_field(chipIdentity,
344 u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
346 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
347 gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
348 gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
354 if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
355 gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
356 gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
365 if ((gpu->identity.model & 0xff00) == 0x0400 &&
366 gpu->identity.model != chipModel_GC420) {
367 gpu->identity.model = gpu->identity.model & 0x0400;
371 if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
372 u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
379 gpu->identity.revision = 0x1051;
390 if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
391 gpu->identity.model = chipModel_GC3000;
392 gpu->identity.revision &= 0xffff;
395 if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
396 gpu->identity.eco_id = 1;
398 if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
399 gpu->identity.eco_id = 1;
402 dev_info(gpu->dev, "model: GC%x, revision: %x\n",
403 gpu->identity.model, gpu->identity.revision);
405 gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
410 if (etnaviv_fill_identity_from_hwdb(gpu))
413 gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
416 if (gpu->identity.model == chipModel_GC700)
417 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
420 if ((gpu->identity.model == chipModel_GC500 &&
421 gpu->identity.revision <= 2) ||
422 gpu->identity.model == chipModel_GC300)
423 gpu->identity.features |= chipFeatures_PIPE_2D;
425 if ((gpu->identity.model == chipModel_GC500 &&
426 gpu->identity.revision < 2) ||
427 (gpu->identity.model == chipModel_GC300 &&
428 gpu->identity.revision < 0x2000)) {
434 gpu->identity.minor_features0 = 0;
435 gpu->identity.minor_features1 = 0;
436 gpu->identity.minor_features2 = 0;
437 gpu->identity.minor_features3 = 0;
438 gpu->identity.minor_features4 = 0;
439 gpu->identity.minor_features5 = 0;
441 gpu->identity.minor_features0 =
442 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
444 if (gpu->identity.minor_features0 &
446 gpu->identity.minor_features1 =
447 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
448 gpu->identity.minor_features2 =
449 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
450 gpu->identity.minor_features3 =
451 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
452 gpu->identity.minor_features4 =
453 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
454 gpu->identity.minor_features5 =
455 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
459 if (gpu->identity.model == chipModel_GC600 ||
460 gpu->identity.model == chipModel_GC300)
461 gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
470 etnaviv_hw_specs(gpu);
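
etnaviv_hw_identify() applies a chain of identity fixups for cores that misreport themselves, visible in the matches above: a GC2000 at revision 0xffff5450 is rewritten to GC3000, and two date-stamped revisions get an ECO id the silicon does not advertise. A standalone sketch (the numeric chipModel_* values are inferred from the GC names, so treat them as assumptions):

#include <stdint.h>

struct chip_identity { uint32_t model, revision, eco_id; };

static void apply_identity_fixups(struct chip_identity *id,
				  uint32_t chip_date)
{
	/* A GC2000 reporting revision 0xffff5450 is really a GC3000. */
	if (id->model == 0x2000 && id->revision == 0xffff5450) {
		id->model = 0x3000;
		id->revision &= 0xffff;
	}
	/* Two dated revisions carry an ECO that is not self-reported. */
	if (id->model == 0x1000 && id->revision == 0x5037 &&
	    chip_date == 0x20120617)
		id->eco_id = 1;
	if (id->model == 0x320 && id->revision == 0x5303 &&
	    chip_date == 0x20140511)
		id->eco_id = 1;
}
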
473 static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
475 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
477 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
480 static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
482 if (gpu->identity.minor_features2 &
484 clk_set_rate(gpu->clk_core,
485 gpu->base_rate_core >> gpu->freq_scale);
486 clk_set_rate(gpu->clk_shader,
487 gpu->base_rate_shader >> gpu->freq_scale);
489 unsigned int fscale = 1 << (6 - gpu->freq_scale);
490 u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
494 etnaviv_gpu_load_clock(gpu, clock);
502 gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
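
etnaviv_gpu_update_clock() throttles along one of two paths: cores advertising dynamic frequency scaling get a genuine clk_set_rate(), older ones get the FSCALE divider written into VIVS_HI_CLOCK_CONTROL. In both, gpu->freq_scale acts as a right-shift exponent, which the cooling device further down in this listing drives. Both formulas appear verbatim above; restated standalone:

#include <stdint.h>

/* DFS path: halve the clock once per scale step. */
static uint64_t scaled_rate(uint64_t base_rate, unsigned int freq_scale)
{
	return base_rate >> freq_scale;
}

/* Legacy path: derive the FSCALE divider instead; scale 0 maps to
 * the maximum value 64 (full speed), each step halves it, so
 * freq_scale must stay in 0..6. */
static uint32_t legacy_fscale(unsigned int freq_scale)
{
	return 1u << (6 - freq_scale);
}
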
506 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
517 unsigned int fscale = 1 << (6 - gpu->freq_scale);
519 etnaviv_gpu_load_clock(gpu, control);
523 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
525 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
526 gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
531 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
539 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
543 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
546 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
550 dev_dbg(gpu->dev, "FE is not idle\n");
555 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
560 dev_dbg(gpu->dev, "GPU is not idle\n");
566 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
573 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
574 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
576 dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
585 etnaviv_gpu_update_clock(gpu);
587 gpu->state = ETNA_GPU_STATE_RESET;
588 gpu->exec_state = -1;
589 if (gpu->mmu_context)
590 etnaviv_iommu_context_put(gpu->mmu_context);
591 gpu->mmu_context = NULL;
596 static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
601 ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
605 if (gpu->identity.revision == 0x4301 ||
606 gpu->identity.revision == 0x4302)
609 gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);
611 pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);
614 if (gpu->identity.model >= chipModel_GC400 &&
615 gpu->identity.model != chipModel_GC420 &&
616 !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
623 if (gpu->identity.revision < 0x5000 &&
624 gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
625 !(gpu->identity.minor_features1 &
629 if (gpu->identity.revision < 0x5422)
633 if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
634 etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
635 etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
636 etnaviv_is_model_rev(gpu, GC2000, 0x6203))
640 if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
647 gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
650 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
652 gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
653 gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
657 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
658 gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
664 static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
670 WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);
673 etnaviv_iommu_restore(gpu, context);
676 prefetch = etnaviv_buffer_init(gpu);
677 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
678 &gpu->mmu_context->cmdbuf_mapping);
680 etnaviv_gpu_start_fe(gpu, address, prefetch);
682 gpu->state = ETNA_GPU_STATE_RUNNING;
685 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
693 if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
694 etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
699 if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
700 etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
705 if ((gpu->identity.revision > 0x5420) &&
706 (gpu->identity.features & chipFeatures_PIPE_3D))
709 pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
713 gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
716 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
718 WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
719 gpu->state == ETNA_GPU_STATE_RESET));
721 if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
722 etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
723 gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
726 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
728 if (gpu->identity.revision == 0x5007)
733 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
737 etnaviv_gpu_enable_mlcg(gpu);
743 gpu_write(gpu, VIVS_HI_AXI_CONFIG,
748 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
749 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
754 gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
757 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
758 u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
760 gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
764 etnaviv_gpu_setup_pulse_eater(gpu);
766 gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
768 gpu->state = ETNA_GPU_STATE_INITIALIZED;
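
The ETNA_GPU_STATE_* constants threaded through this listing form a small bring-up state machine: identify, reset, hw_init, then the first submit starts the FE idle-loop, and the IRQ handler can drop into a fault state. Reconstructed as an enum; the member ordering is an assumption, the transitions are the ones visible in the matches:

enum etna_gpu_state {
	ETNA_GPU_STATE_UNKNOWN,
	ETNA_GPU_STATE_IDENTIFIED,  /* etnaviv_hw_identify() done */
	ETNA_GPU_STATE_RESET,       /* etnaviv_hw_reset() done */
	ETNA_GPU_STATE_INITIALIZED, /* etnaviv_gpu_hw_init() done */
	ETNA_GPU_STATE_RUNNING,     /* FE idle-loop started on submit */
	ETNA_GPU_STATE_FAULT,       /* MMU/bus fault seen by the IRQ */
};
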
771 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
773 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
777 ret = pm_runtime_get_sync(gpu->dev);
779 dev_err(gpu->dev, "Failed to enable GPU power domain\n");
783 etnaviv_hw_identify(gpu);
785 if (gpu->identity.model == 0) {
786 dev_err(gpu->dev, "Unknown GPU model\n");
791 if (gpu->identity.nn_core_count > 0)
792 dev_warn(gpu->dev, "etnaviv has been instantiated on a NPU, "
796 if (gpu->identity.features & chipFeatures_PIPE_VG &&
797 gpu->identity.features & chipFeatures_FE20) {
798 dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
807 if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
808 (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
809 gpu->sec_mode = ETNA_SEC_KERNEL;
811 gpu->state = ETNA_GPU_STATE_IDENTIFIED;
813 ret = etnaviv_hw_reset(gpu);
815 dev_err(gpu->dev, "GPU reset failed\n");
819 ret = etnaviv_iommu_global_init(gpu);
828 if (dma_addressing_limited(gpu->dev))
832 ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
835 dev_err(gpu->dev, "could not create command buffer\n");
849 cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);
851 if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
852 (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
858 dev_info(gpu->dev,
860 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
865 spin_lock_init(&gpu->event_spinlock);
866 init_completion(&gpu->event_free);
867 bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
868 for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
869 complete(&gpu->event_free);
872 mutex_lock(&gpu->lock);
873 etnaviv_gpu_hw_init(gpu);
874 mutex_unlock(&gpu->lock);
876 pm_runtime_mark_last_busy(gpu->dev);
877 pm_runtime_put_autosuspend(gpu->dev);
882 pm_runtime_mark_last_busy(gpu->dev);
884 pm_runtime_put_autosuspend(gpu->dev);
895 static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
899 debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
900 debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
903 debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
904 debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
914 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
920 seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
922 ret = pm_runtime_get_sync(gpu->dev);
926 dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
927 dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
928 axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
929 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
931 verify_dma(gpu, &debug);
934 seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
935 seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
936 seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
937 seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
938 seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
942 gpu->identity.features);
944 gpu->identity.minor_features0);
946 gpu->identity.minor_features1);
948 gpu->identity.minor_features2);
950 gpu->identity.minor_features3);
952 gpu->identity.minor_features4);
954 gpu->identity.minor_features5);
956 gpu->identity.minor_features6);
958 gpu->identity.minor_features7);
960 gpu->identity.minor_features8);
962 gpu->identity.minor_features9);
964 gpu->identity.minor_features10);
966 gpu->identity.minor_features11);
970 gpu->identity.stream_count);
972 gpu->identity.register_max);
974 gpu->identity.thread_count);
976 gpu->identity.vertex_cache_size);
978 gpu->identity.shader_core_count);
980 gpu->identity.nn_core_count);
982 gpu->identity.pixel_pipes);
984 gpu->identity.vertex_output_buffer_size);
986 gpu->identity.buffer_size);
988 gpu->identity.instruction_count);
990 gpu->identity.num_constants);
992 gpu->identity.varyings_count);
996 idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
1038 if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
1039 u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
1040 u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
1041 u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
1069 pm_runtime_mark_last_busy(gpu->dev);
1071 pm_runtime_put_autosuspend(gpu->dev);
1079 struct etnaviv_gpu *gpu;
1097 return dev_name(f->gpu->dev);
1104 return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
1121 static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1129 lockdep_assert_held(&gpu->lock);
1135 f->gpu = gpu;
1137 dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1138 gpu->fence_context, ++gpu->next_fence);
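
The (s32)(f->gpu->completed_fence - f->base.seqno) >= 0 match above, and the fence_after() call in the IRQ handler further down, are the classic wraparound-safe sequence comparison: subtract in unsigned arithmetic, reinterpret as signed. Standalone, with a worked case:

#include <stdbool.h>
#include <stdint.h>

static bool fence_completed(uint32_t completed, uint32_t seqno)
{
	return (int32_t)(completed - seqno) >= 0;
}

static bool fence_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/*
 * fence_completed(2, 0xfffffffe) is true: the counter wrapped, and
 * the signed difference (4) still says "completed is not behind".
 * A plain >= comparison would break at the 32-bit boundary.
 */
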
1153 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
1163 remaining = wait_for_completion_timeout(&gpu->event_free, timeout);
1166 dev_err(gpu->dev, "wait_for_completion_timeout failed");
1175 spin_lock(&gpu->event_spinlock);
1178 int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
1181 memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1182 set_bit(event, gpu->event_bitmap);
1185 spin_unlock(&gpu->event_spinlock);
1188 ret = pm_runtime_resume_and_get(gpu->dev);
1198 pm_runtime_put_autosuspend(gpu->dev);
1201 complete(&gpu->event_free);
1206 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1208 if (!test_bit(event, gpu->event_bitmap)) {
1209 dev_warn(gpu->dev, "event %u is already marked as free",
1212 clear_bit(event, gpu->event_bitmap);
1213 complete(&gpu->event_free);
1216 pm_runtime_put_autosuspend(gpu->dev);
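
event_alloc() and event_free() above, together with the init lines earlier (init_completion, bitmap_zero, one complete() per slot), implement a counting semaphore over a fixed pool of hardware event slots: the completion counts free slots, the bitmap records which slot you got. A userspace analog using POSIX semaphores; the spinlock that guards the bitmap in the driver is omitted here for brevity:

#include <semaphore.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define NR_EVENTS 30	/* stands in for ETNA_NR_EVENTS */

struct event_pool {
	sem_t free_slots;	/* plays the role of gpu->event_free */
	uint32_t bitmap;	/* set bit = slot in use */
};

static void event_pool_init(struct event_pool *p)
{
	sem_init(&p->free_slots, 0, NR_EVENTS); /* N complete() calls */
	p->bitmap = 0;
}

static int event_alloc(struct event_pool *p)
{
	int slot;

	sem_wait(&p->free_slots);	/* blocks until a slot is free */
	slot = ffs(~p->bitmap) - 1;	/* find_first_zero_bit() */
	p->bitmap |= 1u << slot;
	return slot;
}

static void event_free(struct event_pool *p, int slot)
{
	p->bitmap &= ~(1u << slot);
	sem_post(&p->free_slots);
}
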
1222 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1234 fence = xa_load(&gpu->user_fences, id);
1266 * Although the retirement happens under the gpu lock, we don't want to hold
1269 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1281 ret = wait_event_interruptible_timeout(gpu->fence_event,
1292 static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
1302 etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
1306 static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
1312 val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
1314 gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
1317 val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1319 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1321 sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
1324 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
1331 sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
1340 val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1342 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1345 val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
1347 gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
1351 /* add bo's to gpu's ring, and kick gpu: */
1354 struct etnaviv_gpu *gpu = submit->gpu;
1361 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
1363 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
1369 ret = event_alloc(gpu, nr_events, event);
1372 pm_runtime_put_noidle(gpu->dev);
1376 mutex_lock(&gpu->lock);
1378 gpu_fence = etnaviv_gpu_fence_alloc(gpu);
1381 event_free(gpu, event[i]);
1386 if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
1387 etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
1391 submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
1394 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1396 gpu->event[event[1]].submit = submit;
1397 etnaviv_sync_point_queue(gpu, event[1]);
1400 gpu->event[event[0]].fence = gpu_fence;
1402 etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
1406 gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1408 gpu->event[event[2]].submit = submit;
1409 etnaviv_sync_point_queue(gpu, event[2]);
1413 mutex_unlock(&gpu->lock);
1420 struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1422 struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
1423 u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1425 event->sync_point(gpu, event);
1427 event_free(gpu, gpu->sync_point_event);
1430 etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1435 struct etnaviv_gpu *gpu = submit->gpu;
1440 dev_err(gpu->dev, "recover hung GPU!\n");
1450 dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
1455 if (pm_runtime_get_sync(gpu->dev) < 0)
1458 mutex_lock(&gpu->lock);
1460 etnaviv_hw_reset(gpu);
1463 spin_lock(&gpu->event_spinlock);
1464 for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
1465 event_free(gpu, i);
1466 spin_unlock(&gpu->event_spinlock);
1468 etnaviv_gpu_hw_init(gpu);
1470 mutex_unlock(&gpu->lock);
1471 pm_runtime_mark_last_busy(gpu->dev);
1473 pm_runtime_put_autosuspend(gpu->dev);
1476 static void dump_mmu_fault(struct etnaviv_gpu *gpu)
1490 if (gpu->sec_mode == ETNA_SEC_NONE)
1495 status = gpu_read(gpu, status_reg);
1496 dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
1510 if (gpu->sec_mode == ETNA_SEC_NONE)
1515 dev_err_ratelimited(gpu->dev,
1517 i, reason, gpu_read(gpu, address_reg));
1523 struct etnaviv_gpu *gpu = data;
1526 u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1531 pm_runtime_mark_last_busy(gpu->dev);
1533 dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1536 dev_err(gpu->dev, "AXI bus error\n");
1541 dump_mmu_fault(gpu);
1542 gpu->state = ETNA_GPU_STATE_FAULT;
1543 drm_sched_fault(&gpu->sched);
1554 dev_dbg(gpu->dev, "event %u\n", event);
1556 if (gpu->event[event].sync_point) {
1557 gpu->sync_point_event = event;
1558 queue_work(gpu->wq, &gpu->sync_point_work);
1561 fence = gpu->event[event].fence;
1565 gpu->event[event].fence = NULL;
1576 if (fence_after(fence->seqno, gpu->completed_fence))
1577 gpu->completed_fence = fence->seqno;
1580 event_free(gpu, event);
1589 static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1593 ret = clk_prepare_enable(gpu->clk_reg);
1597 ret = clk_prepare_enable(gpu->clk_bus);
1601 ret = clk_prepare_enable(gpu->clk_core);
1605 ret = clk_prepare_enable(gpu->clk_shader);
1612 clk_disable_unprepare(gpu->clk_core);
1614 clk_disable_unprepare(gpu->clk_bus);
1616 clk_disable_unprepare(gpu->clk_reg);
1621 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1623 clk_disable_unprepare(gpu->clk_shader);
1624 clk_disable_unprepare(gpu->clk_core);
1625 clk_disable_unprepare(gpu->clk_bus);
1626 clk_disable_unprepare(gpu->clk_reg);
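
etnaviv_gpu_clk_enable() brings clocks up in the order reg, bus, core, shader, and its error labels (visible at 1612 to 1616) unwind in exact reverse so a partial failure leaves nothing running; etnaviv_gpu_clk_disable() mirrors the same reverse order. The driver open-codes this as a goto ladder; the shape, generalized:

#include <errno.h>

typedef int (*enable_fn)(void *ctx);
typedef void (*disable_fn)(void *ctx);

/* Enable a chain of resources; on failure, unwind only what was
 * already enabled, in reverse order. */
static int enable_chain(void *ctx, const enable_fn *on,
			const disable_fn *off, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = on[i](ctx);
		if (ret) {
			while (i--)
				off[i](ctx);
			return ret;
		}
	}
	return 0;
}
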
1631 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1636 u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1638 if ((idle & gpu->idle_mask) == gpu->idle_mask)
1642 dev_warn(gpu->dev,
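
etnaviv_gpu_wait_idle() polls VIVS_HI_IDLE_STATE until every bit of gpu->idle_mask reads back set, and warns on timeout. A sketch with a poll budget standing in for the driver's jiffies deadline; read_idle models gpu_read() of the idle register:

#include <errno.h>
#include <stdint.h>

static int wait_idle(uint32_t (*read_idle)(void *), void *ctx,
		     uint32_t idle_mask, unsigned long max_polls)
{
	do {
		uint32_t idle = read_idle(ctx);

		if ((idle & idle_mask) == idle_mask)
			return 0;
	} while (max_polls--);

	return -ETIMEDOUT;	/* the driver dev_warn()s here */
}
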
1652 static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1654 if (gpu->state == ETNA_GPU_STATE_RUNNING) {
1656 mutex_lock(&gpu->lock);
1657 etnaviv_buffer_end(gpu);
1658 mutex_unlock(&gpu->lock);
1665 etnaviv_gpu_wait_idle(gpu, 100);
1667 gpu->state = ETNA_GPU_STATE_INITIALIZED;
1670 gpu->exec_state = -1;
1673 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1677 ret = mutex_lock_killable(&gpu->lock);
1681 etnaviv_gpu_update_clock(gpu);
1682 etnaviv_gpu_hw_init(gpu);
1684 mutex_unlock(&gpu->lock);
1702 struct etnaviv_gpu *gpu = cdev->devdata;
1704 *state = gpu->freq_scale;
1713 struct etnaviv_gpu *gpu = cdev->devdata;
1715 mutex_lock(&gpu->lock);
1716 gpu->freq_scale = state;
1717 if (!pm_runtime_suspended(gpu->dev))
1718 etnaviv_gpu_update_clock(gpu);
1719 mutex_unlock(&gpu->lock);
1735 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1739 gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1740 (char *)dev_name(dev), gpu, &cooling_ops);
1741 if (IS_ERR(gpu->cooling))
1742 return PTR_ERR(gpu->cooling);
1745 gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1746 if (!gpu->wq) {
1751 ret = etnaviv_sched_init(gpu);
1756 ret = etnaviv_gpu_clk_enable(gpu);
1761 gpu->drm = drm;
1762 gpu->fence_context = dma_fence_context_alloc(1);
1763 xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
1764 spin_lock_init(&gpu->fence_spinlock);
1766 INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1767 init_waitqueue_head(&gpu->fence_event);
1769 priv->gpu[priv->num_gpus++] = gpu;
1774 etnaviv_sched_fini(gpu);
1777 destroy_workqueue(gpu->wq);
1781 thermal_cooling_device_unregister(gpu->cooling);
1789 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1791 DBG("%s", dev_name(gpu->dev));
1793 destroy_workqueue(gpu->wq);
1795 etnaviv_sched_fini(gpu);
1798 pm_runtime_get_sync(gpu->dev);
1799 pm_runtime_put_sync_suspend(gpu->dev);
1801 etnaviv_gpu_hw_suspend(gpu);
1802 etnaviv_gpu_clk_disable(gpu);
1805 if (gpu->mmu_context)
1806 etnaviv_iommu_context_put(gpu->mmu_context);
1808 etnaviv_cmdbuf_free(&gpu->buffer);
1809 etnaviv_iommu_global_fini(gpu);
1811 gpu->drm = NULL;
1812 xa_destroy(&gpu->user_fences);
1815 thermal_cooling_device_unregister(gpu->cooling);
1816 gpu->cooling = NULL;
1835 struct etnaviv_gpu *gpu;
1838 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1839 if (!gpu)
1842 gpu->dev = &pdev->dev;
1843 mutex_init(&gpu->lock);
1844 mutex_init(&gpu->sched_lock);
1847 gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
1848 if (IS_ERR(gpu->mmio))
1849 return PTR_ERR(gpu->mmio);
1852 gpu->irq = platform_get_irq(pdev, 0);
1853 if (gpu->irq < 0)
1854 return gpu->irq;
1856 err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1857 dev_name(gpu->dev), gpu);
1859 dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1864 gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
1865 DBG("clk_reg: %p", gpu->clk_reg);
1866 if (IS_ERR(gpu->clk_reg))
1867 return PTR_ERR(gpu->clk_reg);
1869 gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
1870 DBG("clk_bus: %p", gpu->clk_bus);
1871 if (IS_ERR(gpu->clk_bus))
1872 return PTR_ERR(gpu->clk_bus);
1874 gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1875 DBG("clk_core: %p", gpu->clk_core);
1876 if (IS_ERR(gpu->clk_core))
1877 return PTR_ERR(gpu->clk_core);
1878 gpu->base_rate_core = clk_get_rate(gpu->clk_core);
1880 gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
1881 DBG("clk_shader: %p", gpu->clk_shader);
1882 if (IS_ERR(gpu->clk_shader))
1883 return PTR_ERR(gpu->clk_shader);
1884 gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
1887 dev_set_drvdata(dev, gpu);
1894 pm_runtime_use_autosuspend(gpu->dev);
1895 pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1896 pm_runtime_enable(gpu->dev);
1916 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1920 if (atomic_read(&gpu->sched.hw_rq_count))
1924 mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
1926 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1933 etnaviv_gpu_hw_suspend(gpu);
1935 gpu->state = ETNA_GPU_STATE_IDENTIFIED;
1937 return etnaviv_gpu_clk_disable(gpu);
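
The runtime-suspend gate above refuses to power down while the scheduler still has queued jobs, then checks hardware idleness with the FE bit (and, per the truncated match at 1924, at least one more) masked out, since an initialized front end legitimately spins in its wait-loop. A sketch; the bit position is illustrative, the driver uses the VIVS_HI_IDLE_STATE_FE constant:

#include <stdbool.h>
#include <stdint.h>

#define IDLE_STATE_FE (1u << 0)	/* illustrative bit position */

static bool can_runtime_suspend(unsigned int queued_jobs,
				uint32_t idle_reg, uint32_t idle_mask)
{
	uint32_t mask = idle_mask & ~IDLE_STATE_FE;

	if (queued_jobs)
		return false;	/* the hw_rq_count check above */
	return (idle_reg & mask) == mask;
}
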
1942 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1945 ret = etnaviv_gpu_clk_enable(gpu);
1950 if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
1951 ret = etnaviv_gpu_hw_resume(gpu);
1953 etnaviv_gpu_clk_disable(gpu);
1967 .name = "etnaviv-gpu",