Lines matching refs:gpu (etnaviv DRM GPU driver, etnaviv_gpu.c)

35 	{ .name = "etnaviv-gpu,2d" },
43 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
45 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
49 *value = gpu->identity.model;
53 *value = gpu->identity.revision;
57 *value = gpu->identity.features;
61 *value = gpu->identity.minor_features0;
65 *value = gpu->identity.minor_features1;
69 *value = gpu->identity.minor_features2;
73 *value = gpu->identity.minor_features3;
77 *value = gpu->identity.minor_features4;
81 *value = gpu->identity.minor_features5;
85 *value = gpu->identity.minor_features6;
89 *value = gpu->identity.minor_features7;
93 *value = gpu->identity.minor_features8;
97 *value = gpu->identity.minor_features9;
101 *value = gpu->identity.minor_features10;
105 *value = gpu->identity.minor_features11;
109 *value = gpu->identity.stream_count;
113 *value = gpu->identity.register_max;
117 *value = gpu->identity.thread_count;
121 *value = gpu->identity.vertex_cache_size;
125 *value = gpu->identity.shader_core_count;
129 *value = gpu->identity.pixel_pipes;
133 *value = gpu->identity.vertex_output_buffer_size;
137 *value = gpu->identity.buffer_size;
141 *value = gpu->identity.instruction_count;
145 *value = gpu->identity.num_constants;
149 *value = gpu->identity.varyings_count;
160 DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
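
The identity fields returned above back the driver's get-param query. Below is a minimal userspace sketch of reading two of those values through libdrm's drmIoctl(); the render-node path, include paths and pipe index 0 are assumptions for illustration, not taken from the listing.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>            /* drmIoctl() from libdrm */
#include <drm/etnaviv_drm.h>    /* struct drm_etnaviv_param, ETNAVIV_PARAM_* */

static int get_param(int fd, uint32_t param, uint64_t *value)
{
        /* pipe 0 assumed; the kernel fills in .value on success */
        struct drm_etnaviv_param req = { .pipe = 0, .param = param };
        int ret = drmIoctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req);

        if (!ret)
                *value = req.value;
        return ret;
}

int main(void)
{
        int fd = open("/dev/dri/renderD128", O_RDWR);   /* assumed node */
        uint64_t model = 0, rev = 0;

        if (fd < 0)
                return 1;
        if (!get_param(fd, ETNAVIV_PARAM_GPU_MODEL, &model) &&
            !get_param(fd, ETNAVIV_PARAM_GPU_REVISION, &rev))
                printf("GC%llx, revision 0x%llx\n",
                       (unsigned long long)model, (unsigned long long)rev);
        close(fd);
        return 0;
}
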
168 #define etnaviv_is_model_rev(gpu, mod, rev) \
169 ((gpu)->identity.model == chipModel_##mod && \
170 (gpu)->identity.revision == rev)
174 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
176 if (gpu->identity.minor_features0 &
181 specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
182 specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
183 specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
184 specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
186 gpu->identity.stream_count = etnaviv_field(specs[0],
188 gpu->identity.register_max = etnaviv_field(specs[0],
190 gpu->identity.thread_count = etnaviv_field(specs[0],
192 gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
194 gpu->identity.shader_core_count = etnaviv_field(specs[0],
196 gpu->identity.pixel_pipes = etnaviv_field(specs[0],
198 gpu->identity.vertex_output_buffer_size =
202 gpu->identity.buffer_size = etnaviv_field(specs[1],
204 gpu->identity.instruction_count = etnaviv_field(specs[1],
206 gpu->identity.num_constants = etnaviv_field(specs[1],
209 gpu->identity.varyings_count = etnaviv_field(specs[2],
216 gpu->identity.stream_count = streams;
220 if (gpu->identity.stream_count == 0) {
221 if (gpu->identity.model >= 0x1000)
222 gpu->identity.stream_count = 4;
224 gpu->identity.stream_count = 1;
228 if (gpu->identity.register_max)
229 gpu->identity.register_max = 1 << gpu->identity.register_max;
230 else if (gpu->identity.model == chipModel_GC400)
231 gpu->identity.register_max = 32;
233 gpu->identity.register_max = 64;
236 if (gpu->identity.thread_count)
237 gpu->identity.thread_count = 1 << gpu->identity.thread_count;
238 else if (gpu->identity.model == chipModel_GC400)
239 gpu->identity.thread_count = 64;
240 else if (gpu->identity.model == chipModel_GC500 ||
241 gpu->identity.model == chipModel_GC530)
242 gpu->identity.thread_count = 128;
244 gpu->identity.thread_count = 256;
246 if (gpu->identity.vertex_cache_size == 0)
247 gpu->identity.vertex_cache_size = 8;
249 if (gpu->identity.shader_core_count == 0) {
250 if (gpu->identity.model >= 0x1000)
251 gpu->identity.shader_core_count = 2;
253 gpu->identity.shader_core_count = 1;
256 if (gpu->identity.pixel_pipes == 0)
257 gpu->identity.pixel_pipes = 1;
260 if (gpu->identity.vertex_output_buffer_size) {
261 gpu->identity.vertex_output_buffer_size =
262 1 << gpu->identity.vertex_output_buffer_size;
263 } else if (gpu->identity.model == chipModel_GC400) {
264 if (gpu->identity.revision < 0x4000)
265 gpu->identity.vertex_output_buffer_size = 512;
266 else if (gpu->identity.revision < 0x4200)
267 gpu->identity.vertex_output_buffer_size = 256;
269 gpu->identity.vertex_output_buffer_size = 128;
271 gpu->identity.vertex_output_buffer_size = 512;
274 switch (gpu->identity.instruction_count) {
276 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
277 gpu->identity.model == chipModel_GC880)
278 gpu->identity.instruction_count = 512;
280 gpu->identity.instruction_count = 256;
284 gpu->identity.instruction_count = 1024;
288 gpu->identity.instruction_count = 2048;
292 gpu->identity.instruction_count = 256;
296 if (gpu->identity.num_constants == 0)
297 gpu->identity.num_constants = 168;
299 if (gpu->identity.varyings_count == 0) {
300 if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
301 gpu->identity.varyings_count = 12;
303 gpu->identity.varyings_count = 8;
310 if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
311 etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
312 etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
313 etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
314 etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
315 etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
316 etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
317 etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
318 etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
319 etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
320 etnaviv_is_model_rev(gpu, GC880, 0x5106))
321 gpu->identity.varyings_count -= 1;
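
A recurring detail in etnaviv_hw_specs() above: several CHIP_SPECS fields (register_max, thread_count, vertex_output_buffer_size) are stored as log2 values, and a raw value of 0 means "fall back to a per-model default". A tiny sketch of that decode rule, with a hypothetical helper name:

/* Hypothetical helper illustrating the decode rule used above:
 * a non-zero raw field is a log2 value, zero selects a default. */
static unsigned int decode_pow2_or_default(unsigned int raw, unsigned int dflt)
{
        return raw ? 1u << raw : dflt;
}

/* e.g. register_max: 0 -> 64 (32 on GC400); thread_count: 0 -> 256
 * (64 on GC400, 128 on GC500/GC530), matching the fallbacks above. */
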
324 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
328 chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
332 gpu->identity.model = chipModel_GC500;
333 gpu->identity.revision = etnaviv_field(chipIdentity,
336 u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
338 gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
339 gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
340 gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID);
346 if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
347 gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
348 gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
357 if ((gpu->identity.model & 0xff00) == 0x0400 &&
358 gpu->identity.model != chipModel_GC420) {
359 gpu->identity.model = gpu->identity.model & 0x0400;
363 if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
364 u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
371 gpu->identity.revision = 0x1051;
382 if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
383 gpu->identity.model = chipModel_GC3000;
384 gpu->identity.revision &= 0xffff;
387 if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
388 gpu->identity.eco_id = 1;
390 if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
391 gpu->identity.eco_id = 1;
394 dev_info(gpu->dev, "model: GC%x, revision: %x\n",
395 gpu->identity.model, gpu->identity.revision);
397 gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
402 if (etnaviv_fill_identity_from_hwdb(gpu))
405 gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
408 if (gpu->identity.model == chipModel_GC700)
409 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
412 if ((gpu->identity.model == chipModel_GC500 &&
413 gpu->identity.revision <= 2) ||
414 gpu->identity.model == chipModel_GC300)
415 gpu->identity.features |= chipFeatures_PIPE_2D;
417 if ((gpu->identity.model == chipModel_GC500 &&
418 gpu->identity.revision < 2) ||
419 (gpu->identity.model == chipModel_GC300 &&
420 gpu->identity.revision < 0x2000)) {
426 gpu->identity.minor_features0 = 0;
427 gpu->identity.minor_features1 = 0;
428 gpu->identity.minor_features2 = 0;
429 gpu->identity.minor_features3 = 0;
430 gpu->identity.minor_features4 = 0;
431 gpu->identity.minor_features5 = 0;
433 gpu->identity.minor_features0 =
434 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
436 if (gpu->identity.minor_features0 &
438 gpu->identity.minor_features1 =
439 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
440 gpu->identity.minor_features2 =
441 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
442 gpu->identity.minor_features3 =
443 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
444 gpu->identity.minor_features4 =
445 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
446 gpu->identity.minor_features5 =
447 gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
451 if (gpu->identity.model == chipModel_GC600 ||
452 gpu->identity.model == chipModel_GC300)
453 gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
462 etnaviv_hw_specs(gpu);
465 static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
467 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
469 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
472 static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
474 if (gpu->identity.minor_features2 &
476 clk_set_rate(gpu->clk_core,
477 gpu->base_rate_core >> gpu->freq_scale);
478 clk_set_rate(gpu->clk_shader,
479 gpu->base_rate_shader >> gpu->freq_scale);
481 unsigned int fscale = 1 << (6 - gpu->freq_scale);
482 u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
486 etnaviv_gpu_load_clock(gpu, clock);
490 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
501 unsigned int fscale = 1 << (6 - gpu->freq_scale);
503 etnaviv_gpu_load_clock(gpu, control);
507 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
509 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
510 gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
515 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
523 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
527 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
530 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
534 dev_dbg(gpu->dev, "FE is not idle\n");
539 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
544 dev_dbg(gpu->dev, "GPU is not idle\n");
550 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
557 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
558 control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
560 dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
569 etnaviv_gpu_update_clock(gpu);
571 gpu->fe_running = false;
572 gpu->exec_state = -1;
573 if (gpu->mmu_context)
574 etnaviv_iommu_context_put(gpu->mmu_context);
575 gpu->mmu_context = NULL;
580 static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
585 ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
589 if (gpu->identity.revision == 0x4301 ||
590 gpu->identity.revision == 0x4302)
593 gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
595 pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
598 if (gpu->identity.model >= chipModel_GC400 &&
599 gpu->identity.model != chipModel_GC420 &&
600 !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
607 if (gpu->identity.revision < 0x5000 &&
608 gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
609 !(gpu->identity.minor_features1 &
613 if (gpu->identity.revision < 0x5422)
617 if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
618 etnaviv_is_model_rev(gpu, GC2000, 0x5108))
624 gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
627 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
629 gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS, address);
630 gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
634 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
635 gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
640 gpu->fe_running = true;
643 static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
650 etnaviv_iommu_restore(gpu, context);
653 prefetch = etnaviv_buffer_init(gpu);
654 address = etnaviv_cmdbuf_get_va(&gpu->buffer,
655 &gpu->mmu_context->cmdbuf_mapping);
657 etnaviv_gpu_start_fe(gpu, address, prefetch);
660 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
668 if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
669 etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
674 if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
675 etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
680 if ((gpu->identity.revision > 0x5420) &&
681 (gpu->identity.features & chipFeatures_PIPE_3D))
684 pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
688 gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
691 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
693 if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
694 etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
695 gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
698 mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
700 if (gpu->identity.revision == 0x5007)
705 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
709 etnaviv_gpu_enable_mlcg(gpu);
715 gpu_write(gpu, VIVS_HI_AXI_CONFIG,
720 if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
721 u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
726 gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
729 if (gpu->sec_mode == ETNA_SEC_KERNEL) {
730 u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
732 gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
736 etnaviv_gpu_setup_pulse_eater(gpu);
738 gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
741 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
743 struct etnaviv_drm_private *priv = gpu->drm->dev_private;
746 ret = pm_runtime_get_sync(gpu->dev);
748 dev_err(gpu->dev, "Failed to enable GPU power domain\n");
752 etnaviv_hw_identify(gpu);
754 if (gpu->identity.model == 0) {
755 dev_err(gpu->dev, "Unknown GPU model\n");
761 if (gpu->identity.features & chipFeatures_PIPE_VG &&
762 gpu->identity.features & chipFeatures_FE20) {
763 dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
772 if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
773 (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
774 gpu->sec_mode = ETNA_SEC_KERNEL;
776 ret = etnaviv_hw_reset(gpu);
778 dev_err(gpu->dev, "GPU reset failed\n");
782 ret = etnaviv_iommu_global_init(gpu);
795 if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
796 (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
797 u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
803 dev_info(gpu->dev, "Need to move linear window on MC1.0, disabling TS\n");
805 gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
813 if (dma_addressing_limited(gpu->dev))
817 ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
820 dev_err(gpu->dev, "could not create command buffer\n");
825 spin_lock_init(&gpu->event_spinlock);
826 init_completion(&gpu->event_free);
827 bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
828 for (i = 0; i < ARRAY_SIZE(gpu->event); i++)
829 complete(&gpu->event_free);
832 mutex_lock(&gpu->lock);
833 etnaviv_gpu_hw_init(gpu);
834 mutex_unlock(&gpu->lock);
836 pm_runtime_mark_last_busy(gpu->dev);
837 pm_runtime_put_autosuspend(gpu->dev);
839 gpu->initialized = true;
844 pm_runtime_mark_last_busy(gpu->dev);
846 pm_runtime_put_autosuspend(gpu->dev);
857 static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
861 debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
862 debug->state[0] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
865 debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
866 debug->state[1] = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
876 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
882 seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
884 ret = pm_runtime_get_sync(gpu->dev);
888 dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
889 dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
890 axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
891 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
893 verify_dma(gpu, &debug);
896 seq_printf(m, "\t model: 0x%x\n", gpu->identity.model);
897 seq_printf(m, "\t revision: 0x%x\n", gpu->identity.revision);
898 seq_printf(m, "\t product_id: 0x%x\n", gpu->identity.product_id);
899 seq_printf(m, "\t customer_id: 0x%x\n", gpu->identity.customer_id);
900 seq_printf(m, "\t eco_id: 0x%x\n", gpu->identity.eco_id);
904 gpu->identity.features);
906 gpu->identity.minor_features0);
908 gpu->identity.minor_features1);
910 gpu->identity.minor_features2);
912 gpu->identity.minor_features3);
914 gpu->identity.minor_features4);
916 gpu->identity.minor_features5);
918 gpu->identity.minor_features6);
920 gpu->identity.minor_features7);
922 gpu->identity.minor_features8);
924 gpu->identity.minor_features9);
926 gpu->identity.minor_features10);
928 gpu->identity.minor_features11);
932 gpu->identity.stream_count);
934 gpu->identity.register_max);
936 gpu->identity.thread_count);
938 gpu->identity.vertex_cache_size);
940 gpu->identity.shader_core_count);
942 gpu->identity.pixel_pipes);
944 gpu->identity.vertex_output_buffer_size);
946 gpu->identity.buffer_size);
948 gpu->identity.instruction_count);
950 gpu->identity.num_constants);
952 gpu->identity.varyings_count);
956 idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
998 if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
999 u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
1000 u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
1001 u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
1029 pm_runtime_mark_last_busy(gpu->dev);
1031 pm_runtime_put_autosuspend(gpu->dev);
1037 void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
1041 dev_err(gpu->dev, "recover hung GPU!\n");
1043 if (pm_runtime_get_sync(gpu->dev) < 0)
1046 mutex_lock(&gpu->lock);
1048 etnaviv_hw_reset(gpu);
1051 spin_lock(&gpu->event_spinlock);
1052 for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
1053 complete(&gpu->event_free);
1054 bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
1055 spin_unlock(&gpu->event_spinlock);
1057 etnaviv_gpu_hw_init(gpu);
1059 mutex_unlock(&gpu->lock);
1060 pm_runtime_mark_last_busy(gpu->dev);
1062 pm_runtime_put_autosuspend(gpu->dev);
1067 struct etnaviv_gpu *gpu;
1085 return dev_name(f->gpu->dev);
1092 return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
1109 static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
1117 lockdep_assert_held(&gpu->lock);
1123 f->gpu = gpu;
1125 dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
1126 gpu->fence_context, ++gpu->next_fence);
1141 static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
1150 ret = wait_for_completion_timeout(&gpu->event_free, timeout);
1153 dev_err(gpu->dev, "wait_for_completion_timeout failed");
1161 spin_lock(&gpu->event_spinlock);
1164 int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
1167 memset(&gpu->event[event], 0, sizeof(struct etnaviv_event));
1168 set_bit(event, gpu->event_bitmap);
1171 spin_unlock(&gpu->event_spinlock);
1177 complete(&gpu->event_free);
1182 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
1184 if (!test_bit(event, gpu->event_bitmap)) {
1185 dev_warn(gpu->dev, "event %u is already marked as free",
1188 clear_bit(event, gpu->event_bitmap);
1189 complete(&gpu->event_free);
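
event_alloc()/event_free() above pair a fixed bitmap of event slots with a completion that is completed once per free slot, i.e. a completion used as a counting semaphore. A generic sketch of that pattern with hypothetical names (slot_map, slot_alloc, NR_SLOTS); not the driver's code:

#include <linux/bitmap.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define NR_SLOTS 30

static DECLARE_BITMAP(slot_map, NR_SLOTS);
static DEFINE_SPINLOCK(slot_lock);
static DECLARE_COMPLETION(slot_free);

static void slots_init(void)
{
        int i;

        bitmap_zero(slot_map, NR_SLOTS);
        for (i = 0; i < NR_SLOTS; i++)
                complete(&slot_free);   /* one "token" per free slot */
}

static int slot_alloc(void)
{
        int slot;

        if (!wait_for_completion_timeout(&slot_free, HZ))
                return -EBUSY;          /* no token became available */

        spin_lock(&slot_lock);
        slot = find_first_zero_bit(slot_map, NR_SLOTS);
        set_bit(slot, slot_map);
        spin_unlock(&slot_lock);

        return slot;
}

static void slot_release(int slot)
{
        clear_bit(slot, slot_map);
        complete(&slot_free);           /* return the token */
}
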
1196 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
1208 fence = idr_find(&gpu->fence_idr, id);
1240 * Although the retirement happens under the gpu lock, we don't want to hold
1243 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
1255 ret = wait_event_interruptible_timeout(gpu->fence_event,
1266 static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
1276 etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
1280 static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
1286 val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1288 gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1291 val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1293 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1295 sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
1298 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
1305 sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
1314 val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
1316 gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
1319 val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
1321 gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
1325 /* add bo's to gpu's ring, and kick gpu: */
1328 struct etnaviv_gpu *gpu = submit->gpu;
1334 ret = pm_runtime_get_sync(gpu->dev);
1336 pm_runtime_put_noidle(gpu->dev);
1344 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
1346 * - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
1352 ret = event_alloc(gpu, nr_events, event);
1355 pm_runtime_put_noidle(gpu->dev);
1359 mutex_lock(&gpu->lock);
1361 gpu_fence = etnaviv_gpu_fence_alloc(gpu);
1364 event_free(gpu, event[i]);
1369 if (!gpu->fe_running)
1370 etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
1374 submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context);
1377 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
1379 gpu->event[event[1]].submit = submit;
1380 etnaviv_sync_point_queue(gpu, event[1]);
1383 gpu->event[event[0]].fence = gpu_fence;
1385 etnaviv_buffer_queue(gpu, submit->exec_state, submit->mmu_context,
1389 gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
1391 gpu->event[event[2]].submit = submit;
1392 etnaviv_sync_point_queue(gpu, event[2]);
1396 mutex_unlock(&gpu->lock);
1403 struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
1405 struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
1406 u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
1408 event->sync_point(gpu, event);
1410 event_free(gpu, gpu->sync_point_event);
1413 etnaviv_gpu_start_fe(gpu, addr + 2, 2);
1416 static void dump_mmu_fault(struct etnaviv_gpu *gpu)
1421 if (gpu->sec_mode == ETNA_SEC_NONE)
1426 status = gpu_read(gpu, status_reg);
1427 dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
1435 if (gpu->sec_mode == ETNA_SEC_NONE)
1440 dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
1441 gpu_read(gpu, address_reg));
1447 struct etnaviv_gpu *gpu = data;
1450 u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
1455 pm_runtime_mark_last_busy(gpu->dev);
1457 dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
1460 dev_err(gpu->dev, "AXI bus error\n");
1465 dump_mmu_fault(gpu);
1476 dev_dbg(gpu->dev, "event %u\n", event);
1478 if (gpu->event[event].sync_point) {
1479 gpu->sync_point_event = event;
1480 queue_work(gpu->wq, &gpu->sync_point_work);
1483 fence = gpu->event[event].fence;
1487 gpu->event[event].fence = NULL;
1498 if (fence_after(fence->seqno, gpu->completed_fence))
1499 gpu->completed_fence = fence->seqno;
1502 event_free(gpu, event);
1511 static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
1515 ret = clk_prepare_enable(gpu->clk_reg);
1519 ret = clk_prepare_enable(gpu->clk_bus);
1523 ret = clk_prepare_enable(gpu->clk_core);
1527 ret = clk_prepare_enable(gpu->clk_shader);
1534 clk_disable_unprepare(gpu->clk_core);
1536 clk_disable_unprepare(gpu->clk_bus);
1538 clk_disable_unprepare(gpu->clk_reg);
1543 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
1545 clk_disable_unprepare(gpu->clk_shader);
1546 clk_disable_unprepare(gpu->clk_core);
1547 clk_disable_unprepare(gpu->clk_bus);
1548 clk_disable_unprepare(gpu->clk_reg);
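
etnaviv_gpu_clk_enable() above is the usual clk_prepare_enable() ladder that unwinds in reverse order on failure, mirrored by etnaviv_gpu_clk_disable(). A reduced two-clock sketch of the idiom, with hypothetical names:

#include <linux/clk.h>

/* Hypothetical two-clock example of the enable ladder / unwind idiom. */
static int clocks_enable(struct clk *bus, struct clk *core)
{
        int ret;

        ret = clk_prepare_enable(bus);
        if (ret)
                return ret;

        ret = clk_prepare_enable(core);
        if (ret)
                goto disable_bus;

        return 0;

disable_bus:
        clk_disable_unprepare(bus);
        return ret;
}
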
1553 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
1558 u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
1560 if ((idle & gpu->idle_mask) == gpu->idle_mask)
1564 dev_warn(gpu->dev,
1574 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
1576 if (gpu->initialized && gpu->fe_running) {
1578 mutex_lock(&gpu->lock);
1579 etnaviv_buffer_end(gpu);
1580 mutex_unlock(&gpu->lock);
1587 etnaviv_gpu_wait_idle(gpu, 100);
1589 gpu->fe_running = false;
1592 gpu->exec_state = -1;
1594 return etnaviv_gpu_clk_disable(gpu);
1598 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
1602 ret = mutex_lock_killable(&gpu->lock);
1606 etnaviv_gpu_update_clock(gpu);
1607 etnaviv_gpu_hw_init(gpu);
1609 mutex_unlock(&gpu->lock);
1628 struct etnaviv_gpu *gpu = cdev->devdata;
1630 *state = gpu->freq_scale;
1639 struct etnaviv_gpu *gpu = cdev->devdata;
1641 mutex_lock(&gpu->lock);
1642 gpu->freq_scale = state;
1643 if (!pm_runtime_suspended(gpu->dev))
1644 etnaviv_gpu_update_clock(gpu);
1645 mutex_unlock(&gpu->lock);
1661 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1665 gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
1666 (char *)dev_name(dev), gpu, &cooling_ops);
1667 if (IS_ERR(gpu->cooling))
1668 return PTR_ERR(gpu->cooling);
1671 gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
1672 if (!gpu->wq) {
1677 ret = etnaviv_sched_init(gpu);
1682 ret = pm_runtime_get_sync(gpu->dev);
1684 ret = etnaviv_gpu_clk_enable(gpu);
1690 gpu->drm = drm;
1691 gpu->fence_context = dma_fence_context_alloc(1);
1692 idr_init(&gpu->fence_idr);
1693 spin_lock_init(&gpu->fence_spinlock);
1695 INIT_WORK(&gpu->sync_point_work, sync_point_worker);
1696 init_waitqueue_head(&gpu->fence_event);
1698 priv->gpu[priv->num_gpus++] = gpu;
1700 pm_runtime_mark_last_busy(gpu->dev);
1701 pm_runtime_put_autosuspend(gpu->dev);
1706 etnaviv_sched_fini(gpu);
1709 destroy_workqueue(gpu->wq);
1713 thermal_cooling_device_unregister(gpu->cooling);
1721 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1723 DBG("%s", dev_name(gpu->dev));
1725 flush_workqueue(gpu->wq);
1726 destroy_workqueue(gpu->wq);
1728 etnaviv_sched_fini(gpu);
1731 pm_runtime_get_sync(gpu->dev);
1732 pm_runtime_put_sync_suspend(gpu->dev);
1734 etnaviv_gpu_hw_suspend(gpu);
1737 if (gpu->mmu_context)
1738 etnaviv_iommu_context_put(gpu->mmu_context);
1740 if (gpu->initialized) {
1741 etnaviv_cmdbuf_free(&gpu->buffer);
1742 etnaviv_iommu_global_fini(gpu);
1743 gpu->initialized = false;
1746 gpu->drm = NULL;
1747 idr_destroy(&gpu->fence_idr);
1750 thermal_cooling_device_unregister(gpu->cooling);
1751 gpu->cooling = NULL;
1770 struct etnaviv_gpu *gpu;
1773 gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
1774 if (!gpu)
1777 gpu->dev = &pdev->dev;
1778 mutex_init(&gpu->lock);
1779 mutex_init(&gpu->fence_lock);
1782 gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
1783 if (IS_ERR(gpu->mmio))
1784 return PTR_ERR(gpu->mmio);
1787 gpu->irq = platform_get_irq(pdev, 0);
1788 if (gpu->irq < 0) {
1789 dev_err(dev, "failed to get irq: %d\n", gpu->irq);
1790 return gpu->irq;
1793 err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
1794 dev_name(gpu->dev), gpu);
1796 dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
1801 gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
1802 DBG("clk_reg: %p", gpu->clk_reg);
1803 if (IS_ERR(gpu->clk_reg))
1804 return PTR_ERR(gpu->clk_reg);
1806 gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
1807 DBG("clk_bus: %p", gpu->clk_bus);
1808 if (IS_ERR(gpu->clk_bus))
1809 return PTR_ERR(gpu->clk_bus);
1811 gpu->clk_core = devm_clk_get(&pdev->dev, "core");
1812 DBG("clk_core: %p", gpu->clk_core);
1813 if (IS_ERR(gpu->clk_core))
1814 return PTR_ERR(gpu->clk_core);
1815 gpu->base_rate_core = clk_get_rate(gpu->clk_core);
1817 gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
1818 DBG("clk_shader: %p", gpu->clk_shader);
1819 if (IS_ERR(gpu->clk_shader))
1820 return PTR_ERR(gpu->clk_shader);
1821 gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
1824 dev_set_drvdata(dev, gpu);
1831 pm_runtime_use_autosuspend(gpu->dev);
1832 pm_runtime_set_autosuspend_delay(gpu->dev, 200);
1833 pm_runtime_enable(gpu->dev);
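
The probe path above configures runtime-PM autosuspend with a 200 ms delay; elsewhere in the listing, register access is bracketed by pm_runtime_get_sync() and pm_runtime_mark_last_busy()/pm_runtime_put_autosuspend(). A hypothetical wrapper sketching that bracket (not driver code):

#include <linux/pm_runtime.h>

/* Hypothetical helper, for illustration only: wake the device, run fn(),
 * then let runtime PM autosuspend it after the configured delay. */
static int with_device_awake(struct device *dev, void (*fn)(struct device *))
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* undo the usage count */
                return ret;
        }

        fn(dev);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
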
1854 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1858 if (atomic_read(&gpu->sched.hw_rq_count))
1862 mask = gpu->idle_mask & ~(VIVS_HI_IDLE_STATE_FE |
1864 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
1871 return etnaviv_gpu_hw_suspend(gpu);
1876 struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
1879 ret = etnaviv_gpu_clk_enable(gpu);
1884 if (gpu->drm && gpu->initialized) {
1885 ret = etnaviv_gpu_hw_resume(gpu);
1887 etnaviv_gpu_clk_disable(gpu);
1903 .name = "etnaviv-gpu",