Lines matching references to pmu (the numeric prefixes below are the source file's own line numbers; the fragments are from the i915 PMU implementation).

82 static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
84 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
92 enable = pmu->enable;
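
pmu_needs_timer() (source lines 82-92 above) decides whether the sampling hrtimer must run: it masks pmu->enable down to the events that actually need periodic sampling, and drops the per-engine bits while the GPU is idle. Below is a minimal user-space sketch of that decision; the mask values and the pmu_needs_timer_sketch() name are illustrative, not the driver's (the real masks are derived from the uAPI config bits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative event bits; the driver derives these from uAPI configs. */
    #define ENGINE_SAMPLE_MASK      0x07ULL /* busy/wait/sema sampling */
    #define FREQ_SAMPLE_MASK        0x18ULL /* actual/requested frequency */

    /* The timer is needed only while an enabled event requires sampling. */
    static bool pmu_needs_timer_sketch(uint64_t enable, bool gpu_active)
    {
            /* Keep only the events that could need the timer. */
            enable &= ENGINE_SAMPLE_MASK | FREQ_SAMPLE_MASK;

            /* Idle engines cannot accumulate busyness; drop those bits. */
            if (!gpu_active)
                    enable &= ~ENGINE_SAMPLE_MASK;

            return enable != 0;
    }

    int main(void)
    {
            printf("%d\n", pmu_needs_timer_sketch(0x01, true));  /* 1 */
            printf("%d\n", pmu_needs_timer_sketch(0x01, false)); /* 0: idle */
            printf("%d\n", pmu_needs_timer_sketch(0x08, false)); /* 1: freq */
            return 0;
    }
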
150 struct i915_pmu *pmu = &i915->pmu;
161 spin_lock_irqsave(&pmu->lock, flags);
164 pmu->sample[__I915_SAMPLE_RC6].cur = val;
173 val = ktime_since(pmu->sleep_last);
174 val += pmu->sample[__I915_SAMPLE_RC6].cur;
177 if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
178 val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
180 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
182 spin_unlock_irqrestore(&pmu->lock, flags);
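
The get_rc6() fragments (source lines 150-182) read RC6 residency under pmu->lock, estimate it from sleep_last while the device is runtime suspended, and clamp the result against the last value handed out so the counter never appears to run backwards. A runnable sketch of just that clamp; struct rc6_estimate and monotonic_report() are illustrative names, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    /* Last value handed to userspace; reads must never regress. */
    struct rc6_estimate {
            uint64_t last_reported;
    };

    /* Clamp a possibly-imprecise estimate so the counter stays monotonic. */
    static uint64_t monotonic_report(struct rc6_estimate *e, uint64_t estimate)
    {
            if (estimate < e->last_reported)
                    estimate = e->last_reported;    /* never go backwards */
            else
                    e->last_reported = estimate;    /* new high-water mark */
            return estimate;
    }

    int main(void)
    {
            struct rc6_estimate e = { 0 };

            printf("%llu\n", (unsigned long long)monotonic_report(&e, 100)); /* 100 */
            printf("%llu\n", (unsigned long long)monotonic_report(&e, 90));  /* 100 */
            printf("%llu\n", (unsigned long long)monotonic_report(&e, 150)); /* 150 */
            return 0;
    }

In the driver the clamp runs under pmu->lock because concurrent readers share the __I915_SAMPLE_RC6_LAST_REPORTED slot.
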
187 static void init_rc6(struct i915_pmu *pmu)
189 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
193 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
194 pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
195 pmu->sample[__I915_SAMPLE_RC6].cur;
196 pmu->sleep_last = ktime_get();
202 struct i915_pmu *pmu = &i915->pmu;
204 pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
205 pmu->sleep_last = ktime_get();
215 static void init_rc6(struct i915_pmu *pmu) { }
220 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
222 if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
223 pmu->timer_enabled = true;
224 pmu->timer_last = ktime_get();
225 hrtimer_start_range_ns(&pmu->timer,
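
__i915_pmu_maybe_start_timer() (source lines 220-225) arms the hrtimer only on the disabled-to-enabled edge, recording timer_last so the first callback can measure a real elapsed period. A kernel-style fragment sketching that arm-once pattern; struct sampler and SAMPLE_PERIOD_NS are illustrative (the driver has its own period constant):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define SAMPLE_PERIOD_NS        (NSEC_PER_SEC / 200)    /* illustrative: 5 ms */

    struct sampler {
            struct hrtimer timer;
            ktime_t timer_last;
            bool timer_enabled;
    };

    /* Arm only on the off->on edge; a running timer rearms itself. */
    static void maybe_start_timer_sketch(struct sampler *s, bool needed)
    {
            if (!s->timer_enabled && needed) {
                    s->timer_enabled = true;
                    s->timer_last = ktime_get();    /* baseline for first tick */
                    hrtimer_start_range_ns(&s->timer,
                                           ns_to_ktime(SAMPLE_PERIOD_NS),
                                           0, HRTIMER_MODE_REL_PINNED);
            }
    }
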
233 struct i915_pmu *pmu = &i915->pmu;
235 if (!pmu->base.event_init)
238 spin_lock_irq(&pmu->lock);
246 pmu->timer_enabled = pmu_needs_timer(pmu, false);
248 spin_unlock_irq(&pmu->lock);
253 struct i915_pmu *pmu = &i915->pmu;
255 if (!pmu->base.event_init)
258 spin_lock_irq(&pmu->lock);
263 __i915_pmu_maybe_start_timer(pmu);
265 spin_unlock_irq(&pmu->lock);
286 struct intel_engine_pmu *pmu = &engine->pmu;
295 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
297 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
316 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
327 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
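
The engine sampling fragments (source lines 286-327) bail out early when no engine sampling event is enabled, and otherwise credit the elapsed period to a per-engine bucket for each state (busy, waiting, on a semaphore) observed at the tick. In the driver, add_sample() is a one-line accumulator along these lines; struct sample_sketch stands in for the driver's sample type:

    #include <stdint.h>
    #include <stdio.h>

    struct sample_sketch {
            uint64_t cur;   /* accumulated nanoseconds */
    };

    /* Credit one timer period to a bucket whose condition was observed. */
    static void add_sample(struct sample_sketch *sample, uint32_t period_ns)
    {
            sample->cur += period_ns;
    }

    int main(void)
    {
            struct sample_sketch busy = { 0 };

            /* Three 5 ms ticks during which the engine was seen busy. */
            add_sample(&busy, 5000000);
            add_sample(&busy, 5000000);
            add_sample(&busy, 5000000);
            printf("busy ns: %llu\n", (unsigned long long)busy.cur); /* 15000000 */
            return 0;
    }

Because state is only inspected at tick boundaries, these counters are statistical estimates with a resolution of one sampling period.
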
355 static bool frequency_sampling_enabled(struct i915_pmu *pmu)
357 return pmu->enable &
367 struct i915_pmu *pmu = &i915->pmu;
370 if (!frequency_sampling_enabled(pmu))
377 if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
395 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
399 if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
400 add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
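
frequency_sample() (source lines 355-400) accumulates a time-weighted sum: each tick contributes the instantaneous frequency multiplied by the tick length, so the average frequency can be recovered later by dividing by total time. A runnable sketch of that weighting; the names are illustrative, and the microsecond units mirror how the driver scales the nanosecond period down before weighting:

    #include <stdint.h>
    #include <stdio.h>

    struct freq_sample {
            uint64_t cur;   /* sum of freq_mhz * period_us */
    };

    /* Weight each observation by how long it was in effect. */
    static void add_sample_mult(struct freq_sample *s, uint32_t val, uint32_t mul)
    {
            s->cur += (uint64_t)val * mul;
    }

    int main(void)
    {
            struct freq_sample s = { 0 };

            add_sample_mult(&s, 300, 5000); /* 300 MHz held for 5000 us */
            add_sample_mult(&s, 900, 5000); /* 900 MHz held for 5000 us */

            /* Average over the 10000 us window. */
            printf("avg MHz: %llu\n", (unsigned long long)(s.cur / 10000)); /* 600 */
            return 0;
    }
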
411 container_of(hrtimer, struct drm_i915_private, pmu.timer);
412 struct i915_pmu *pmu = &i915->pmu;
417 if (!READ_ONCE(pmu->timer_enabled))
421 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
422 pmu->timer_last = now;
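
i915_sample() (source lines 411-422) is the hrtimer callback: it measures the real elapsed period from timer_last rather than trusting the nominal interval, since hrtimer callbacks can fire late. A kernel-style sketch of a self-rearming sampling callback, reusing the illustrative struct sampler and SAMPLE_PERIOD_NS from the earlier fragment:

    static enum hrtimer_restart sample_cb_sketch(struct hrtimer *hrtimer)
    {
            struct sampler *s = container_of(hrtimer, struct sampler, timer);
            unsigned int period_ns;
            ktime_t now;

            /* Racy but benign: a stale read costs at most one extra tick. */
            if (!READ_ONCE(s->timer_enabled))
                    return HRTIMER_NORESTART;

            now = ktime_get();
            period_ns = ktime_to_ns(ktime_sub(now, s->timer_last));
            s->timer_last = now;

            /* ... accumulate samples weighted by the measured period_ns ... */

            hrtimer_forward(hrtimer, now, ns_to_ktime(SAMPLE_PERIOD_NS));
            return HRTIMER_RESTART;
    }
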
457 container_of(event->pmu, typeof(*i915), pmu.base);
510 container_of(event->pmu, typeof(*i915), pmu.base);
524 container_of(event->pmu, typeof(*i915), pmu.base);
527 if (event->attr.type != event->pmu->type)
560 container_of(event->pmu, typeof(*i915), pmu.base);
561 struct i915_pmu *pmu = &i915->pmu;
581 val = engine->pmu.sample[sample].cur;
587 div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
592 div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
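
On the read side (source lines 560-592), engine counters are reported as raw accumulated values, while the frequency counters are scaled back down with div_u64() (explicit 64-by-32 division, needed on 32-bit kernels). Assuming the microsecond weighting sketched above, the exposed counter then advances by the average MHz for every second of wall time, so perf's usual delta-over-time arithmetic yields the average frequency:

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC    1000000ULL

    /*
     * Read side of the weighted accumulator: the stored sum is
     * freq_mhz * period_us, so scaling by USEC_PER_SEC exposes a counter
     * that grows by the average MHz per second of wall time.
     */
    static uint64_t read_freq_counter(uint64_t accumulated_mhz_us)
    {
            return accumulated_mhz_us / USEC_PER_SEC;       /* div_u64() in-kernel */
    }

    int main(void)
    {
            /* One second sampled at a steady 600 MHz. */
            uint64_t acc = 600ULL * USEC_PER_SEC;

            printf("%llu\n", (unsigned long long)read_freq_counter(acc)); /* 600 */
            return 0;
    }
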
625 container_of(event->pmu, typeof(*i915), pmu.base);
627 struct i915_pmu *pmu = &i915->pmu;
630 spin_lock_irqsave(&pmu->lock, flags);
636 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
637 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
638 GEM_BUG_ON(pmu->enable_count[bit] == ~0);
640 pmu->enable |= BIT_ULL(bit);
641 pmu->enable_count[bit]++;
646 __i915_pmu_maybe_start_timer(pmu);
660 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
662 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
664 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
665 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
666 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
668 engine->pmu.enable |= BIT(sample);
669 engine->pmu.enable_count[sample]++;
672 spin_unlock_irqrestore(&pmu->lock, flags);
685 container_of(event->pmu, typeof(*i915), pmu.base);
687 struct i915_pmu *pmu = &i915->pmu;
690 spin_lock_irqsave(&pmu->lock, flags);
700 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
701 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
702 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
708 if (--engine->pmu.enable_count[sample] == 0)
709 engine->pmu.enable &= ~BIT(sample);
712 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
713 GEM_BUG_ON(pmu->enable_count[bit] == 0);
718 if (--pmu->enable_count[bit] == 0) {
719 pmu->enable &= ~BIT_ULL(bit);
720 pmu->timer_enabled &= pmu_needs_timer(pmu, true);
723 spin_unlock_irqrestore(&pmu->lock, flags);
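
i915_pmu_enable() and i915_pmu_disable() (source lines 625-723) maintain a packed enable bitmask plus a per-bit reference count, both under pmu->lock: the bit is set on the 0-to-1 transition and cleared on the 1-to-0 transition, while the BUILD_BUG_ON()/GEM_BUG_ON() guards catch mask/array size mismatches and counter overflow or underflow. A runnable sketch of the pattern; MASK_BITS and the assert()s are illustrative stand-ins for those guards:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MASK_BITS 16    /* illustrative; the driver sizes this from uAPI */

    struct enable_state {
            uint64_t enable;                 /* one bit per enabled event type */
            uint8_t enable_count[MASK_BITS]; /* reference count per bit */
    };

    static void event_enable(struct enable_state *s, unsigned int bit)
    {
            assert(bit < MASK_BITS);
            assert(s->enable_count[bit] != UINT8_MAX);      /* no overflow */
            s->enable |= 1ULL << bit;
            s->enable_count[bit]++;
    }

    static void event_disable(struct enable_state *s, unsigned int bit)
    {
            assert(bit < MASK_BITS);
            assert(s->enable_count[bit] != 0);              /* no underflow */
            if (--s->enable_count[bit] == 0)
                    s->enable &= ~(1ULL << bit);    /* last user is gone */
    }

    int main(void)
    {
            struct enable_state s = { 0 };

            event_enable(&s, 3);
            event_enable(&s, 3);    /* second perf event on the same counter */
            event_disable(&s, 3);
            printf("%#llx\n", (unsigned long long)s.enable); /* 0x8: still on */
            event_disable(&s, 3);
            printf("%#llx\n", (unsigned long long)s.enable); /* 0: all off */
            return 0;
    }

The driver repeats the same pattern per engine with a smaller mask, and the disable path finishes by re-evaluating pmu_needs_timer() so the timer can stop once the last sampling event is gone.
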
860 create_event_attributes(struct i915_pmu *pmu)
862 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
975 pmu->i915_attr = i915_attr;
976 pmu->pmu_attr = pmu_attr;
992 static void free_event_attributes(struct i915_pmu *pmu)
994 struct attribute **attr_iter = pmu->events_attr_group.attrs;
999 kfree(pmu->events_attr_group.attrs);
1000 kfree(pmu->i915_attr);
1001 kfree(pmu->pmu_attr);
1003 pmu->events_attr_group.attrs = NULL;
1004 pmu->i915_attr = NULL;
1005 pmu->pmu_attr = NULL;
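
create_event_attributes() and free_event_attributes() (source lines 860-1005) build the sysfs events group dynamically, since the available counters depend on which engines are present; the free side releases the same three allocations and NULLs the pointers so a failed registration can unwind safely and a repeated free stays harmless. A kernel-style sketch of building the NULL-terminated attribute array that sysfs expects; the names are illustrative:

    #include <linux/slab.h>
    #include <linux/sysfs.h>

    /* Build the NULL-terminated array an attribute_group's .attrs wants. */
    static struct attribute **alloc_attrs_sketch(struct attribute *per_event,
                                                 unsigned int count)
    {
            struct attribute **attrs;
            unsigned int i;

            /* +1 for the NULL terminator sysfs iterates up to. */
            attrs = kcalloc(count + 1, sizeof(*attrs), GFP_KERNEL);
            if (!attrs)
                    return NULL;

            for (i = 0; i < count; i++)
                    attrs[i] = &per_event[i];

            return attrs;   /* caller kfree()s on unwind, then NULLs its pointer */
    }
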
1010 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
1012 GEM_BUG_ON(!pmu->base.event_init);
1023 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
1026 GEM_BUG_ON(!pmu->base.event_init);
1033 perf_pmu_migrate_context(&pmu->base, cpu, target);
1040 static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
1053 ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
1059 pmu->cpuhp.slot = slot;
1063 static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
1065 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
1067 drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
1068 drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
1069 cpuhp_remove_multi_state(pmu->cpuhp.slot);
1070 pmu->cpuhp.slot = CPUHP_INVALID;
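
The hotplug fragments (source lines 1010-1070) register a multi-instance CPU hotplug state so that when the CPU currently counting for this uncore PMU goes offline, the perf context migrates to another online CPU; unregistration removes the instance and the state and resets cpuhp.slot to CPUHP_INVALID. A kernel-style sketch of such an offline handler; struct pmu_holder and active_cpu are illustrative (the driver keeps its active CPU in its own cpumask):

    #include <linux/cpumask.h>
    #include <linux/list.h>
    #include <linux/perf_event.h>

    struct pmu_holder {
            struct pmu base;
            struct hlist_node node;
    };

    static struct cpumask active_cpu;       /* which CPU services the PMU */

    static int pmu_cpu_offline_sketch(unsigned int cpu, struct hlist_node *node)
    {
            struct pmu_holder *h = hlist_entry_safe(node, typeof(*h), node);
            unsigned int target;

            /* Only act if the dying CPU was the one counting for the PMU. */
            if (cpumask_test_and_clear_cpu(cpu, &active_cpu)) {
                    target = cpumask_any_but(cpu_online_mask, cpu);
                    if (target < nr_cpu_ids) {
                            cpumask_set_cpu(target, &active_cpu);
                            perf_pmu_migrate_context(&h->base, cpu, target);
                    }
            }
            return 0;
    }
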
1086 struct i915_pmu *pmu = &i915->pmu;
1089 &pmu->events_attr_group,
1101 spin_lock_init(&pmu->lock);
1102 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1103 pmu->timer.function = i915_sample;
1104 pmu->cpuhp.slot = CPUHP_INVALID;
1105 init_rc6(pmu);
1108 pmu->name = kasprintf(GFP_KERNEL,
1111 if (pmu->name) {
1113 strreplace((char *)pmu->name, ':', '_');
1116 pmu->name = "i915";
1118 if (!pmu->name)
1121 pmu->events_attr_group.name = "events";
1122 pmu->events_attr_group.attrs = create_event_attributes(pmu);
1123 if (!pmu->events_attr_group.attrs)
1126 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
1128 if (!pmu->base.attr_groups)
1131 pmu->base.module = THIS_MODULE;
1132 pmu->base.task_ctx_nr = perf_invalid_context;
1133 pmu->base.event_init = i915_pmu_event_init;
1134 pmu->base.add = i915_pmu_event_add;
1135 pmu->base.del = i915_pmu_event_del;
1136 pmu->base.start = i915_pmu_event_start;
1137 pmu->base.stop = i915_pmu_event_stop;
1138 pmu->base.read = i915_pmu_event_read;
1139 pmu->base.event_idx = i915_pmu_event_event_idx;
1141 ret = perf_pmu_register(&pmu->base, pmu->name, -1);
1145 ret = i915_pmu_register_cpuhp_state(pmu);
1152 perf_pmu_unregister(&pmu->base);
1154 kfree(pmu->base.attr_groups);
1156 pmu->base.event_init = NULL;
1157 free_event_attributes(pmu);
1160 kfree(pmu->name);
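
i915_pmu_register() (source lines 1086-1160) follows the usual construct-then-register shape: initialize the lock, timer, and name, build the attribute groups, fill in the struct pmu callbacks, then call perf_pmu_register() and add the hotplug instance, unwinding in reverse order through goto labels on failure (the fragments at source lines 1152-1160 are those labels). A condensed sketch of that unwind shape; both *_sketch() helpers are hypothetical:

    #include <linux/perf_event.h>

    static int register_hotplug_state_sketch(void);  /* hypothetical */
    static void free_attributes_sketch(void);        /* hypothetical */

    static int pmu_register_sketch(struct pmu *base, const char *name)
    {
            int ret;

            ret = perf_pmu_register(base, name, -1);
            if (ret)
                    goto err_attrs;

            ret = register_hotplug_state_sketch();
            if (ret)
                    goto err_unreg;

            return 0;

    err_unreg:
            perf_pmu_unregister(base);      /* undo in reverse order */
    err_attrs:
            free_attributes_sketch();
            return ret;
    }

i915_pmu_unregister() (source lines 1167-1183) then runs the mirror image at teardown: cancel the timer, remove the hotplug state, perf_pmu_unregister(), and free the name and attributes, with base.event_init NULLed to mark the PMU dead.
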
1167 struct i915_pmu *pmu = &i915->pmu;
1169 if (!pmu->base.event_init)
1172 drm_WARN_ON(&i915->drm, pmu->enable);
1174 hrtimer_cancel(&pmu->timer);
1176 i915_pmu_unregister_cpuhp_state(pmu);
1178 perf_pmu_unregister(&pmu->base);
1179 pmu->base.event_init = NULL;
1180 kfree(pmu->base.attr_groups);
1182 kfree(pmu->name);
1183 free_event_attributes(pmu);