Lines matching refs:pmu in drivers/gpu/drm/i915/i915_pmu.c (each hit is prefixed with its line number in the file)
142 static bool pmu_needs_timer(struct i915_pmu *pmu)
144 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
152 enable = pmu->enable;
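pmu_needs_timer() gates the sampling hrtimer: only frequency counters and software engine sampling need periodic polling. A hedged reconstruction of the elided body (the non-matching lines are recalled from upstream i915_pmu.c and may differ between kernel versions):

static bool pmu_needs_timer(struct i915_pmu *pmu)
{
        struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
        u32 enable;

        /* Start from the bitmask of all currently enabled events. */
        enable = pmu->enable;

        /* Keep only the events that could need periodic sampling. */
        enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;

        /* Hardware busyness stats make the timer unnecessary for BUSY. */
        if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
                enable &= ~BIT(I915_SAMPLE_BUSY);

        /* Any remaining bit means the timer must run. */
        return enable;
}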
194 static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
196 return pmu->sample[gt_id][sample].cur;
200 store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
202 pmu->sample[gt_id][sample].cur = val;
206 add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
208 pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
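These three accessors are the only way the rest of the file touches pmu->sample[][]. The interesting one is add_sample_mult(): mul_u32_u32() widens the 32-bit operands to a 64-bit product before accumulating, so frequency-times-period sums cannot truncate. A minimal self-contained analogue of that pattern (the names mirror the kernel's, the rest is illustrative):

#include <stdint.h>

/* Like the kernel's mul_u32_u32(): widen before multiplying so the
 * u32 * u32 product cannot overflow. */
static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
        return (uint64_t)a * b;
}

/* cur += val * mul, accumulated in 64 bits as add_sample_mult() does. */
static inline void add_sample_mult(uint64_t *cur, uint32_t val, uint32_t mul)
{
        *cur += mul_u32_u32(val, mul);
}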
215 struct i915_pmu *pmu = &i915->pmu;
226 spin_lock_irqsave(&pmu->lock, flags);
229 store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
238 val = ktime_since_raw(pmu->sleep_last[gt_id]);
239 val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
242 if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
243 val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
245 store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
247 spin_unlock_irqrestore(&pmu->lock, flags);
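The matched lines above are the heart of RC6 residency reporting: while the GT is awake the hardware counter is stored as-is; while it is runtime suspended the value is estimated as the last real sample plus the time since suspend (the GT is assumed to have sat in RC6 the whole while), and the result is clamped so the counter handed to perf never goes backwards. A self-contained sketch of that estimate-and-clamp logic (struct and function names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>

struct rc6_state {
        uint64_t last_sample;   /* __I915_SAMPLE_RC6: last real HW reading */
        uint64_t last_reported; /* __I915_SAMPLE_RC6_LAST_REPORTED */
        uint64_t sleep_since;   /* sleep_last[]: when the GT suspended */
};

static uint64_t rc6_read(struct rc6_state *s, bool awake,
                         uint64_t hw_val, uint64_t now)
{
        uint64_t val;

        if (awake) {
                /* GT is awake: trust the hardware counter. */
                s->last_sample = hw_val;
                val = hw_val;
        } else {
                /* Suspended: assume RC6 for the whole sleep period. */
                val = s->last_sample + (now - s->sleep_since);
        }

        /* Never let the reported counter move backwards. */
        if (val < s->last_reported)
                val = s->last_reported;
        else
                s->last_reported = val;

        return val;
}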
252 static void init_rc6(struct i915_pmu *pmu)
254 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
264 store_sample(pmu, i, __I915_SAMPLE_RC6, val);
265 store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
267 pmu->sleep_last[i] = ktime_get_raw();
274 	struct i915_pmu *pmu = &gt->i915->pmu;
276 store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
277 pmu->sleep_last[gt->info.id] = ktime_get_raw();
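park_rc6() and init_rc6() keep that estimate honest: both store a fresh hardware sample and stamp sleep_last[] with ktime_get_raw(), so the parked-time extrapolation above always starts from a matching (sample, timestamp) pair. A hedged reconstruction of park_rc6() around the matched lines (the signature is assumed from the gt usage):

static void park_rc6(struct intel_gt *gt)
{
        struct i915_pmu *pmu = &gt->i915->pmu;

        /* One last real sample, plus the moment we went to sleep. */
        store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
        pmu->sleep_last[gt->info.id] = ktime_get_raw();
}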
280 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
282 if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
283 pmu->timer_enabled = true;
284 pmu->timer_last = ktime_get();
285 hrtimer_start_range_ns(&pmu->timer,
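The timer is started lazily, and only under pmu->lock: nothing happens unless it is currently stopped and pmu_needs_timer() reports that some enabled event requires it. timer_last is stamped before arming so the first callback can measure a real elapsed period. A hedged completion of the truncated call (PERIOD is the driver's nominal sampling interval; the zero slack and pinned relative mode are recalled from upstream):

        if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
                pmu->timer_enabled = true;
                pmu->timer_last = ktime_get();
                hrtimer_start_range_ns(&pmu->timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
        }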
293 	struct i915_pmu *pmu = &gt->i915->pmu;
295 if (!pmu->base.event_init)
298 spin_lock_irq(&pmu->lock);
306 pmu->unparked &= ~BIT(gt->info.id);
307 if (pmu->unparked == 0)
308 pmu->timer_enabled = false;
310 spin_unlock_irq(&pmu->lock);
315 	struct i915_pmu *pmu = &gt->i915->pmu;
317 if (!pmu->base.event_init)
320 spin_lock_irq(&pmu->lock);
325 if (pmu->unparked == 0)
326 __i915_pmu_maybe_start_timer(pmu);
328 pmu->unparked |= BIT(gt->info.id);
330 spin_unlock_irq(&pmu->lock);
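pmu->unparked is a per-GT bitmask doing duty as a reference count: the first GT to unpark (mask still zero at the check) may start the timer, and only when the last GT parks does the mask drop to zero and the timer get switched off. A tiny self-contained analogue of that first-on/last-off pattern (locking elided; the driver holds pmu->lock):

#include <stdbool.h>
#include <stdint.h>

static uint32_t unparked;       /* one bit per GT, like pmu->unparked */

static void gt_unparked(unsigned int gt_id, bool *timer_enabled)
{
        /* First GT to wake is responsible for starting the timer. */
        if (unparked == 0)
                *timer_enabled = true;
        unparked |= UINT32_C(1) << gt_id;
}

static void gt_parked(unsigned int gt_id, bool *timer_enabled)
{
        unparked &= ~(UINT32_C(1) << gt_id);
        /* Last GT to sleep stops it. */
        if (unparked == 0)
                *timer_enabled = false;
}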
351 struct intel_engine_pmu *pmu = &engine->pmu;
360 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
362 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
381 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
392 if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
399 if (!engine->pmu.enable)
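The engine counters are time accumulators, not event counts: on every timer tick the whole elapsed period_ns is credited to each state (busy, waiting, semaphore) the engine was observed in, so each counter approximates nanoseconds spent in that state. A self-contained analogue of one sampling tick (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct engine_sample { uint64_t busy_ns, wait_ns, sema_ns; };

/* Credit the elapsed period to every state the engine was seen in,
 * as engines_sample() does with add_sample(). */
static void sample_tick(struct engine_sample *s, bool busy, bool wait,
                        bool sema, uint64_t period_ns)
{
        if (busy)
                s->busy_ns += period_ns;
        if (wait)
                s->wait_ns += period_ns;
        if (sema)
                s->sema_ns += period_ns;
}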
418 frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
420 return pmu->enable &
430 struct i915_pmu *pmu = &i915->pmu;
433 if (!frequency_sampling_enabled(pmu, gt_id))
440 if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
456 add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
460 if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
461 add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
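Frequency events use the add_sample_mult() accessor from above: each tick accumulates frequency multiplied by the elapsed period, so the counter integrates MHz over time and its rate of change, computed by userspace from two reads, is the average frequency. A simplified self-contained analogue that makes the time-weighted averaging explicit (the driver itself scales by a fixed time base at read-out instead):

#include <stdint.h>

struct freq_avg {
        uint64_t weighted_sum;  /* sum of freq_mhz * period_us */
        uint64_t total_us;      /* total sampled time */
};

static void freq_tick(struct freq_avg *f, uint32_t freq_mhz, uint32_t period_us)
{
        f->weighted_sum += (uint64_t)freq_mhz * period_us;
        f->total_us += period_us;
}

/* Read-out: time-weighted mean frequency in MHz. */
static uint64_t freq_read(const struct freq_avg *f)
{
        return f->total_us ? f->weighted_sum / f->total_us : 0;
}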
472 container_of(hrtimer, struct drm_i915_private, pmu.timer);
473 struct i915_pmu *pmu = &i915->pmu;
479 if (!READ_ONCE(pmu->timer_enabled))
483 period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
484 pmu->timer_last = now;
494 if (!(pmu->unparked & BIT(i)))
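The hrtimer callback recovers its drm_i915_private with container_of(), bails out once sampling is disabled, and measures the real elapsed time from timer_last instead of trusting the nominal interval, since hrtimers can fire late. Only unparked GTs are sampled. A hedged reconstruction of the surrounding body (non-matching lines recalled from upstream and may differ by version):

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);
        struct i915_pmu *pmu = &i915->pmu;
        unsigned int period_ns;
        struct intel_gt *gt;
        unsigned int i;
        ktime_t now;

        if (!READ_ONCE(pmu->timer_enabled))
                return HRTIMER_NORESTART;

        now = ktime_get();
        period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
        pmu->timer_last = now;

        for_each_gt(gt, i915, i) {
                if (!(pmu->unparked & BIT(i)))
                        continue;

                engines_sample(gt, period_ns);
                frequency_sample(gt, period_ns);
        }

        hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

        return HRTIMER_RESTART;
}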
509 container_of(event->pmu, typeof(*i915), pmu.base);
576 container_of(event->pmu, typeof(*i915), pmu.base);
590 container_of(event->pmu, typeof(*i915), pmu.base);
591 struct i915_pmu *pmu = &i915->pmu;
594 if (pmu->closed)
597 if (event->attr.type != event->pmu->type)
632 container_of(event->pmu, typeof(*i915), pmu.base);
633 struct i915_pmu *pmu = &i915->pmu;
653 val = engine->pmu.sample[sample].cur;
662 div_u64(read_sample(pmu, gt_id,
668 div_u64(read_sample(pmu, gt_id,
673 val = READ_ONCE(pmu->irq_count);
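On the read side the accumulated frequency sums are scaled down with div_u64() (required for u64 division to work on 32-bit kernels), and irq_count is read with READ_ONCE() because it is updated outside pmu->lock. A hedged reconstruction of the non-engine half of __i915_pmu_event_read() (the config/gt_id decoding is assumed from the config_mask() usage above):

        switch (config) {
        case I915_PMU_ACTUAL_FREQUENCY:
                val = div_u64(read_sample(pmu, gt_id,
                                          __I915_SAMPLE_FREQ_ACT),
                              USEC_PER_SEC /* to MHz */);
                break;
        case I915_PMU_REQUESTED_FREQUENCY:
                val = div_u64(read_sample(pmu, gt_id,
                                          __I915_SAMPLE_FREQ_REQ),
                              USEC_PER_SEC /* to MHz */);
                break;
        case I915_PMU_INTERRUPTS:
                val = READ_ONCE(pmu->irq_count);
                break;
        }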
690 container_of(event->pmu, typeof(*i915), pmu.base);
692 struct i915_pmu *pmu = &i915->pmu;
695 if (pmu->closed) {
712 container_of(event->pmu, typeof(*i915), pmu.base);
714 struct i915_pmu *pmu = &i915->pmu;
720 spin_lock_irqsave(&pmu->lock, flags);
726 BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
727 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
728 GEM_BUG_ON(pmu->enable_count[bit] == ~0);
730 pmu->enable |= BIT(bit);
731 pmu->enable_count[bit]++;
736 __i915_pmu_maybe_start_timer(pmu);
750 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
752 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
754 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
755 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
756 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
758 engine->pmu.enable |= BIT(sample);
759 engine->pmu.enable_count[sample]++;
762 spin_unlock_irqrestore(&pmu->lock, flags);
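Enabling is reference counted at two levels under pmu->lock: a global per-event-bit count (pmu->enable_count[], with pmu->enable as the fast-path bitmask the samplers test) and, for engine events, a per-engine per-sample count. The BUILD_BUG_ON/GEM_BUG_ON lines assert that the array sizes match the bit layout and that the counters never wrap. A minimal self-contained analogue of the bitmask-plus-refcount pattern:

#include <assert.h>
#include <limits.h>
#include <stdint.h>

#define NBITS 32

static uint32_t enable;                   /* fast path, like pmu->enable */
static unsigned int enable_count[NBITS];  /* per-bit reference count */

static void event_enable(unsigned int bit)
{
        assert(bit < NBITS);                    /* GEM_BUG_ON(bit >= ...) */
        assert(enable_count[bit] != UINT_MAX);  /* GEM_BUG_ON(count == ~0) */

        enable |= UINT32_C(1) << bit;   /* idempotent, set unconditionally */
        enable_count[bit]++;
}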
776 container_of(event->pmu, typeof(*i915), pmu.base);
778 struct i915_pmu *pmu = &i915->pmu;
784 spin_lock_irqsave(&pmu->lock, flags);
794 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
795 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
796 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
802 if (--engine->pmu.enable_count[sample] == 0)
803 engine->pmu.enable &= ~BIT(sample);
806 GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
807 GEM_BUG_ON(pmu->enable_count[bit] == 0);
812 if (--pmu->enable_count[bit] == 0) {
813 pmu->enable &= ~BIT(bit);
814 pmu->timer_enabled &= pmu_needs_timer(pmu);
817 spin_unlock_irqrestore(&pmu->lock, flags);
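Disable is the exact mirror: the fast-path bit is cleared only when the last reference goes away, and line 814 then re-evaluates pmu_needs_timer() so the sampling timer stops once no remaining event needs it. Continuing the analogue from above:

static void event_disable(unsigned int bit)
{
        assert(bit < NBITS);
        assert(enable_count[bit] != 0);  /* catches unbalanced disable */

        /* Only the last reference clears the fast-path bit. */
        if (--enable_count[bit] == 0)
                enable &= ~(UINT32_C(1) << bit);
}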
823 container_of(event->pmu, typeof(*i915), pmu.base);
824 struct i915_pmu *pmu = &i915->pmu;
826 if (pmu->closed)
836 container_of(event->pmu, typeof(*i915), pmu.base);
837 struct i915_pmu *pmu = &i915->pmu;
839 if (pmu->closed)
853 container_of(event->pmu, typeof(*i915), pmu.base);
854 struct i915_pmu *pmu = &i915->pmu;
856 if (pmu->closed)
984 create_event_attributes(struct i915_pmu *pmu)
986 struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
1119 pmu->i915_attr = i915_attr;
1120 pmu->pmu_attr = pmu_attr;
1136 static void free_event_attributes(struct i915_pmu *pmu)
1138 struct attribute **attr_iter = pmu->events_attr_group.attrs;
1143 kfree(pmu->events_attr_group.attrs);
1144 kfree(pmu->i915_attr);
1145 kfree(pmu->pmu_attr);
1147 pmu->events_attr_group.attrs = NULL;
1148 pmu->i915_attr = NULL;
1149 pmu->pmu_attr = NULL;
1154 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
1156 GEM_BUG_ON(!pmu->base.event_init);
1167 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
1170 GEM_BUG_ON(!pmu->base.event_init);
1176 if (pmu->closed)
1189 if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
1190 perf_pmu_migrate_context(&pmu->base, cpu, target);
1191 pmu->cpuhp.cpu = target;
1222 static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
1227 return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
1230 static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
1232 cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
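This PMU is uncore style (task_ctx_nr is perf_invalid_context, see the registration below), so perf binds its events to a single CPU; the hotplug callbacks keep that CPU valid. On offline, events are migrated to a surviving CPU with perf_pmu_migrate_context() and the new home is recorded in cpuhp.cpu. A hedged reconstruction of the offline callback around the matched lines (target-CPU selection elided):

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
        unsigned int target = nr_cpu_ids;

        GEM_BUG_ON(!pmu->base.event_init);

        if (pmu->closed)
                return 0;

        /* ... choose a surviving target CPU, if this was our CPU ... */

        if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
                perf_pmu_migrate_context(&pmu->base, cpu, target);
                pmu->cpuhp.cpu = target;
        }

        return 0;
}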
1248 struct i915_pmu *pmu = &i915->pmu;
1251 &pmu->events_attr_group,
1263 spin_lock_init(&pmu->lock);
1264 hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1265 pmu->timer.function = i915_sample;
1266 pmu->cpuhp.cpu = -1;
1267 init_rc6(pmu);
1270 pmu->name = kasprintf(GFP_KERNEL,
1273 if (pmu->name) {
1275 strreplace((char *)pmu->name, ':', '_');
1278 pmu->name = "i915";
1280 if (!pmu->name)
1283 pmu->events_attr_group.name = "events";
1284 pmu->events_attr_group.attrs = create_event_attributes(pmu);
1285 if (!pmu->events_attr_group.attrs)
1288 pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
1290 if (!pmu->base.attr_groups)
1293 pmu->base.module = THIS_MODULE;
1294 pmu->base.task_ctx_nr = perf_invalid_context;
1295 pmu->base.event_init = i915_pmu_event_init;
1296 pmu->base.add = i915_pmu_event_add;
1297 pmu->base.del = i915_pmu_event_del;
1298 pmu->base.start = i915_pmu_event_start;
1299 pmu->base.stop = i915_pmu_event_stop;
1300 pmu->base.read = i915_pmu_event_read;
1301 pmu->base.event_idx = i915_pmu_event_event_idx;
1303 ret = perf_pmu_register(&pmu->base, pmu->name, -1);
1307 ret = i915_pmu_register_cpuhp_state(pmu);
1314 perf_pmu_unregister(&pmu->base);
1316 kfree(pmu->base.attr_groups);
1318 pmu->base.event_init = NULL;
1319 free_event_attributes(pmu);
1322 kfree(pmu->name);
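Registration builds everything perf will dereference (name, attribute groups, callbacks) before perf_pmu_register() publishes the PMU, and adds the hotplug instance last; the error path unwinds in exactly the reverse order. A hedged, condensed sketch of that tail (the label names are assumptions; upstream also prints a notice on failure):

        ret = perf_pmu_register(&pmu->base, pmu->name, -1);
        if (ret)
                goto err_groups;

        ret = i915_pmu_register_cpuhp_state(pmu);
        if (ret)
                goto err_unreg;

        return;

err_unreg:
        perf_pmu_unregister(&pmu->base);
err_groups:
        kfree(pmu->base.attr_groups);
        pmu->base.event_init = NULL;
        free_event_attributes(pmu);
        /* ... any dynamically allocated name is freed as well ... */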
1329 struct i915_pmu *pmu = &i915->pmu;
1331 if (!pmu->base.event_init)
1339 pmu->closed = true;
1342 hrtimer_cancel(&pmu->timer);
1344 i915_pmu_unregister_cpuhp_state(pmu);
1346 perf_pmu_unregister(&pmu->base);
1347 pmu->base.event_init = NULL;
1348 kfree(pmu->base.attr_groups);
1350 kfree(pmu->name);
1351 free_event_attributes(pmu);
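Teardown is the inverse, with one extra step: pmu->closed is set first so concurrent callbacks bail out, the timer is cancelled, then the hotplug instance and the perf registration are removed before any memory is freed; clearing event_init also disarms the park/unpark hooks, which test it as the is-registered marker. A hedged reconstruction of the full function (the synchronize_rcu() and the conditional name kfree are recalled from upstream; the name is only dynamically allocated when the kasprintf() branch in the register path above was taken):

void i915_pmu_unregister(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;

        if (!pmu->base.event_init)
                return;

        /*
         * "Disconnect" the PMU callbacks: they are all atomic, so the
         * synchronize_rcu() below waits for in-flight ones to finish.
         */
        pmu->closed = true;
        synchronize_rcu();

        hrtimer_cancel(&pmu->timer);

        i915_pmu_unregister_cpuhp_state(pmu);

        perf_pmu_unregister(&pmu->base);
        pmu->base.event_init = NULL;
        kfree(pmu->base.attr_groups);
        if (IS_DGFX(i915))
                kfree(pmu->name);
        free_event_attributes(pmu);
}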