/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(const u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_gt_id(const u64 config)
{
	return config >> __I915_PMU_GT_SHIFT;
}

static u64 config_counter(const u64 config)
{
	return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
}

static unsigned int other_bit(const u64 config)
{
	unsigned int val;

	switch (config_counter(config)) {
	case I915_PMU_ACTUAL_FREQUENCY:
		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
		break;
	case I915_PMU_REQUESTED_FREQUENCY:
		val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED;
		break;
	case I915_PMU_RC6_RESIDENCY:
		val = __I915_PMU_RC6_RESIDENCY_ENABLED;
		break;
	default:
		/*
		 * Events that do not require sampling, or tracking state
		 * transitions between enabled and disabled can be ignored.
		 */
		return -1;
	}

	return I915_ENGINE_SAMPLE_COUNT +
	       config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
	       val;
}

static unsigned int config_bit(const u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return other_bit(config);
}

static u32 config_mask(const u64 config)
{
	unsigned int bit = config_bit(config);

	if (__builtin_constant_p(config))
		BUILD_BUG_ON(bit >
			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
							 enable)) - 1);
	else
		WARN_ON_ONCE(bit >
			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
							 enable)) - 1);

	return BIT(config_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_bit(struct perf_event *event)
{
	return config_bit(event->attr.config);
}

static u32 frequency_enabled_mask(void)
{
	unsigned int i;
	u32 mask = 0;

	for (i = 0; i < I915_PMU_MAX_GT; i++)
		mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
			config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));

	return mask;
}
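
/*
 * Note the layout of pmu->enable implied by config_bit() and other_bit()
 * above: the low I915_ENGINE_SAMPLE_COUNT bits are the engine sample
 * types (BUSY, WAIT, SEMA), followed by __I915_PMU_TRACKED_EVENT_COUNT
 * bits per GT for the global events which need their enabled state
 * tracked (actual/requested frequency and RC6 residency).
 */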
static bool pmu_needs_timer(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	u32 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = pmu->enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;

	/*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
	 */
	if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

static u64 __get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	u64 val;

	val = intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6);

	if (HAS_RC6p(i915))
		val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6p);

	if (HAS_RC6pp(i915))
		val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6pp);

	return val;
}

static inline s64 ktime_since_raw(const ktime_t kt)
{
	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}

static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
{
	return pmu->sample[gt_id][sample].cur;
}

static void
store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
{
	pmu->sample[gt_id][sample].cur = val;
}

static void
add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
{
	pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
}
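
/*
 * get_rc6 - return an up to date view of the cumulative RC6 residency.
 *
 * While the GT is awake the value is read straight from the hardware
 * counters via __get_rc6(). While parked it is estimated as the last
 * hardware reading plus the wall time elapsed since parking (see
 * park_rc6() below), on the assumption that a parked GT sits in RC6.
 * The result is clamped so the value reported to userspace never goes
 * backwards.
 */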
static u64 get_rc6(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int gt_id = gt->info.id;
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;
	bool awake = false;
	u64 val;

	if (intel_gt_pm_get_if_awake(gt)) {
		val = __get_rc6(gt);
		intel_gt_pm_put_async(gt);
		awake = true;
	}

	spin_lock_irqsave(&pmu->lock, flags);

	if (awake) {
		store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
	} else {
		/*
		 * We think we are runtime suspended.
		 *
		 * Report the delta from when the device was suspended to now,
		 * on top of the last known real value, as the approximated RC6
		 * counter value.
		 */
		val = ktime_since_raw(pmu->sleep_last[gt_id]);
		val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
	}

	if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
		val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
	else
		store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return val;
}

static void init_rc6(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
			u64 val = __get_rc6(gt);

			store_sample(pmu, i, __I915_SAMPLE_RC6, val);
			store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
				     val);
			pmu->sleep_last[i] = ktime_get_raw();
		}
	}
}

static void park_rc6(struct intel_gt *gt)
{
	struct i915_pmu *pmu = &gt->i915->pmu;

	store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
	pmu->sleep_last[gt->info.id] = ktime_get_raw();
}

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
	if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
		pmu->timer_enabled = true;
		pmu->timer_last = ktime_get();
		hrtimer_start_range_ns(&pmu->timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_parked(struct intel_gt *gt)
{
	struct i915_pmu *pmu = &gt->i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	park_rc6(gt);

	/*
	 * Signal sampling timer to stop if only engine events are enabled and
	 * GPU went idle.
	 */
	pmu->unparked &= ~BIT(gt->info.id);
	if (pmu->unparked == 0)
		pmu->timer_enabled = false;

	spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct intel_gt *gt)
{
	struct i915_pmu *pmu = &gt->i915->pmu;

	if (!pmu->base.event_init)
		return;

	spin_lock_irq(&pmu->lock);

	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	if (pmu->unparked == 0)
		__i915_pmu_maybe_start_timer(pmu);

	pmu->unparked |= BIT(gt->info.id);

	spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
	sample->cur += val;
}

static bool exclusive_mmio_access(const struct drm_i915_private *i915)
{
	/*
	 * We have to avoid concurrent mmio cache line access on gen7 or
	 * risk a machine hang. For a fun history lesson dig out the old
	 * userspace intel_gpu_top and run it on Ivybridge or Haswell!
	 */
	return GRAPHICS_VER(i915) == 7;
}
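
/*
 * Sample an engine's registers once per timer tick, charging the whole
 * period_ns to each sample bucket (WAIT, SEMA, BUSY) whose condition is
 * observed. The reads use ENGINE_READ_FW, i.e. without grabbing
 * forcewake, so an engine whose power well is off simply reads as idle.
 */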
static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
{
	struct intel_engine_pmu *pmu = &engine->pmu;
	bool busy;
	u32 val;

	val = ENGINE_READ_FW(engine, RING_CTL);
	if (val == 0) /* powerwell off => engine idle */
		return;

	if (val & RING_WAIT)
		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
	if (val & RING_WAIT_SEMAPHORE)
		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

	/* No need to sample when busy stats are supported. */
	if (intel_engine_supports_stats(engine))
		return;

	/*
	 * While waiting on a semaphore or event, MI_MODE reports the
	 * ring as idle. However, previously using the seqno, and with
	 * execlists sampling, we account for the ring waiting as the
	 * engine being busy. Therefore, we record the sample as being
	 * busy if either waiting or !idle.
	 */
	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
	if (!busy) {
		val = ENGINE_READ_FW(engine, RING_MI_MODE);
		busy = !(val & MODE_IDLE);
	}
	if (busy)
		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
}

static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long flags;

	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!intel_gt_pm_is_awake(gt))
		return;

	for_each_engine(engine, gt, id) {
		if (!engine->pmu.enable)
			continue;

		if (!intel_engine_pm_get_if_awake(engine))
			continue;

		if (exclusive_mmio_access(i915)) {
			spin_lock_irqsave(&engine->uncore->lock, flags);
			engine_sample(engine, period_ns);
			spin_unlock_irqrestore(&engine->uncore->lock, flags);
		} else {
			engine_sample(engine, period_ns);
		}

		intel_engine_pm_put_async(engine);
	}
}

static bool
frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
{
	return pmu->enable &
	       (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
		config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
}
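
/*
 * The frequency samples accumulate MHz * usec products. The readout
 * side (__i915_pmu_event_read() below) divides by USEC_PER_SEC, so the
 * perf counter advances by the average frequency in MHz for every
 * second of sampling.
 */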
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int gt_id = gt->info.id;
	struct i915_pmu *pmu = &i915->pmu;
	struct intel_rps *rps = &gt->rps;

	if (!frequency_sampling_enabled(pmu, gt_id))
		return;

	/* Report 0/0 (actual/requested) frequency while parked. */
	if (!intel_gt_pm_get_if_awake(gt))
		return;

	if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
		u32 val;

		/*
		 * We take a quick peek here without using forcewake
		 * so that we don't perturb the system under observation
		 * (forcewake => !rc6 => increased power use). We expect
		 * that if the read fails because it is outside of the
		 * mmio power well, then it will return 0 -- in which
		 * case we assume the system is running at the intended
		 * frequency. Fortunately, the read should rarely fail!
		 */
		val = intel_rps_read_actual_frequency_fw(rps);
		if (!val)
			val = intel_gpu_freq(rps, rps->cur_freq);

		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
				val, period_ns / 1000);
	}

	if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
				intel_rps_get_requested_frequency(rps),
				period_ns / 1000);
	}

	intel_gt_pm_put_async(gt);
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned int period_ns;
	struct intel_gt *gt;
	unsigned int i;
	ktime_t now;

	if (!READ_ONCE(pmu->timer_enabled))
		return HRTIMER_NORESTART;

	now = ktime_get();
	period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
	pmu->timer_last = now;

	/*
	 * Strictly speaking the passed in period may not be 100% accurate for
	 * all internal calculation, since some amount of time can be spent on
	 * grabbing the forcewake. However, the potential error from timer
	 * callback delay greatly dominates this, so we keep it simple.
	 */

	for_each_gt(gt, i915, i) {
		if (!(pmu->unparked & BIT(i)))
			continue;

		engines_sample(gt, period_ns);
		frequency_sample(gt, period_ns);
	}

	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

	return HRTIMER_RESTART;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);

	drm_WARN_ON(&i915->drm, event->parent);

	drm_dev_put(&i915->drm);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (GRAPHICS_VER(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}
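
/*
 * Validate a non-engine event config: the GT id must select an existing
 * GT and the chosen counter must be supported on this platform.
 */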
static int
config_status(struct drm_i915_private *i915, u64 config)
{
	struct intel_gt *gt = to_gt(i915);

	unsigned int gt_id = config_gt_id(config);
	unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;

	if (gt_id > max_gt_id)
		return -ENOENT;

	switch (config_counter(config)) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		fallthrough;
	case I915_PMU_REQUESTED_FREQUENCY:
		if (GRAPHICS_VER(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		if (gt_id)
			return -ENOENT;
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!gt->rc6.supported)
			return -ENODEV;
		break;
	case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	int ret;

	if (pmu->closed)
		return -ENODEV;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent) {
		drm_dev_get(&i915->drm);
		event->destroy = i915_pmu_event_destroy;
	}

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   intel_engine_supports_stats(engine)) {
			ktime_t unused;

			val = ktime_to_ns(intel_engine_get_busy_time(engine,
								     &unused));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		const unsigned int gt_id = config_gt_id(event->attr.config);
		const u64 config = config_counter(event->attr.config);

		switch (config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(read_sample(pmu, gt_id,
					       __I915_SAMPLE_FREQ_ACT),
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(read_sample(pmu, gt_id,
					       __I915_SAMPLE_FREQ_REQ),
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = READ_ONCE(pmu->irq_count);
			break;
		case I915_PMU_RC6_RESIDENCY:
			val = get_rc6(i915->gt[gt_id]);
			break;
		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
			val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
			break;
		}
	}

	return val;
}
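
/*
 * Readout updates event->count with a lock-free cmpxchg loop on
 * hw.prev_count, so concurrent readers of the same event each add only
 * the delta they themselves observed.
 */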
static void i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct hw_perf_event *hwc = &event->hw;
	struct i915_pmu *pmu = &i915->pmu;
	u64 prev, new;

	if (pmu->closed) {
		event->hw.state = PERF_HES_STOPPED;
		return;
	}
again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	const unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		goto update;

	spin_lock_irqsave(&pmu->lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

	pmu->enable |= BIT(bit);
	pmu->enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(pmu);

	/*
	 * For per-engine events the bitmask and reference counting
	 * is stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
			     I915_ENGINE_SAMPLE_COUNT);
		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
			     I915_ENGINE_SAMPLE_COUNT);
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

		engine->pmu.enable |= BIT(sample);
		engine->pmu.enable_count[sample]++;
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

update:
	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners, even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}
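
/*
 * Undo i915_pmu_enable(): drop the global (and, for engine events, the
 * per-engine) reference counts, clearing the corresponding enable bit
 * when the last listener goes away, and let the sampling timer lapse if
 * it is no longer needed.
 */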
static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	const unsigned int bit = event_bit(event);
	struct i915_pmu *pmu = &i915->pmu;
	unsigned long flags;

	if (bit == -1)
		return;

	spin_lock_irqsave(&pmu->lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0)
			engine->pmu.enable &= ~BIT(sample);
	}

	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
	GEM_BUG_ON(pmu->enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--pmu->enable_count[bit] == 0) {
		pmu->enable &= ~BIT(bit);
		pmu->timer_enabled &= pmu_needs_timer(pmu);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return;

	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		goto out;

	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);

out:
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct i915_pmu *pmu = &i915->pmu;

	if (pmu->closed)
		return -ENODEV;

	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR_RO(cpumask);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};
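
/*
 * Helpers describing the events exported through the PMU's sysfs
 * "events" directory, consumed by create_event_attributes() below.
 */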
#define __event(__counter, __name, __unit) \
{ \
	.counter = (__counter), \
	.name = (__name), \
	.unit = (__unit), \
	.global = false, \
}

#define __global_event(__counter, __name, __unit) \
{ \
	.counter = (__counter), \
	.name = (__name), \
	.unit = (__unit), \
	.global = true, \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}
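
/*
 * Build the event attribute table in two passes: first count the events
 * supported on this device, then allocate the attribute arrays and
 * populate them, skipping anything config_status() or
 * engine_event_status() rejects. On multi-GT devices, non-global event
 * names gain a "-gt<N>" suffix.
 */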
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
	static const struct {
		unsigned int counter;
		const char *name;
		const char *unit;
		bool global;
	} events[] = {
		__event(0, "actual-frequency", "M"),
		__event(1, "requested-frequency", "M"),
		__global_event(2, "interrupts", NULL),
		__event(3, "rc6-residency", "ns"),
		__event(4, "software-gt-awake-time", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	struct intel_gt *gt;
	unsigned int i, j;

	/* Count how many counters we will be exposing. */
	for_each_gt(gt, i915, j) {
		for (i = 0; i < ARRAY_SIZE(events); i++) {
			u64 config = ___I915_PMU_OTHER(j, events[i].counter);

			if (!config_status(i915, config))
				count++;
		}
	}

	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for_each_gt(gt, i915, j) {
		for (i = 0; i < ARRAY_SIZE(events); i++) {
			u64 config = ___I915_PMU_OTHER(j, events[i].counter);
			char *str;

			if (config_status(i915, config))
				continue;

			if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
				str = kstrdup(events[i].name, GFP_KERNEL);
			else
				str = kasprintf(GFP_KERNEL, "%s-gt%u",
						events[i].name, j);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter = add_i915_attr(i915_iter, str, config);

			if (events[i].unit) {
				if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
					str = kasprintf(GFP_KERNEL, "%s.unit",
							events[i].name);
				else
					str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
							events[i].name, j);
				if (!str)
					goto err;

				*attr_iter++ = &pmu_iter->attr.attr;
				pmu_iter = add_pmu_attr(pmu_iter, str,
							events[i].unit);
			}
		}
	}

	/* Initialize supported engine counters. */
	for_each_uabi_engine(engine, i915) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->uabi_class,
								engine->uabi_instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	pmu->i915_attr = i915_attr;
	pmu->pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
	struct attribute **attr_iter = pmu->events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(pmu->events_attr_group.attrs);
	kfree(pmu->i915_attr);
	kfree(pmu->pmu_attr);

	pmu->events_attr_group.attrs = NULL;
	pmu->i915_attr = NULL;
	pmu->pmu_attr = NULL;
}

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (cpumask_empty(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}
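
/*
 * CPU hotplug callback for the designated reader CPU going offline:
 * promote one of its siblings to be the new reader and migrate the perf
 * context over, so events keep counting across the hotplug.
 */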
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
	unsigned int target = i915_pmu_target_cpu;

	GEM_BUG_ON(!pmu->base.event_init);

	/*
	 * Unregistering an instance generates a CPU offline event which we must
	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
	 */
	if (pmu->closed)
		return 0;

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);

		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			i915_pmu_target_cpu = target;
		}
	}

	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
		perf_pmu_migrate_context(&pmu->base, cpu, target);
		pmu->cpuhp.cpu = target;
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

int i915_pmu_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
			  ret);
	else
		cpuhp_slot = ret;

	return 0;
}

void i915_pmu_exit(void)
{
	if (cpuhp_slot != CPUHP_INVALID)
		cpuhp_remove_multi_state(cpuhp_slot);
}

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
	if (cpuhp_slot == CPUHP_INVALID)
		return -EINVAL;

	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
}

static bool is_igp(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);

	/* IGP is 0000:00:02.0 */
	return pci_domain_nr(pdev->bus) == 0 &&
	       pdev->bus->number == 0 &&
	       PCI_SLOT(pdev->devfn) == 2 &&
	       PCI_FUNC(pdev->devfn) == 0;
}
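
/*
 * i915_pmu_register - set up the perf PMU for a device.
 *
 * Builds the sysfs attribute groups, registers the perf PMU (named
 * "i915" for the integrated GPU, "i915_<pci-address>" otherwise) and
 * hooks up CPU hotplug handling. Failure is not fatal to the driver,
 * it only means the PMU interface is unavailable.
 */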
void i915_pmu_register(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;
	const struct attribute_group *attr_groups[] = {
		&i915_pmu_format_attr_group,
		&pmu->events_attr_group,
		&i915_pmu_cpumask_attr_group,
		NULL
	};

	int ret = -ENOMEM;

	if (GRAPHICS_VER(i915) <= 2) {
		drm_info(&i915->drm, "PMU not supported for this GPU.");
		return;
	}

	spin_lock_init(&pmu->lock);
	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pmu->timer.function = i915_sample;
	pmu->cpuhp.cpu = -1;
	init_rc6(pmu);

	if (!is_igp(i915)) {
		pmu->name = kasprintf(GFP_KERNEL,
				      "i915_%s",
				      dev_name(i915->drm.dev));
		if (pmu->name) {
			/* tools/perf reserves colons as special. */
			strreplace((char *)pmu->name, ':', '_');
		}
	} else {
		pmu->name = "i915";
	}
	if (!pmu->name)
		goto err;

	pmu->events_attr_group.name = "events";
	pmu->events_attr_group.attrs = create_event_attributes(pmu);
	if (!pmu->events_attr_group.attrs)
		goto err_name;

	pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);
	if (!pmu->base.attr_groups)
		goto err_attr;

	pmu->base.module = THIS_MODULE;
	pmu->base.task_ctx_nr = perf_invalid_context;
	pmu->base.event_init = i915_pmu_event_init;
	pmu->base.add = i915_pmu_event_add;
	pmu->base.del = i915_pmu_event_del;
	pmu->base.start = i915_pmu_event_start;
	pmu->base.stop = i915_pmu_event_stop;
	pmu->base.read = i915_pmu_event_read;
	pmu->base.event_idx = i915_pmu_event_event_idx;

	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
	if (ret)
		goto err_groups;

	ret = i915_pmu_register_cpuhp_state(pmu);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&pmu->base);
err_groups:
	kfree(pmu->base.attr_groups);
err_attr:
	pmu->base.event_init = NULL;
	free_event_attributes(pmu);
err_name:
	if (!is_igp(i915))
		kfree(pmu->name);
err:
	drm_notice(&i915->drm, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct i915_pmu *pmu = &i915->pmu;

	if (!pmu->base.event_init)
		return;

	/*
	 * "Disconnect" the PMU callbacks - since all are atomic, synchronize_rcu
	 * ensures all currently executing ones will have exited before we
	 * proceed with unregistration.
	 */
	pmu->closed = true;
	synchronize_rcu();

	hrtimer_cancel(&pmu->timer);

	i915_pmu_unregister_cpuhp_state(pmu);

	perf_pmu_unregister(&pmu->base);
	pmu->base.event_init = NULL;
	kfree(pmu->base.attr_groups);
	if (!is_igp(i915))
		kfree(pmu->name);
	free_event_attributes(pmu);
}