// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V performance counter support.
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 * This implementation is based on old RISC-V perf and ARM perf event code
 * which are in turn based on sparc64 and x86 code.
 */

#include <linux/cpumask.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/sched_clock.h>

#include <asm/sbi.h>

static bool riscv_perf_user_access(struct perf_event *event)
{
	return ((event->attr.type == PERF_TYPE_HARDWARE) ||
		(event->attr.type == PERF_TYPE_HW_CACHE) ||
		(event->attr.type == PERF_TYPE_RAW)) &&
		!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
		(event->hw.idx != -1);
}

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;
	userpg->cap_user_rdpmc = riscv_perf_user_access(event);

#ifdef CONFIG_RISCV_PMU
	/*
	 * The counters are 64-bit but the priv spec doesn't mandate all the
	 * bits to be implemented, so the counter width can vary based on
	 * the CPU vendor.
	 */
	if (userpg->cap_user_rdpmc)
		userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
#endif

	do {
		rd = sched_clock_read_begin(&seq);

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;

	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;

	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - see the
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}

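/*
 * Note: csr_read() needs the CSR number as a compile-time constant, since
 * the csrr instruction encodes it as an immediate. A counter number that
 * is only known at run time therefore has to be dispatched through a
 * switch covering every counter CSR; the nested macros below expand to
 * the 64 case labels (32 low-half plus 32 high-half counter CSRs).
 */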
static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val)		{\
	case __csr_num:					\
		__val = csr_read(__csr_num);		\
		break; }
#define switchcase_csr_read_2(__csr_num, __val)		{\
	switchcase_csr_read(__csr_num + 0, __val)	 \
	switchcase_csr_read(__csr_num + 1, __val)}
#define switchcase_csr_read_4(__csr_num, __val)		{\
	switchcase_csr_read_2(__csr_num + 0, __val)	 \
	switchcase_csr_read_2(__csr_num + 2, __val)}
#define switchcase_csr_read_8(__csr_num, __val)		{\
	switchcase_csr_read_4(__csr_num + 0, __val)	 \
	switchcase_csr_read_4(__csr_num + 4, __val)}
#define switchcase_csr_read_16(__csr_num, __val)	{\
	switchcase_csr_read_8(__csr_num + 0, __val)	 \
	switchcase_csr_read_8(__csr_num + 8, __val)}
#define switchcase_csr_read_32(__csr_num, __val)	{\
	switchcase_csr_read_16(__csr_num + 0, __val)	 \
	switchcase_csr_read_16(__csr_num + 16, __val)}

	unsigned long ret = 0;

	switch (csr_num) {
	switchcase_csr_read_32(CSR_CYCLE, ret)
	switchcase_csr_read_32(CSR_CYCLEH, ret)
	default:
		break;
	}

	return ret;
#undef switchcase_csr_read_32
#undef switchcase_csr_read_16
#undef switchcase_csr_read_8
#undef switchcase_csr_read_4
#undef switchcase_csr_read_2
#undef switchcase_csr_read
}

/*
 * Read the CSR of the corresponding counter.
 */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr)
{
	if (csr < CSR_CYCLE || csr > CSR_HPMCOUNTER31H ||
	   (csr > CSR_HPMCOUNTER31 && csr < CSR_CYCLEH)) {
		pr_err("Invalid performance counter csr %lx\n", csr);
		return -EINVAL;
	}

	return csr_read_num(csr);
}

u64 riscv_pmu_ctr_get_width_mask(struct perf_event *event)
{
	int cwidth;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == -1)
		/* Handle init case where idx is not initialized yet */
		cwidth = rvpmu->ctr_get_width(0);
	else
		cwidth = rvpmu->ctr_get_width(hwc->idx);

	return GENMASK_ULL(cwidth, 0);
}

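/*
 * Fold the current hardware count into the event. The cmpxchg loop makes
 * the read-and-update safe against a concurrent update (e.g. from an
 * overflow interrupt): if prev_count changed under us, retry with the new
 * value. The delta is masked to the implemented counter width, so a
 * counter that wrapped since the last read is still accounted correctly.
 */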
u64 riscv_pmu_event_update(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	unsigned long cmask;
	u64 oldval, delta;

	if (!rvpmu->ctr_read)
		return 0;

	cmask = riscv_pmu_ctr_get_width_mask(event);

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = rvpmu->ctr_read(event);
		oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					 new_raw_count);
	} while (oldval != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & cmask;
	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return delta;
}

void riscv_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		if (rvpmu->ctr_stop) {
			rvpmu->ctr_stop(event, 0);
			hwc->state |= PERF_HES_STOPPED;
		}
		riscv_pmu_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

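/*
 * Arm the counter for the next sample period: prev_count is set to
 * (u64)-left so that, once truncated to the counter width in
 * riscv_pmu_start(), the counter overflows after 'left' more increments.
 * For example (illustrative numbers only), with a 48-bit counter and
 * left == 1000 the start value is 2^48 - 1000, so the overflow condition
 * is reached on the 1000th event.
 */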
int riscv_pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * Limit the maximum period to prevent the counter value
	 * from overtaking the one we are about to program. In
	 * effect we are reducing max_period to account for
	 * interrupt latency (and we are being very conservative).
	 */
	if (left > (max_period >> 1))
		left = (max_period >> 1);

	local64_set(&hwc->prev_count, (u64)-left);

	perf_event_update_userpage(event);

	return overflow;
}

void riscv_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	uint64_t max_period = riscv_pmu_ctr_get_width_mask(event);
	u64 init_val;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	hwc->state = 0;
	riscv_pmu_event_set_period(event);
	init_val = local64_read(&hwc->prev_count) & max_period;
	rvpmu->ctr_start(event, init_val);
	perf_event_update_userpage(event);
}

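/*
 * add()/del() implement the perf core's counter-scheduling contract:
 * add() claims a free hardware counter for the event and optionally
 * starts it (PERF_EF_START); del() stops the event and releases the
 * counter index for reuse.
 */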
static int riscv_pmu_add(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	idx = rvpmu->ctr_get_idx(event);
	if (idx < 0)
		return idx;

	hwc->idx = idx;
	cpuc->events[idx] = event;
	cpuc->n_events++;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		riscv_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void riscv_pmu_del(struct perf_event *event, int flags)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;

	riscv_pmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[hwc->idx] = NULL;
	/* The firmware needs to reset the counter mapping */
	if (rvpmu->ctr_stop)
		rvpmu->ctr_stop(event, RISCV_PMU_STOP_FLAG_RESET);
	cpuc->n_events--;
	if (rvpmu->ctr_clear_idx)
		rvpmu->ctr_clear_idx(event);
	perf_event_update_userpage(event);
	hwc->idx = -1;
}

static void riscv_pmu_read(struct perf_event *event)
{
	riscv_pmu_event_update(event);
}

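/*
 * event_init() runs once per event, before any counter is claimed. The
 * back end's event_map() callback translates the generic type/config pair
 * into a hardware event encoding, and non-sampling events get a default
 * period so that the counter is re-armed well before it can wrap.
 */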
static int riscv_pmu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
	int mapped_event;
	u64 event_config = 0;
	uint64_t cmask;

	hwc->flags = 0;
	mapped_event = rvpmu->event_map(event, &event_config);
	if (mapped_event < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapped_event;
	}

	/*
	 * idx is set to -1 because the index of a general event should not be
	 * decided until binding to some counter in pmu->add().
	 * config will contain the counter CSR information and idx will
	 * contain the counter index.
	 */
	hwc->config = event_config;
	hwc->idx = -1;
	hwc->event_base = mapped_event;

	if (rvpmu->event_init)
		rvpmu->event_init(event);

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter's range. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		cmask = riscv_pmu_ctr_get_width_mask(event);
		hwc->sample_period = cmask >> 1;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

static int riscv_pmu_event_idx(struct perf_event *event)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
		return 0;

	if (rvpmu->csr_index)
		/* Userspace treats 0 as "unsupported", hence the +1 offset */
		return rvpmu->csr_index(event) + 1;

	return 0;
}

static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (rvpmu->event_mapped) {
		rvpmu->event_mapped(event, mm);
		perf_event_update_userpage(event);
	}
}

static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);

	if (rvpmu->event_unmapped) {
		rvpmu->event_unmapped(event, mm);
		perf_event_update_userpage(event);
	}
}

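/*
 * Allocate a riscv_pmu pre-wired with the common perf callbacks above.
 * The platform back end is expected to fill in the ctr_*()/event_map()
 * callbacks and register pmu->pmu with the perf core.
 */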
struct riscv_pmu *riscv_pmu_alloc(void)
{
	struct riscv_pmu *pmu;
	int cpuid, i;
	struct cpu_hw_events *cpuc;

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		goto out;

	pmu->hw_events = alloc_percpu_gfp(struct cpu_hw_events, GFP_KERNEL);
	if (!pmu->hw_events) {
		pr_info("failed to allocate per-cpu PMU data.\n");
		goto out_free_pmu;
	}

	for_each_possible_cpu(cpuid) {
		cpuc = per_cpu_ptr(pmu->hw_events, cpuid);
		cpuc->n_events = 0;
		for (i = 0; i < RISCV_MAX_COUNTERS; i++)
			cpuc->events[i] = NULL;
	}
	pmu->pmu = (struct pmu) {
		.event_init	= riscv_pmu_event_init,
		.event_mapped	= riscv_pmu_event_mapped,
		.event_unmapped	= riscv_pmu_event_unmapped,
		.event_idx	= riscv_pmu_event_idx,
		.add		= riscv_pmu_add,
		.del		= riscv_pmu_del,
		.start		= riscv_pmu_start,
		.stop		= riscv_pmu_stop,
		.read		= riscv_pmu_read,
	};

	return pmu;

out_free_pmu:
	kfree(pmu);
out:
	return NULL;
}

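/*
 * Usage sketch (illustrative only, not part of this file): a back end
 * such as the SBI or legacy PMU driver typically consumes this helper
 * along these lines, where the my_* callbacks are hypothetical:
 *
 *	struct riscv_pmu *pmu = riscv_pmu_alloc();
 *
 *	if (!pmu)
 *		return -ENOMEM;
 *	pmu->ctr_start = my_ctr_start;
 *	pmu->ctr_stop = my_ctr_stop;
 *	pmu->ctr_read = my_ctr_read;
 *	pmu->ctr_get_idx = my_ctr_get_idx;
 *	pmu->ctr_get_width = my_ctr_get_width;
 *	pmu->event_map = my_event_map;
 *	return perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
 */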