// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>

#ifdef CONFIG_PPC64
#include "internal.h"
#endif

#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFCUL

struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	struct mmcr_regs mmcr;
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int txn_flags;
	int n_txn_start;

	/* BHRB bits */
	u64 bhrb_filter;	/* BHRB HW branch filter */
	unsigned int bhrb_users;
	void *bhrb_context;
	struct perf_branch_stack bhrb_stack;
	struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
	u64 ic_init;
};

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;
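
/*
 * Note (an assumption based on the comment above; the code that does this
 * is outside this excerpt): when the kernel itself runs with MSR[HV] set,
 * PMU registration is expected to switch this to MMCR0_FCHV so that
 * excluding "kernel" events freezes the mode the kernel actually runs in.
 */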

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 * Also 32-bit doesn't have MMCR3, SIER2 and SIER3.
 * Define them as zero knowing that any code path accessing
 * these registers (via mtspr/mfspr) is done under a ppmu flag
 * check for PPMU_ARCH_31, so we will not enter that code path
 * for 32-bit.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define MMCR0_FC56		0
#define MMCR0_PMAO		0
#define MMCR0_EBE		0
#define MMCR0_BHRBA		0
#define MMCR0_PMCC		0
#define MMCR0_PMCC_U6		0

#define SPRN_MMCRA		SPRN_MMCR2
#define SPRN_MMCR3		0
#define SPRN_SIER2		0
#define SPRN_SIER3		0
#define MMCRA_SAMPLE_ENABLE	0
#define MMCRA_BHRB_DISABLE	0
#define MMCR0_PMCCEXT		0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	return cpuhw->mmcr.mmcr0;
}

static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */

bool is_sier_available(void)
{
	if (!ppmu)
		return false;

	if (ppmu->flags & PPMU_HAS_SIER)
		return true;

	return false;
}

static bool regs_use_siar(struct pt_regs *regs)
{
	/*
	 * When we take a performance monitor exception the regs are setup
	 * using perf_read_regs() which overloads some fields, in particular
	 * regs->result to tell us whether to use SIAR.
	 *
	 * However if the regs are from another exception, eg. a syscall, then
	 * they have not been setup using perf_read_regs() and so regs->result
	 * is something random.
	 */
	return ((TRAP(regs) == 0xf00) && regs->result);
}

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}
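
/*
 * Worked example (illustrative values, not from the original source):
 * with instruction sampling enabled and MMCRA[SLOT] = 3, the sampled
 * instruction sits two instruction slots past SIAR, so perf_ip_adjust()
 * returns 4 * (3 - 1) = 8 bytes, which the caller is expected to add to
 * SIAR when reporting the instruction pointer.
 */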

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 */
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;

	if (ppmu->flags & PPMU_HAS_SIER)
		sdar_valid = regs->dar & SIER_SDAR_VALID;
	else {
		unsigned long sdsync;

		if (ppmu->flags & PPMU_SIAR_VALID)
			sdsync = POWER7P_MMCRA_SDAR_VALID;
		else if (ppmu->flags & PPMU_ALT_SIPR)
			sdsync = POWER6_MMCRA_SDSYNC;
		else if (ppmu->flags & PPMU_NO_SIAR)
			sdsync = MMCRA_SAMPLE_ENABLE;
		else
			sdsync = MMCRA_SDSYNC;

		sdar_valid = mmcra & sdsync;
	}

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);

	if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
		*addrp = 0;
}

static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}

static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}

static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (is_kernel_addr(siar))
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_HAS_SIER)
		regs->dar = mfspr(SPRN_SIER);

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if ((ppmu->flags & PPMU_NO_SIAR))
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result = use_siar;
}

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if (marked) {
		if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;

		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	return 1;
}


/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}

static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
	perf_sched_cb_inc(event->ctx->pmu);
}

static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	WARN_ON_ONCE(!cpuhw->bhrb_users);
	cpuhw->bhrb_users--;
	perf_sched_cb_dec(event->ctx->pmu);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}

/* Called from ctxsw to prevent one process's branch entries from
 * mingling with the other process's entries during context switch.
 */
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	if (!ppmu->bhrb_nr)
		return;

	if (sched_in)
		power_pmu_bhrb_reset();
}
/* Calculate the to address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	__u64 target;

	if (is_kernel_addr(addr)) {
		if (copy_from_kernel_nofault(&instr, (void *)addr,
				sizeof(instr)))
			return 0;

		return branch_target((struct ppc_inst *)&instr);
	}

	/* Userspace: need copy instruction here then translate it */
	if (copy_from_user_nofault(&instr, (unsigned int __user *)addr,
			sizeof(instr)))
		return 0;

	target = branch_target((struct ppc_inst *)&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}
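
/*
 * Illustrative example for the relative-branch case above (hypothetical
 * addresses): the instruction word is copied from user address addr into
 * the kernel variable instr, so branch_target() returns a target relative
 * to &instr. Adding (addr - &instr) rebases it to the user view; e.g. a
 * branch at addr = 0x10000100 with a +0x40 displacement yields 0x10000140.
 */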

/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: End of valid BHRB entries */
			break;
		else {
			addr = val & BHRB_EA;
			pred = val & BHRB_PREDICTION;

			if (!addr)
				/* invalid entry */
				continue;

			/*
			 * The BHRB rolling buffer could very well contain kernel
			 * addresses at this point. Check the privileges before
			 * exporting them to userspace (avoid exposure of regions
			 * where we could have speculative execution).
			 * In case of ISA v3.1, BHRB will capture only user-space
			 * addresses, hence include a check before filtering code.
			 */
			if (!(ppmu->flags & PPMU_ARCH_31) &&
				is_kernel_addr(addr) && event->attr.exclude_kernel)
				continue;

			/* Branches are read most recent first (ie. mfbhrb 0 is
			 * the most recent branch).
			 * There are two types of valid entries:
			 * 1) a target entry which is the to address of a
			 *    computed goto like a blr,bctr,btar.  The next
			 *    entry read from the bhrb will be branch
			 *    corresponding to this target (ie. the actual
			 *    blr/bctr/btar instruction).
			 * 2) a from address which is an actual branch.  If a
			 *    target entry precedes this, then this is the
			 *    matching branch for that target.  If this is not
			 *    following a target entry, then this is a branch
			 *    where the target is given as an immediate field
			 *    in the instruction (ie. an i or b form branch).
			 *    In this case we need to read the instruction from
			 *    memory to determine the target/to address.
			 */

			if (val & BHRB_TARGET) {
				/* Target branches use two entries
				 * (ie. computed gotos/XL form)
				 */
				cpuhw->bhrb_entries[u_index].to = addr;
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;

				/* Get from address in next entry */
				val = read_bhrb(r_index++);
				addr = val & BHRB_EA;
				if (val & BHRB_TARGET) {
					/* Shouldn't have two targets in a
					   row.. Reset index and try again */
					r_index--;
					addr = 0;
				}
				cpuhw->bhrb_entries[u_index].from = addr;
			} else {
				/* Branches to immediate field
				   (ie I or B form) */
				cpuhw->bhrb_entries[u_index].from = addr;
				cpuhw->bhrb_entries[u_index].to =
					power_pmu_bhrb_to(addr);
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;
			}
			u_index++;

		}
	}
	cpuhw->bhrb_stack.nr = u_index;
	cpuhw->bhrb_stack.hw_idx = -1ULL;
	return;
}

static bool is_ebb_event(struct perf_event *event)
{
	/*
	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
	 * check that the PMU supports EBB, meaning those that don't can still
	 * use bit 63 of the event code for something else if they wish.
	 */
	return (ppmu->flags & PPMU_ARCH_207S) &&
	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}

static int ebb_event_check(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	/* Event and group leader must agree on EBB */
	if (is_ebb_event(leader) != is_ebb_event(event))
		return -EINVAL;

	if (is_ebb_event(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;

		if (!leader->attr.pinned || !leader->attr.exclusive)
			return -EINVAL;

		if (event->attr.freq ||
		    event->attr.inherit ||
		    event->attr.sample_type ||
		    event->attr.sample_period ||
		    event->attr.enable_on_exec)
			return -EINVAL;
	}

	return 0;
}
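
/*
 * Illustrative usage (an assumption, not taken from this file): userspace
 * requests an EBB event by setting bit 63 of the event code, i.e.
 * attr.config |= 1ULL << PERF_EVENT_CONFIG_EBB_SHIFT, on a pinned,
 * exclusive, per-task group leader, which is the combination that
 * ebb_event_check() above enforces.
 */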

static void ebb_event_add(struct perf_event *event)
{
	if (!is_ebb_event(event) || current->thread.used_ebb)
		return;

	/*
	 * IFF this is the first time we've added an EBB event, set
	 * PMXE in the user MMCR0 so we can detect when it's cleared by
	 * userspace. We need this so that we can context switch while
	 * userspace is in the EBB handler (where PMXE is 0).
	 */
	current->thread.used_ebb = 1;
	current->thread.mmcr0 |= MMCR0_PMXE;
}

static void ebb_switch_out(unsigned long mmcr0)
{
	if (!(mmcr0 & MMCR0_EBE))
		return;

	current->thread.siar  = mfspr(SPRN_SIAR);
	current->thread.sier  = mfspr(SPRN_SIER);
	current->thread.sdar  = mfspr(SPRN_SDAR);
	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
	if (ppmu->flags & PPMU_ARCH_31) {
		current->thread.mmcr3 = mfspr(SPRN_MMCR3);
		current->thread.sier2 = mfspr(SPRN_SIER2);
		current->thread.sier3 = mfspr(SPRN_SIER3);
	}
}

static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	unsigned long mmcr0 = cpuhw->mmcr.mmcr0;

	if (!ebb)
		goto out;

	/* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
	mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;

	/*
	 * Add any bits from the user MMCR0, FC or PMAO. This is compatible
	 * with pmao_restore_workaround() because we may add PMAO but we never
	 * clear it here.
	 */
	mmcr0 |= current->thread.mmcr0;

	/*
	 * Be careful not to set PMXE if userspace had it cleared. This is also
	 * compatible with pmao_restore_workaround() because it has already
	 * cleared PMXE and we leave PMAO alone.
	 */
	if (!(current->thread.mmcr0 & MMCR0_PMXE))
		mmcr0 &= ~MMCR0_PMXE;

	mtspr(SPRN_SIAR, current->thread.siar);
	mtspr(SPRN_SIER, current->thread.sier);
	mtspr(SPRN_SDAR, current->thread.sdar);

	/*
	 * Merge the kernel & user values of MMCR2. The semantics we implement
	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
	 * but not clear bits. If a task wants to be able to clear bits, ie.
	 * unfreeze counters, it should not set exclude_xxx in its events and
	 * instead manage the MMCR2 entirely by itself.
	 */
	mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2);

	if (ppmu->flags & PPMU_ARCH_31) {
		mtspr(SPRN_MMCR3, current->thread.mmcr3);
		mtspr(SPRN_SIER2, current->thread.sier2);
		mtspr(SPRN_SIER3, current->thread.sier3);
	}
out:
	return mmcr0;
}

static void pmao_restore_workaround(bool ebb)
{
	unsigned pmcs[6];

	if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
		return;

	/*
	 * On POWER8E there is a hardware defect which affects the PMU context
	 * switch logic, ie. power_pmu_disable/enable().
	 *
	 * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
	 * by the hardware. Sometime later the actual PMU exception is
	 * delivered.
	 *
	 * If we context switch, or simply disable/enable, the PMU prior to the
	 * exception arriving, the exception will be lost when we clear PMAO.
	 *
	 * When we reenable the PMU, we will write the saved MMCR0 with PMAO
	 * set, and this _should_ generate an exception. However because of the
	 * defect no exception is generated when we write PMAO, and we get
	 * stuck with no counters counting but no exception delivered.
	 *
	 * The workaround is to detect this case and tweak the hardware to
	 * create another pending PMU exception.
	 *
	 * We do that by setting up PMC6 (cycles) for an imminent overflow and
	 * enabling the PMU. That causes a new exception to be generated in the
	 * chip, but we don't take it yet because we have interrupts hard
	 * disabled. We then write back the PMU state as we want it to be seen
	 * by the exception handler. When we reenable interrupts the exception
	 * handler will be called and see the correct state.
	 *
	 * The logic is the same for EBB, except that the exception is gated by
	 * us having interrupts hard disabled as well as the fact that we are
	 * not in userspace. The exception is finally delivered when we return
	 * to userspace.
	 */

	/* Only if PMAO is set and PMAO_SYNC is clear */
	if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
		return;

	/* If we're doing EBB, only if BESCR[GE] is set */
	if (ebb && !(current->thread.bescr & BESCR_GE))
		return;

	/*
	 * We are already soft-disabled in power_pmu_enable(). We need to hard
	 * disable to actually prevent the PMU exception from firing.
	 */
	hard_irq_disable();

	/*
	 * This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
	 * Using read/write_pmc() in a for loop adds 12 function calls and
	 * almost doubles our code size.
	 */
	pmcs[0] = mfspr(SPRN_PMC1);
	pmcs[1] = mfspr(SPRN_PMC2);
	pmcs[2] = mfspr(SPRN_PMC3);
	pmcs[3] = mfspr(SPRN_PMC4);
	pmcs[4] = mfspr(SPRN_PMC5);
	pmcs[5] = mfspr(SPRN_PMC6);

	/* Ensure all freeze bits are unset */
	mtspr(SPRN_MMCR2, 0);

	/* Set up PMC6 to overflow in one cycle */
	mtspr(SPRN_PMC6, 0x7FFFFFFE);

	/* Enable exceptions and unfreeze PMC6 */
	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);

	/* Now we need to refreeze and restore the PMCs */
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);

	mtspr(SPRN_PMC1, pmcs[0]);
	mtspr(SPRN_PMC2, pmcs[1]);
	mtspr(SPRN_PMC3, pmcs[2]);
	mtspr(SPRN_PMC4, pmcs[3]);
	mtspr(SPRN_PMC5, pmcs[4]);
	mtspr(SPRN_PMC6, pmcs[5]);
}

#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
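
/*
 * Note on the test in any_pmc_overflown() below: counters are normally
 * programmed with (0x80000000 - period), so an overflow shows up as bit 31
 * of the PMC value being set; casting the 32-bit counter value to int and
 * testing for < 0 is a cheap way of checking that bit.
 */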

static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
{
	int i, idx;

	for (i = 0; i < cpuhw->n_events; i++) {
		idx = cpuhw->event[i]->hw.idx;
		if ((idx) && ((int)read_pmc(idx) < 0))
			return idx;
	}

	return 0;
}

/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
	unsigned long sdar, sier, flags;
	u32 pmcs[MAX_HWEVENTS];
	int i;

	if (!ppmu) {
		pr_info("Performance monitor hardware not registered.\n");
		return;
	}

	if (!ppmu->n_counter)
		return;

	local_irq_save(flags);

	pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d",
		 smp_processor_id(), ppmu->name, ppmu->n_counter);

	for (i = 0; i < ppmu->n_counter; i++)
		pmcs[i] = read_pmc(i + 1);

	for (; i < MAX_HWEVENTS; i++)
		pmcs[i] = 0xdeadbeef;

	pr_info("PMC1:  %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
		 pmcs[0], pmcs[1], pmcs[2], pmcs[3]);

	if (ppmu->n_counter > 4)
		pr_info("PMC5:  %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
			 pmcs[4], pmcs[5], pmcs[6], pmcs[7]);

	pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n",
		mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA));

	sdar = sier = 0;
#ifdef CONFIG_PPC64
	sdar = mfspr(SPRN_SDAR);

	if (ppmu->flags & PPMU_HAS_SIER)
		sier = mfspr(SPRN_SIER);

	if (ppmu->flags & PPMU_ARCH_207S) {
		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
		pr_info("EBBRR: %016lx BESCR: %016lx\n",
			mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
	}

	if (ppmu->flags & PPMU_ARCH_31) {
		pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n",
			mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3));
	}
#endif
	pr_info("SIAR:  %016lx SDAR:  %016lx SIER:  %016lx\n",
		mfspr(SPRN_SIAR), sdar, sier);

	local_irq_restore(flags);
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;
	unsigned long grp_mask = ppmu->group_constraint_mask;
	unsigned long grp_val = ppmu->group_constraint_val;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);

		if (((((nv + tadd) ^ value) & mask) & (~grp_mask)) != 0)
			break;

		if (((((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0])
			& (~grp_mask)) != 0)
			break;

		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev) {
		if ((value & mask & grp_mask) != (mask & grp_val))
			return -1;
		else
			return 0;	/* all OK */
	}

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}

/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	/*
	 * If the PMU we're on supports per event exclude settings then we
	 * don't need to do any of this logic. NB. This assumes no PMU has both
	 * per event exclude and limited PMCs.
	 */
	if (ppmu->flags & PPMU_ARCH_207S)
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
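
/*
 * Worked example for check_and_compute_delta() below (illustrative
 * values): if prev = 0xfffffff0 and the counter wrapped to
 * val = 0x00000010, the masked difference (val - prev) & 0xffffffff is
 * 0x20, i.e. 32 new events. A POWER7-style rollback such as
 * prev = 0x1000, val = 0x0ffc (4 less, within 256) is treated as no
 * progress and returns 0.
 */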

static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once.  If we detect a rollback
	 * return 0.  This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;

	if (is_ebb_event(event)) {
		val = read_pmc(event->hw.idx);
		local64_set(&event->hw.prev_count, val);
		return;
	}

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);

	/*
	 * A number of places program the PMC with (0x80000000 - period_left).
	 * We never want period_left to be less than 1 because we will program
	 * the PMC with a value >= 0x80000000 and an edge detected PMC will
	 * roll around to 0 before taking an exception. We have seen this
	 * on POWER8.
	 *
	 * To fix this, clamp the minimum value of period_left to 1.
	 */
	do {
		prev = local64_read(&event->hw.period_left);
		val = prev - delta;
		if (val < 1)
			val = 1;
	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
11288c2ecf20Sopenharmony_ci */ 11298c2ecf20Sopenharmony_cistatic int is_limited_pmc(int pmcnum) 11308c2ecf20Sopenharmony_ci{ 11318c2ecf20Sopenharmony_ci return (ppmu->flags & PPMU_LIMITED_PMC5_6) 11328c2ecf20Sopenharmony_ci && (pmcnum == 5 || pmcnum == 6); 11338c2ecf20Sopenharmony_ci} 11348c2ecf20Sopenharmony_ci 11358c2ecf20Sopenharmony_cistatic void freeze_limited_counters(struct cpu_hw_events *cpuhw, 11368c2ecf20Sopenharmony_ci unsigned long pmc5, unsigned long pmc6) 11378c2ecf20Sopenharmony_ci{ 11388c2ecf20Sopenharmony_ci struct perf_event *event; 11398c2ecf20Sopenharmony_ci u64 val, prev, delta; 11408c2ecf20Sopenharmony_ci int i; 11418c2ecf20Sopenharmony_ci 11428c2ecf20Sopenharmony_ci for (i = 0; i < cpuhw->n_limited; ++i) { 11438c2ecf20Sopenharmony_ci event = cpuhw->limited_counter[i]; 11448c2ecf20Sopenharmony_ci if (!event->hw.idx) 11458c2ecf20Sopenharmony_ci continue; 11468c2ecf20Sopenharmony_ci val = (event->hw.idx == 5) ? pmc5 : pmc6; 11478c2ecf20Sopenharmony_ci prev = local64_read(&event->hw.prev_count); 11488c2ecf20Sopenharmony_ci event->hw.idx = 0; 11498c2ecf20Sopenharmony_ci delta = check_and_compute_delta(prev, val); 11508c2ecf20Sopenharmony_ci if (delta) 11518c2ecf20Sopenharmony_ci local64_add(delta, &event->count); 11528c2ecf20Sopenharmony_ci } 11538c2ecf20Sopenharmony_ci} 11548c2ecf20Sopenharmony_ci 11558c2ecf20Sopenharmony_cistatic void thaw_limited_counters(struct cpu_hw_events *cpuhw, 11568c2ecf20Sopenharmony_ci unsigned long pmc5, unsigned long pmc6) 11578c2ecf20Sopenharmony_ci{ 11588c2ecf20Sopenharmony_ci struct perf_event *event; 11598c2ecf20Sopenharmony_ci u64 val, prev; 11608c2ecf20Sopenharmony_ci int i; 11618c2ecf20Sopenharmony_ci 11628c2ecf20Sopenharmony_ci for (i = 0; i < cpuhw->n_limited; ++i) { 11638c2ecf20Sopenharmony_ci event = cpuhw->limited_counter[i]; 11648c2ecf20Sopenharmony_ci event->hw.idx = cpuhw->limited_hwidx[i]; 11658c2ecf20Sopenharmony_ci val = (event->hw.idx == 5) ? pmc5 : pmc6; 11668c2ecf20Sopenharmony_ci prev = local64_read(&event->hw.prev_count); 11678c2ecf20Sopenharmony_ci if (check_and_compute_delta(prev, val)) 11688c2ecf20Sopenharmony_ci local64_set(&event->hw.prev_count, val); 11698c2ecf20Sopenharmony_ci perf_event_update_userpage(event); 11708c2ecf20Sopenharmony_ci } 11718c2ecf20Sopenharmony_ci} 11728c2ecf20Sopenharmony_ci 11738c2ecf20Sopenharmony_ci/* 11748c2ecf20Sopenharmony_ci * Since limited events don't respect the freeze conditions, we 11758c2ecf20Sopenharmony_ci * have to read them immediately after freezing or unfreezing the 11768c2ecf20Sopenharmony_ci * other events. We try to keep the values from the limited 11778c2ecf20Sopenharmony_ci * events as consistent as possible by keeping the delay (in 11788c2ecf20Sopenharmony_ci * cycles and instructions) between freezing/unfreezing and reading 11798c2ecf20Sopenharmony_ci * the limited events as small and consistent as possible. 11808c2ecf20Sopenharmony_ci * Therefore, if any limited events are in use, we read them 11818c2ecf20Sopenharmony_ci * both, and always in the same order, to minimize variability, 11828c2ecf20Sopenharmony_ci * and do it inside the same asm that writes MMCR0. 
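 * (Doing the MMCR0 write and both PMC reads in one asm statement also
 * keeps the compiler from scheduling unrelated instructions between
 * them.)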
11838c2ecf20Sopenharmony_ci */ 11848c2ecf20Sopenharmony_cistatic void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) 11858c2ecf20Sopenharmony_ci{ 11868c2ecf20Sopenharmony_ci unsigned long pmc5, pmc6; 11878c2ecf20Sopenharmony_ci 11888c2ecf20Sopenharmony_ci if (!cpuhw->n_limited) { 11898c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR0, mmcr0); 11908c2ecf20Sopenharmony_ci return; 11918c2ecf20Sopenharmony_ci } 11928c2ecf20Sopenharmony_ci 11938c2ecf20Sopenharmony_ci /* 11948c2ecf20Sopenharmony_ci * Write MMCR0, then read PMC5 and PMC6 immediately. 11958c2ecf20Sopenharmony_ci * To ensure we don't get a performance monitor interrupt 11968c2ecf20Sopenharmony_ci * between writing MMCR0 and freezing/thawing the limited 11978c2ecf20Sopenharmony_ci * events, we first write MMCR0 with the event overflow 11988c2ecf20Sopenharmony_ci * interrupt enable bits turned off. 11998c2ecf20Sopenharmony_ci */ 12008c2ecf20Sopenharmony_ci asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5" 12018c2ecf20Sopenharmony_ci : "=&r" (pmc5), "=&r" (pmc6) 12028c2ecf20Sopenharmony_ci : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)), 12038c2ecf20Sopenharmony_ci "i" (SPRN_MMCR0), 12048c2ecf20Sopenharmony_ci "i" (SPRN_PMC5), "i" (SPRN_PMC6)); 12058c2ecf20Sopenharmony_ci 12068c2ecf20Sopenharmony_ci if (mmcr0 & MMCR0_FC) 12078c2ecf20Sopenharmony_ci freeze_limited_counters(cpuhw, pmc5, pmc6); 12088c2ecf20Sopenharmony_ci else 12098c2ecf20Sopenharmony_ci thaw_limited_counters(cpuhw, pmc5, pmc6); 12108c2ecf20Sopenharmony_ci 12118c2ecf20Sopenharmony_ci /* 12128c2ecf20Sopenharmony_ci * Write the full MMCR0 including the event overflow interrupt 12138c2ecf20Sopenharmony_ci * enable bits, if necessary. 12148c2ecf20Sopenharmony_ci */ 12158c2ecf20Sopenharmony_ci if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE)) 12168c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR0, mmcr0); 12178c2ecf20Sopenharmony_ci} 12188c2ecf20Sopenharmony_ci 12198c2ecf20Sopenharmony_ci/* 12208c2ecf20Sopenharmony_ci * Disable all events to prevent PMU interrupts and to allow 12218c2ecf20Sopenharmony_ci * events to be added or removed. 12228c2ecf20Sopenharmony_ci */ 12238c2ecf20Sopenharmony_cistatic void power_pmu_disable(struct pmu *pmu) 12248c2ecf20Sopenharmony_ci{ 12258c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 12268c2ecf20Sopenharmony_ci unsigned long flags, mmcr0, val, mmcra; 12278c2ecf20Sopenharmony_ci 12288c2ecf20Sopenharmony_ci if (!ppmu) 12298c2ecf20Sopenharmony_ci return; 12308c2ecf20Sopenharmony_ci local_irq_save(flags); 12318c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 12328c2ecf20Sopenharmony_ci 12338c2ecf20Sopenharmony_ci if (!cpuhw->disabled) { 12348c2ecf20Sopenharmony_ci /* 12358c2ecf20Sopenharmony_ci * Check if we ever enabled the PMU on this cpu. 12368c2ecf20Sopenharmony_ci */ 12378c2ecf20Sopenharmony_ci if (!cpuhw->pmcs_enabled) { 12388c2ecf20Sopenharmony_ci ppc_enable_pmcs(); 12398c2ecf20Sopenharmony_ci cpuhw->pmcs_enabled = 1; 12408c2ecf20Sopenharmony_ci } 12418c2ecf20Sopenharmony_ci 12428c2ecf20Sopenharmony_ci /* 12438c2ecf20Sopenharmony_ci * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56 12448c2ecf20Sopenharmony_ci * Also clear PMXE to disable PMI's getting triggered in some 12458c2ecf20Sopenharmony_ci * corner cases during PMU disable. 
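                 * (The original MMCR0 value is also kept in mmcr0 so it can
                 * be handed to ebb_switch_out() further down.)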
                 */
                val = mmcr0 = mfspr(SPRN_MMCR0);
                val |= MMCR0_FC;
                val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
                         MMCR0_PMXE | MMCR0_FC56);
                /* Set mmcr0 PMCCEXT for p10 */
                if (ppmu->flags & PPMU_ARCH_31)
                        val |= MMCR0_PMCCEXT;

                /*
                 * The barrier is to make sure the mtspr has been
                 * executed and the PMU has frozen the events etc.
                 * before we return.
                 */
                write_mmcr0(cpuhw, val);
                mb();
                isync();

                /*
                 * Some corner cases could clear the PMU counter overflow
                 * while a masked PMI is pending. One such case is when
                 * a PMI happens during interrupt replay and perf counter
                 * values are cleared by PMU callbacks before replay.
                 *
                 * Disable the interrupt by clearing the paca bit for PMI
                 * since we are disabling the PMU now. Otherwise a warning
                 * would be triggered later if a PMI is pending but no
                 * counter is found to have overflowed.
                 *
                 * Since power_pmu_disable runs under local_irq_save, it
                 * could happen that code hits a PMC overflow without PMI
                 * pending in paca. Hence only clear PMI pending if it was
                 * set.
                 *
                 * If a PMI is pending, then MSR[EE] must be disabled (because
                 * the masked PMI handler keeps EE disabled). So it is safe to
                 * call clear_pmi_irq_pending().
                 */
                if (pmi_irq_pending())
                        clear_pmi_irq_pending();

                val = mmcra = cpuhw->mmcr.mmcra;

                /*
                 * Disable instruction sampling if it was enabled.
                 */
                val &= ~MMCRA_SAMPLE_ENABLE;

                /* Disable BHRB via mmcra (BHRBRD) for p10 */
                if (ppmu->flags & PPMU_ARCH_31)
                        val |= MMCRA_BHRB_DISABLE;

                /*
                 * Write SPRN_MMCRA if mmcra has either disabled
                 * instruction sampling or BHRB.
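                 * (The write, and the mb()/isync() that follow it, are
                 * skipped when neither bit needed to change.)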
13018c2ecf20Sopenharmony_ci */ 13028c2ecf20Sopenharmony_ci if (val != mmcra) { 13038c2ecf20Sopenharmony_ci mtspr(SPRN_MMCRA, val); 13048c2ecf20Sopenharmony_ci mb(); 13058c2ecf20Sopenharmony_ci isync(); 13068c2ecf20Sopenharmony_ci } 13078c2ecf20Sopenharmony_ci 13088c2ecf20Sopenharmony_ci cpuhw->disabled = 1; 13098c2ecf20Sopenharmony_ci cpuhw->n_added = 0; 13108c2ecf20Sopenharmony_ci 13118c2ecf20Sopenharmony_ci ebb_switch_out(mmcr0); 13128c2ecf20Sopenharmony_ci 13138c2ecf20Sopenharmony_ci#ifdef CONFIG_PPC64 13148c2ecf20Sopenharmony_ci /* 13158c2ecf20Sopenharmony_ci * These are readable by userspace, may contain kernel 13168c2ecf20Sopenharmony_ci * addresses and are not switched by context switch, so clear 13178c2ecf20Sopenharmony_ci * them now to avoid leaking anything to userspace in general 13188c2ecf20Sopenharmony_ci * including to another process. 13198c2ecf20Sopenharmony_ci */ 13208c2ecf20Sopenharmony_ci if (ppmu->flags & PPMU_ARCH_207S) { 13218c2ecf20Sopenharmony_ci mtspr(SPRN_SDAR, 0); 13228c2ecf20Sopenharmony_ci mtspr(SPRN_SIAR, 0); 13238c2ecf20Sopenharmony_ci } 13248c2ecf20Sopenharmony_ci#endif 13258c2ecf20Sopenharmony_ci } 13268c2ecf20Sopenharmony_ci 13278c2ecf20Sopenharmony_ci local_irq_restore(flags); 13288c2ecf20Sopenharmony_ci} 13298c2ecf20Sopenharmony_ci 13308c2ecf20Sopenharmony_ci/* 13318c2ecf20Sopenharmony_ci * Re-enable all events if disable == 0. 13328c2ecf20Sopenharmony_ci * If we were previously disabled and events were added, then 13338c2ecf20Sopenharmony_ci * put the new config on the PMU. 13348c2ecf20Sopenharmony_ci */ 13358c2ecf20Sopenharmony_cistatic void power_pmu_enable(struct pmu *pmu) 13368c2ecf20Sopenharmony_ci{ 13378c2ecf20Sopenharmony_ci struct perf_event *event; 13388c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 13398c2ecf20Sopenharmony_ci unsigned long flags; 13408c2ecf20Sopenharmony_ci long i; 13418c2ecf20Sopenharmony_ci unsigned long val, mmcr0; 13428c2ecf20Sopenharmony_ci s64 left; 13438c2ecf20Sopenharmony_ci unsigned int hwc_index[MAX_HWEVENTS]; 13448c2ecf20Sopenharmony_ci int n_lim; 13458c2ecf20Sopenharmony_ci int idx; 13468c2ecf20Sopenharmony_ci bool ebb; 13478c2ecf20Sopenharmony_ci 13488c2ecf20Sopenharmony_ci if (!ppmu) 13498c2ecf20Sopenharmony_ci return; 13508c2ecf20Sopenharmony_ci local_irq_save(flags); 13518c2ecf20Sopenharmony_ci 13528c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 13538c2ecf20Sopenharmony_ci if (!cpuhw->disabled) 13548c2ecf20Sopenharmony_ci goto out; 13558c2ecf20Sopenharmony_ci 13568c2ecf20Sopenharmony_ci if (cpuhw->n_events == 0) { 13578c2ecf20Sopenharmony_ci ppc_set_pmu_inuse(0); 13588c2ecf20Sopenharmony_ci goto out; 13598c2ecf20Sopenharmony_ci } 13608c2ecf20Sopenharmony_ci 13618c2ecf20Sopenharmony_ci cpuhw->disabled = 0; 13628c2ecf20Sopenharmony_ci 13638c2ecf20Sopenharmony_ci /* 13648c2ecf20Sopenharmony_ci * EBB requires an exclusive group and all events must have the EBB 13658c2ecf20Sopenharmony_ci * flag set, or not set, so we can just check a single event. Also we 13668c2ecf20Sopenharmony_ci * know we have at least one event. 13678c2ecf20Sopenharmony_ci */ 13688c2ecf20Sopenharmony_ci ebb = is_ebb_event(cpuhw->event[0]); 13698c2ecf20Sopenharmony_ci 13708c2ecf20Sopenharmony_ci /* 13718c2ecf20Sopenharmony_ci * If we didn't change anything, or only removed events, 13728c2ecf20Sopenharmony_ci * no need to recalculate MMCR* settings and reset the PMCs. 13738c2ecf20Sopenharmony_ci * Just reenable the PMU with the current MMCR* settings 13748c2ecf20Sopenharmony_ci * (possibly updated for removal of events). 
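         * (Removing an event already updates cpuhw->mmcr via
         * ppmu->disable_pmc() in power_pmu_del(), so only newly added
         * events force a full recompute here.)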
13758c2ecf20Sopenharmony_ci */ 13768c2ecf20Sopenharmony_ci if (!cpuhw->n_added) { 13778c2ecf20Sopenharmony_ci /* 13788c2ecf20Sopenharmony_ci * If there is any active event with an overflown PMC 13798c2ecf20Sopenharmony_ci * value, set back PACA_IRQ_PMI which would have been 13808c2ecf20Sopenharmony_ci * cleared in power_pmu_disable(). 13818c2ecf20Sopenharmony_ci */ 13828c2ecf20Sopenharmony_ci hard_irq_disable(); 13838c2ecf20Sopenharmony_ci if (any_pmc_overflown(cpuhw)) 13848c2ecf20Sopenharmony_ci set_pmi_irq_pending(); 13858c2ecf20Sopenharmony_ci 13868c2ecf20Sopenharmony_ci mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); 13878c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); 13888c2ecf20Sopenharmony_ci if (ppmu->flags & PPMU_ARCH_31) 13898c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3); 13908c2ecf20Sopenharmony_ci goto out_enable; 13918c2ecf20Sopenharmony_ci } 13928c2ecf20Sopenharmony_ci 13938c2ecf20Sopenharmony_ci /* 13948c2ecf20Sopenharmony_ci * Clear all MMCR settings and recompute them for the new set of events. 13958c2ecf20Sopenharmony_ci */ 13968c2ecf20Sopenharmony_ci memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr)); 13978c2ecf20Sopenharmony_ci 13988c2ecf20Sopenharmony_ci if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, 13998c2ecf20Sopenharmony_ci &cpuhw->mmcr, cpuhw->event)) { 14008c2ecf20Sopenharmony_ci /* shouldn't ever get here */ 14018c2ecf20Sopenharmony_ci printk(KERN_ERR "oops compute_mmcr failed\n"); 14028c2ecf20Sopenharmony_ci goto out; 14038c2ecf20Sopenharmony_ci } 14048c2ecf20Sopenharmony_ci 14058c2ecf20Sopenharmony_ci if (!(ppmu->flags & PPMU_ARCH_207S)) { 14068c2ecf20Sopenharmony_ci /* 14078c2ecf20Sopenharmony_ci * Add in MMCR0 freeze bits corresponding to the attr.exclude_* 14088c2ecf20Sopenharmony_ci * bits for the first event. We have already checked that all 14098c2ecf20Sopenharmony_ci * events have the same value for these bits as the first event. 14108c2ecf20Sopenharmony_ci */ 14118c2ecf20Sopenharmony_ci event = cpuhw->event[0]; 14128c2ecf20Sopenharmony_ci if (event->attr.exclude_user) 14138c2ecf20Sopenharmony_ci cpuhw->mmcr.mmcr0 |= MMCR0_FCP; 14148c2ecf20Sopenharmony_ci if (event->attr.exclude_kernel) 14158c2ecf20Sopenharmony_ci cpuhw->mmcr.mmcr0 |= freeze_events_kernel; 14168c2ecf20Sopenharmony_ci if (event->attr.exclude_hv) 14178c2ecf20Sopenharmony_ci cpuhw->mmcr.mmcr0 |= MMCR0_FCHV; 14188c2ecf20Sopenharmony_ci } 14198c2ecf20Sopenharmony_ci 14208c2ecf20Sopenharmony_ci /* 14218c2ecf20Sopenharmony_ci * Write the new configuration to MMCR* with the freeze 14228c2ecf20Sopenharmony_ci * bit set and set the hardware events to their initial values. 14238c2ecf20Sopenharmony_ci * Then unfreeze the events. 
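         * (MMCR0 is written with MMCR0_FC set and the PMC interrupt-enable
         * bits masked off, so nothing counts or raises a PMI until
         * write_mmcr0() is called near the end of this function.)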
14248c2ecf20Sopenharmony_ci */ 14258c2ecf20Sopenharmony_ci ppc_set_pmu_inuse(1); 14268c2ecf20Sopenharmony_ci mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE); 14278c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1); 14288c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) 14298c2ecf20Sopenharmony_ci | MMCR0_FC); 14308c2ecf20Sopenharmony_ci if (ppmu->flags & PPMU_ARCH_207S) 14318c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2); 14328c2ecf20Sopenharmony_ci 14338c2ecf20Sopenharmony_ci if (ppmu->flags & PPMU_ARCH_31) 14348c2ecf20Sopenharmony_ci mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3); 14358c2ecf20Sopenharmony_ci 14368c2ecf20Sopenharmony_ci /* 14378c2ecf20Sopenharmony_ci * Read off any pre-existing events that need to move 14388c2ecf20Sopenharmony_ci * to another PMC. 14398c2ecf20Sopenharmony_ci */ 14408c2ecf20Sopenharmony_ci for (i = 0; i < cpuhw->n_events; ++i) { 14418c2ecf20Sopenharmony_ci event = cpuhw->event[i]; 14428c2ecf20Sopenharmony_ci if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { 14438c2ecf20Sopenharmony_ci power_pmu_read(event); 14448c2ecf20Sopenharmony_ci write_pmc(event->hw.idx, 0); 14458c2ecf20Sopenharmony_ci event->hw.idx = 0; 14468c2ecf20Sopenharmony_ci } 14478c2ecf20Sopenharmony_ci } 14488c2ecf20Sopenharmony_ci 14498c2ecf20Sopenharmony_ci /* 14508c2ecf20Sopenharmony_ci * Initialize the PMCs for all the new and moved events. 14518c2ecf20Sopenharmony_ci */ 14528c2ecf20Sopenharmony_ci cpuhw->n_limited = n_lim = 0; 14538c2ecf20Sopenharmony_ci for (i = 0; i < cpuhw->n_events; ++i) { 14548c2ecf20Sopenharmony_ci event = cpuhw->event[i]; 14558c2ecf20Sopenharmony_ci if (event->hw.idx) 14568c2ecf20Sopenharmony_ci continue; 14578c2ecf20Sopenharmony_ci idx = hwc_index[i] + 1; 14588c2ecf20Sopenharmony_ci if (is_limited_pmc(idx)) { 14598c2ecf20Sopenharmony_ci cpuhw->limited_counter[n_lim] = event; 14608c2ecf20Sopenharmony_ci cpuhw->limited_hwidx[n_lim] = idx; 14618c2ecf20Sopenharmony_ci ++n_lim; 14628c2ecf20Sopenharmony_ci continue; 14638c2ecf20Sopenharmony_ci } 14648c2ecf20Sopenharmony_ci 14658c2ecf20Sopenharmony_ci if (ebb) 14668c2ecf20Sopenharmony_ci val = local64_read(&event->hw.prev_count); 14678c2ecf20Sopenharmony_ci else { 14688c2ecf20Sopenharmony_ci val = 0; 14698c2ecf20Sopenharmony_ci if (event->hw.sample_period) { 14708c2ecf20Sopenharmony_ci left = local64_read(&event->hw.period_left); 14718c2ecf20Sopenharmony_ci if (left < 0x80000000L) 14728c2ecf20Sopenharmony_ci val = 0x80000000L - left; 14738c2ecf20Sopenharmony_ci } 14748c2ecf20Sopenharmony_ci local64_set(&event->hw.prev_count, val); 14758c2ecf20Sopenharmony_ci } 14768c2ecf20Sopenharmony_ci 14778c2ecf20Sopenharmony_ci event->hw.idx = idx; 14788c2ecf20Sopenharmony_ci if (event->hw.state & PERF_HES_STOPPED) 14798c2ecf20Sopenharmony_ci val = 0; 14808c2ecf20Sopenharmony_ci write_pmc(idx, val); 14818c2ecf20Sopenharmony_ci 14828c2ecf20Sopenharmony_ci perf_event_update_userpage(event); 14838c2ecf20Sopenharmony_ci } 14848c2ecf20Sopenharmony_ci cpuhw->n_limited = n_lim; 14858c2ecf20Sopenharmony_ci cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE; 14868c2ecf20Sopenharmony_ci 14878c2ecf20Sopenharmony_ci out_enable: 14888c2ecf20Sopenharmony_ci pmao_restore_workaround(ebb); 14898c2ecf20Sopenharmony_ci 14908c2ecf20Sopenharmony_ci mmcr0 = ebb_switch_in(ebb, cpuhw); 14918c2ecf20Sopenharmony_ci 14928c2ecf20Sopenharmony_ci mb(); 14938c2ecf20Sopenharmony_ci if (cpuhw->bhrb_users) 14948c2ecf20Sopenharmony_ci ppmu->config_bhrb(cpuhw->bhrb_filter); 
14958c2ecf20Sopenharmony_ci 14968c2ecf20Sopenharmony_ci write_mmcr0(cpuhw, mmcr0); 14978c2ecf20Sopenharmony_ci 14988c2ecf20Sopenharmony_ci /* 14998c2ecf20Sopenharmony_ci * Enable instruction sampling if necessary 15008c2ecf20Sopenharmony_ci */ 15018c2ecf20Sopenharmony_ci if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) { 15028c2ecf20Sopenharmony_ci mb(); 15038c2ecf20Sopenharmony_ci mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra); 15048c2ecf20Sopenharmony_ci } 15058c2ecf20Sopenharmony_ci 15068c2ecf20Sopenharmony_ci out: 15078c2ecf20Sopenharmony_ci 15088c2ecf20Sopenharmony_ci local_irq_restore(flags); 15098c2ecf20Sopenharmony_ci} 15108c2ecf20Sopenharmony_ci 15118c2ecf20Sopenharmony_cistatic int collect_events(struct perf_event *group, int max_count, 15128c2ecf20Sopenharmony_ci struct perf_event *ctrs[], u64 *events, 15138c2ecf20Sopenharmony_ci unsigned int *flags) 15148c2ecf20Sopenharmony_ci{ 15158c2ecf20Sopenharmony_ci int n = 0; 15168c2ecf20Sopenharmony_ci struct perf_event *event; 15178c2ecf20Sopenharmony_ci 15188c2ecf20Sopenharmony_ci if (group->pmu->task_ctx_nr == perf_hw_context) { 15198c2ecf20Sopenharmony_ci if (n >= max_count) 15208c2ecf20Sopenharmony_ci return -1; 15218c2ecf20Sopenharmony_ci ctrs[n] = group; 15228c2ecf20Sopenharmony_ci flags[n] = group->hw.event_base; 15238c2ecf20Sopenharmony_ci events[n++] = group->hw.config; 15248c2ecf20Sopenharmony_ci } 15258c2ecf20Sopenharmony_ci for_each_sibling_event(event, group) { 15268c2ecf20Sopenharmony_ci if (event->pmu->task_ctx_nr == perf_hw_context && 15278c2ecf20Sopenharmony_ci event->state != PERF_EVENT_STATE_OFF) { 15288c2ecf20Sopenharmony_ci if (n >= max_count) 15298c2ecf20Sopenharmony_ci return -1; 15308c2ecf20Sopenharmony_ci ctrs[n] = event; 15318c2ecf20Sopenharmony_ci flags[n] = event->hw.event_base; 15328c2ecf20Sopenharmony_ci events[n++] = event->hw.config; 15338c2ecf20Sopenharmony_ci } 15348c2ecf20Sopenharmony_ci } 15358c2ecf20Sopenharmony_ci return n; 15368c2ecf20Sopenharmony_ci} 15378c2ecf20Sopenharmony_ci 15388c2ecf20Sopenharmony_ci/* 15398c2ecf20Sopenharmony_ci * Add an event to the PMU. 15408c2ecf20Sopenharmony_ci * If all events are not already frozen, then we disable and 15418c2ecf20Sopenharmony_ci * re-enable the PMU in order to get hw_perf_enable to do the 15428c2ecf20Sopenharmony_ci * actual work of reconfiguring the PMU. 15438c2ecf20Sopenharmony_ci */ 15448c2ecf20Sopenharmony_cistatic int power_pmu_add(struct perf_event *event, int ef_flags) 15458c2ecf20Sopenharmony_ci{ 15468c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 15478c2ecf20Sopenharmony_ci unsigned long flags; 15488c2ecf20Sopenharmony_ci int n0; 15498c2ecf20Sopenharmony_ci int ret = -EAGAIN; 15508c2ecf20Sopenharmony_ci 15518c2ecf20Sopenharmony_ci local_irq_save(flags); 15528c2ecf20Sopenharmony_ci perf_pmu_disable(event->pmu); 15538c2ecf20Sopenharmony_ci 15548c2ecf20Sopenharmony_ci /* 15558c2ecf20Sopenharmony_ci * Add the event to the list (if there is room) 15568c2ecf20Sopenharmony_ci * and check whether the total set is still feasible. 
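         * (Feasibility is checked with check_excludes() and
         * power_check_constraints() below, unless a PERF_PMU_TXN_ADD
         * transaction is in progress, in which case the check is deferred
         * to power_pmu_commit_txn().)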
15578c2ecf20Sopenharmony_ci */ 15588c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 15598c2ecf20Sopenharmony_ci n0 = cpuhw->n_events; 15608c2ecf20Sopenharmony_ci if (n0 >= ppmu->n_counter) 15618c2ecf20Sopenharmony_ci goto out; 15628c2ecf20Sopenharmony_ci cpuhw->event[n0] = event; 15638c2ecf20Sopenharmony_ci cpuhw->events[n0] = event->hw.config; 15648c2ecf20Sopenharmony_ci cpuhw->flags[n0] = event->hw.event_base; 15658c2ecf20Sopenharmony_ci 15668c2ecf20Sopenharmony_ci /* 15678c2ecf20Sopenharmony_ci * This event may have been disabled/stopped in record_and_restart() 15688c2ecf20Sopenharmony_ci * because we exceeded the ->event_limit. If re-starting the event, 15698c2ecf20Sopenharmony_ci * clear the ->hw.state (STOPPED and UPTODATE flags), so the user 15708c2ecf20Sopenharmony_ci * notification is re-enabled. 15718c2ecf20Sopenharmony_ci */ 15728c2ecf20Sopenharmony_ci if (!(ef_flags & PERF_EF_START)) 15738c2ecf20Sopenharmony_ci event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 15748c2ecf20Sopenharmony_ci else 15758c2ecf20Sopenharmony_ci event->hw.state = 0; 15768c2ecf20Sopenharmony_ci 15778c2ecf20Sopenharmony_ci /* 15788c2ecf20Sopenharmony_ci * If group events scheduling transaction was started, 15798c2ecf20Sopenharmony_ci * skip the schedulability test here, it will be performed 15808c2ecf20Sopenharmony_ci * at commit time(->commit_txn) as a whole 15818c2ecf20Sopenharmony_ci */ 15828c2ecf20Sopenharmony_ci if (cpuhw->txn_flags & PERF_PMU_TXN_ADD) 15838c2ecf20Sopenharmony_ci goto nocheck; 15848c2ecf20Sopenharmony_ci 15858c2ecf20Sopenharmony_ci if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) 15868c2ecf20Sopenharmony_ci goto out; 15878c2ecf20Sopenharmony_ci if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) 15888c2ecf20Sopenharmony_ci goto out; 15898c2ecf20Sopenharmony_ci event->hw.config = cpuhw->events[n0]; 15908c2ecf20Sopenharmony_ci 15918c2ecf20Sopenharmony_cinocheck: 15928c2ecf20Sopenharmony_ci ebb_event_add(event); 15938c2ecf20Sopenharmony_ci 15948c2ecf20Sopenharmony_ci ++cpuhw->n_events; 15958c2ecf20Sopenharmony_ci ++cpuhw->n_added; 15968c2ecf20Sopenharmony_ci 15978c2ecf20Sopenharmony_ci ret = 0; 15988c2ecf20Sopenharmony_ci out: 15998c2ecf20Sopenharmony_ci if (has_branch_stack(event)) { 16008c2ecf20Sopenharmony_ci u64 bhrb_filter = -1; 16018c2ecf20Sopenharmony_ci 16028c2ecf20Sopenharmony_ci if (ppmu->bhrb_filter_map) 16038c2ecf20Sopenharmony_ci bhrb_filter = ppmu->bhrb_filter_map( 16048c2ecf20Sopenharmony_ci event->attr.branch_sample_type); 16058c2ecf20Sopenharmony_ci 16068c2ecf20Sopenharmony_ci if (bhrb_filter != -1) { 16078c2ecf20Sopenharmony_ci cpuhw->bhrb_filter = bhrb_filter; 16088c2ecf20Sopenharmony_ci power_pmu_bhrb_enable(event); 16098c2ecf20Sopenharmony_ci } 16108c2ecf20Sopenharmony_ci } 16118c2ecf20Sopenharmony_ci 16128c2ecf20Sopenharmony_ci perf_pmu_enable(event->pmu); 16138c2ecf20Sopenharmony_ci local_irq_restore(flags); 16148c2ecf20Sopenharmony_ci return ret; 16158c2ecf20Sopenharmony_ci} 16168c2ecf20Sopenharmony_ci 16178c2ecf20Sopenharmony_ci/* 16188c2ecf20Sopenharmony_ci * Remove an event from the PMU. 
16198c2ecf20Sopenharmony_ci */ 16208c2ecf20Sopenharmony_cistatic void power_pmu_del(struct perf_event *event, int ef_flags) 16218c2ecf20Sopenharmony_ci{ 16228c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 16238c2ecf20Sopenharmony_ci long i; 16248c2ecf20Sopenharmony_ci unsigned long flags; 16258c2ecf20Sopenharmony_ci 16268c2ecf20Sopenharmony_ci local_irq_save(flags); 16278c2ecf20Sopenharmony_ci perf_pmu_disable(event->pmu); 16288c2ecf20Sopenharmony_ci 16298c2ecf20Sopenharmony_ci power_pmu_read(event); 16308c2ecf20Sopenharmony_ci 16318c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 16328c2ecf20Sopenharmony_ci for (i = 0; i < cpuhw->n_events; ++i) { 16338c2ecf20Sopenharmony_ci if (event == cpuhw->event[i]) { 16348c2ecf20Sopenharmony_ci while (++i < cpuhw->n_events) { 16358c2ecf20Sopenharmony_ci cpuhw->event[i-1] = cpuhw->event[i]; 16368c2ecf20Sopenharmony_ci cpuhw->events[i-1] = cpuhw->events[i]; 16378c2ecf20Sopenharmony_ci cpuhw->flags[i-1] = cpuhw->flags[i]; 16388c2ecf20Sopenharmony_ci } 16398c2ecf20Sopenharmony_ci --cpuhw->n_events; 16408c2ecf20Sopenharmony_ci ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr); 16418c2ecf20Sopenharmony_ci if (event->hw.idx) { 16428c2ecf20Sopenharmony_ci write_pmc(event->hw.idx, 0); 16438c2ecf20Sopenharmony_ci event->hw.idx = 0; 16448c2ecf20Sopenharmony_ci } 16458c2ecf20Sopenharmony_ci perf_event_update_userpage(event); 16468c2ecf20Sopenharmony_ci break; 16478c2ecf20Sopenharmony_ci } 16488c2ecf20Sopenharmony_ci } 16498c2ecf20Sopenharmony_ci for (i = 0; i < cpuhw->n_limited; ++i) 16508c2ecf20Sopenharmony_ci if (event == cpuhw->limited_counter[i]) 16518c2ecf20Sopenharmony_ci break; 16528c2ecf20Sopenharmony_ci if (i < cpuhw->n_limited) { 16538c2ecf20Sopenharmony_ci while (++i < cpuhw->n_limited) { 16548c2ecf20Sopenharmony_ci cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; 16558c2ecf20Sopenharmony_ci cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; 16568c2ecf20Sopenharmony_ci } 16578c2ecf20Sopenharmony_ci --cpuhw->n_limited; 16588c2ecf20Sopenharmony_ci } 16598c2ecf20Sopenharmony_ci if (cpuhw->n_events == 0) { 16608c2ecf20Sopenharmony_ci /* disable exceptions if no events are running */ 16618c2ecf20Sopenharmony_ci cpuhw->mmcr.mmcr0 &= ~(MMCR0_PMXE | MMCR0_FCECE); 16628c2ecf20Sopenharmony_ci } 16638c2ecf20Sopenharmony_ci 16648c2ecf20Sopenharmony_ci if (has_branch_stack(event)) 16658c2ecf20Sopenharmony_ci power_pmu_bhrb_disable(event); 16668c2ecf20Sopenharmony_ci 16678c2ecf20Sopenharmony_ci perf_pmu_enable(event->pmu); 16688c2ecf20Sopenharmony_ci local_irq_restore(flags); 16698c2ecf20Sopenharmony_ci} 16708c2ecf20Sopenharmony_ci 16718c2ecf20Sopenharmony_ci/* 16728c2ecf20Sopenharmony_ci * POWER-PMU does not support disabling individual counters, hence 16738c2ecf20Sopenharmony_ci * program their cycle counter to their max value and ignore the interrupts. 
16748c2ecf20Sopenharmony_ci */ 16758c2ecf20Sopenharmony_ci 16768c2ecf20Sopenharmony_cistatic void power_pmu_start(struct perf_event *event, int ef_flags) 16778c2ecf20Sopenharmony_ci{ 16788c2ecf20Sopenharmony_ci unsigned long flags; 16798c2ecf20Sopenharmony_ci s64 left; 16808c2ecf20Sopenharmony_ci unsigned long val; 16818c2ecf20Sopenharmony_ci 16828c2ecf20Sopenharmony_ci if (!event->hw.idx || !event->hw.sample_period) 16838c2ecf20Sopenharmony_ci return; 16848c2ecf20Sopenharmony_ci 16858c2ecf20Sopenharmony_ci if (!(event->hw.state & PERF_HES_STOPPED)) 16868c2ecf20Sopenharmony_ci return; 16878c2ecf20Sopenharmony_ci 16888c2ecf20Sopenharmony_ci if (ef_flags & PERF_EF_RELOAD) 16898c2ecf20Sopenharmony_ci WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); 16908c2ecf20Sopenharmony_ci 16918c2ecf20Sopenharmony_ci local_irq_save(flags); 16928c2ecf20Sopenharmony_ci perf_pmu_disable(event->pmu); 16938c2ecf20Sopenharmony_ci 16948c2ecf20Sopenharmony_ci event->hw.state = 0; 16958c2ecf20Sopenharmony_ci left = local64_read(&event->hw.period_left); 16968c2ecf20Sopenharmony_ci 16978c2ecf20Sopenharmony_ci val = 0; 16988c2ecf20Sopenharmony_ci if (left < 0x80000000L) 16998c2ecf20Sopenharmony_ci val = 0x80000000L - left; 17008c2ecf20Sopenharmony_ci 17018c2ecf20Sopenharmony_ci write_pmc(event->hw.idx, val); 17028c2ecf20Sopenharmony_ci 17038c2ecf20Sopenharmony_ci perf_event_update_userpage(event); 17048c2ecf20Sopenharmony_ci perf_pmu_enable(event->pmu); 17058c2ecf20Sopenharmony_ci local_irq_restore(flags); 17068c2ecf20Sopenharmony_ci} 17078c2ecf20Sopenharmony_ci 17088c2ecf20Sopenharmony_cistatic void power_pmu_stop(struct perf_event *event, int ef_flags) 17098c2ecf20Sopenharmony_ci{ 17108c2ecf20Sopenharmony_ci unsigned long flags; 17118c2ecf20Sopenharmony_ci 17128c2ecf20Sopenharmony_ci if (!event->hw.idx || !event->hw.sample_period) 17138c2ecf20Sopenharmony_ci return; 17148c2ecf20Sopenharmony_ci 17158c2ecf20Sopenharmony_ci if (event->hw.state & PERF_HES_STOPPED) 17168c2ecf20Sopenharmony_ci return; 17178c2ecf20Sopenharmony_ci 17188c2ecf20Sopenharmony_ci local_irq_save(flags); 17198c2ecf20Sopenharmony_ci perf_pmu_disable(event->pmu); 17208c2ecf20Sopenharmony_ci 17218c2ecf20Sopenharmony_ci power_pmu_read(event); 17228c2ecf20Sopenharmony_ci event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; 17238c2ecf20Sopenharmony_ci write_pmc(event->hw.idx, 0); 17248c2ecf20Sopenharmony_ci 17258c2ecf20Sopenharmony_ci perf_event_update_userpage(event); 17268c2ecf20Sopenharmony_ci perf_pmu_enable(event->pmu); 17278c2ecf20Sopenharmony_ci local_irq_restore(flags); 17288c2ecf20Sopenharmony_ci} 17298c2ecf20Sopenharmony_ci 17308c2ecf20Sopenharmony_ci/* 17318c2ecf20Sopenharmony_ci * Start group events scheduling transaction 17328c2ecf20Sopenharmony_ci * Set the flag to make pmu::enable() not perform the 17338c2ecf20Sopenharmony_ci * schedulability test, it will be performed at commit time 17348c2ecf20Sopenharmony_ci * 17358c2ecf20Sopenharmony_ci * We only support PERF_PMU_TXN_ADD transactions. Save the 17368c2ecf20Sopenharmony_ci * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD 17378c2ecf20Sopenharmony_ci * transactions. 
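 *
 * (Typical sequence driven by the core, shown here for illustration:
 * start_txn(PERF_PMU_TXN_ADD); add(event1); add(event2); ...; then
 * commit_txn(), or cancel_txn() if scheduling fails. Other transaction
 * types, e.g. PERF_PMU_TXN_READ, are recorded and otherwise ignored.)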
17388c2ecf20Sopenharmony_ci */ 17398c2ecf20Sopenharmony_cistatic void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) 17408c2ecf20Sopenharmony_ci{ 17418c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 17428c2ecf20Sopenharmony_ci 17438c2ecf20Sopenharmony_ci WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ 17448c2ecf20Sopenharmony_ci 17458c2ecf20Sopenharmony_ci cpuhw->txn_flags = txn_flags; 17468c2ecf20Sopenharmony_ci if (txn_flags & ~PERF_PMU_TXN_ADD) 17478c2ecf20Sopenharmony_ci return; 17488c2ecf20Sopenharmony_ci 17498c2ecf20Sopenharmony_ci perf_pmu_disable(pmu); 17508c2ecf20Sopenharmony_ci cpuhw->n_txn_start = cpuhw->n_events; 17518c2ecf20Sopenharmony_ci} 17528c2ecf20Sopenharmony_ci 17538c2ecf20Sopenharmony_ci/* 17548c2ecf20Sopenharmony_ci * Stop group events scheduling transaction 17558c2ecf20Sopenharmony_ci * Clear the flag and pmu::enable() will perform the 17568c2ecf20Sopenharmony_ci * schedulability test. 17578c2ecf20Sopenharmony_ci */ 17588c2ecf20Sopenharmony_cistatic void power_pmu_cancel_txn(struct pmu *pmu) 17598c2ecf20Sopenharmony_ci{ 17608c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); 17618c2ecf20Sopenharmony_ci unsigned int txn_flags; 17628c2ecf20Sopenharmony_ci 17638c2ecf20Sopenharmony_ci WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ 17648c2ecf20Sopenharmony_ci 17658c2ecf20Sopenharmony_ci txn_flags = cpuhw->txn_flags; 17668c2ecf20Sopenharmony_ci cpuhw->txn_flags = 0; 17678c2ecf20Sopenharmony_ci if (txn_flags & ~PERF_PMU_TXN_ADD) 17688c2ecf20Sopenharmony_ci return; 17698c2ecf20Sopenharmony_ci 17708c2ecf20Sopenharmony_ci perf_pmu_enable(pmu); 17718c2ecf20Sopenharmony_ci} 17728c2ecf20Sopenharmony_ci 17738c2ecf20Sopenharmony_ci/* 17748c2ecf20Sopenharmony_ci * Commit group events scheduling transaction 17758c2ecf20Sopenharmony_ci * Perform the group schedulability test as a whole 17768c2ecf20Sopenharmony_ci * Return 0 if success 17778c2ecf20Sopenharmony_ci */ 17788c2ecf20Sopenharmony_cistatic int power_pmu_commit_txn(struct pmu *pmu) 17798c2ecf20Sopenharmony_ci{ 17808c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 17818c2ecf20Sopenharmony_ci long i, n; 17828c2ecf20Sopenharmony_ci 17838c2ecf20Sopenharmony_ci if (!ppmu) 17848c2ecf20Sopenharmony_ci return -EAGAIN; 17858c2ecf20Sopenharmony_ci 17868c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 17878c2ecf20Sopenharmony_ci WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ 17888c2ecf20Sopenharmony_ci 17898c2ecf20Sopenharmony_ci if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) { 17908c2ecf20Sopenharmony_ci cpuhw->txn_flags = 0; 17918c2ecf20Sopenharmony_ci return 0; 17928c2ecf20Sopenharmony_ci } 17938c2ecf20Sopenharmony_ci 17948c2ecf20Sopenharmony_ci n = cpuhw->n_events; 17958c2ecf20Sopenharmony_ci if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) 17968c2ecf20Sopenharmony_ci return -EAGAIN; 17978c2ecf20Sopenharmony_ci i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n); 17988c2ecf20Sopenharmony_ci if (i < 0) 17998c2ecf20Sopenharmony_ci return -EAGAIN; 18008c2ecf20Sopenharmony_ci 18018c2ecf20Sopenharmony_ci for (i = cpuhw->n_txn_start; i < n; ++i) 18028c2ecf20Sopenharmony_ci cpuhw->event[i]->hw.config = cpuhw->events[i]; 18038c2ecf20Sopenharmony_ci 18048c2ecf20Sopenharmony_ci cpuhw->txn_flags = 0; 18058c2ecf20Sopenharmony_ci perf_pmu_enable(pmu); 18068c2ecf20Sopenharmony_ci return 0; 18078c2ecf20Sopenharmony_ci} 18088c2ecf20Sopenharmony_ci 18098c2ecf20Sopenharmony_ci/* 18108c2ecf20Sopenharmony_ci * 
Return 1 if we might be able to put event on a limited PMC, 18118c2ecf20Sopenharmony_ci * or 0 if not. 18128c2ecf20Sopenharmony_ci * An event can only go on a limited PMC if it counts something 18138c2ecf20Sopenharmony_ci * that a limited PMC can count, doesn't require interrupts, and 18148c2ecf20Sopenharmony_ci * doesn't exclude any processor mode. 18158c2ecf20Sopenharmony_ci */ 18168c2ecf20Sopenharmony_cistatic int can_go_on_limited_pmc(struct perf_event *event, u64 ev, 18178c2ecf20Sopenharmony_ci unsigned int flags) 18188c2ecf20Sopenharmony_ci{ 18198c2ecf20Sopenharmony_ci int n; 18208c2ecf20Sopenharmony_ci u64 alt[MAX_EVENT_ALTERNATIVES]; 18218c2ecf20Sopenharmony_ci 18228c2ecf20Sopenharmony_ci if (event->attr.exclude_user 18238c2ecf20Sopenharmony_ci || event->attr.exclude_kernel 18248c2ecf20Sopenharmony_ci || event->attr.exclude_hv 18258c2ecf20Sopenharmony_ci || event->attr.sample_period) 18268c2ecf20Sopenharmony_ci return 0; 18278c2ecf20Sopenharmony_ci 18288c2ecf20Sopenharmony_ci if (ppmu->limited_pmc_event(ev)) 18298c2ecf20Sopenharmony_ci return 1; 18308c2ecf20Sopenharmony_ci 18318c2ecf20Sopenharmony_ci /* 18328c2ecf20Sopenharmony_ci * The requested event_id isn't on a limited PMC already; 18338c2ecf20Sopenharmony_ci * see if any alternative code goes on a limited PMC. 18348c2ecf20Sopenharmony_ci */ 18358c2ecf20Sopenharmony_ci if (!ppmu->get_alternatives) 18368c2ecf20Sopenharmony_ci return 0; 18378c2ecf20Sopenharmony_ci 18388c2ecf20Sopenharmony_ci flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; 18398c2ecf20Sopenharmony_ci n = ppmu->get_alternatives(ev, flags, alt); 18408c2ecf20Sopenharmony_ci 18418c2ecf20Sopenharmony_ci return n > 0; 18428c2ecf20Sopenharmony_ci} 18438c2ecf20Sopenharmony_ci 18448c2ecf20Sopenharmony_ci/* 18458c2ecf20Sopenharmony_ci * Find an alternative event_id that goes on a normal PMC, if possible, 18468c2ecf20Sopenharmony_ci * and return the event_id code, or 0 if there is no such alternative. 18478c2ecf20Sopenharmony_ci * (Note: event_id code 0 is "don't count" on all machines.) 18488c2ecf20Sopenharmony_ci */ 18498c2ecf20Sopenharmony_cistatic u64 normal_pmc_alternative(u64 ev, unsigned long flags) 18508c2ecf20Sopenharmony_ci{ 18518c2ecf20Sopenharmony_ci u64 alt[MAX_EVENT_ALTERNATIVES]; 18528c2ecf20Sopenharmony_ci int n; 18538c2ecf20Sopenharmony_ci 18548c2ecf20Sopenharmony_ci flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); 18558c2ecf20Sopenharmony_ci n = ppmu->get_alternatives(ev, flags, alt); 18568c2ecf20Sopenharmony_ci if (!n) 18578c2ecf20Sopenharmony_ci return 0; 18588c2ecf20Sopenharmony_ci return alt[0]; 18598c2ecf20Sopenharmony_ci} 18608c2ecf20Sopenharmony_ci 18618c2ecf20Sopenharmony_ci/* Number of perf_events counting hardware events */ 18628c2ecf20Sopenharmony_cistatic atomic_t num_events; 18638c2ecf20Sopenharmony_ci/* Used to avoid races in calling reserve/release_pmc_hardware */ 18648c2ecf20Sopenharmony_cistatic DEFINE_MUTEX(pmc_reserve_mutex); 18658c2ecf20Sopenharmony_ci 18668c2ecf20Sopenharmony_ci/* 18678c2ecf20Sopenharmony_ci * Release the PMU if this is the last perf_event. 
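 * (The atomic_add_unless()/mutex pattern below mirrors the reservation
 * done in power_pmu_event_init(): only the drop of the last reference
 * takes pmc_reserve_mutex and releases the PMC hardware.)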
18688c2ecf20Sopenharmony_ci */ 18698c2ecf20Sopenharmony_cistatic void hw_perf_event_destroy(struct perf_event *event) 18708c2ecf20Sopenharmony_ci{ 18718c2ecf20Sopenharmony_ci if (!atomic_add_unless(&num_events, -1, 1)) { 18728c2ecf20Sopenharmony_ci mutex_lock(&pmc_reserve_mutex); 18738c2ecf20Sopenharmony_ci if (atomic_dec_return(&num_events) == 0) 18748c2ecf20Sopenharmony_ci release_pmc_hardware(); 18758c2ecf20Sopenharmony_ci mutex_unlock(&pmc_reserve_mutex); 18768c2ecf20Sopenharmony_ci } 18778c2ecf20Sopenharmony_ci} 18788c2ecf20Sopenharmony_ci 18798c2ecf20Sopenharmony_ci/* 18808c2ecf20Sopenharmony_ci * Translate a generic cache event_id config to a raw event_id code. 18818c2ecf20Sopenharmony_ci */ 18828c2ecf20Sopenharmony_cistatic int hw_perf_cache_event(u64 config, u64 *eventp) 18838c2ecf20Sopenharmony_ci{ 18848c2ecf20Sopenharmony_ci unsigned long type, op, result; 18858c2ecf20Sopenharmony_ci u64 ev; 18868c2ecf20Sopenharmony_ci 18878c2ecf20Sopenharmony_ci if (!ppmu->cache_events) 18888c2ecf20Sopenharmony_ci return -EINVAL; 18898c2ecf20Sopenharmony_ci 18908c2ecf20Sopenharmony_ci /* unpack config */ 18918c2ecf20Sopenharmony_ci type = config & 0xff; 18928c2ecf20Sopenharmony_ci op = (config >> 8) & 0xff; 18938c2ecf20Sopenharmony_ci result = (config >> 16) & 0xff; 18948c2ecf20Sopenharmony_ci 18958c2ecf20Sopenharmony_ci if (type >= PERF_COUNT_HW_CACHE_MAX || 18968c2ecf20Sopenharmony_ci op >= PERF_COUNT_HW_CACHE_OP_MAX || 18978c2ecf20Sopenharmony_ci result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 18988c2ecf20Sopenharmony_ci return -EINVAL; 18998c2ecf20Sopenharmony_ci 19008c2ecf20Sopenharmony_ci ev = (*ppmu->cache_events)[type][op][result]; 19018c2ecf20Sopenharmony_ci if (ev == 0) 19028c2ecf20Sopenharmony_ci return -EOPNOTSUPP; 19038c2ecf20Sopenharmony_ci if (ev == -1) 19048c2ecf20Sopenharmony_ci return -EINVAL; 19058c2ecf20Sopenharmony_ci *eventp = ev; 19068c2ecf20Sopenharmony_ci return 0; 19078c2ecf20Sopenharmony_ci} 19088c2ecf20Sopenharmony_ci 19098c2ecf20Sopenharmony_cistatic bool is_event_blacklisted(u64 ev) 19108c2ecf20Sopenharmony_ci{ 19118c2ecf20Sopenharmony_ci int i; 19128c2ecf20Sopenharmony_ci 19138c2ecf20Sopenharmony_ci for (i=0; i < ppmu->n_blacklist_ev; i++) { 19148c2ecf20Sopenharmony_ci if (ppmu->blacklist_ev[i] == ev) 19158c2ecf20Sopenharmony_ci return true; 19168c2ecf20Sopenharmony_ci } 19178c2ecf20Sopenharmony_ci 19188c2ecf20Sopenharmony_ci return false; 19198c2ecf20Sopenharmony_ci} 19208c2ecf20Sopenharmony_ci 19218c2ecf20Sopenharmony_cistatic int power_pmu_event_init(struct perf_event *event) 19228c2ecf20Sopenharmony_ci{ 19238c2ecf20Sopenharmony_ci u64 ev; 19248c2ecf20Sopenharmony_ci unsigned long flags, irq_flags; 19258c2ecf20Sopenharmony_ci struct perf_event *ctrs[MAX_HWEVENTS]; 19268c2ecf20Sopenharmony_ci u64 events[MAX_HWEVENTS]; 19278c2ecf20Sopenharmony_ci unsigned int cflags[MAX_HWEVENTS]; 19288c2ecf20Sopenharmony_ci int n; 19298c2ecf20Sopenharmony_ci int err; 19308c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 19318c2ecf20Sopenharmony_ci 19328c2ecf20Sopenharmony_ci if (!ppmu) 19338c2ecf20Sopenharmony_ci return -ENOENT; 19348c2ecf20Sopenharmony_ci 19358c2ecf20Sopenharmony_ci if (has_branch_stack(event)) { 19368c2ecf20Sopenharmony_ci /* PMU has BHRB enabled */ 19378c2ecf20Sopenharmony_ci if (!(ppmu->flags & PPMU_ARCH_207S)) 19388c2ecf20Sopenharmony_ci return -EOPNOTSUPP; 19398c2ecf20Sopenharmony_ci } 19408c2ecf20Sopenharmony_ci 19418c2ecf20Sopenharmony_ci switch (event->attr.type) { 19428c2ecf20Sopenharmony_ci case PERF_TYPE_HARDWARE: 19438c2ecf20Sopenharmony_ci ev = 
event->attr.config; 19448c2ecf20Sopenharmony_ci if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) 19458c2ecf20Sopenharmony_ci return -EOPNOTSUPP; 19468c2ecf20Sopenharmony_ci 19478c2ecf20Sopenharmony_ci if (ppmu->blacklist_ev && is_event_blacklisted(ev)) 19488c2ecf20Sopenharmony_ci return -EINVAL; 19498c2ecf20Sopenharmony_ci ev = ppmu->generic_events[ev]; 19508c2ecf20Sopenharmony_ci break; 19518c2ecf20Sopenharmony_ci case PERF_TYPE_HW_CACHE: 19528c2ecf20Sopenharmony_ci err = hw_perf_cache_event(event->attr.config, &ev); 19538c2ecf20Sopenharmony_ci if (err) 19548c2ecf20Sopenharmony_ci return err; 19558c2ecf20Sopenharmony_ci 19568c2ecf20Sopenharmony_ci if (ppmu->blacklist_ev && is_event_blacklisted(ev)) 19578c2ecf20Sopenharmony_ci return -EINVAL; 19588c2ecf20Sopenharmony_ci break; 19598c2ecf20Sopenharmony_ci case PERF_TYPE_RAW: 19608c2ecf20Sopenharmony_ci ev = event->attr.config; 19618c2ecf20Sopenharmony_ci 19628c2ecf20Sopenharmony_ci if (ppmu->blacklist_ev && is_event_blacklisted(ev)) 19638c2ecf20Sopenharmony_ci return -EINVAL; 19648c2ecf20Sopenharmony_ci break; 19658c2ecf20Sopenharmony_ci default: 19668c2ecf20Sopenharmony_ci return -ENOENT; 19678c2ecf20Sopenharmony_ci } 19688c2ecf20Sopenharmony_ci 19698c2ecf20Sopenharmony_ci event->hw.config_base = ev; 19708c2ecf20Sopenharmony_ci event->hw.idx = 0; 19718c2ecf20Sopenharmony_ci 19728c2ecf20Sopenharmony_ci /* 19738c2ecf20Sopenharmony_ci * If we are not running on a hypervisor, force the 19748c2ecf20Sopenharmony_ci * exclude_hv bit to 0 so that we don't care what 19758c2ecf20Sopenharmony_ci * the user set it to. 19768c2ecf20Sopenharmony_ci */ 19778c2ecf20Sopenharmony_ci if (!firmware_has_feature(FW_FEATURE_LPAR)) 19788c2ecf20Sopenharmony_ci event->attr.exclude_hv = 0; 19798c2ecf20Sopenharmony_ci 19808c2ecf20Sopenharmony_ci /* 19818c2ecf20Sopenharmony_ci * If this is a per-task event, then we can use 19828c2ecf20Sopenharmony_ci * PM_RUN_* events interchangeably with their non RUN_* 19838c2ecf20Sopenharmony_ci * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. 19848c2ecf20Sopenharmony_ci * XXX we should check if the task is an idle task. 19858c2ecf20Sopenharmony_ci */ 19868c2ecf20Sopenharmony_ci flags = 0; 19878c2ecf20Sopenharmony_ci if (event->attach_state & PERF_ATTACH_TASK) 19888c2ecf20Sopenharmony_ci flags |= PPMU_ONLY_COUNT_RUN; 19898c2ecf20Sopenharmony_ci 19908c2ecf20Sopenharmony_ci /* 19918c2ecf20Sopenharmony_ci * If this machine has limited events, check whether this 19928c2ecf20Sopenharmony_ci * event_id could go on a limited event. 19938c2ecf20Sopenharmony_ci */ 19948c2ecf20Sopenharmony_ci if (ppmu->flags & PPMU_LIMITED_PMC5_6) { 19958c2ecf20Sopenharmony_ci if (can_go_on_limited_pmc(event, ev, flags)) { 19968c2ecf20Sopenharmony_ci flags |= PPMU_LIMITED_PMC_OK; 19978c2ecf20Sopenharmony_ci } else if (ppmu->limited_pmc_event(ev)) { 19988c2ecf20Sopenharmony_ci /* 19998c2ecf20Sopenharmony_ci * The requested event_id is on a limited PMC, 20008c2ecf20Sopenharmony_ci * but we can't use a limited PMC; see if any 20018c2ecf20Sopenharmony_ci * alternative goes on a normal PMC. 
20028c2ecf20Sopenharmony_ci */ 20038c2ecf20Sopenharmony_ci ev = normal_pmc_alternative(ev, flags); 20048c2ecf20Sopenharmony_ci if (!ev) 20058c2ecf20Sopenharmony_ci return -EINVAL; 20068c2ecf20Sopenharmony_ci } 20078c2ecf20Sopenharmony_ci } 20088c2ecf20Sopenharmony_ci 20098c2ecf20Sopenharmony_ci /* Extra checks for EBB */ 20108c2ecf20Sopenharmony_ci err = ebb_event_check(event); 20118c2ecf20Sopenharmony_ci if (err) 20128c2ecf20Sopenharmony_ci return err; 20138c2ecf20Sopenharmony_ci 20148c2ecf20Sopenharmony_ci /* 20158c2ecf20Sopenharmony_ci * If this is in a group, check if it can go on with all the 20168c2ecf20Sopenharmony_ci * other hardware events in the group. We assume the event 20178c2ecf20Sopenharmony_ci * hasn't been linked into its leader's sibling list at this point. 20188c2ecf20Sopenharmony_ci */ 20198c2ecf20Sopenharmony_ci n = 0; 20208c2ecf20Sopenharmony_ci if (event->group_leader != event) { 20218c2ecf20Sopenharmony_ci n = collect_events(event->group_leader, ppmu->n_counter - 1, 20228c2ecf20Sopenharmony_ci ctrs, events, cflags); 20238c2ecf20Sopenharmony_ci if (n < 0) 20248c2ecf20Sopenharmony_ci return -EINVAL; 20258c2ecf20Sopenharmony_ci } 20268c2ecf20Sopenharmony_ci events[n] = ev; 20278c2ecf20Sopenharmony_ci ctrs[n] = event; 20288c2ecf20Sopenharmony_ci cflags[n] = flags; 20298c2ecf20Sopenharmony_ci if (check_excludes(ctrs, cflags, n, 1)) 20308c2ecf20Sopenharmony_ci return -EINVAL; 20318c2ecf20Sopenharmony_ci 20328c2ecf20Sopenharmony_ci local_irq_save(irq_flags); 20338c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 20348c2ecf20Sopenharmony_ci 20358c2ecf20Sopenharmony_ci err = power_check_constraints(cpuhw, events, cflags, n + 1); 20368c2ecf20Sopenharmony_ci 20378c2ecf20Sopenharmony_ci if (has_branch_stack(event)) { 20388c2ecf20Sopenharmony_ci u64 bhrb_filter = -1; 20398c2ecf20Sopenharmony_ci 20408c2ecf20Sopenharmony_ci if (ppmu->bhrb_filter_map) 20418c2ecf20Sopenharmony_ci bhrb_filter = ppmu->bhrb_filter_map( 20428c2ecf20Sopenharmony_ci event->attr.branch_sample_type); 20438c2ecf20Sopenharmony_ci 20448c2ecf20Sopenharmony_ci if (bhrb_filter == -1) { 20458c2ecf20Sopenharmony_ci local_irq_restore(irq_flags); 20468c2ecf20Sopenharmony_ci return -EOPNOTSUPP; 20478c2ecf20Sopenharmony_ci } 20488c2ecf20Sopenharmony_ci cpuhw->bhrb_filter = bhrb_filter; 20498c2ecf20Sopenharmony_ci } 20508c2ecf20Sopenharmony_ci 20518c2ecf20Sopenharmony_ci local_irq_restore(irq_flags); 20528c2ecf20Sopenharmony_ci if (err) 20538c2ecf20Sopenharmony_ci return -EINVAL; 20548c2ecf20Sopenharmony_ci 20558c2ecf20Sopenharmony_ci event->hw.config = events[n]; 20568c2ecf20Sopenharmony_ci event->hw.event_base = cflags[n]; 20578c2ecf20Sopenharmony_ci event->hw.last_period = event->hw.sample_period; 20588c2ecf20Sopenharmony_ci local64_set(&event->hw.period_left, event->hw.last_period); 20598c2ecf20Sopenharmony_ci 20608c2ecf20Sopenharmony_ci /* 20618c2ecf20Sopenharmony_ci * For EBB events we just context switch the PMC value, we don't do any 20628c2ecf20Sopenharmony_ci * of the sample_period logic. We use hw.prev_count for this. 20638c2ecf20Sopenharmony_ci */ 20648c2ecf20Sopenharmony_ci if (is_ebb_event(event)) 20658c2ecf20Sopenharmony_ci local64_set(&event->hw.prev_count, 0); 20668c2ecf20Sopenharmony_ci 20678c2ecf20Sopenharmony_ci /* 20688c2ecf20Sopenharmony_ci * See if we need to reserve the PMU. 
20698c2ecf20Sopenharmony_ci * If no events are currently in use, then we have to take a 20708c2ecf20Sopenharmony_ci * mutex to ensure that we don't race with another task doing 20718c2ecf20Sopenharmony_ci * reserve_pmc_hardware or release_pmc_hardware. 20728c2ecf20Sopenharmony_ci */ 20738c2ecf20Sopenharmony_ci err = 0; 20748c2ecf20Sopenharmony_ci if (!atomic_inc_not_zero(&num_events)) { 20758c2ecf20Sopenharmony_ci mutex_lock(&pmc_reserve_mutex); 20768c2ecf20Sopenharmony_ci if (atomic_read(&num_events) == 0 && 20778c2ecf20Sopenharmony_ci reserve_pmc_hardware(perf_event_interrupt)) 20788c2ecf20Sopenharmony_ci err = -EBUSY; 20798c2ecf20Sopenharmony_ci else 20808c2ecf20Sopenharmony_ci atomic_inc(&num_events); 20818c2ecf20Sopenharmony_ci mutex_unlock(&pmc_reserve_mutex); 20828c2ecf20Sopenharmony_ci } 20838c2ecf20Sopenharmony_ci event->destroy = hw_perf_event_destroy; 20848c2ecf20Sopenharmony_ci 20858c2ecf20Sopenharmony_ci return err; 20868c2ecf20Sopenharmony_ci} 20878c2ecf20Sopenharmony_ci 20888c2ecf20Sopenharmony_cistatic int power_pmu_event_idx(struct perf_event *event) 20898c2ecf20Sopenharmony_ci{ 20908c2ecf20Sopenharmony_ci return event->hw.idx; 20918c2ecf20Sopenharmony_ci} 20928c2ecf20Sopenharmony_ci 20938c2ecf20Sopenharmony_cissize_t power_events_sysfs_show(struct device *dev, 20948c2ecf20Sopenharmony_ci struct device_attribute *attr, char *page) 20958c2ecf20Sopenharmony_ci{ 20968c2ecf20Sopenharmony_ci struct perf_pmu_events_attr *pmu_attr; 20978c2ecf20Sopenharmony_ci 20988c2ecf20Sopenharmony_ci pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); 20998c2ecf20Sopenharmony_ci 21008c2ecf20Sopenharmony_ci return sprintf(page, "event=0x%02llx\n", pmu_attr->id); 21018c2ecf20Sopenharmony_ci} 21028c2ecf20Sopenharmony_ci 21038c2ecf20Sopenharmony_cistatic struct pmu power_pmu = { 21048c2ecf20Sopenharmony_ci .pmu_enable = power_pmu_enable, 21058c2ecf20Sopenharmony_ci .pmu_disable = power_pmu_disable, 21068c2ecf20Sopenharmony_ci .event_init = power_pmu_event_init, 21078c2ecf20Sopenharmony_ci .add = power_pmu_add, 21088c2ecf20Sopenharmony_ci .del = power_pmu_del, 21098c2ecf20Sopenharmony_ci .start = power_pmu_start, 21108c2ecf20Sopenharmony_ci .stop = power_pmu_stop, 21118c2ecf20Sopenharmony_ci .read = power_pmu_read, 21128c2ecf20Sopenharmony_ci .start_txn = power_pmu_start_txn, 21138c2ecf20Sopenharmony_ci .cancel_txn = power_pmu_cancel_txn, 21148c2ecf20Sopenharmony_ci .commit_txn = power_pmu_commit_txn, 21158c2ecf20Sopenharmony_ci .event_idx = power_pmu_event_idx, 21168c2ecf20Sopenharmony_ci .sched_task = power_pmu_sched_task, 21178c2ecf20Sopenharmony_ci}; 21188c2ecf20Sopenharmony_ci 21198c2ecf20Sopenharmony_ci/* 21208c2ecf20Sopenharmony_ci * A counter has overflowed; update its count and record 21218c2ecf20Sopenharmony_ci * things if requested. Note that interrupts are hard-disabled 21228c2ecf20Sopenharmony_ci * here so there is no possibility of being interrupted. 
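 * (This is called from the performance monitor interrupt handler,
 * __perf_event_interrupt(), further down in this file.)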
21238c2ecf20Sopenharmony_ci */ 21248c2ecf20Sopenharmony_cistatic void record_and_restart(struct perf_event *event, unsigned long val, 21258c2ecf20Sopenharmony_ci struct pt_regs *regs) 21268c2ecf20Sopenharmony_ci{ 21278c2ecf20Sopenharmony_ci u64 period = event->hw.sample_period; 21288c2ecf20Sopenharmony_ci s64 prev, delta, left; 21298c2ecf20Sopenharmony_ci int record = 0; 21308c2ecf20Sopenharmony_ci 21318c2ecf20Sopenharmony_ci if (event->hw.state & PERF_HES_STOPPED) { 21328c2ecf20Sopenharmony_ci write_pmc(event->hw.idx, 0); 21338c2ecf20Sopenharmony_ci return; 21348c2ecf20Sopenharmony_ci } 21358c2ecf20Sopenharmony_ci 21368c2ecf20Sopenharmony_ci /* we don't have to worry about interrupts here */ 21378c2ecf20Sopenharmony_ci prev = local64_read(&event->hw.prev_count); 21388c2ecf20Sopenharmony_ci delta = check_and_compute_delta(prev, val); 21398c2ecf20Sopenharmony_ci local64_add(delta, &event->count); 21408c2ecf20Sopenharmony_ci 21418c2ecf20Sopenharmony_ci /* 21428c2ecf20Sopenharmony_ci * See if the total period for this event has expired, 21438c2ecf20Sopenharmony_ci * and update for the next period. 21448c2ecf20Sopenharmony_ci */ 21458c2ecf20Sopenharmony_ci val = 0; 21468c2ecf20Sopenharmony_ci left = local64_read(&event->hw.period_left) - delta; 21478c2ecf20Sopenharmony_ci if (delta == 0) 21488c2ecf20Sopenharmony_ci left++; 21498c2ecf20Sopenharmony_ci if (period) { 21508c2ecf20Sopenharmony_ci if (left <= 0) { 21518c2ecf20Sopenharmony_ci left += period; 21528c2ecf20Sopenharmony_ci if (left <= 0) 21538c2ecf20Sopenharmony_ci left = period; 21548c2ecf20Sopenharmony_ci 21558c2ecf20Sopenharmony_ci /* 21568c2ecf20Sopenharmony_ci * If address is not requested in the sample via 21578c2ecf20Sopenharmony_ci * PERF_SAMPLE_IP, just record that sample irrespective 21588c2ecf20Sopenharmony_ci * of SIAR valid check. 21598c2ecf20Sopenharmony_ci */ 21608c2ecf20Sopenharmony_ci if (event->attr.sample_type & PERF_SAMPLE_IP) 21618c2ecf20Sopenharmony_ci record = siar_valid(regs); 21628c2ecf20Sopenharmony_ci else 21638c2ecf20Sopenharmony_ci record = 1; 21648c2ecf20Sopenharmony_ci 21658c2ecf20Sopenharmony_ci event->hw.last_period = event->hw.sample_period; 21668c2ecf20Sopenharmony_ci } 21678c2ecf20Sopenharmony_ci if (left < 0x80000000LL) 21688c2ecf20Sopenharmony_ci val = 0x80000000LL - left; 21698c2ecf20Sopenharmony_ci } 21708c2ecf20Sopenharmony_ci 21718c2ecf20Sopenharmony_ci write_pmc(event->hw.idx, val); 21728c2ecf20Sopenharmony_ci local64_set(&event->hw.prev_count, val); 21738c2ecf20Sopenharmony_ci local64_set(&event->hw.period_left, left); 21748c2ecf20Sopenharmony_ci perf_event_update_userpage(event); 21758c2ecf20Sopenharmony_ci 21768c2ecf20Sopenharmony_ci /* 21778c2ecf20Sopenharmony_ci * Due to hardware limitation, sometimes SIAR could sample a kernel 21788c2ecf20Sopenharmony_ci * address even when freeze on supervisor state (kernel) is set in 21798c2ecf20Sopenharmony_ci * MMCR2. Check attr.exclude_kernel and address to drop the sample in 21808c2ecf20Sopenharmony_ci * these cases. 21818c2ecf20Sopenharmony_ci */ 21828c2ecf20Sopenharmony_ci if (event->attr.exclude_kernel && 21838c2ecf20Sopenharmony_ci (event->attr.sample_type & PERF_SAMPLE_IP) && 21848c2ecf20Sopenharmony_ci is_kernel_addr(mfspr(SPRN_SIAR))) 21858c2ecf20Sopenharmony_ci record = 0; 21868c2ecf20Sopenharmony_ci 21878c2ecf20Sopenharmony_ci /* 21888c2ecf20Sopenharmony_ci * Finally record data if requested. 
21898c2ecf20Sopenharmony_ci */ 21908c2ecf20Sopenharmony_ci if (record) { 21918c2ecf20Sopenharmony_ci struct perf_sample_data data; 21928c2ecf20Sopenharmony_ci 21938c2ecf20Sopenharmony_ci perf_sample_data_init(&data, ~0ULL, event->hw.last_period); 21948c2ecf20Sopenharmony_ci 21958c2ecf20Sopenharmony_ci if (event->attr.sample_type & 21968c2ecf20Sopenharmony_ci (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) 21978c2ecf20Sopenharmony_ci perf_get_data_addr(event, regs, &data.addr); 21988c2ecf20Sopenharmony_ci 21998c2ecf20Sopenharmony_ci if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { 22008c2ecf20Sopenharmony_ci struct cpu_hw_events *cpuhw; 22018c2ecf20Sopenharmony_ci cpuhw = this_cpu_ptr(&cpu_hw_events); 22028c2ecf20Sopenharmony_ci power_pmu_bhrb_read(event, cpuhw); 22038c2ecf20Sopenharmony_ci data.br_stack = &cpuhw->bhrb_stack; 22048c2ecf20Sopenharmony_ci } 22058c2ecf20Sopenharmony_ci 22068c2ecf20Sopenharmony_ci if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && 22078c2ecf20Sopenharmony_ci ppmu->get_mem_data_src) 22088c2ecf20Sopenharmony_ci ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs); 22098c2ecf20Sopenharmony_ci 22108c2ecf20Sopenharmony_ci if (event->attr.sample_type & PERF_SAMPLE_WEIGHT && 22118c2ecf20Sopenharmony_ci ppmu->get_mem_weight) 22128c2ecf20Sopenharmony_ci ppmu->get_mem_weight(&data.weight); 22138c2ecf20Sopenharmony_ci 22148c2ecf20Sopenharmony_ci if (perf_event_overflow(event, &data, regs)) 22158c2ecf20Sopenharmony_ci power_pmu_stop(event, 0); 22168c2ecf20Sopenharmony_ci } else if (period) { 22178c2ecf20Sopenharmony_ci /* Account for interrupt in case of invalid SIAR */ 22188c2ecf20Sopenharmony_ci if (perf_event_account_interrupt(event)) 22198c2ecf20Sopenharmony_ci power_pmu_stop(event, 0); 22208c2ecf20Sopenharmony_ci } 22218c2ecf20Sopenharmony_ci} 22228c2ecf20Sopenharmony_ci 22238c2ecf20Sopenharmony_ci/* 22248c2ecf20Sopenharmony_ci * Called from generic code to get the misc flags (i.e. processor mode) 22258c2ecf20Sopenharmony_ci * for an event_id. 22268c2ecf20Sopenharmony_ci */ 22278c2ecf20Sopenharmony_ciunsigned long perf_misc_flags(struct pt_regs *regs) 22288c2ecf20Sopenharmony_ci{ 22298c2ecf20Sopenharmony_ci u32 flags = perf_get_misc_flags(regs); 22308c2ecf20Sopenharmony_ci 22318c2ecf20Sopenharmony_ci if (flags) 22328c2ecf20Sopenharmony_ci return flags; 22338c2ecf20Sopenharmony_ci return user_mode(regs) ? PERF_RECORD_MISC_USER : 22348c2ecf20Sopenharmony_ci PERF_RECORD_MISC_KERNEL; 22358c2ecf20Sopenharmony_ci} 22368c2ecf20Sopenharmony_ci 22378c2ecf20Sopenharmony_ci/* 22388c2ecf20Sopenharmony_ci * Called from generic code to get the instruction pointer 22398c2ecf20Sopenharmony_ci * for an event_id. 
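 * (When the SIAR is used and valid, the sampled address is taken from
 * SPRN_SIAR plus a processor specific adjustment; otherwise regs->nip is
 * used, or 0 if the SIAR should have been used but is not valid.)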
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		// no valid instruction pointer
	else
		return regs->nip;
}

static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}

/* A PMC has overflowed when its 32-bit value has gone negative (top bit set). */
static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}

/*
 * Performance monitor interrupt stuff
 */
static void __perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset them anyway.
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}

		/*
		 * Clear PACA_IRQ_PMI in case it was set by
		 * set_pmi_irq_pending() when the PMU was enabled
		 * after accounting for interrupts.
		 */
		clear_pmi_irq_pending();

		if (!active)
			/* reset non-active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for the special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}

	/*
	 * During system-wide profiling, or while a specific CPU is monitored
	 * for an event, some corner cases can cause a PMC to overflow in the
	 * idle path. This triggers a PMI after waking up from idle. Since
	 * counter values are _not_ saved/restored in the idle path, this can
	 * lead to the "Can't find PMC" message below.
	 */
	if (unlikely(!found) && !arch_irq_disabled_regs(regs))
		printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value. This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
}

/* PMI entry point: time the handler so the core can throttle the sample rate. */
static void perf_event_interrupt(struct pt_regs *regs)
{
	u64 start_clock = sched_clock();

	__perf_event_interrupt(regs);
	perf_sample_event_took(sched_clock() - start_clock);
}

/* CPU hotplug prepare callback: bring the CPU up with a clean, frozen PMU. */
static int power_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (ppmu) {
		memset(cpuhw, 0, sizeof(*cpuhw));
		cpuhw->mmcr.mmcr0 = MMCR0_FC;
	}
	return 0;
}

/* Hook a CPU-specific PMU description into the generic perf framework. */
int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;
	power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
			  power_pmu_prepare_cpu, NULL);
	return 0;
}

#ifdef CONFIG_PPC64
static int __init init_ppc64_pmu(void)
{
	/* run through all the PMU drivers one at a time */
	if (!init_power5_pmu())
		return 0;
	else if (!init_power5p_pmu())
		return 0;
	else if (!init_power6_pmu())
		return 0;
	else if (!init_power7_pmu())
		return 0;
	else if (!init_power8_pmu())
		return 0;
	else if (!init_power9_pmu())
		return 0;
	else if (!init_power10_pmu())
		return 0;
	else if (!init_ppc970_pmu())
		return 0;
	else
		return init_generic_compat_pmu();
}
early_initcall(init_ppc64_pmu);
#endif
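
/*
 * For reference: each init_*_pmu() probed above lives in its own driver file
 * and, after checking that it is running on the matching CPU, simply hands
 * its struct power_pmu to register_power_pmu(). A minimal sketch of such an
 * init function is shown below; the names and the PVR check are illustrative
 * assumptions, not code taken from any particular driver.
 *
 *	static int __init init_myproc_pmu(void)
 *	{
 *		if (!pvr_version_is(PVR_POWER10))
 *			return -ENODEV;
 *
 *		return register_power_pmu(&myproc_pmu);
 *	}
 */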