18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only 28c2ecf20Sopenharmony_ci/* 38c2ecf20Sopenharmony_ci * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers. 48c2ecf20Sopenharmony_ci * 58c2ecf20Sopenharmony_ci * (C) Copyright 2014, 2015 Linaro Ltd. 68c2ecf20Sopenharmony_ci * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org> 78c2ecf20Sopenharmony_ci * 88c2ecf20Sopenharmony_ci * CPPC describes a few methods for controlling CPU performance using 98c2ecf20Sopenharmony_ci * information from a per CPU table called CPC. This table is described in 108c2ecf20Sopenharmony_ci * the ACPI v5.0+ specification. The table consists of a list of 118c2ecf20Sopenharmony_ci * registers which may be memory mapped or hardware registers and also may 128c2ecf20Sopenharmony_ci * include some static integer values. 138c2ecf20Sopenharmony_ci * 148c2ecf20Sopenharmony_ci * CPU performance is on an abstract continuous scale as against a discretized 158c2ecf20Sopenharmony_ci * P-state scale which is tied to CPU frequency only. In brief, the basic 168c2ecf20Sopenharmony_ci * operation involves: 178c2ecf20Sopenharmony_ci * 188c2ecf20Sopenharmony_ci * - OS makes a CPU performance request. (Can provide min and max bounds) 198c2ecf20Sopenharmony_ci * 208c2ecf20Sopenharmony_ci * - Platform (such as BMC) is free to optimize request within requested bounds 218c2ecf20Sopenharmony_ci * depending on power/thermal budgets etc. 228c2ecf20Sopenharmony_ci * 238c2ecf20Sopenharmony_ci * - Platform conveys its decision back to OS 248c2ecf20Sopenharmony_ci * 258c2ecf20Sopenharmony_ci * The communication between OS and platform occurs through another medium 268c2ecf20Sopenharmony_ci * called (PCC) Platform Communication Channel. This is a generic mailbox like 278c2ecf20Sopenharmony_ci * mechanism which includes doorbell semantics to indicate register updates. 288c2ecf20Sopenharmony_ci * See drivers/mailbox/pcc.c for details on PCC. 
298c2ecf20Sopenharmony_ci * 308c2ecf20Sopenharmony_ci * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and 318c2ecf20Sopenharmony_ci * above specifications. 328c2ecf20Sopenharmony_ci */ 338c2ecf20Sopenharmony_ci 348c2ecf20Sopenharmony_ci#define pr_fmt(fmt) "ACPI CPPC: " fmt 358c2ecf20Sopenharmony_ci 368c2ecf20Sopenharmony_ci#include <linux/cpufreq.h> 378c2ecf20Sopenharmony_ci#include <linux/delay.h> 388c2ecf20Sopenharmony_ci#include <linux/iopoll.h> 398c2ecf20Sopenharmony_ci#include <linux/ktime.h> 408c2ecf20Sopenharmony_ci#include <linux/rwsem.h> 418c2ecf20Sopenharmony_ci#include <linux/wait.h> 428c2ecf20Sopenharmony_ci 438c2ecf20Sopenharmony_ci#include <acpi/cppc_acpi.h> 448c2ecf20Sopenharmony_ci 458c2ecf20Sopenharmony_cistruct cppc_pcc_data { 468c2ecf20Sopenharmony_ci struct mbox_chan *pcc_channel; 478c2ecf20Sopenharmony_ci void __iomem *pcc_comm_addr; 488c2ecf20Sopenharmony_ci bool pcc_channel_acquired; 498c2ecf20Sopenharmony_ci unsigned int deadline_us; 508c2ecf20Sopenharmony_ci unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; 518c2ecf20Sopenharmony_ci 528c2ecf20Sopenharmony_ci bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ 538c2ecf20Sopenharmony_ci bool platform_owns_pcc; /* Ownership of PCC subspace */ 548c2ecf20Sopenharmony_ci unsigned int pcc_write_cnt; /* Running count of PCC write commands */ 558c2ecf20Sopenharmony_ci 568c2ecf20Sopenharmony_ci /* 578c2ecf20Sopenharmony_ci * Lock to provide controlled access to the PCC channel. 
588c2ecf20Sopenharmony_ci * 598c2ecf20Sopenharmony_ci * For performance critical usecases(currently cppc_set_perf) 608c2ecf20Sopenharmony_ci * We need to take read_lock and check if channel belongs to OSPM 618c2ecf20Sopenharmony_ci * before reading or writing to PCC subspace 628c2ecf20Sopenharmony_ci * We need to take write_lock before transferring the channel 638c2ecf20Sopenharmony_ci * ownership to the platform via a Doorbell 648c2ecf20Sopenharmony_ci * This allows us to batch a number of CPPC requests if they happen 658c2ecf20Sopenharmony_ci * to originate in about the same time 668c2ecf20Sopenharmony_ci * 678c2ecf20Sopenharmony_ci * For non-performance critical usecases(init) 688c2ecf20Sopenharmony_ci * Take write_lock for all purposes which gives exclusive access 698c2ecf20Sopenharmony_ci */ 708c2ecf20Sopenharmony_ci struct rw_semaphore pcc_lock; 718c2ecf20Sopenharmony_ci 728c2ecf20Sopenharmony_ci /* Wait queue for CPUs whose requests were batched */ 738c2ecf20Sopenharmony_ci wait_queue_head_t pcc_write_wait_q; 748c2ecf20Sopenharmony_ci ktime_t last_cmd_cmpl_time; 758c2ecf20Sopenharmony_ci ktime_t last_mpar_reset; 768c2ecf20Sopenharmony_ci int mpar_count; 778c2ecf20Sopenharmony_ci int refcount; 788c2ecf20Sopenharmony_ci}; 798c2ecf20Sopenharmony_ci 808c2ecf20Sopenharmony_ci/* Array to represent the PCC channel per subspace ID */ 818c2ecf20Sopenharmony_cistatic struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES]; 828c2ecf20Sopenharmony_ci/* The cpu_pcc_subspace_idx contains per CPU subspace ID */ 838c2ecf20Sopenharmony_cistatic DEFINE_PER_CPU(int, cpu_pcc_subspace_idx); 848c2ecf20Sopenharmony_ci 858c2ecf20Sopenharmony_ci/* 868c2ecf20Sopenharmony_ci * The cpc_desc structure contains the ACPI register details 878c2ecf20Sopenharmony_ci * as described in the per CPU _CPC tables. The details 888c2ecf20Sopenharmony_ci * include the type of register (e.g. PCC, System IO, FFH etc.) 
898c2ecf20Sopenharmony_ci * and destination addresses which lets us READ/WRITE CPU performance 908c2ecf20Sopenharmony_ci * information using the appropriate I/O methods. 918c2ecf20Sopenharmony_ci */ 928c2ecf20Sopenharmony_cistatic DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); 938c2ecf20Sopenharmony_ci 948c2ecf20Sopenharmony_ci/* pcc mapped address + header size + offset within PCC subspace */ 958c2ecf20Sopenharmony_ci#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \ 968c2ecf20Sopenharmony_ci 0x8 + (offs)) 978c2ecf20Sopenharmony_ci 988c2ecf20Sopenharmony_ci/* Check if a CPC register is in PCC */ 998c2ecf20Sopenharmony_ci#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ 1008c2ecf20Sopenharmony_ci (cpc)->cpc_entry.reg.space_id == \ 1018c2ecf20Sopenharmony_ci ACPI_ADR_SPACE_PLATFORM_COMM) 1028c2ecf20Sopenharmony_ci 1038c2ecf20Sopenharmony_ci/* Evalutes to True if reg is a NULL register descriptor */ 1048c2ecf20Sopenharmony_ci#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ 1058c2ecf20Sopenharmony_ci (reg)->address == 0 && \ 1068c2ecf20Sopenharmony_ci (reg)->bit_width == 0 && \ 1078c2ecf20Sopenharmony_ci (reg)->bit_offset == 0 && \ 1088c2ecf20Sopenharmony_ci (reg)->access_width == 0) 1098c2ecf20Sopenharmony_ci 1108c2ecf20Sopenharmony_ci/* Evalutes to True if an optional cpc field is supported */ 1118c2ecf20Sopenharmony_ci#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ 1128c2ecf20Sopenharmony_ci !!(cpc)->cpc_entry.int_value : \ 1138c2ecf20Sopenharmony_ci !IS_NULL_REG(&(cpc)->cpc_entry.reg)) 1148c2ecf20Sopenharmony_ci/* 1158c2ecf20Sopenharmony_ci * Arbitrary Retries in case the remote processor is slow to respond 1168c2ecf20Sopenharmony_ci * to PCC commands. Keeping it high enough to cover emulators where 1178c2ecf20Sopenharmony_ci * the processors run painfully slow. 
1188c2ecf20Sopenharmony_ci */ 1198c2ecf20Sopenharmony_ci#define NUM_RETRIES 500ULL 1208c2ecf20Sopenharmony_ci 1218c2ecf20Sopenharmony_ci#define define_one_cppc_ro(_name) \ 1228c2ecf20Sopenharmony_cistatic struct kobj_attribute _name = \ 1238c2ecf20Sopenharmony_ci__ATTR(_name, 0444, show_##_name, NULL) 1248c2ecf20Sopenharmony_ci 1258c2ecf20Sopenharmony_ci#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) 1268c2ecf20Sopenharmony_ci 1278c2ecf20Sopenharmony_ci#define show_cppc_data(access_fn, struct_name, member_name) \ 1288c2ecf20Sopenharmony_ci static ssize_t show_##member_name(struct kobject *kobj, \ 1298c2ecf20Sopenharmony_ci struct kobj_attribute *attr, char *buf) \ 1308c2ecf20Sopenharmony_ci { \ 1318c2ecf20Sopenharmony_ci struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \ 1328c2ecf20Sopenharmony_ci struct struct_name st_name = {0}; \ 1338c2ecf20Sopenharmony_ci int ret; \ 1348c2ecf20Sopenharmony_ci \ 1358c2ecf20Sopenharmony_ci ret = access_fn(cpc_ptr->cpu_id, &st_name); \ 1368c2ecf20Sopenharmony_ci if (ret) \ 1378c2ecf20Sopenharmony_ci return ret; \ 1388c2ecf20Sopenharmony_ci \ 1398c2ecf20Sopenharmony_ci return scnprintf(buf, PAGE_SIZE, "%llu\n", \ 1408c2ecf20Sopenharmony_ci (u64)st_name.member_name); \ 1418c2ecf20Sopenharmony_ci } \ 1428c2ecf20Sopenharmony_ci define_one_cppc_ro(member_name) 1438c2ecf20Sopenharmony_ci 1448c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf); 1458c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf); 1468c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf); 1478c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf); 1488c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq); 1498c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq); 1508c2ecf20Sopenharmony_ci 1518c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_ctrs, 
cppc_perf_fb_ctrs, reference_perf); 1528c2ecf20Sopenharmony_cishow_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time); 1538c2ecf20Sopenharmony_ci 1548c2ecf20Sopenharmony_cistatic ssize_t show_feedback_ctrs(struct kobject *kobj, 1558c2ecf20Sopenharmony_ci struct kobj_attribute *attr, char *buf) 1568c2ecf20Sopenharmony_ci{ 1578c2ecf20Sopenharmony_ci struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); 1588c2ecf20Sopenharmony_ci struct cppc_perf_fb_ctrs fb_ctrs = {0}; 1598c2ecf20Sopenharmony_ci int ret; 1608c2ecf20Sopenharmony_ci 1618c2ecf20Sopenharmony_ci ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); 1628c2ecf20Sopenharmony_ci if (ret) 1638c2ecf20Sopenharmony_ci return ret; 1648c2ecf20Sopenharmony_ci 1658c2ecf20Sopenharmony_ci return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n", 1668c2ecf20Sopenharmony_ci fb_ctrs.reference, fb_ctrs.delivered); 1678c2ecf20Sopenharmony_ci} 1688c2ecf20Sopenharmony_cidefine_one_cppc_ro(feedback_ctrs); 1698c2ecf20Sopenharmony_ci 1708c2ecf20Sopenharmony_cistatic struct attribute *cppc_attrs[] = { 1718c2ecf20Sopenharmony_ci &feedback_ctrs.attr, 1728c2ecf20Sopenharmony_ci &reference_perf.attr, 1738c2ecf20Sopenharmony_ci &wraparound_time.attr, 1748c2ecf20Sopenharmony_ci &highest_perf.attr, 1758c2ecf20Sopenharmony_ci &lowest_perf.attr, 1768c2ecf20Sopenharmony_ci &lowest_nonlinear_perf.attr, 1778c2ecf20Sopenharmony_ci &nominal_perf.attr, 1788c2ecf20Sopenharmony_ci &nominal_freq.attr, 1798c2ecf20Sopenharmony_ci &lowest_freq.attr, 1808c2ecf20Sopenharmony_ci NULL 1818c2ecf20Sopenharmony_ci}; 1828c2ecf20Sopenharmony_ci 1838c2ecf20Sopenharmony_cistatic struct kobj_type cppc_ktype = { 1848c2ecf20Sopenharmony_ci .sysfs_ops = &kobj_sysfs_ops, 1858c2ecf20Sopenharmony_ci .default_attrs = cppc_attrs, 1868c2ecf20Sopenharmony_ci}; 1878c2ecf20Sopenharmony_ci 1888c2ecf20Sopenharmony_cistatic int check_pcc_chan(int pcc_ss_id, bool chk_err_bit) 1898c2ecf20Sopenharmony_ci{ 1908c2ecf20Sopenharmony_ci int ret, status; 1918c2ecf20Sopenharmony_ci 
struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 1928c2ecf20Sopenharmony_ci struct acpi_pcct_shared_memory __iomem *generic_comm_base = 1938c2ecf20Sopenharmony_ci pcc_ss_data->pcc_comm_addr; 1948c2ecf20Sopenharmony_ci 1958c2ecf20Sopenharmony_ci if (!pcc_ss_data->platform_owns_pcc) 1968c2ecf20Sopenharmony_ci return 0; 1978c2ecf20Sopenharmony_ci 1988c2ecf20Sopenharmony_ci /* 1998c2ecf20Sopenharmony_ci * Poll PCC status register every 3us(delay_us) for maximum of 2008c2ecf20Sopenharmony_ci * deadline_us(timeout_us) until PCC command complete bit is set(cond) 2018c2ecf20Sopenharmony_ci */ 2028c2ecf20Sopenharmony_ci ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status, 2038c2ecf20Sopenharmony_ci status & PCC_CMD_COMPLETE_MASK, 3, 2048c2ecf20Sopenharmony_ci pcc_ss_data->deadline_us); 2058c2ecf20Sopenharmony_ci 2068c2ecf20Sopenharmony_ci if (likely(!ret)) { 2078c2ecf20Sopenharmony_ci pcc_ss_data->platform_owns_pcc = false; 2088c2ecf20Sopenharmony_ci if (chk_err_bit && (status & PCC_ERROR_MASK)) 2098c2ecf20Sopenharmony_ci ret = -EIO; 2108c2ecf20Sopenharmony_ci } 2118c2ecf20Sopenharmony_ci 2128c2ecf20Sopenharmony_ci if (unlikely(ret)) 2138c2ecf20Sopenharmony_ci pr_err("PCC check channel failed for ss: %d. 
ret=%d\n", 2148c2ecf20Sopenharmony_ci pcc_ss_id, ret); 2158c2ecf20Sopenharmony_ci 2168c2ecf20Sopenharmony_ci return ret; 2178c2ecf20Sopenharmony_ci} 2188c2ecf20Sopenharmony_ci 2198c2ecf20Sopenharmony_ci/* 2208c2ecf20Sopenharmony_ci * This function transfers the ownership of the PCC to the platform 2218c2ecf20Sopenharmony_ci * So it must be called while holding write_lock(pcc_lock) 2228c2ecf20Sopenharmony_ci */ 2238c2ecf20Sopenharmony_cistatic int send_pcc_cmd(int pcc_ss_id, u16 cmd) 2248c2ecf20Sopenharmony_ci{ 2258c2ecf20Sopenharmony_ci int ret = -EIO, i; 2268c2ecf20Sopenharmony_ci struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id]; 2278c2ecf20Sopenharmony_ci struct acpi_pcct_shared_memory *generic_comm_base = 2288c2ecf20Sopenharmony_ci (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr; 2298c2ecf20Sopenharmony_ci unsigned int time_delta; 2308c2ecf20Sopenharmony_ci 2318c2ecf20Sopenharmony_ci /* 2328c2ecf20Sopenharmony_ci * For CMD_WRITE we know for a fact the caller should have checked 2338c2ecf20Sopenharmony_ci * the channel before writing to PCC space 2348c2ecf20Sopenharmony_ci */ 2358c2ecf20Sopenharmony_ci if (cmd == CMD_READ) { 2368c2ecf20Sopenharmony_ci /* 2378c2ecf20Sopenharmony_ci * If there are pending cpc_writes, then we stole the channel 2388c2ecf20Sopenharmony_ci * before write completion, so first send a WRITE command to 2398c2ecf20Sopenharmony_ci * platform 2408c2ecf20Sopenharmony_ci */ 2418c2ecf20Sopenharmony_ci if (pcc_ss_data->pending_pcc_write_cmd) 2428c2ecf20Sopenharmony_ci send_pcc_cmd(pcc_ss_id, CMD_WRITE); 2438c2ecf20Sopenharmony_ci 2448c2ecf20Sopenharmony_ci ret = check_pcc_chan(pcc_ss_id, false); 2458c2ecf20Sopenharmony_ci if (ret) 2468c2ecf20Sopenharmony_ci goto end; 2478c2ecf20Sopenharmony_ci } else /* CMD_WRITE */ 2488c2ecf20Sopenharmony_ci pcc_ss_data->pending_pcc_write_cmd = FALSE; 2498c2ecf20Sopenharmony_ci 2508c2ecf20Sopenharmony_ci /* 2518c2ecf20Sopenharmony_ci * Handle the Minimum Request Turnaround Time(MRTT) 
2528c2ecf20Sopenharmony_ci * "The minimum amount of time that OSPM must wait after the completion 2538c2ecf20Sopenharmony_ci * of a command before issuing the next command, in microseconds" 2548c2ecf20Sopenharmony_ci */ 2558c2ecf20Sopenharmony_ci if (pcc_ss_data->pcc_mrtt) { 2568c2ecf20Sopenharmony_ci time_delta = ktime_us_delta(ktime_get(), 2578c2ecf20Sopenharmony_ci pcc_ss_data->last_cmd_cmpl_time); 2588c2ecf20Sopenharmony_ci if (pcc_ss_data->pcc_mrtt > time_delta) 2598c2ecf20Sopenharmony_ci udelay(pcc_ss_data->pcc_mrtt - time_delta); 2608c2ecf20Sopenharmony_ci } 2618c2ecf20Sopenharmony_ci 2628c2ecf20Sopenharmony_ci /* 2638c2ecf20Sopenharmony_ci * Handle the non-zero Maximum Periodic Access Rate(MPAR) 2648c2ecf20Sopenharmony_ci * "The maximum number of periodic requests that the subspace channel can 2658c2ecf20Sopenharmony_ci * support, reported in commands per minute. 0 indicates no limitation." 2668c2ecf20Sopenharmony_ci * 2678c2ecf20Sopenharmony_ci * This parameter should be ideally zero or large enough so that it can 2688c2ecf20Sopenharmony_ci * handle maximum number of requests that all the cores in the system can 2698c2ecf20Sopenharmony_ci * collectively generate. 
If it is not, we will follow the spec and just 2708c2ecf20Sopenharmony_ci * not send the request to the platform after hitting the MPAR limit in 2718c2ecf20Sopenharmony_ci * any 60s window 2728c2ecf20Sopenharmony_ci */ 2738c2ecf20Sopenharmony_ci if (pcc_ss_data->pcc_mpar) { 2748c2ecf20Sopenharmony_ci if (pcc_ss_data->mpar_count == 0) { 2758c2ecf20Sopenharmony_ci time_delta = ktime_ms_delta(ktime_get(), 2768c2ecf20Sopenharmony_ci pcc_ss_data->last_mpar_reset); 2778c2ecf20Sopenharmony_ci if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) { 2788c2ecf20Sopenharmony_ci pr_debug("PCC cmd for subspace %d not sent due to MPAR limit", 2798c2ecf20Sopenharmony_ci pcc_ss_id); 2808c2ecf20Sopenharmony_ci ret = -EIO; 2818c2ecf20Sopenharmony_ci goto end; 2828c2ecf20Sopenharmony_ci } 2838c2ecf20Sopenharmony_ci pcc_ss_data->last_mpar_reset = ktime_get(); 2848c2ecf20Sopenharmony_ci pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar; 2858c2ecf20Sopenharmony_ci } 2868c2ecf20Sopenharmony_ci pcc_ss_data->mpar_count--; 2878c2ecf20Sopenharmony_ci } 2888c2ecf20Sopenharmony_ci 2898c2ecf20Sopenharmony_ci /* Write to the shared comm region. */ 2908c2ecf20Sopenharmony_ci writew_relaxed(cmd, &generic_comm_base->command); 2918c2ecf20Sopenharmony_ci 2928c2ecf20Sopenharmony_ci /* Flip CMD COMPLETE bit */ 2938c2ecf20Sopenharmony_ci writew_relaxed(0, &generic_comm_base->status); 2948c2ecf20Sopenharmony_ci 2958c2ecf20Sopenharmony_ci pcc_ss_data->platform_owns_pcc = true; 2968c2ecf20Sopenharmony_ci 2978c2ecf20Sopenharmony_ci /* Ring doorbell */ 2988c2ecf20Sopenharmony_ci ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd); 2998c2ecf20Sopenharmony_ci if (ret < 0) { 3008c2ecf20Sopenharmony_ci pr_err("Err sending PCC mbox message. 
ss: %d cmd:%d, ret:%d\n", 3018c2ecf20Sopenharmony_ci pcc_ss_id, cmd, ret); 3028c2ecf20Sopenharmony_ci goto end; 3038c2ecf20Sopenharmony_ci } 3048c2ecf20Sopenharmony_ci 3058c2ecf20Sopenharmony_ci /* wait for completion and check for PCC errro bit */ 3068c2ecf20Sopenharmony_ci ret = check_pcc_chan(pcc_ss_id, true); 3078c2ecf20Sopenharmony_ci 3088c2ecf20Sopenharmony_ci if (pcc_ss_data->pcc_mrtt) 3098c2ecf20Sopenharmony_ci pcc_ss_data->last_cmd_cmpl_time = ktime_get(); 3108c2ecf20Sopenharmony_ci 3118c2ecf20Sopenharmony_ci if (pcc_ss_data->pcc_channel->mbox->txdone_irq) 3128c2ecf20Sopenharmony_ci mbox_chan_txdone(pcc_ss_data->pcc_channel, ret); 3138c2ecf20Sopenharmony_ci else 3148c2ecf20Sopenharmony_ci mbox_client_txdone(pcc_ss_data->pcc_channel, ret); 3158c2ecf20Sopenharmony_ci 3168c2ecf20Sopenharmony_ciend: 3178c2ecf20Sopenharmony_ci if (cmd == CMD_WRITE) { 3188c2ecf20Sopenharmony_ci if (unlikely(ret)) { 3198c2ecf20Sopenharmony_ci for_each_possible_cpu(i) { 3208c2ecf20Sopenharmony_ci struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); 3218c2ecf20Sopenharmony_ci if (!desc) 3228c2ecf20Sopenharmony_ci continue; 3238c2ecf20Sopenharmony_ci 3248c2ecf20Sopenharmony_ci if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt) 3258c2ecf20Sopenharmony_ci desc->write_cmd_status = ret; 3268c2ecf20Sopenharmony_ci } 3278c2ecf20Sopenharmony_ci } 3288c2ecf20Sopenharmony_ci pcc_ss_data->pcc_write_cnt++; 3298c2ecf20Sopenharmony_ci wake_up_all(&pcc_ss_data->pcc_write_wait_q); 3308c2ecf20Sopenharmony_ci } 3318c2ecf20Sopenharmony_ci 3328c2ecf20Sopenharmony_ci return ret; 3338c2ecf20Sopenharmony_ci} 3348c2ecf20Sopenharmony_ci 3358c2ecf20Sopenharmony_cistatic void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret) 3368c2ecf20Sopenharmony_ci{ 3378c2ecf20Sopenharmony_ci if (ret < 0) 3388c2ecf20Sopenharmony_ci pr_debug("TX did not complete: CMD sent:%x, ret:%d\n", 3398c2ecf20Sopenharmony_ci *(u16 *)msg, ret); 3408c2ecf20Sopenharmony_ci else 3418c2ecf20Sopenharmony_ci pr_debug("TX 
completed. CMD sent:%x, ret:%d\n", 3428c2ecf20Sopenharmony_ci *(u16 *)msg, ret); 3438c2ecf20Sopenharmony_ci} 3448c2ecf20Sopenharmony_ci 3458c2ecf20Sopenharmony_cistatic struct mbox_client cppc_mbox_cl = { 3468c2ecf20Sopenharmony_ci .tx_done = cppc_chan_tx_done, 3478c2ecf20Sopenharmony_ci .knows_txdone = true, 3488c2ecf20Sopenharmony_ci}; 3498c2ecf20Sopenharmony_ci 3508c2ecf20Sopenharmony_cistatic int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle) 3518c2ecf20Sopenharmony_ci{ 3528c2ecf20Sopenharmony_ci int result = -EFAULT; 3538c2ecf20Sopenharmony_ci acpi_status status = AE_OK; 3548c2ecf20Sopenharmony_ci struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 3558c2ecf20Sopenharmony_ci struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"}; 3568c2ecf20Sopenharmony_ci struct acpi_buffer state = {0, NULL}; 3578c2ecf20Sopenharmony_ci union acpi_object *psd = NULL; 3588c2ecf20Sopenharmony_ci struct acpi_psd_package *pdomain; 3598c2ecf20Sopenharmony_ci 3608c2ecf20Sopenharmony_ci status = acpi_evaluate_object_typed(handle, "_PSD", NULL, 3618c2ecf20Sopenharmony_ci &buffer, ACPI_TYPE_PACKAGE); 3628c2ecf20Sopenharmony_ci if (status == AE_NOT_FOUND) /* _PSD is optional */ 3638c2ecf20Sopenharmony_ci return 0; 3648c2ecf20Sopenharmony_ci if (ACPI_FAILURE(status)) 3658c2ecf20Sopenharmony_ci return -ENODEV; 3668c2ecf20Sopenharmony_ci 3678c2ecf20Sopenharmony_ci psd = buffer.pointer; 3688c2ecf20Sopenharmony_ci if (!psd || psd->package.count != 1) { 3698c2ecf20Sopenharmony_ci pr_debug("Invalid _PSD data\n"); 3708c2ecf20Sopenharmony_ci goto end; 3718c2ecf20Sopenharmony_ci } 3728c2ecf20Sopenharmony_ci 3738c2ecf20Sopenharmony_ci pdomain = &(cpc_ptr->domain_info); 3748c2ecf20Sopenharmony_ci 3758c2ecf20Sopenharmony_ci state.length = sizeof(struct acpi_psd_package); 3768c2ecf20Sopenharmony_ci state.pointer = pdomain; 3778c2ecf20Sopenharmony_ci 3788c2ecf20Sopenharmony_ci status = acpi_extract_package(&(psd->package.elements[0]), 3798c2ecf20Sopenharmony_ci &format, &state); 
3808c2ecf20Sopenharmony_ci if (ACPI_FAILURE(status)) { 3818c2ecf20Sopenharmony_ci pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id); 3828c2ecf20Sopenharmony_ci goto end; 3838c2ecf20Sopenharmony_ci } 3848c2ecf20Sopenharmony_ci 3858c2ecf20Sopenharmony_ci if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) { 3868c2ecf20Sopenharmony_ci pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id); 3878c2ecf20Sopenharmony_ci goto end; 3888c2ecf20Sopenharmony_ci } 3898c2ecf20Sopenharmony_ci 3908c2ecf20Sopenharmony_ci if (pdomain->revision != ACPI_PSD_REV0_REVISION) { 3918c2ecf20Sopenharmony_ci pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id); 3928c2ecf20Sopenharmony_ci goto end; 3938c2ecf20Sopenharmony_ci } 3948c2ecf20Sopenharmony_ci 3958c2ecf20Sopenharmony_ci if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL && 3968c2ecf20Sopenharmony_ci pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY && 3978c2ecf20Sopenharmony_ci pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) { 3988c2ecf20Sopenharmony_ci pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id); 3998c2ecf20Sopenharmony_ci goto end; 4008c2ecf20Sopenharmony_ci } 4018c2ecf20Sopenharmony_ci 4028c2ecf20Sopenharmony_ci result = 0; 4038c2ecf20Sopenharmony_ciend: 4048c2ecf20Sopenharmony_ci kfree(buffer.pointer); 4058c2ecf20Sopenharmony_ci return result; 4068c2ecf20Sopenharmony_ci} 4078c2ecf20Sopenharmony_ci 4088c2ecf20Sopenharmony_ci/** 4098c2ecf20Sopenharmony_ci * acpi_get_psd_map - Map the CPUs in a common freq domain. 4108c2ecf20Sopenharmony_ci * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info. 4118c2ecf20Sopenharmony_ci * 4128c2ecf20Sopenharmony_ci * Return: 0 for success or negative value for err. 
4138c2ecf20Sopenharmony_ci */ 4148c2ecf20Sopenharmony_ciint acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) 4158c2ecf20Sopenharmony_ci{ 4168c2ecf20Sopenharmony_ci int count_target; 4178c2ecf20Sopenharmony_ci int retval = 0; 4188c2ecf20Sopenharmony_ci unsigned int i, j; 4198c2ecf20Sopenharmony_ci cpumask_var_t covered_cpus; 4208c2ecf20Sopenharmony_ci struct cppc_cpudata *pr, *match_pr; 4218c2ecf20Sopenharmony_ci struct acpi_psd_package *pdomain; 4228c2ecf20Sopenharmony_ci struct acpi_psd_package *match_pdomain; 4238c2ecf20Sopenharmony_ci struct cpc_desc *cpc_ptr, *match_cpc_ptr; 4248c2ecf20Sopenharmony_ci 4258c2ecf20Sopenharmony_ci if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)) 4268c2ecf20Sopenharmony_ci return -ENOMEM; 4278c2ecf20Sopenharmony_ci 4288c2ecf20Sopenharmony_ci /* 4298c2ecf20Sopenharmony_ci * Now that we have _PSD data from all CPUs, let's setup P-state 4308c2ecf20Sopenharmony_ci * domain info. 4318c2ecf20Sopenharmony_ci */ 4328c2ecf20Sopenharmony_ci for_each_possible_cpu(i) { 4338c2ecf20Sopenharmony_ci if (cpumask_test_cpu(i, covered_cpus)) 4348c2ecf20Sopenharmony_ci continue; 4358c2ecf20Sopenharmony_ci 4368c2ecf20Sopenharmony_ci pr = all_cpu_data[i]; 4378c2ecf20Sopenharmony_ci cpc_ptr = per_cpu(cpc_desc_ptr, i); 4388c2ecf20Sopenharmony_ci if (!cpc_ptr) { 4398c2ecf20Sopenharmony_ci retval = -EFAULT; 4408c2ecf20Sopenharmony_ci goto err_ret; 4418c2ecf20Sopenharmony_ci } 4428c2ecf20Sopenharmony_ci 4438c2ecf20Sopenharmony_ci pdomain = &(cpc_ptr->domain_info); 4448c2ecf20Sopenharmony_ci cpumask_set_cpu(i, pr->shared_cpu_map); 4458c2ecf20Sopenharmony_ci cpumask_set_cpu(i, covered_cpus); 4468c2ecf20Sopenharmony_ci if (pdomain->num_processors <= 1) 4478c2ecf20Sopenharmony_ci continue; 4488c2ecf20Sopenharmony_ci 4498c2ecf20Sopenharmony_ci /* Validate the Domain info */ 4508c2ecf20Sopenharmony_ci count_target = pdomain->num_processors; 4518c2ecf20Sopenharmony_ci if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL) 4528c2ecf20Sopenharmony_ci 
pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; 4538c2ecf20Sopenharmony_ci else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) 4548c2ecf20Sopenharmony_ci pr->shared_type = CPUFREQ_SHARED_TYPE_HW; 4558c2ecf20Sopenharmony_ci else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) 4568c2ecf20Sopenharmony_ci pr->shared_type = CPUFREQ_SHARED_TYPE_ANY; 4578c2ecf20Sopenharmony_ci 4588c2ecf20Sopenharmony_ci for_each_possible_cpu(j) { 4598c2ecf20Sopenharmony_ci if (i == j) 4608c2ecf20Sopenharmony_ci continue; 4618c2ecf20Sopenharmony_ci 4628c2ecf20Sopenharmony_ci match_cpc_ptr = per_cpu(cpc_desc_ptr, j); 4638c2ecf20Sopenharmony_ci if (!match_cpc_ptr) { 4648c2ecf20Sopenharmony_ci retval = -EFAULT; 4658c2ecf20Sopenharmony_ci goto err_ret; 4668c2ecf20Sopenharmony_ci } 4678c2ecf20Sopenharmony_ci 4688c2ecf20Sopenharmony_ci match_pdomain = &(match_cpc_ptr->domain_info); 4698c2ecf20Sopenharmony_ci if (match_pdomain->domain != pdomain->domain) 4708c2ecf20Sopenharmony_ci continue; 4718c2ecf20Sopenharmony_ci 4728c2ecf20Sopenharmony_ci /* Here i and j are in the same domain */ 4738c2ecf20Sopenharmony_ci if (match_pdomain->num_processors != count_target) { 4748c2ecf20Sopenharmony_ci retval = -EFAULT; 4758c2ecf20Sopenharmony_ci goto err_ret; 4768c2ecf20Sopenharmony_ci } 4778c2ecf20Sopenharmony_ci 4788c2ecf20Sopenharmony_ci if (pdomain->coord_type != match_pdomain->coord_type) { 4798c2ecf20Sopenharmony_ci retval = -EFAULT; 4808c2ecf20Sopenharmony_ci goto err_ret; 4818c2ecf20Sopenharmony_ci } 4828c2ecf20Sopenharmony_ci 4838c2ecf20Sopenharmony_ci cpumask_set_cpu(j, covered_cpus); 4848c2ecf20Sopenharmony_ci cpumask_set_cpu(j, pr->shared_cpu_map); 4858c2ecf20Sopenharmony_ci } 4868c2ecf20Sopenharmony_ci 4878c2ecf20Sopenharmony_ci for_each_cpu(j, pr->shared_cpu_map) { 4888c2ecf20Sopenharmony_ci if (i == j) 4898c2ecf20Sopenharmony_ci continue; 4908c2ecf20Sopenharmony_ci 4918c2ecf20Sopenharmony_ci match_pr = all_cpu_data[j]; 4928c2ecf20Sopenharmony_ci match_pr->shared_type = pr->shared_type; 
4938c2ecf20Sopenharmony_ci cpumask_copy(match_pr->shared_cpu_map, 4948c2ecf20Sopenharmony_ci pr->shared_cpu_map); 4958c2ecf20Sopenharmony_ci } 4968c2ecf20Sopenharmony_ci } 4978c2ecf20Sopenharmony_ci goto out; 4988c2ecf20Sopenharmony_ci 4998c2ecf20Sopenharmony_cierr_ret: 5008c2ecf20Sopenharmony_ci for_each_possible_cpu(i) { 5018c2ecf20Sopenharmony_ci pr = all_cpu_data[i]; 5028c2ecf20Sopenharmony_ci 5038c2ecf20Sopenharmony_ci /* Assume no coordination on any error parsing domain info */ 5048c2ecf20Sopenharmony_ci cpumask_clear(pr->shared_cpu_map); 5058c2ecf20Sopenharmony_ci cpumask_set_cpu(i, pr->shared_cpu_map); 5068c2ecf20Sopenharmony_ci pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; 5078c2ecf20Sopenharmony_ci } 5088c2ecf20Sopenharmony_ciout: 5098c2ecf20Sopenharmony_ci free_cpumask_var(covered_cpus); 5108c2ecf20Sopenharmony_ci return retval; 5118c2ecf20Sopenharmony_ci} 5128c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(acpi_get_psd_map); 5138c2ecf20Sopenharmony_ci 5148c2ecf20Sopenharmony_cistatic int register_pcc_channel(int pcc_ss_idx) 5158c2ecf20Sopenharmony_ci{ 5168c2ecf20Sopenharmony_ci struct acpi_pcct_hw_reduced *cppc_ss; 5178c2ecf20Sopenharmony_ci u64 usecs_lat; 5188c2ecf20Sopenharmony_ci 5198c2ecf20Sopenharmony_ci if (pcc_ss_idx >= 0) { 5208c2ecf20Sopenharmony_ci pcc_data[pcc_ss_idx]->pcc_channel = 5218c2ecf20Sopenharmony_ci pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx); 5228c2ecf20Sopenharmony_ci 5238c2ecf20Sopenharmony_ci if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) { 5248c2ecf20Sopenharmony_ci pr_err("Failed to find PCC channel for subspace %d\n", 5258c2ecf20Sopenharmony_ci pcc_ss_idx); 5268c2ecf20Sopenharmony_ci return -ENODEV; 5278c2ecf20Sopenharmony_ci } 5288c2ecf20Sopenharmony_ci 5298c2ecf20Sopenharmony_ci /* 5308c2ecf20Sopenharmony_ci * The PCC mailbox controller driver should 5318c2ecf20Sopenharmony_ci * have parsed the PCCT (global table of all 5328c2ecf20Sopenharmony_ci * PCC channels) and stored pointers to the 5338c2ecf20Sopenharmony_ci * subspace 
communication region in con_priv. 5348c2ecf20Sopenharmony_ci */ 5358c2ecf20Sopenharmony_ci cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv; 5368c2ecf20Sopenharmony_ci 5378c2ecf20Sopenharmony_ci if (!cppc_ss) { 5388c2ecf20Sopenharmony_ci pr_err("No PCC subspace found for %d CPPC\n", 5398c2ecf20Sopenharmony_ci pcc_ss_idx); 5408c2ecf20Sopenharmony_ci return -ENODEV; 5418c2ecf20Sopenharmony_ci } 5428c2ecf20Sopenharmony_ci 5438c2ecf20Sopenharmony_ci /* 5448c2ecf20Sopenharmony_ci * cppc_ss->latency is just a Nominal value. In reality 5458c2ecf20Sopenharmony_ci * the remote processor could be much slower to reply. 5468c2ecf20Sopenharmony_ci * So add an arbitrary amount of wait on top of Nominal. 5478c2ecf20Sopenharmony_ci */ 5488c2ecf20Sopenharmony_ci usecs_lat = NUM_RETRIES * cppc_ss->latency; 5498c2ecf20Sopenharmony_ci pcc_data[pcc_ss_idx]->deadline_us = usecs_lat; 5508c2ecf20Sopenharmony_ci pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time; 5518c2ecf20Sopenharmony_ci pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate; 5528c2ecf20Sopenharmony_ci pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency; 5538c2ecf20Sopenharmony_ci 5548c2ecf20Sopenharmony_ci pcc_data[pcc_ss_idx]->pcc_comm_addr = 5558c2ecf20Sopenharmony_ci acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); 5568c2ecf20Sopenharmony_ci if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) { 5578c2ecf20Sopenharmony_ci pr_err("Failed to ioremap PCC comm region mem for %d\n", 5588c2ecf20Sopenharmony_ci pcc_ss_idx); 5598c2ecf20Sopenharmony_ci return -ENOMEM; 5608c2ecf20Sopenharmony_ci } 5618c2ecf20Sopenharmony_ci 5628c2ecf20Sopenharmony_ci /* Set flag so that we don't come here for each CPU. 
		 */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	/* Weak default: architectures override this when they implement FFH. */
	return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC subspace index to allocate (or take a reference on).
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		/* Subspace already allocated by another CPU; just share it. */
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}

/*
 * An example CPC table looks like the following.
 *
 * Name(_CPC, Package()
 * {
 *	17,
 *	NumEntries
 *	1,
 *	// Revision
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *	// Highest Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *	// Nominal Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *	// Lowest Nonlinear Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *	// Lowest Performance
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *	// Guaranteed Performance Register
 *	ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *	// Desired Performance Register
 *	ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *	..
 *	..
 *	..
 *
 * }
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,
 *		AddressSpaceKeyword
 *		8,
 *		//RegisterBitWidth
 *		8,
 *		//RegisterBitOffset
 *		0x30,
 *		//RegisterAddress
 *		9
 *		//AccessSize (subspace ID)
 *		0
 *	)
 * }
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
		if (num_ent <= 1) {
			pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
				 num_ent, pr->id);
			goto out_free;
		}
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev < CPPC_V2_REV) {
		pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
			 pr->id);
		goto out_free;
	}

	/*
	 * Disregard _CPC if the number of entries in the return package is not
	 * as expected, but support future revisions being proper supersets of
	 * the v3 and only causing more entries to be returned by _CPC.
	 */
	if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
	    (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
	    (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
		pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
			 num_ent, pr->id);
		goto out_free;
	}
	/* A future revision is treated as v3: cap the entries we parse. */
	if (cpc_rev > CPPC_V3_REV) {
		num_ent = CPPC_V3_NUM_ENT;
		cpc_rev = CPPC_V3_REV;
	}

	cpc_ptr->num_entries = num_ent;
	cpc_ptr->version = cpc_rev;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries (in the access_width
			 * field of a PCC-type register). The same PCC
			 * index will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					/* Map once here; unmapped in out_free / processor_exit. */
					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d \n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;


	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}


	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		/* kobject_put() releases cpc_ptr via the ktype release hook. */
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	/*
	 * Drop this CPU's reference on the (possibly shared) PCC subspace;
	 * the last CPU out frees the channel and the pcc_data itself.
	 */
	if (pcc_ss_id >=0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum: CPU number to read
 * @reg: cppc register information
 * @val: place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	/* Weak default: no FFH support unless the architecture overrides. */
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum: CPU number to write
 * @reg: cppc register information
 * @val: value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	/* Weak default: no FFH support unless the architecture overrides. */
	return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	/* Integer-type entries are static values parsed from _CPC. */
	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	/* Dispatch on the register's address space to find how to read it. */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	/* Dispatch on the register's address space to find how to write it. */
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_desired_perf - Get the value of desired performance register.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: address of a variable to store the returned desired performance
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cpc_register_resource *desired_reg;
	struct cppc_pcc_data *pcc_ss_data = NULL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	if (CPC_IN_PCC(desired_reg)) {
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		/*
		 * A PCC-resident register needs a doorbell round-trip under
		 * the subspace lock before the comm region can be read.
		 */
		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, desired_reg,
				 desired_perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, desired_reg, desired_perf);

	return 0;
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
	    CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
	    CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	/* Guaranteed perf is an optional register; report 0 when absent. */
	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	/* The mandatory capability values must all be non-zero. */
	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;


out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ? */
	if (CPC_IN_PCC(delivered_reg) ||
	    CPC_IN_PCC(reference_reg) ||
	    CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
	 * performance counters are assumed to never wrap during the lifetime of
	 * platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	/* All three mandatory counters must read back non-zero. */
	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
12288c2ecf20Sopenharmony_ci perf_fb_ctrs->wraparound_time = ctr_wrap_time; 12298c2ecf20Sopenharmony_ciout_err: 12308c2ecf20Sopenharmony_ci if (regs_in_pcc) 12318c2ecf20Sopenharmony_ci up_write(&pcc_ss_data->pcc_lock); 12328c2ecf20Sopenharmony_ci return ret; 12338c2ecf20Sopenharmony_ci} 12348c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); 12358c2ecf20Sopenharmony_ci 12368c2ecf20Sopenharmony_ci/** 12378c2ecf20Sopenharmony_ci * cppc_set_perf - Set a CPU's performance controls. 12388c2ecf20Sopenharmony_ci * @cpu: CPU for which to set performance controls. 12398c2ecf20Sopenharmony_ci * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h 12408c2ecf20Sopenharmony_ci * 12418c2ecf20Sopenharmony_ci * Return: 0 for success, -ERRNO otherwise. 12428c2ecf20Sopenharmony_ci */ 12438c2ecf20Sopenharmony_ciint cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) 12448c2ecf20Sopenharmony_ci{ 12458c2ecf20Sopenharmony_ci struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); 12468c2ecf20Sopenharmony_ci struct cpc_register_resource *desired_reg; 12478c2ecf20Sopenharmony_ci int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu); 12488c2ecf20Sopenharmony_ci struct cppc_pcc_data *pcc_ss_data = NULL; 12498c2ecf20Sopenharmony_ci int ret = 0; 12508c2ecf20Sopenharmony_ci 12518c2ecf20Sopenharmony_ci if (!cpc_desc) { 12528c2ecf20Sopenharmony_ci pr_debug("No CPC descriptor for CPU:%d\n", cpu); 12538c2ecf20Sopenharmony_ci return -ENODEV; 12548c2ecf20Sopenharmony_ci } 12558c2ecf20Sopenharmony_ci 12568c2ecf20Sopenharmony_ci desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; 12578c2ecf20Sopenharmony_ci 12588c2ecf20Sopenharmony_ci /* 12598c2ecf20Sopenharmony_ci * This is Phase-I where we want to write to CPC registers 12608c2ecf20Sopenharmony_ci * -> We want all CPUs to be able to execute this phase in parallel 12618c2ecf20Sopenharmony_ci * 12628c2ecf20Sopenharmony_ci * Since read_lock can be acquired by multiple CPUs simultaneously we 12638c2ecf20Sopenharmony_ci * achieve that goal here 
12648c2ecf20Sopenharmony_ci */ 12658c2ecf20Sopenharmony_ci if (CPC_IN_PCC(desired_reg)) { 12668c2ecf20Sopenharmony_ci if (pcc_ss_id < 0) { 12678c2ecf20Sopenharmony_ci pr_debug("Invalid pcc_ss_id\n"); 12688c2ecf20Sopenharmony_ci return -ENODEV; 12698c2ecf20Sopenharmony_ci } 12708c2ecf20Sopenharmony_ci pcc_ss_data = pcc_data[pcc_ss_id]; 12718c2ecf20Sopenharmony_ci down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */ 12728c2ecf20Sopenharmony_ci if (pcc_ss_data->platform_owns_pcc) { 12738c2ecf20Sopenharmony_ci ret = check_pcc_chan(pcc_ss_id, false); 12748c2ecf20Sopenharmony_ci if (ret) { 12758c2ecf20Sopenharmony_ci up_read(&pcc_ss_data->pcc_lock); 12768c2ecf20Sopenharmony_ci return ret; 12778c2ecf20Sopenharmony_ci } 12788c2ecf20Sopenharmony_ci } 12798c2ecf20Sopenharmony_ci /* 12808c2ecf20Sopenharmony_ci * Update the pending_write to make sure a PCC CMD_READ will not 12818c2ecf20Sopenharmony_ci * arrive and steal the channel during the switch to write lock 12828c2ecf20Sopenharmony_ci */ 12838c2ecf20Sopenharmony_ci pcc_ss_data->pending_pcc_write_cmd = true; 12848c2ecf20Sopenharmony_ci cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt; 12858c2ecf20Sopenharmony_ci cpc_desc->write_cmd_status = 0; 12868c2ecf20Sopenharmony_ci } 12878c2ecf20Sopenharmony_ci 12888c2ecf20Sopenharmony_ci /* 12898c2ecf20Sopenharmony_ci * Skip writing MIN/MAX until Linux knows how to come up with 12908c2ecf20Sopenharmony_ci * useful values. 
12918c2ecf20Sopenharmony_ci */ 12928c2ecf20Sopenharmony_ci cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); 12938c2ecf20Sopenharmony_ci 12948c2ecf20Sopenharmony_ci if (CPC_IN_PCC(desired_reg)) 12958c2ecf20Sopenharmony_ci up_read(&pcc_ss_data->pcc_lock); /* END Phase-I */ 12968c2ecf20Sopenharmony_ci /* 12978c2ecf20Sopenharmony_ci * This is Phase-II where we transfer the ownership of PCC to Platform 12988c2ecf20Sopenharmony_ci * 12998c2ecf20Sopenharmony_ci * Short Summary: Basically if we think of a group of cppc_set_perf 13008c2ecf20Sopenharmony_ci * requests that happened in short overlapping interval. The last CPU to 13018c2ecf20Sopenharmony_ci * come out of Phase-I will enter Phase-II and ring the doorbell. 13028c2ecf20Sopenharmony_ci * 13038c2ecf20Sopenharmony_ci * We have the following requirements for Phase-II: 13048c2ecf20Sopenharmony_ci * 1. We want to execute Phase-II only when there are no CPUs 13058c2ecf20Sopenharmony_ci * currently executing in Phase-I 13068c2ecf20Sopenharmony_ci * 2. Once we start Phase-II we want to avoid all other CPUs from 13078c2ecf20Sopenharmony_ci * entering Phase-I. 13088c2ecf20Sopenharmony_ci * 3. We want only one CPU among all those who went through Phase-I 13098c2ecf20Sopenharmony_ci * to run phase-II 13108c2ecf20Sopenharmony_ci * 13118c2ecf20Sopenharmony_ci * If write_trylock fails to get the lock and doesn't transfer the 13128c2ecf20Sopenharmony_ci * PCC ownership to the platform, then one of the following will be TRUE 13138c2ecf20Sopenharmony_ci * 1. There is at-least one CPU in Phase-I which will later execute 13148c2ecf20Sopenharmony_ci * write_trylock, so the CPUs in Phase-I will be responsible for 13158c2ecf20Sopenharmony_ci * executing the Phase-II. 13168c2ecf20Sopenharmony_ci * 2. Some other CPU has beaten this CPU to successfully execute the 13178c2ecf20Sopenharmony_ci * write_trylock and has already acquired the write_lock. 
We know for a 13188c2ecf20Sopenharmony_ci * fact it (other CPU acquiring the write_lock) couldn't have happened 13198c2ecf20Sopenharmony_ci * before this CPU's Phase-I as we held the read_lock. 13208c2ecf20Sopenharmony_ci * 3. Some other CPU executing pcc CMD_READ has stolen the 13218c2ecf20Sopenharmony_ci * down_write, in which case, send_pcc_cmd will check for pending 13228c2ecf20Sopenharmony_ci * CMD_WRITE commands by checking the pending_pcc_write_cmd. 13238c2ecf20Sopenharmony_ci * So this CPU can be certain that its request will be delivered 13248c2ecf20Sopenharmony_ci * So in all cases, this CPU knows that its request will be delivered 13258c2ecf20Sopenharmony_ci * by another CPU and can return 13268c2ecf20Sopenharmony_ci * 13278c2ecf20Sopenharmony_ci * After getting the down_write we still need to check for 13288c2ecf20Sopenharmony_ci * pending_pcc_write_cmd to take care of the following scenario 13298c2ecf20Sopenharmony_ci * The thread running this code could be scheduled out between 13308c2ecf20Sopenharmony_ci * Phase-I and Phase-II. Before it is scheduled back on, another CPU 13318c2ecf20Sopenharmony_ci * could have delivered the request to Platform by triggering the 13328c2ecf20Sopenharmony_ci * doorbell and transferred the ownership of PCC to platform. So this 13338c2ecf20Sopenharmony_ci * avoids triggering an unnecessary doorbell and more importantly before 13348c2ecf20Sopenharmony_ci * triggering the doorbell it makes sure that the PCC channel ownership 13358c2ecf20Sopenharmony_ci * is still with OSPM. 13368c2ecf20Sopenharmony_ci * pending_pcc_write_cmd can also be cleared by a different CPU, if 13378c2ecf20Sopenharmony_ci * there was a pcc CMD_READ waiting on down_write and it steals the lock 13388c2ecf20Sopenharmony_ci * before the pcc CMD_WRITE is completed. 
pcc_send_cmd checks for this 13398c2ecf20Sopenharmony_ci * case during a CMD_READ and if there are pending writes it delivers 13408c2ecf20Sopenharmony_ci * the write command before servicing the read command 13418c2ecf20Sopenharmony_ci */ 13428c2ecf20Sopenharmony_ci if (CPC_IN_PCC(desired_reg)) { 13438c2ecf20Sopenharmony_ci if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */ 13448c2ecf20Sopenharmony_ci /* Update only if there are pending write commands */ 13458c2ecf20Sopenharmony_ci if (pcc_ss_data->pending_pcc_write_cmd) 13468c2ecf20Sopenharmony_ci send_pcc_cmd(pcc_ss_id, CMD_WRITE); 13478c2ecf20Sopenharmony_ci up_write(&pcc_ss_data->pcc_lock); /* END Phase-II */ 13488c2ecf20Sopenharmony_ci } else 13498c2ecf20Sopenharmony_ci /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ 13508c2ecf20Sopenharmony_ci wait_event(pcc_ss_data->pcc_write_wait_q, 13518c2ecf20Sopenharmony_ci cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt); 13528c2ecf20Sopenharmony_ci 13538c2ecf20Sopenharmony_ci /* send_pcc_cmd updates the status in case of failure */ 13548c2ecf20Sopenharmony_ci ret = cpc_desc->write_cmd_status; 13558c2ecf20Sopenharmony_ci } 13568c2ecf20Sopenharmony_ci return ret; 13578c2ecf20Sopenharmony_ci} 13588c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(cppc_set_perf); 13598c2ecf20Sopenharmony_ci 13608c2ecf20Sopenharmony_ci/** 13618c2ecf20Sopenharmony_ci * cppc_get_transition_latency - returns frequency transition latency in ns 13628c2ecf20Sopenharmony_ci * 13638c2ecf20Sopenharmony_ci * ACPI CPPC does not explicitly specifiy how a platform can specify the 13648c2ecf20Sopenharmony_ci * transition latency for perfromance change requests. The closest we have 13658c2ecf20Sopenharmony_ci * is the timing information from the PCCT tables which provides the info 13668c2ecf20Sopenharmony_ci * on the number and frequency of PCC commands the platform can handle. 
13678c2ecf20Sopenharmony_ci */ 13688c2ecf20Sopenharmony_ciunsigned int cppc_get_transition_latency(int cpu_num) 13698c2ecf20Sopenharmony_ci{ 13708c2ecf20Sopenharmony_ci /* 13718c2ecf20Sopenharmony_ci * Expected transition latency is based on the PCCT timing values 13728c2ecf20Sopenharmony_ci * Below are definition from ACPI spec: 13738c2ecf20Sopenharmony_ci * pcc_nominal- Expected latency to process a command, in microseconds 13748c2ecf20Sopenharmony_ci * pcc_mpar - The maximum number of periodic requests that the subspace 13758c2ecf20Sopenharmony_ci * channel can support, reported in commands per minute. 0 13768c2ecf20Sopenharmony_ci * indicates no limitation. 13778c2ecf20Sopenharmony_ci * pcc_mrtt - The minimum amount of time that OSPM must wait after the 13788c2ecf20Sopenharmony_ci * completion of a command before issuing the next command, 13798c2ecf20Sopenharmony_ci * in microseconds. 13808c2ecf20Sopenharmony_ci */ 13818c2ecf20Sopenharmony_ci unsigned int latency_ns = 0; 13828c2ecf20Sopenharmony_ci struct cpc_desc *cpc_desc; 13838c2ecf20Sopenharmony_ci struct cpc_register_resource *desired_reg; 13848c2ecf20Sopenharmony_ci int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num); 13858c2ecf20Sopenharmony_ci struct cppc_pcc_data *pcc_ss_data; 13868c2ecf20Sopenharmony_ci 13878c2ecf20Sopenharmony_ci cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); 13888c2ecf20Sopenharmony_ci if (!cpc_desc) 13898c2ecf20Sopenharmony_ci return CPUFREQ_ETERNAL; 13908c2ecf20Sopenharmony_ci 13918c2ecf20Sopenharmony_ci desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; 13928c2ecf20Sopenharmony_ci if (!CPC_IN_PCC(desired_reg)) 13938c2ecf20Sopenharmony_ci return CPUFREQ_ETERNAL; 13948c2ecf20Sopenharmony_ci 13958c2ecf20Sopenharmony_ci if (pcc_ss_id < 0) 13968c2ecf20Sopenharmony_ci return CPUFREQ_ETERNAL; 13978c2ecf20Sopenharmony_ci 13988c2ecf20Sopenharmony_ci pcc_ss_data = pcc_data[pcc_ss_id]; 13998c2ecf20Sopenharmony_ci if (pcc_ss_data->pcc_mpar) 14008c2ecf20Sopenharmony_ci latency_ns = 60 * (1000 * 
1000 * 1000 / pcc_ss_data->pcc_mpar); 14018c2ecf20Sopenharmony_ci 14028c2ecf20Sopenharmony_ci latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000); 14038c2ecf20Sopenharmony_ci latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000); 14048c2ecf20Sopenharmony_ci 14058c2ecf20Sopenharmony_ci return latency_ns; 14068c2ecf20Sopenharmony_ci} 14078c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(cppc_get_transition_latency); 1408