// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpu_pm.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
#include <asm/suspend.h>

/*
 * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
 * For a comprehensive description of the main algorithm used here, please
 * see Documentation/arch/arm/cluster-pm-race-avoidance.rst.
 */

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}
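/*
 * Note on the publish/wait pattern used by the helpers above and below:
 * a state change is ordered with dmb(), made visible to CPUs with
 * incoherent caches via an explicit clean (sync_cache_w()), then SEV
 * wakes any CPU parked in WFE.  The matching reader side appears in
 * __mcpm_outbound_enter_critical() further down:
 *
 *	while (1) {
 *		cpustate = c->cpus[i].cpu;
 *		if (cpustate != CPU_GOING_DOWN)
 *			break;
 *		wfe();				// sleep until a writer does sev()
 *		sync_cache_r(&c->cpus[i].cpu);	// re-read past stale cache
 *	}
 *
 * Ordinary locks cannot be used here because these paths run while
 * caches (and thus hardware coherency) are in transitional states.
 */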
/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
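/*
 * Taken together, the helpers above implement the first man / last man
 * election described in cluster-pm-race-avoidance.rst.  The last CPU to
 * go down in a cluster follows the sequence used by mcpm_cpu_power_down()
 * below, roughly:
 *
 *	__mcpm_cpu_going_down(cpu, cluster);
 *	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 *		// cluster-wide teardown (e.g. L2/CCI) is safe only here
 *		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 *	}
 *	__mcpm_cpu_down(cpu, cluster);
 */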
static int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? __pa_symbol(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__sync_cache_range_w(poke, 2 * sizeof(*poke));
}

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

bool mcpm_is_available(void)
{
	return (platform_ops) ? true : false;
}
EXPORT_SYMBOL_GPL(mcpm_is_available);
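/*
 * A minimal registration sketch, as a platform backend might do from its
 * own early init code (the my_soc_* names are hypothetical; see
 * arch/arm/include/asm/mcpm.h for the full set of callbacks):
 *
 *	static const struct mcpm_platform_ops my_soc_pm_ops = {
 *		.cpu_powerup		= my_soc_cpu_powerup,
 *		.cluster_powerup	= my_soc_cluster_powerup,
 *		.cpu_powerdown_prepare	= my_soc_cpu_powerdown_prepare,
 *		.cpu_cache_disable	= my_soc_cpu_cache_disable,
 *		// ...
 *	};
 *
 *	if (mcpm_platform_register(&my_soc_pm_ops))
 *		pr_warn("another MCPM backend is already registered\n");
 */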
/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static int mcpm_cpu_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static inline bool mcpm_cluster_unused(unsigned int cluster)
{
	int i, cnt;
	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		cnt |= mcpm_cpu_use_count[cluster][i];
	return !cnt;
}

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	bool cpu_is_down, cluster_is_down;
	int ret = 0;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&mcpm_lock);

	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
	cluster_is_down = mcpm_cluster_unused(cluster);

	mcpm_cpu_use_count[cluster][cpu]++;
	/*
	 * The only possible values are:
	 * 0 = CPU down
	 * 1 = CPU (still) up
	 * 2 = CPU requested to be up before it had a chance
	 *     to actually make itself down.
	 * Any other value is a bug.
	 */
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
	       mcpm_cpu_use_count[cluster][cpu] != 2);

	if (cluster_is_down)
		ret = platform_ops->cluster_powerup(cluster);
	if (cpu_is_down && !ret)
		ret = platform_ops->cpu_powerup(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_enable();
	return ret;
}
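/*
 * Worked example of the use count: if a power_up request races with a
 * power_down for the same CPU and takes mcpm_lock first, the count goes
 * from 1 to 2; power_down then brings it back to 1, sees cpu_going_down
 * as false, skips WFI, and returns through the reset/resume entry path.
 * If power_down wins instead, the count drops to 0 and power_up raises
 * it back to 1 and re-asserts the platform power-up, so the CPU either
 * never powers off or comes straight back in via mcpm_entry_point.
 */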
typedef typeof(cpu_reset) phys_reset_t;

void mcpm_cpu_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_going_down, last_man;
	phys_reset_t phys_reset;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (WARN_ON_ONCE(!platform_ops))
		return;
	BUG_ON(!irqs_disabled());

	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	arch_spin_lock(&mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

	mcpm_cpu_use_count[cluster][cpu]--;
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
	       mcpm_cpu_use_count[cluster][cpu] != 1);
	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
	last_man = mcpm_cluster_unused(cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		platform_ops->cpu_powerdown_prepare(cpu, cluster);
		platform_ops->cluster_powerdown_prepare(cluster);
		arch_spin_unlock(&mcpm_lock);
		platform_ops->cluster_cache_disable();
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		if (cpu_going_down)
			platform_ops->cpu_powerdown_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
		/*
		 * If cpu_going_down is false here, that means a power_up
		 * request raced ahead of us.  Even if we do not want to
		 * shut this CPU down, the caller still expects execution
		 * to return through the system resume entry path, like
		 * when the WFI is aborted due to a new IRQ or the like.
		 * So let's continue with cache cleaning in all cases.
		 */
		platform_ops->cpu_cache_disable();
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	if (cpu_going_down)
		wfi();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * CPU might not be able to actually enter a powered down state
	 * with the WFI instruction if the power_up request has removed
	 * the required reset condition.  We must perform a re-entry in
	 * the kernel as if the power_up method had just deasserted reset
	 * on the CPU.
	 */
	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
	phys_reset(__pa_symbol(mcpm_entry_point), false);

	/* should never get here */
	BUG();
}
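/*
 * On the phys_reset call above: __pa_symbol(cpu_reset) yields the
 * physical address of the processor's reset handler, which is callable
 * here because setup_mm_for_reboot() switched this CPU to identity-mapped
 * page tables.  cpu_reset then turns the MMU off and jumps to the
 * physical address of mcpm_entry_point, so the CPU re-enters the
 * low-level MCPM vector exactly as if it had been released from reset.
 */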
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
		return -EUNATCH;

	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}

void mcpm_cpu_suspend(void)
{
	if (WARN_ON_ONCE(!platform_ops))
		return;

	/* Some platforms might have to enable special resume modes, etc. */
	if (platform_ops->cpu_suspend_prepare) {
		unsigned int mpidr = read_cpuid_mpidr();
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		arch_spin_lock(&mcpm_lock);
		platform_ops->cpu_suspend_prepare(cpu, cluster);
		arch_spin_unlock(&mcpm_lock);
	}
	mcpm_cpu_power_down();
}
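/*
 * A hedged sketch of a typical mcpm_cpu_suspend() caller, e.g. a cpuidle
 * driver's power-down finisher: the resume address must be published
 * first, since the CPU will come back through mcpm_entry_point:
 *
 *	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
 *	mcpm_cpu_suspend();
 *	// control never returns here directly; the CPU restarts
 *	// at the entry vector (cpu_resume) after wake-up
 */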
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	unsigned long flags;

	if (!platform_ops)
		return -EUNATCH;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	local_irq_save(flags);
	arch_spin_lock(&mcpm_lock);

	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);

	arch_spin_unlock(&mcpm_lock);
	local_irq_restore(flags);

	return 0;
}

#ifdef CONFIG_ARM_CPU_SUSPEND

static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
	setup_mm_for_reboot();

	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
	phys_reset(__pa_symbol(mcpm_entry_point), false);
	BUG();
}

int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}
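/*
 * mcpm_loopback() is typically invoked once from a backend's init code so
 * that the boot CPU, which was not started through mcpm_entry_point, still
 * runs the power_up_setup stage (e.g. to enable CCI snoops for its own
 * cluster).  A plausible call-site sketch, with a hypothetical helper:
 *
 *	ret = mcpm_loopback(my_soc_cache_disable);
 *	if (ret)
 *		pr_err("MCPM loopback failed: %d\n", ret);
 */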
#endif

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i) {
		mcpm_cpu_use_count[this_cluster][i] = 1;
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	}
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
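/*
 * Overall bring-up order, as a sketch of how a platform typically wires
 * this together at boot (hypothetical my_soc_* names; exact call sites
 * vary by backend):
 *
 *	mcpm_platform_register(&my_soc_pm_ops);
 *	mcpm_sync_init(my_soc_power_up_setup);	// low-level affinity hook
 *	mcpm_smp_set_ops();			// route SMP ops through MCPM
 */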