Lines Matching defs:cluster

88 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
91 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
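
The two hits above (88 and 91) look like they come from the mainline arch/arm/mach-sunxi/mc_smp.c MCPM driver: a helper that folds a (cluster, core) pair into one linear CPU index before looking up that CPU's device tree node. A tiny standalone sketch of just that arithmetic, with SUNXI_CPUS_PER_CLUSTER assumed to be 4 (quad-core clusters); the value and the helper name are assumptions for illustration:

    #include <stdio.h>

    #define SUNXI_CPUS_PER_CLUSTER  4   /* assumed: quad-core clusters */

    /* Cluster-major linear CPU index, as computed at line 91 above. */
    static unsigned int linear_cpu(unsigned int cluster, unsigned int core)
    {
        return cluster * SUNXI_CPUS_PER_CLUSTER + core;
    }

    int main(void)
    {
        /* Core 2 of cluster 1 maps to logical CPU 6. */
        printf("cpu = %u\n", linear_cpu(1, 2));
        return 0;
    }
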
103 * would be mid way in a core or cluster power sequence.
105 pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
106 __func__, cluster, core);
116 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
122 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
125 pr_debug("power clamp for cluster %u cpu %u already open\n",
126 cluster, cpu);
130 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
132 writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
134 writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
136 writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
138 writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
141 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
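
The hits at 116-141 are the per-core power switch: the clamp register is read first (with an early "already open" exit at 125), then stepped through 0xff, 0xfe, 0xf8, 0xf0, 0x00 on power-up, and written back to 0xff on power-down, presumably to ramp the core's rail gradually. A minimal user-space sketch of that staircase; the register stub, the delay helper and the 10 µs step are placeholders standing in for writel()/udelay(), not the driver's actual values:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the PRCM power switch register of one core. */
    static uint32_t clamp_reg = 0xff;

    static uint32_t reg_read(void) { return clamp_reg; }

    static void reg_write(uint32_t val)
    {
        clamp_reg = val;
        printf("PRCM_PWR_SWITCH_REG <= 0x%02x\n", (unsigned int)val);
    }

    /* Placeholder for udelay(); the real delay length is not shown in the hits. */
    static void delay_us(unsigned int us) { (void)us; }

    static void power_switch_set(bool enable)
    {
        static const uint32_t steps[] = { 0xff, 0xfe, 0xf8, 0xf0, 0x00 };
        size_t i;

        if (!enable) {
            reg_write(0xff);        /* re-engage the clamp (line 141) */
            return;
        }

        if (reg_read() == 0x00)     /* clamp already fully open (line 125) */
            return;

        /* Open the clamp gradually, one step at a time (lines 130-138). */
        for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
            reg_write(steps[i]);
            delay_us(10);
        }
    }

    int main(void)
    {
        power_switch_set(true);     /* power-up path */
        power_switch_set(false);    /* power-down path */
        return 0;
    }
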
159 static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
163 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
164 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
168 if (cluster == 0 && cpu == 0)
172 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
174 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
179 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
182 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
187 if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
188 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
190 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
194 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
201 if (!sunxi_core_is_cortex_a15(cpu, cluster))
204 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
207 sunxi_cpu_power_switch_set(cpu, cluster, true);
216 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
218 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
228 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
230 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
234 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
237 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
242 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
245 if (!sunxi_core_is_cortex_a15(cpu, cluster))
249 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
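
The power-up hits between 159 and 249 are mostly one pattern repeated: read a per-cluster reset or gating register, set or clear this core's bit (with an extra bit handled when the core is a Cortex-A15), and write the word back. A compact standalone sketch of that read-modify-write pattern; the register storage, the bit layout and the reset polarity below are assumptions, since the actual masks are not part of the hits:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the per-cluster reset control registers (readl()/writel()). */
    static uint32_t rst_ctrl[2];

    static uint32_t reg_read(unsigned int cluster) { return rst_ctrl[cluster]; }
    static void reg_write(unsigned int cluster, uint32_t v) { rst_ctrl[cluster] = v; }

    /* Hypothetical layout: one reset bit per core in the low nibble. */
    #define CORE_RST(core)  (1U << (core))

    /* Clearing the bit holds the core in reset (polarity assumed). */
    static void core_assert_reset(unsigned int cluster, unsigned int core)
    {
        uint32_t reg = reg_read(cluster);

        reg &= ~CORE_RST(core);
        reg_write(cluster, reg);
    }

    /* Setting the bit releases the core from reset. */
    static void core_deassert_reset(unsigned int cluster, unsigned int core)
    {
        uint32_t reg = reg_read(cluster);

        reg |= CORE_RST(core);
        reg_write(cluster, reg);
    }

    int main(void)
    {
        core_assert_reset(0, 1);
        core_deassert_reset(0, 1);
        printf("cluster 0 reset word: 0x%08x\n", (unsigned int)rst_ctrl[0]);
        return 0;
    }
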
254 static int sunxi_cluster_powerup(unsigned int cluster)
258 pr_debug("%s: cluster %u\n", __func__, cluster);
259 if (cluster >= SUNXI_NR_CLUSTERS)
262 /* For A83T, assert cluster cores resets */
264 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
266 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
271 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
273 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
275 /* assert cluster processor power-on resets */
276 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
278 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
280 /* assert cluster cores resets */
283 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
286 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
290 /* assert cluster resets */
291 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
301 if (!sunxi_core_is_cortex_a15(0, cluster))
304 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
307 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
308 if (sunxi_core_is_cortex_a15(0, cluster)) {
316 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
318 /* clear cluster power gate */
319 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
324 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
327 /* de-assert cluster resets */
328 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
332 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
335 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
337 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
344 * enable CCI-400 and proper cluster cache disable before power down.
360 /* Flush all cache levels for this cluster. */
364 * Disable cluster-level coherency by masking
375 static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
380 if (sunxi_mc_smp_cpu_table[cluster][i])
394 unsigned int mpidr, cpu, cluster;
398 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
402 if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
407 if (sunxi_mc_smp_cpu_table[cluster][cpu])
410 if (sunxi_mc_smp_cluster_is_down(cluster)) {
412 sunxi_cluster_powerup(cluster);
419 sunxi_cpu_powerup(cpu, cluster);
422 sunxi_mc_smp_cpu_table[cluster][cpu]++;
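
The boot path at 394-422 starts by splitting the MPIDR into affinity fields, core from level 0 and cluster from level 1, before range-checking them against SUNXI_CPUS_PER_CLUSTER and SUNXI_NR_CLUSTERS. A standalone sketch of that decode; the macro mirrors the 8-bit-per-level layout used by the kernel's MPIDR_AFFINITY_LEVEL(), and the sample MPIDR value is invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Each MPIDR affinity level is an 8-bit field: Aff0 = core, Aff1 = cluster. */
    #define MPIDR_LEVEL_BITS  8
    #define MPIDR_LEVEL_MASK  ((1U << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        (((mpidr) >> (MPIDR_LEVEL_BITS * (level))) & MPIDR_LEVEL_MASK)

    int main(void)
    {
        uint32_t mpidr = 0x80000102;    /* example value: cluster 1, core 2 */
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        printf("cluster %u cpu %u\n", cluster, cpu);
        return 0;
    }
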
431 unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
434 pr_debug("%s: cluster %u\n", __func__, cluster);
439 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
441 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
446 unsigned int mpidr, cpu, cluster;
451 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
452 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
455 sunxi_mc_smp_cpu_table[cluster][cpu]--;
456 if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
462 } else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
464 cluster, cpu);
468 last_man = sunxi_mc_smp_cluster_is_down(cluster);
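
The hits from 375 to 468 show the bookkeeping side: a per-cluster, per-CPU use-count table that the boot path increments and cpu_die decrements, with the cluster-is-down scan deciding whether the dying CPU is the cluster's last man. A self-contained sketch of that logic under assumed table sizes; locking and the "boots multiple times" error path visible at 462-464 are left out:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CLUSTERS       2   /* assumed, as in SUNXI_NR_CLUSTERS */
    #define CPUS_PER_CLUSTER  4   /* assumed, as in SUNXI_CPUS_PER_CLUSTER */

    /* Use count per CPU: 0 = down, 1 = up, >1 = a power-up raced with a die. */
    static unsigned int cpu_table[NR_CLUSTERS][CPUS_PER_CLUSTER];

    static bool cluster_is_down(unsigned int cluster)
    {
        for (unsigned int i = 0; i < CPUS_PER_CLUSTER; i++)
            if (cpu_table[cluster][i])
                return false;
        return true;
    }

    /* Boot path: the first CPU of a down cluster brings the cluster up too. */
    static void cpu_powerup_bookkeeping(unsigned int cluster, unsigned int cpu)
    {
        if (cluster_is_down(cluster))
            printf("cluster %u: first CPU up, power the cluster up too\n", cluster);
        cpu_table[cluster][cpu]++;
    }

    /* Die path: returns true if this CPU was the cluster's last man. */
    static bool cpu_die_bookkeeping(unsigned int cluster, unsigned int cpu)
    {
        cpu_table[cluster][cpu]--;
        if (cpu_table[cluster][cpu])    /* a power-up request beat us to it */
            return false;
        return cluster_is_down(cluster);
    }

    int main(void)
    {
        cpu_powerup_bookkeeping(0, 0);
        cpu_powerup_bookkeeping(0, 1);
        printf("cpu 1 last man: %d\n", cpu_die_bookkeeping(0, 1));
        printf("cpu 0 last man: %d\n", cpu_die_bookkeeping(0, 0));
        return 0;
    }
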
481 static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
486 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
487 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
494 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
496 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
500 sunxi_cpu_power_switch_set(cpu, cluster, false);
505 static int sunxi_cluster_powerdown(unsigned int cluster)
509 pr_debug("%s: cluster %u\n", __func__, cluster);
510 if (cluster >= SUNXI_NR_CLUSTERS)
513 /* assert cluster resets or system will hang */
514 pr_debug("%s: assert cluster reset\n", __func__);
515 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
519 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
521 /* gate cluster power */
522 pr_debug("%s: gate cluster power\n", __func__);
523 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
528 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
536 unsigned int mpidr, cpu, cluster;
543 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
546 if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
565 if (sunxi_mc_smp_cpu_table[cluster][cpu])
568 reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
579 sunxi_cpu_powerdown(cpu, cluster);
581 if (!sunxi_mc_smp_cluster_is_down(cluster))
584 /* wait for cluster L2 WFI */
585 ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
590 * Ignore timeout on the cluster. Leaving the cluster on
599 /* Power down cluster */
600 sunxi_cluster_powerdown(cluster);
604 pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
605 __func__, cluster, cpu, ret);
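
Lines 568-605 wait for the cluster to report L2 standby-WFI before cutting cluster power, and a timeout is treated as non-fatal (590 onward): leaving the cluster powered is safer than removing power under a live L2. A minimal user-space sketch of that readl_poll_timeout()-style loop; the status bit position, the one-second budget and the stubbed register read are invented for the example:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define STATUS_STANDBY_WFI_L2  (1U << 0)   /* invented bit position */
    #define POLL_TIMEOUT_US        1000000     /* invented budget: 1 s */

    /* Stub for readl() on the cluster status register. */
    static uint32_t read_cluster_status(void)
    {
        return STATUS_STANDBY_WFI_L2;   /* pretend the L2 is already idle */
    }

    static uint64_t now_us(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    /* Poll until the L2 reports standby-WFI or the budget runs out. */
    static int wait_for_l2_wfi(void)
    {
        uint64_t deadline = now_us() + POLL_TIMEOUT_US;

        while (!(read_cluster_status() & STATUS_STANDBY_WFI_L2)) {
            if (now_us() > deadline)
                return -1;      /* timeout: caller leaves the cluster on */
        }
        return 0;
    }

    int main(void)
    {
        printf("L2 WFI wait: %d\n", wait_for_l2_wfi());
        return 0;
    }
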
631 unsigned int mpidr, cpu, cluster;
635 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
637 if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
641 sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
648 * We need the trampoline code to enable CCI-400 on the first cluster
869 /* Configure CCI-400 for boot cluster */
872 pr_err("%s: failed to configure boot cluster: %d\n",
887 /* Actually enable multi cluster SMP */
890 pr_info("sunxi multi cluster SMP support installed\n");