// SPDX-License-Identifier: GPL-2.0-only
/*
 * apb_timer.c: Driver for Langwell APB timers
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Jacob Pan (jacob.jun.pan@intel.com)
 *
 * Note:
 * Langwell is the south complex of the Intel Moorestown MID platform. There
 * are eight external timers in total that can be used by the operating
 * system. The timer information, such as frequency and addresses, is
 * provided to the OS via SFI tables.
 * Timer interrupts are routed via a FW/HW emulated IOAPIC, independently via
 * individual redirection table entries (RTE).
 * Unlike HPET, there is no master counter; therefore one of the timers is
 * used as the clocksource. The overall allocation looks like:
 *  - timer 0 - NR_CPUs for per cpu timers
 *  - one timer for clocksource
 *  - one timer for watchdog driver.
 * It is also worth noting that the APB timer does not support true one-shot
 * mode; free-running mode is used here to emulate it.
 * The APB timer can also be used as a broadcast timer along with the per cpu
 * local APIC timer, but by default the APB timer has a higher rating than the
 * local APIC timers.
 */

#include <linux/delay.h>
#include <linux/dw_apb_timer.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/sfi.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq.h>

#include <asm/fixmap.h>
#include <asm/apb_timer.h>
#include <asm/intel-mid.h>
#include <asm/time.h>

#define APBT_CLOCKEVENT_RATING		110
#define APBT_CLOCKSOURCE_RATING		250

#define APBT_CLOCKEVENT0_NUM		(0)
#define APBT_CLOCKSOURCE_NUM		(2)

static phys_addr_t apbt_address;
static int apb_timer_block_enabled;
static void __iomem *apbt_virt_address;

/*
 * Common DW APB timer info
 */
static unsigned long apbt_freq;

struct apbt_dev {
	struct dw_apb_clock_event_device	*timer;
	unsigned int				num;
	int					cpu;
	unsigned int				irq;
	char					name[10];
};

static struct dw_apb_clocksource *clocksource_apbt;

static inline void __iomem *adev_virt_addr(struct apbt_dev *adev)
{
	return apbt_virt_address + adev->num * APBTMRS_REG_SIZE;
}

static DEFINE_PER_CPU(struct apbt_dev, cpu_apbt_dev);

#ifdef CONFIG_SMP
static unsigned int apbt_num_timers_used;
#endif
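/*
 * Map the APB timer MMIO block described by the SFI MTMR tables and pick the
 * physical timer used as the clocksource. Falls back to APBT_DEFAULT_BASE if
 * SFI does not provide a base address, and panics if the block cannot be
 * mapped, since there is no other platform timer to fall back to.
 */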
static inline void apbt_set_mapping(void)
{
	struct sfi_timer_table_entry *mtmr;
	int phy_cs_timer_id = 0;

	if (apbt_virt_address) {
		pr_debug("APBT base already mapped\n");
		return;
	}
	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return;
	}
	apbt_address = (phys_addr_t)mtmr->phys_addr;
	if (!apbt_address) {
		printk(KERN_WARNING "No timer base from SFI, use default\n");
		apbt_address = APBT_DEFAULT_BASE;
	}
	apbt_virt_address = ioremap(apbt_address, APBT_MMAP_SIZE);
	if (!apbt_virt_address) {
		pr_debug("Failed mapping APBT phy address at %lu\n",
			 (unsigned long)apbt_address);
		goto panic_noapbt;
	}
	apbt_freq = mtmr->freq_hz;
	sfi_free_mtmr(mtmr);

	/* Now figure out the physical timer id for the clocksource device */
	mtmr = sfi_get_mtmr(APBT_CLOCKSOURCE_NUM);
	if (mtmr == NULL)
		goto panic_noapbt;

	pr_debug("Use timer %d for clocksource\n",
		 (int)(mtmr->phys_addr & 0xff) / APBTMRS_REG_SIZE);
	phy_cs_timer_id = (unsigned int)(mtmr->phys_addr & 0xff) /
			  APBTMRS_REG_SIZE;

	clocksource_apbt = dw_apb_clocksource_init(APBT_CLOCKSOURCE_RATING,
		"apbt0", apbt_virt_address + phy_cs_timer_id *
		APBTMRS_REG_SIZE, apbt_freq);
	return;

panic_noapbt:
	panic("Failed to setup APB system timer\n");
}

static inline void apbt_clear_mapping(void)
{
	iounmap(apbt_virt_address);
	apbt_virt_address = NULL;
}
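/*
 * Register timer 0 as the boot CPU clockevent device. When the LAPIC+APBT
 * timer option is selected, the rating is lowered so the local APIC timer is
 * preferred per cpu and the APB timer is registered as the global clock
 * event instead.
 */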
static int __init apbt_clockevent_register(void)
{
	struct sfi_timer_table_entry *mtmr;
	struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);

	mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
	if (mtmr == NULL) {
		printk(KERN_ERR "Failed to get MTMR %d from SFI\n",
		       APBT_CLOCKEVENT0_NUM);
		return -ENODEV;
	}

	adev->num = smp_processor_id();
	adev->timer = dw_apb_clockevent_init(smp_processor_id(), "apbt0",
		intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ?
		APBT_CLOCKEVENT_RATING - 100 : APBT_CLOCKEVENT_RATING,
		adev_virt_addr(adev), 0, apbt_freq);
	/* Firmware does EOI handling for us. */
	adev->timer->eoi = NULL;

	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
		global_clock_event = &adev->timer->ced;
		printk(KERN_DEBUG "%s clockevent registered as global\n",
		       global_clock_event->name);
	}

	dw_apb_clockevent_register(adev->timer);

	sfi_free_mtmr(mtmr);
	return 0;
}

#ifdef CONFIG_SMP

static void apbt_setup_irq(struct apbt_dev *adev)
{
	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
}

/* Should be called on the CPU whose per cpu clockevent is being set up */
void apbt_setup_secondary_clock(void)
{
	struct apbt_dev *adev;
	int cpu;

	/* Don't register the boot CPU clockevent */
	cpu = smp_processor_id();
	if (!cpu)
		return;

	adev = this_cpu_ptr(&cpu_apbt_dev);
	if (!adev->timer) {
		adev->timer = dw_apb_clockevent_init(cpu, adev->name,
			APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
			adev->irq, apbt_freq);
		adev->timer->eoi = NULL;
	} else {
		dw_apb_clockevent_resume(adev->timer);
	}

	printk(KERN_INFO "Registering CPU %d clockevent device %s, cpu %08x\n",
	       cpu, adev->name, adev->cpu);

	apbt_setup_irq(adev);
	dw_apb_clockevent_register(adev->timer);

	return;
}

/*
 * This hotplug callback processes CPU offline events. In case of S0i3,
 * nonboot CPUs are disabled/enabled frequently; for performance reasons, we
 * keep the per cpu timer irq registered so that we do not need to do
 * free_irq/request_irq.
 *
 * TODO: it might be more reliable to directly disable the percpu clockevent
 * device without the hotplug callbacks. Currently, cpu 0 may get interrupts
 * from other cpu timers during the offline process due to the ordering of
 * the notifications. The extra interrupt is harmless.
 */
static int apbt_cpu_dead(unsigned int cpu)
{
	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);

	dw_apb_clockevent_pause(adev->timer);
	if (system_state == SYSTEM_RUNNING) {
		pr_debug("skipping APBT CPU %u offline\n", cpu);
	} else {
		pr_debug("APBT clockevent for cpu %u offline\n", cpu);
		dw_apb_clockevent_stop(adev->timer);
	}
	return 0;
}

static __init int apbt_late_init(void)
{
	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
	    !apb_timer_block_enabled)
		return 0;
	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "x86/apb:dead", NULL,
				 apbt_cpu_dead);
}
fs_initcall(apbt_late_init);
#else

void apbt_setup_secondary_clock(void) {}

#endif /* CONFIG_SMP */
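/*
 * Start the clocksource timer and verify that it is actually counting before
 * registering it. The check busy-waits 200000 TSC cycles (50us at 4 GHz,
 * 200us at 1 GHz) and panics if the counter has not advanced, since the APB
 * timer is the only always-on clocksource on this platform.
 */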
static int apbt_clocksource_register(void)
{
	u64 start, now;
	u64 t1;

	/* Start the counter, use timer 2 as source, timer 0/1 for event */
	dw_apb_clocksource_start(clocksource_apbt);

	/* Verify whether the apbt counter works */
	t1 = dw_apb_clocksource_read(clocksource_apbt);
	start = rdtsc();

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		now = rdtsc();
	} while ((now - start) < 200000UL);

	/* APBT is the only always-on clocksource, it has to work! */
	if (t1 == dw_apb_clocksource_read(clocksource_apbt))
		panic("APBT counter not counting. APBT disabled\n");

	dw_apb_clocksource_register(clocksource_apbt);

	return 0;
}

/*
 * Early setup of the APBT timer: only timer 0 is used for booting, then we
 * switch to per CPU timers if possible. apb_timer_block_enabled records
 * whether the setup succeeded.
 * Panics if setup fails, since this is the only platform timer on Moorestown.
 */
void __init apbt_time_init(void)
{
#ifdef CONFIG_SMP
	int i;
	struct sfi_timer_table_entry *p_mtmr;
	struct apbt_dev *adev;
#endif

	if (apb_timer_block_enabled)
		return;
	apbt_set_mapping();
	if (!apbt_virt_address)
		goto out_noapbt;
	/*
	 * Read the frequency and check for a sane value; for the ESL model
	 * we extend the possible clock range to allow time scaling.
	 */

	if (apbt_freq < APBT_MIN_FREQ || apbt_freq > APBT_MAX_FREQ) {
		pr_debug("APBT has invalid freq 0x%lx\n", apbt_freq);
		goto out_noapbt;
	}
	if (apbt_clocksource_register()) {
		pr_debug("APBT has failed to register clocksource\n");
		goto out_noapbt;
	}
	if (!apbt_clockevent_register())
		apb_timer_block_enabled = 1;
	else {
		pr_debug("APBT has failed to register clockevent\n");
		goto out_noapbt;
	}
#ifdef CONFIG_SMP
	/* kernel cmdline disabled the per cpu apb timers, use lapic timers */
	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT) {
		printk(KERN_INFO "apbt: disabled per cpu timer\n");
		return;
	}
	pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
	if (num_possible_cpus() <= sfi_mtimer_num)
		apbt_num_timers_used = num_possible_cpus();
	else
		apbt_num_timers_used = 1;
	pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);

	/* Here we set up the per CPU timer data structures */
	for (i = 0; i < apbt_num_timers_used; i++) {
		adev = &per_cpu(cpu_apbt_dev, i);
		adev->num = i;
		adev->cpu = i;
		p_mtmr = sfi_get_mtmr(i);
		if (p_mtmr)
			adev->irq = p_mtmr->irq;
		else
			printk(KERN_ERR "Failed to get timer for cpu %d\n", i);
		snprintf(adev->name, sizeof(adev->name) - 1, "apbt%d", i);
	}
#endif

	return;

out_noapbt:
	apbt_clear_mapping();
	apb_timer_block_enabled = 0;
	panic("failed to enable APB timer\n");
}