// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Clock and PLL control for C64x+ devices
 *
 * Copyright (C) 2010, 2011 Texas Instruments.
 * Contributed by: Mark Salter <msalter@redhat.com>
 *
 * Copied heavily from arm/mach-davinci/clock.c, so:
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 */

#include <linux/module.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>

#include <asm/clock.h>
#include <asm/soc.h>

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static void __clk_enable(struct clk *clk)
{
	if (clk->parent)
		__clk_enable(clk->parent);
	clk->usecount++;
}

static void __clk_disable(struct clk *clk)
{
	if (WARN_ON(clk->usecount == 0))
		return;
	--clk->usecount;

	if (clk->parent)
		__clk_disable(clk->parent);
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
	struct clk *clk;

	list_for_each_entry(clk, &root->children, childnode) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* Cannot change parent on enabled clock */
	if (WARN_ON(clk->usecount))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	clk->parent = parent;
	list_del_init(&clk->childnode);
	list_add(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->recalc)
		clk->rate = clk->recalc(clk);
	propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_set_parent);

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	if (WARN(clk->parent && !clk->parent->rate,
		 "CLK: %s parent %s has no rate!\n",
		 clk->name, clk->parent->name))
		return -EINVAL;

	mutex_lock(&clocks_mutex);
	list_add_tail(&clk->node, &clocks);
	if (clk->parent)
		list_add_tail(&clk->childnode, &clk->parent->children);
	mutex_unlock(&clocks_mutex);

	/* If rate is already set, use it */
	if (clk->rate)
		return 0;

	/* Else, see if there is a way to calculate it */
	if (clk->recalc)
		clk->rate = clk->recalc(clk);

	/* Otherwise, default to parent rate */
	else if (clk->parent)
		clk->rate = clk->parent->rate;

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->node);
	list_del(&clk->childnode);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);


static u32 pll_read(struct pll_data *pll, int reg)
{
	return soc_readl(pll->base + reg);
}

static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv = 0;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	if (!clk->div) {
		pr_debug("%s: (no divider) rate = %lu KHz\n",
			 clk->name, rate / 1000);
		return rate;
	}

	if (clk->flags & FIXED_DIV_PLL) {
		rate /= clk->div;
		pr_debug("%s: (fixed divide by %d) rate = %lu KHz\n",
			 clk->name, clk->div, rate / 1000);
		return rate;
	}

	v = pll_read(pll, clk->div);
	if (v & PLLDIV_EN)
		plldiv = (v & PLLDIV_RATIO_MASK) + 1;

	if (plldiv == 0)
		plldiv = 1;

	rate /= plldiv;

	pr_debug("%s: (divide by %d) rate = %lu KHz\n",
		 clk->name, plldiv, rate / 1000);

	return rate;
}

static unsigned long clk_leafclk_recalc(struct clk *clk)
{
	if (WARN_ON(!clk->parent))
		return clk->rate;

	pr_debug("%s: (parent %s) rate = %lu KHz\n",
		 clk->name, clk->parent->name, clk->parent->rate / 1000);

	return clk->parent->rate;
}

static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 0, prediv = 0, postdiv = 0;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	if (clk->flags & FIXED_RATE_PLL)
		return rate;

	ctrl = pll_read(pll, PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN)
		bypass = 0;
	else
		bypass = 1;

	if (pll->flags & PLL_HAS_MUL) {
		mult = pll_read(pll, PLLM);
		mult = (mult & PLLM_PLLM_MASK) + 1;
	}
	if (pll->flags & PLL_HAS_PRE) {
		prediv = pll_read(pll, PLLPRE);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
		else
			prediv = 0;
	}
	if (pll->flags & PLL_HAS_POST) {
		postdiv = pll_read(pll, PLLPOST);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		if (prediv)
			rate /= prediv;
		if (mult)
			rate *= mult;
		if (postdiv)
			rate /= postdiv;

		pr_debug("PLL%d: input = %luMHz, pre[%d] mul[%d] post[%d] "
			 "--> %luMHz output.\n",
			 pll->num, clk->parent->rate / 1000000,
			 prediv, mult, postdiv, rate / 1000000);
	} else
		pr_debug("PLL%d: input = %luMHz, bypass mode.\n",
			 pll->num, clk->parent->rate / 1000000);

	return rate;
}


static void __init __init_clk(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->node);
	INIT_LIST_HEAD(&clk->children);
	INIT_LIST_HEAD(&clk->childnode);

	if (!clk->recalc) {

		/* Check if clock is a PLL */
		if (clk->pll_data)
			clk->recalc = clk_pllclk_recalc;

		/* Else, if it is a PLL-derived clock */
		else if (clk->flags & CLK_PLL)
			clk->recalc = clk_sysclk_recalc;

		/* Otherwise, it is a leaf clock (PSC clock) */
		else if (clk->parent)
			clk->recalc = clk_leafclk_recalc;
	}
}

void __init c6x_clks_init(struct clk_lookup *clocks)
{
	struct clk_lookup *c;
	struct clk *clk;
	size_t num_clocks = 0;

	for (c = clocks; c->clk; c++) {
		clk = c->clk;

		__init_clk(clk);
		clk_register(clk);
		num_clocks++;

		/* Turn on clocks that Linux doesn't otherwise manage */
		if (clk->flags & ALWAYS_ENABLED)
			clk_enable(clk);
	}

	clkdev_add_table(clocks, num_clocks);
}

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX	10		/* longest clock name */
#define NEST_DELTA	2
#define NEST_MAX	4

static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char *state;
	char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk *clk;
	unsigned i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	memcpy(buf + nest, parent->name,
	       min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}

static int c6x_ck_show(struct seq_file *m, void *v)
{
	struct clk *clk;

	/*
	 * Show clock tree; We trust nonzero usecounts equate to PSC enables...
	 */
	mutex_lock(&clocks_mutex);
	list_for_each_entry(clk, &clocks, node)
		if (!clk->parent)
			dump_clock(m, 0, clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int c6x_ck_open(struct inode *inode, struct file *file)
{
	return single_open(file, c6x_ck_show, NULL);
}

static const struct file_operations c6x_ck_operations = {
	.open		= c6x_ck_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init c6x_clk_debugfs_init(void)
{
	debugfs_create_file("c6x_clocks", S_IFREG | S_IRUGO, NULL, NULL,
			    &c6x_ck_operations);

	return 0;
}
device_initcall(c6x_clk_debugfs_init);
#endif	/* CONFIG_DEBUG_FS */