Lines Matching refs:pd

21 * @pd: PRCI context
25 * address of the PRCI register target described by @pd, and return
30 * Return: the contents of the register described by @pd and @offs.
32 static u32 __prci_readl(struct __prci_data *pd, u32 offs)
34 return readl_relaxed(pd->va + offs);
37 static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
39 writel_relaxed(v, pd->va + offs);
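
These two accessors are the only MMIO touch points in the listing: every later fragment reads or writes a PRCI register through them, relative to the mapped base in pd->va. A minimal sketch of the read-modify-write idiom they enable (the helper name here is hypothetical, not part of the driver):

    /* Hypothetical helper, for illustration only: OR @mask into the
     * register at @offs using the two accessors above. */
    static void __prci_set_bits(struct __prci_data *pd, u32 offs, u32 mask)
    {
            u32 r;

            r = __prci_readl(pd, offs);     /* relaxed read from pd->va + offs */
            r |= mask;
            __prci_writel(r, offs, pd);     /* relaxed write back */
    }

Because both accessors use the _relaxed MMIO variants, callers that need a write to reach the device before continuing follow it with a read-back, as the /* barrier */ lines further down show.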
117 * @pd: PRCI context
121 * the PRCI identified by @pd, and store it into the local configuration
125 * @pd and @pwd from changing during execution.
127 static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
130 __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
135 * @pd: PRCI context
145 * @pd and @pwd from changing during execution.
147 static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
151 __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
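
Per the kernel-doc fragments, these two helpers maintain a software cache of each PLL's cfg0 settings in pwd->c: the register is read and unpacked into the cache, and the cache is packed and written back. A sketch of that round trip (the locking noted in the kernel-doc is the caller's responsibility; the rate-computation step is only hinted at):

    /* Sketch, assuming the caller holds whatever lock protects @pd/@pwd. */
    __prci_wrpll_read_cfg0(pd, pwd);                /* hardware -> pwd->c   */
    /* ... update pwd->c here, e.g. with new divider settings ...          */
    __prci_wrpll_write_cfg0(pd, pwd, &pwd->c);      /* pwd->c -> hardware   */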
159 * @pd: PRCI context
163 static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
167 __prci_writel(enable, pwd->cfg1_offs, pd);
206 struct __prci_data *pd = pc->pd;
214 pwd->enable_bypass(pd);
216 __prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
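
Read together, these set_rate fragments imply the rate-change sequence: first route consumers around the PLL via the per-PLL enable_bypass() callback (one of the *_use_hfclk selector helpers further down), then program the new settings from the cfg0 cache. A sketch of that flow as the fragments present it:

    /* Sketch of the rate-change path visible in the fragments above. */
    pwd->enable_bypass(pd);                     /* e.g. select HFCLK while the PLL retunes */
    __prci_wrpll_write_cfg0(pd, pwd, &pwd->c);  /* program the cached configuration */
    /* the PLL output is re-selected later, in the clock-enable path below */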
227 struct __prci_data *pd = pc->pd;
230 r = __prci_readl(pd, pwd->cfg1_offs);
242 struct __prci_data *pd = pc->pd;
247 __prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
250 pwd->disable_bypass(pd);
259 struct __prci_data *pd = pc->pd;
263 pwd->enable_bypass(pd);
265 r = __prci_readl(pd, pwd->cfg1_offs);
268 __prci_wrpll_write_cfg1(pd, pwd, r);
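
The enable/disable fragments gate the PLL output through its cfg1 register: enabling writes the clock-enable mask directly, while disabling reads cfg1 back, presumably clears only that bit, and writes the result. A sketch of the pair (the bit-clearing step is not shown in the listing and is assumed here):

    /* Enable: assert the clock-enable (CKE) bit in cfg1. */
    __prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);

    /* Disable: read-modify-write so only the enable bit changes. */
    r = __prci_readl(pd, pwd->cfg1_offs);
    r &= ~PRCI_COREPLLCFG1_CKE_MASK;            /* assumed step: clear just the enable bit */
    __prci_wrpll_write_cfg1(pd, pwd, r);

The enable path then calls pwd->disable_bypass(pd), switching consumers back onto the PLL output once it is gated on.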
277 struct __prci_data *pd = pc->pd;
281 v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
294 struct __prci_data *pd = pc->pd;
295 u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);
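
Both recalc_rate fragments derive a rate purely from a register read: the TLCLK path samples the clock-mux status register to learn the selected divide ratio, and the HFPCLK path reads a divider value and scales the parent rate by it. A schematic sketch of the first case (TLCLKSEL_STATUS_BIT is an assumed placeholder; the real driver uses its own mask constants and divider encoding):

    /* Schematic only: how a mux-status read turns into a rate. */
    static unsigned long tlclk_rate_sketch(struct __prci_data *pd,
                                           unsigned long parent_rate)
    {
            u32 v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
            unsigned int div = (v & TLCLKSEL_STATUS_BIT) ? 1 : 2;  /* assumed field name */

            return parent_rate / div;
    }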
306 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
313 void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
317 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
319 __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
321 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
327 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
334 void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
338 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
340 __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
342 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
348 * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
356 void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
360 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
362 __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
364 r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
370 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
377 void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
381 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
383 __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
385 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
391 * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
398 void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
402 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
404 __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
406 r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
412 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
419 void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
423 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
425 __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
427 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
433 * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
440 void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
444 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
446 __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
448 r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
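
All seven selector helpers above (the CORECLK, COREPLL and HFPCLKPLL muxes) follow one pattern: read the select register, set or clear the selection field, write it back, then read the register again. That final read is the /* barrier */ noted in each fragment: because __prci_readl()/__prci_writel() are relaxed accessors, the read-back is what ensures the write has reached the PRCI before the caller proceeds. A generic sketch of the pattern (SELECT_MASK stands in for the per-register selection field, which the listing omits):

    /* Generic mux-switch sketch; SELECT_MASK is an assumed placeholder. */
    u32 r;

    r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
    r |= SELECT_MASK;                        /* or r &= ~SELECT_MASK, depending on the target */
    __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);

    r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);    /* read-back orders the relaxed write */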
455 struct __prci_data *pd = pc->pd;
458 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);
469 struct __prci_data *pd = pc->pd;
475 __prci_writel(1, PRCI_PCIE_AUX_OFFSET, pd);
476 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
484 struct __prci_data *pd = pc->pd;
487 __prci_writel(0, PRCI_PCIE_AUX_OFFSET, pd);
488 r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET); /* barrier */
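
The PCIE AUX clock is a plain gate: is_enabled samples its register, enable writes 1 and disable writes 0 outright (no read-modify-write, suggesting the register carries only the enable control), each followed by the usual read-back barrier. A sketch of the query side (PCIE_AUX_EN is an assumed name for the enable bit):

    /* Sketch: report gate state from the register contents. */
    r = __prci_readl(pd, PRCI_PCIE_AUX_OFFSET);
    return (r & PCIE_AUX_EN) ? 1 : 0;       /* PCIE_AUX_EN: assumed mask name */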
495 * @pd: The pointer for PRCI per-device instance data
503 static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
527 pic->pd = pd;
530 __prci_wrpll_read_cfg0(pd, pic->pwd);
546 pd->hw_clks.hws[i] = &pic->hw;
549 pd->hw_clks.num = i;
552 &pd->hw_clks);
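
The registration fragments show each clock control being bound to its PRCI instance (pic->pd = pd), having its PLL cache primed with __prci_wrpll_read_cfg0(), and being collected into pd->hw_clks so the filled-in table can be handed to the clock framework in one call. A sketch of the loop those fragments imply (the desc->clks table, the PLL-only check and the registration call are simplified or assumed):

    /* Sketch of the implied registration loop; details are simplified. */
    for (i = 0; i < desc->num_clks; ++i) {
            pic = &desc->clks[i];                   /* assumed table layout */

            pic->pd = pd;                           /* bind the clock to this PRCI */
            if (pic->pwd)                           /* PLL-backed clocks only (assumed check) */
                    __prci_wrpll_read_cfg0(pd, pic->pwd);   /* prime the cfg0 cache */

            r = devm_clk_hw_register(dev, &pic->hw);        /* assumed registration call */
            if (r)
                    return r;

            pd->hw_clks.hws[i] = &pic->hw;          /* collect for the onecell provider */
    }
    pd->hw_clks.num = i;

The trailing &pd->hw_clks argument suggests the completed onecell table is then registered as the DT clock provider, presumably via devm_of_clk_add_hw_provider().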
570 struct __prci_data *pd;
576 pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
577 if (!pd)
580 pd->va = devm_platform_ioremap_resource(pdev, 0);
581 if (IS_ERR(pd->va))
582 return PTR_ERR(pd->va);
584 pd->reset.rcdev.owner = THIS_MODULE;
585 pd->reset.rcdev.nr_resets = PRCI_RST_NR;
586 pd->reset.rcdev.ops = &reset_simple_ops;
587 pd->reset.rcdev.of_node = pdev->dev.of_node;
588 pd->reset.active_low = true;
589 pd->reset.membase = pd->va + PRCI_DEVICESRESETREG_OFFSET;
590 spin_lock_init(&pd->reset.lock);
592 r = devm_reset_controller_register(&pdev->dev, &pd->reset.rcdev);
597 r = __prci_register_clocks(dev, pd, desc);
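
Taken together, the probe fragments let the shape of struct __prci_data be read off: an ioremapped register base (va), an embedded reset_simple_data whose membase points into that mapping at the device-reset register, and a trailing clk_hw_onecell_data whose flexible hws[] array is why the allocation uses struct_size(). A sketch of that implied layout (reconstructed from the accesses above; the real definition lives in the driver's header and may carry more members):

    /* Implied layout only; reconstructed from the field accesses above. */
    struct __prci_data {
            void __iomem *va;                       /* MMIO base used by __prci_readl/__prci_writel */
            struct reset_simple_data reset;         /* rcdev, membase, active_low, lock */
            struct clk_hw_onecell_data hw_clks;     /* .hws[] flexible array; sized via struct_size() */
    };

The probe order in the fragments is then: allocate pd sized for desc->num_clks clock handles, map the registers, register the reset controller, and finally register the clocks.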