Lines Matching defs:tmu

39 struct sh_tmu_device *tmu;
89 switch (ch->tmu->model) {
91 return ioread8(ch->tmu->mapbase + 2);
93 return ioread8(ch->tmu->mapbase + 4);
111 switch (ch->tmu->model) {
113 return iowrite8(value, ch->tmu->mapbase + 2);
115 return iowrite8(value, ch->tmu->mapbase + 4);
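These matches appear to come from the Linux sh_tmu timer driver's register accessors: the shared TSTR (timer start/stop) register sits at offset 2 from the unit's mapbase on SH3-style parts and at offset 4 otherwise, while per-channel registers go through ch->base. A minimal sketch of how the read side fits around the matched lines, assuming the usual offs/TCR handling in this driver family (everything outside the switch is an assumption, not part of the matches):

    static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
    {
        unsigned long offs;

        /* TSTR is shared by all channels and lives at a model-dependent offset. */
        if (reg_nr == TSTR) {
            switch (ch->tmu->model) {
            case SH_TMU_SH3:
                return ioread8(ch->tmu->mapbase + 2);
            case SH_TMU:
                return ioread8(ch->tmu->mapbase + 4);
            }
        }

        /* Per-channel registers are spaced 4 bytes apart from the channel base. */
        offs = reg_nr << 2;
        if (reg_nr == TCR)
            return ioread16(ch->base + offs);
        else
            return ioread32(ch->base + offs);
    }

The write accessor mirrors this shape, which is why the matched "return iowrite8(...)" lines are fine as written: returning a void expression from a void function is accepted in kernel C.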
132 raw_spin_lock_irqsave(&ch->tmu->lock, flags);
141 raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
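The raw spinlock guards read-modify-write cycles on TSTR, since every channel toggles its own start bit in the same shared byte. A sketch of the start/stop helper these two lines presumably belong to; the function name and bit manipulation are inferred from the driver's structure, not taken from the matches:

    static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
    {
        unsigned long flags;
        unsigned long value;

        /* TSTR is shared by all channels of one TMU instance. */
        raw_spin_lock_irqsave(&ch->tmu->lock, flags);
        value = sh_tmu_read(ch, TSTR);

        if (start)
            value |= 1 << ch->index;
        else
            value &= ~(1 << ch->index);

        sh_tmu_write(ch, TSTR, value);
        raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
    }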
149 ret = clk_enable(ch->tmu->clk);
151 dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
177 pm_runtime_get_sync(&ch->tmu->pdev->dev);
178 dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
192 clk_disable(ch->tmu->clk);
205 dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
206 pm_runtime_put(&ch->tmu->pdev->dev);
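The enable path pins the device through runtime PM before touching the clock and the channel registers; disable reverses that order. A condensed sketch, assuming the enable_count refcount and the TCR_TPSC_CLK4 prescaler setting commonly used by this driver (details outside the matched lines are assumptions):

    static int sh_tmu_enable(struct sh_tmu_channel *ch)
    {
        int ret;

        if (ch->enable_count++ > 0)
            return 0;

        pm_runtime_get_sync(&ch->tmu->pdev->dev);
        dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

        ret = clk_enable(ch->tmu->clk);
        if (ret) {
            dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
                ch->index);
            return ret;
        }

        /* Stop the channel, load the maximum timeout, select clk/4, restart. */
        sh_tmu_start_stop_ch(ch, 0);
        sh_tmu_write(ch, TCOR, 0xffffffff);
        sh_tmu_write(ch, TCNT, 0xffffffff);
        sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
        sh_tmu_start_stop_ch(ch, 1);

        return 0;
    }

The disable path (matched lines 192-206) undoes this in reverse: stop the channel, clk_disable(), drop the syscore marking and the runtime PM reference.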
295 dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
307 dev_pm_genpd_resume(&ch->tmu->pdev->dev);
327 dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
330 clocksource_register_hz(cs, ch->tmu->rate);
344 ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
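clocksource_register_hz() is handed the raw counter frequency, and the periodic clockevent reload value is that same rate converted to counter ticks per jiffy; the + HZ/2 term rounds to nearest rather than truncating. Illustrative numbers only, not taken from the matches: with rate = 16,666,666 Hz and HZ = 250, (16666666 + 125) / 250 = 66667, whereas plain truncating division would give 66666.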
367 dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
397 dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
402 dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
423 dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
426 clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);
430 dev_name(&ch->tmu->pdev->dev), ch);
432 dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
442 ch->tmu->has_clockevent = true;
445 ch->tmu->has_clocksource = true;
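The has_clockevent/has_clocksource flags record which roles were actually taken, and the registration helper appears to give each channel exactly one role. A sketch of that dispatch, with the name and signature inferred from sh_tmu_channel_setup() below rather than taken from the matches:

    static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
                   bool clockevent, bool clocksource)
    {
        if (clockevent) {
            ch->tmu->has_clockevent = true;
            sh_tmu_register_clockevent(ch, name);
        } else if (clocksource) {
            ch->tmu->has_clocksource = true;
            sh_tmu_register_clocksource(ch, name);
        }

        return 0;
    }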
454 struct sh_tmu_device *tmu)
460 ch->tmu = tmu;
463 if (tmu->model == SH_TMU_SH3)
464 ch->base = tmu->mapbase + 4 + ch->index * 12;
466 ch->base = tmu->mapbase + 8 + ch->index * 12;
468 ch->irq = platform_get_irq(tmu->pdev, index);
475 return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
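Reading the two matched base calculations together with the accessor sketch above: each channel owns a 12-byte register block (TCOR, TCNT, TCR spaced 4 bytes apart), starting at offset 4 on SH3 parts, where the 8-bit TSTR sits at offset 2, and at offset 8 otherwise. A worked example under the assumption that the register indices are TCOR = 0, TCNT = 1, TCR = 2:

    /* Standard model, channel 1, TCNT:
     *   ch->base = mapbase + 8 + 1 * 12 = mapbase + 20
     *   address  = ch->base + (TCNT << 2) = mapbase + 24
     * SH3 model, channel 0, TCR:
     *   ch->base = mapbase + 4 + 0 * 12 = mapbase + 4
     *   address  = ch->base + (TCR << 2)  = mapbase + 12
     */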
479 static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
483 res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
485 dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
489 tmu->mapbase = ioremap(res->start, resource_size(res));
490 if (tmu->mapbase == NULL)
496 static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
498 struct device_node *np = tmu->pdev->dev.of_node;
500 tmu->model = SH_TMU;
501 tmu->num_channels = 3;
503 of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);
505 if (tmu->num_channels != 2 && tmu->num_channels != 3) {
506 dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
507 tmu->num_channels);
514 static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
519 tmu->pdev = pdev;
521 raw_spin_lock_init(&tmu->lock);
524 ret = sh_tmu_parse_dt(tmu);
531 tmu->model = id->driver_data;
532 tmu->num_channels = hweight8(cfg->channels_mask);
534 dev_err(&tmu->pdev->dev, "missing platform data\n");
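The two configuration paths in sh_tmu_setup() are mutually exclusive: an OF node is parsed for #renesas,channels, otherwise legacy platform data supplies the model and a channel mask. A sketch of that branch, assuming the conventional IS_ENABLED(CONFIG_OF) guard and the struct sh_timer_config platform data used by the SH timer drivers:

    if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
        ret = sh_tmu_parse_dt(tmu);
        if (ret < 0)
            return ret;
    } else if (pdev->dev.platform_data) {
        const struct platform_device_id *id = pdev->id_entry;
        struct sh_timer_config *cfg = pdev->dev.platform_data;

        /* hweight8() counts the set bits in channels_mask. */
        tmu->model = id->driver_data;
        tmu->num_channels = hweight8(cfg->channels_mask);
    } else {
        dev_err(&tmu->pdev->dev, "missing platform data\n");
        return -ENXIO;
    }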
539 tmu->clk = clk_get(&tmu->pdev->dev, "fck");
540 if (IS_ERR(tmu->clk)) {
541 dev_err(&tmu->pdev->dev, "cannot get clock\n");
542 return PTR_ERR(tmu->clk);
545 ret = clk_prepare(tmu->clk);
550 ret = clk_enable(tmu->clk);
554 tmu->rate = clk_get_rate(tmu->clk) / 4;
555 clk_disable(tmu->clk);
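The stored rate is a quarter of the functional clock, which matches the TCR_TPSC_CLK4 prescaler the enable path is assumed to program (see the sketch above); the clock is only enabled here long enough to sample its rate. Illustrative numbers: a 33 MHz fck would give tmu->rate = 8,250,000 Hz.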
558 ret = sh_tmu_map_memory(tmu);
560 dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
565 tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
567 if (tmu->channels == NULL) {
576 for (i = 0; i < tmu->num_channels; ++i) {
577 ret = sh_tmu_channel_setup(&tmu->channels[i], i,
578 i == 0, i == 1, tmu);
583 platform_set_drvdata(pdev, tmu);
588 kfree(tmu->channels);
589 iounmap(tmu->mapbase);
591 clk_unprepare(tmu->clk);
593 clk_put(tmu->clk);
599 struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
607 if (tmu) {
612 tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
613 if (tmu == NULL)
616 ret = sh_tmu_setup(tmu, pdev);
618 kfree(tmu);
627 if (tmu->has_clockevent || tmu->has_clocksource)
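The probe path distinguishes a device already set up as an early timer (drvdata present) from a cold probe, and only marks runtime PM as IRQ-safe when a channel was actually registered. A condensed sketch of that flow, with the early-platform handling omitted and error unwinding reduced to the matched kfree(); the exact messages and PM calls outside the matches are assumptions:

    static int sh_tmu_probe(struct platform_device *pdev)
    {
        struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
        int ret;

        if (tmu) {
            dev_info(&pdev->dev, "kept as earlytimer\n");
            goto out;
        }

        tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
        if (tmu == NULL)
            return -ENOMEM;

        ret = sh_tmu_setup(tmu, pdev);
        if (ret) {
            kfree(tmu);
            return ret;
        }

    out:
        if (tmu->has_clockevent || tmu->has_clocksource)
            pm_runtime_irq_safe(&pdev->dev);
        else
            pm_runtime_idle(&pdev->dev);

        return 0;
    }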
636 { "sh-tmu", SH_TMU },
637 { "sh-tmu-sh3", SH_TMU_SH3 },
643 { .compatible = "renesas,tmu" },
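The last three matches are the device ID tables tying the model enum to the legacy platform device names and to the generic DT compatible. A sketch of how such tables are typically laid out in this driver; the sentinel entries and MODULE_DEVICE_TABLE lines are assumptions, not part of the matches:

    static const struct platform_device_id sh_tmu_id_table[] = {
        { "sh-tmu", SH_TMU },
        { "sh-tmu-sh3", SH_TMU_SH3 },
        { }
    };
    MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

    static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
        { .compatible = "renesas,tmu" },
        { }
    };
    MODULE_DEVICE_TABLE(of, sh_tmu_of_table);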