Lines matching refs: dev
82 struct orion_watchdog *dev)
86 dev->clk = clk_get(&pdev->dev, NULL);
87 if (IS_ERR(dev->clk))
88 return PTR_ERR(dev->clk);
89 ret = clk_prepare_enable(dev->clk);
91 clk_put(dev->clk);
95 dev->clk_rate = clk_get_rate(dev->clk);
100 struct orion_watchdog *dev)
104 dev->clk = clk_get(&pdev->dev, NULL);
105 if (IS_ERR(dev->clk))
106 return PTR_ERR(dev->clk);
107 ret = clk_prepare_enable(dev->clk);
109 clk_put(dev->clk);
114 atomic_io_modify(dev->reg + TIMER_CTRL,
118 dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
123 struct orion_watchdog *dev)
127 dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
128 if (!IS_ERR(dev->clk)) {
129 ret = clk_prepare_enable(dev->clk);
131 clk_put(dev->clk);
135 atomic_io_modify(dev->reg + TIMER_CTRL,
138 dev->clk_rate = clk_get_rate(dev->clk);
144 dev->clk = clk_get(&pdev->dev, NULL);
145 if (IS_ERR(dev->clk))
146 return PTR_ERR(dev->clk);
148 ret = clk_prepare_enable(dev->clk);
150 clk_put(dev->clk);
154 atomic_io_modify(dev->reg + TIMER_CTRL,
157 dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO;
163 struct orion_watchdog *dev)
168 dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed");
169 if (IS_ERR(dev->clk))
170 return PTR_ERR(dev->clk);
171 ret = clk_prepare_enable(dev->clk);
173 clk_put(dev->clk);
179 atomic_io_modify(dev->reg + TIMER_CTRL, val, val);
181 dev->clk_rate = clk_get_rate(dev->clk);
187 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
189 writel(dev->clk_rate * wdt_dev->timeout,
190 dev->reg + dev->data->wdt_counter_offset);
191 if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
192 writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout),
193 dev->reg + TIMER1_VAL_OFF);
200 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
204 writel(dev->clk_rate * wdt_dev->timeout,
205 dev->reg + dev->data->wdt_counter_offset);
206 if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
207 writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout),
208 dev->reg + TIMER1_VAL_OFF);
211 atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
214 reg = dev->data->wdt_enable_bit;
215 if (dev->wdt.info->options & WDIOF_PRETIMEOUT)
217 atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg);
220 reg = readl(dev->rstout);
221 reg |= dev->data->rstout_enable_bit;
222 writel(reg, dev->rstout);
224 atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit, 0);
230 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
234 writel(dev->clk_rate * wdt_dev->timeout,
235 dev->reg + dev->data->wdt_counter_offset);
238 atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0);
241 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
242 dev->data->wdt_enable_bit);
245 reg = readl(dev->rstout);
246 reg |= dev->data->rstout_enable_bit;
247 writel(reg, dev->rstout);
253 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
256 writel(dev->clk_rate * wdt_dev->timeout,
257 dev->reg + dev->data->wdt_counter_offset);
260 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit,
261 dev->data->wdt_enable_bit);
264 atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit,
265 dev->data->rstout_enable_bit);
272 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
275 return dev->data->start(wdt_dev);
280 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
283 atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, 0);
286 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
293 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
297 atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit,
298 dev->data->rstout_mask_bit);
299 reg = readl(dev->rstout);
300 reg &= ~dev->data->rstout_enable_bit;
301 writel(reg, dev->rstout);
304 mask = dev->data->wdt_enable_bit;
307 atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0);
314 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
318 reg = readl(dev->rstout);
319 reg &= ~dev->data->rstout_enable_bit;
320 writel(reg, dev->rstout);
323 atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0);
330 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
332 return dev->data->stop(wdt_dev);
335 static int orion_enabled(struct orion_watchdog *dev)
339 enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
340 running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
345 static int armada375_enabled(struct orion_watchdog *dev)
349 masked = readl(dev->rstout_mask) & dev->data->rstout_mask_bit;
350 enabled = readl(dev->rstout) & dev->data->rstout_enable_bit;
351 running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit;
358 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
360 return dev->data->enabled(dev);
365 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
366 return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate;
390 struct orion_watchdog *dev = devid;
392 atomic_io_modify(dev->reg + TIMER_A370_STATUS,
394 watchdog_notify_pretimeout(&dev->wdt);
412 return devm_ioremap(&pdev->dev, res->start,
418 return devm_ioremap(&pdev->dev, rstout, 0x4);
499 struct orion_watchdog *dev)
501 struct device_node *node = pdev->dev.of_node;
507 dev->reg = devm_ioremap(&pdev->dev, res->start,
509 if (!dev->reg)
515 dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start &
517 if (!dev->rstout)
524 dev->rstout = devm_platform_ioremap_resource(pdev, 1);
525 if (IS_ERR(dev->rstout))
526 return PTR_ERR(dev->rstout);
532 dev->rstout = devm_platform_ioremap_resource(pdev, 1);
533 if (IS_ERR(dev->rstout))
534 return PTR_ERR(dev->rstout);
539 dev->rstout_mask = devm_ioremap(&pdev->dev, res->start,
541 if (!dev->rstout_mask)
553 struct orion_watchdog *dev;
558 dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog),
560 if (!dev)
563 match = of_match_device(orion_wdt_of_match_table, &pdev->dev);
568 dev->wdt.info = &orion_wdt_info;
569 dev->wdt.ops = &orion_wdt_ops;
570 dev->wdt.min_timeout = 1;
571 dev->data = match->data;
573 ret = orion_wdt_get_regs(pdev, dev);
577 ret = dev->data->clock_init(pdev, dev);
579 dev_err(&pdev->dev, "cannot initialize clock\n");
583 wdt_max_duration = WDT_MAX_CYCLE_COUNT / dev->clk_rate;
585 dev->wdt.timeout = wdt_max_duration;
586 dev->wdt.max_timeout = wdt_max_duration;
587 dev->wdt.parent = &pdev->dev;
588 watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev);
590 platform_set_drvdata(pdev, &dev->wdt);
591 watchdog_set_drvdata(&dev->wdt, dev);
599 if (!orion_wdt_enabled(&dev->wdt))
600 orion_wdt_stop(&dev->wdt);
602 set_bit(WDOG_HW_RUNNING, &dev->wdt.status);
611 ret = devm_request_irq(&pdev->dev, irq, orion_wdt_irq, 0,
612 pdev->name, dev);
614 dev_err(&pdev->dev, "failed to request IRQ\n");
623 ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq,
624 0, pdev->name, dev);
626 dev_err(&pdev->dev, "failed to request IRQ\n");
632 watchdog_set_nowayout(&dev->wdt, nowayout);
633 ret = watchdog_register_device(&dev->wdt);
638 dev->wdt.timeout, nowayout ? ", nowayout" : "");
642 clk_disable_unprepare(dev->clk);
643 clk_put(dev->clk);
650 struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev);
653 clk_disable_unprepare(dev->clk);
654 clk_put(dev->clk);
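
Note: the hits above appear to come from the mvebu/Orion watchdog driver (likely drivers/watchdog/orion_wdt.c) and cluster in the per-variant clock setup helpers, the start/stop/enabled paths, the register mapping, and probe. As an illustration of how the first cluster (file lines 82-95) fits together, here is a minimal sketch of the plain-Orion clock-init helper reconstructed from those fragments; the function name, the if (ret) error branch, and the trailing return 0 are filled in from context and are assumptions, not a verbatim copy of the file.

	/*
	 * Sketch reconstructed from the matched fragments at file lines
	 * 82-95.  The function name and the error branch are assumed;
	 * struct orion_watchdog (with ->clk and ->clk_rate members, which
	 * the matched lines confirm) is defined earlier in the same file.
	 */
	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int orion_wdt_clock_init(struct platform_device *pdev,
					struct orion_watchdog *dev)
	{
		int ret;

		/* Plain Orion: one unnamed clock feeds the watchdog timer. */
		dev->clk = clk_get(&pdev->dev, NULL);
		if (IS_ERR(dev->clk))
			return PTR_ERR(dev->clk);

		ret = clk_prepare_enable(dev->clk);
		if (ret) {
			clk_put(dev->clk);
			return ret;
		}

		/* Counter ticks at the raw clock rate (no fixed divider). */
		dev->clk_rate = clk_get_rate(dev->clk);
		return 0;
	}

The later clusters of hits show that the Armada 370/375/XP variants follow the same pattern but additionally poke TIMER_CTRL through atomic_io_modify() and, for the 25 MHz "fixed" clock case, derive clk_rate with a WDT_A370_RATIO divider.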