Lines matching defs:t7l66xb (cross-reference hits for the t7l66xb identifier in the Toshiba T7L66XB MFD driver)

32 #include <linux/mfd/t7l66xb.h>
66 struct t7l66xb {
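
Every structure member dereferenced later in this listing (scr, lock, rscr, clk48m, clk32k, irq, irq_base) has to live in the struct opened at line 66. A minimal sketch of that structure, inferred only from those accesses; field order, and any fields the matched lines never touch, are unknown and omitted:

    /* Sketch inferred from the member accesses visible in this listing. */
    struct t7l66xb {
            void __iomem    *scr;           /* mapped SCR register window */
            raw_spinlock_t  lock;           /* protects IMR/DEV_CTL read-modify-write */
            struct resource rscr;           /* sub-resource claimed for the SCR block */
            struct clk      *clk48m;        /* 48 MHz core clock */
            struct clk      *clk32k;        /* 32 kHz clock for the SD/MMC cell */
            int             irq;            /* upstream (chained) interrupt line */
            int             irq_base;       /* first Linux IRQ of the demuxed sources */
    };
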
82 struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
87 ret = clk_prepare_enable(t7l66xb->clk32k);
91 raw_spin_lock_irqsave(&t7l66xb->lock, flags);
93 dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
95 tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
97 raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
99 tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
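
Lines 82-99 sit inside the MMC cell's enable hook: turn on the 32 kHz clock, set the MMC bit in DEV_CTL under the raw spinlock, then program the tmio MMC core behind the 0x200 register window. The cluster at lines 107-119 just below is the mirror image (clear the bit, then clk_disable_unprepare). A hedged sketch of the enable path; the function name, the SCR_DEV_CTL_MMC bit name and the base address handed to tmio_core_mmc_enable() are assumptions, not shown in the listing:

    /* Sketch of the MMC cell enable hook; names marked "assumed" are not in the listing. */
    static int t7l66xb_mmc_enable(struct platform_device *mmc)
    {
            struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
            unsigned long flags;
            u8 dev_ctl;
            int ret;

            ret = clk_prepare_enable(t7l66xb->clk32k);
            if (ret)
                    return ret;

            raw_spin_lock_irqsave(&t7l66xb->lock, flags);
            dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
            dev_ctl |= SCR_DEV_CTL_MMC;             /* assumed: bit that gates the MMC block */
            tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
            raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);

            /* The tmio MMC core sits at SCR + 0x200; the base argument here is assumed. */
            tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
                                 mmc->resource[0].start & 0xfffe);

            return 0;
    }

The raw spinlock is needed because DEV_CTL, like IMR below, is an 8-bit register that can only be updated with a read-modify-write, and the IRQ paths touch the same registers.
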
107 struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
111 raw_spin_lock_irqsave(&t7l66xb->lock, flags);
113 dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
115 tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
117 raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
119 clk_disable_unprepare(t7l66xb->clk32k);
126 struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
128 tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
133 struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
135 tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
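
The two small clusters at lines 126-128 and 133-135 are thin pass-throughs from the MMC cell into the shared tmio core helpers: one forwards a power-state change, the other a clock-divider change. A sketch of their likely shape; only the bodies appear in the listing, the hook names are assumed:

    /* Sketches of the MMC power and clock-divider hooks; both forward to tmio_core. */
    static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
    {
            struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);

            tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
    }

    static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
    {
            struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);

            tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
    }
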
186 struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
190 irq_base = t7l66xb->irq_base;
192 while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) &
193 ~tmio_ioread8(t7l66xb->scr + SCR_IMR)))
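
Lines 186-193 are the chained handler that demultiplexes the single upstream interrupt: it keeps re-reading ISR, drops any source that is masked in IMR, and forwards each remaining bit to the Linux IRQ mapped at irq_base. A hedged sketch; the handler name, the loop over the eight status bits and the local declarations are assumptions:

    /* Sketch of the chained demux handler installed at line 248. */
    static void t7l66xb_irq(struct irq_desc *desc)
    {
            struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
            unsigned int irq_base = t7l66xb->irq_base;
            unsigned int isr, i;

            /* Loop until no unmasked source is still pending. */
            while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) &
                          ~tmio_ioread8(t7l66xb->scr + SCR_IMR)))
                    for (i = 0; i < 8; i++)         /* ISR/IMR are 8-bit registers */
                            if (isr & BIT(i))
                                    generic_handle_irq(irq_base + i);
    }
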
201 struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
205 raw_spin_lock_irqsave(&t7l66xb->lock, flags);
206 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
207 imr |= 1 << (data->irq - t7l66xb->irq_base);
208 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
209 raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
214 struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
218 raw_spin_lock_irqsave(&t7l66xb->lock, flags);
219 imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
220 imr &= ~(1 << (data->irq - t7l66xb->irq_base));
221 tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
222 raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
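
Lines 201-209 and 214-222 are the irq_chip mask and unmask callbacks: mask sets the per-source bit in IMR (line 207), unmask clears it (line 220), and both hold the raw spinlock because IMR needs a read-modify-write. A sketch of the pair as the matched lines suggest, with only the function names assumed:

    /* Sketch of the irq_chip mask/unmask pair, built from the lines above. */
    static void t7l66xb_irq_mask(struct irq_data *data)
    {
            struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
            unsigned long flags;
            u8 imr;

            raw_spin_lock_irqsave(&t7l66xb->lock, flags);
            imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
            imr |= 1 << (data->irq - t7l66xb->irq_base);    /* 1 = source masked */
            tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
            raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
    }

    static void t7l66xb_irq_unmask(struct irq_data *data)
    {
            struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
            unsigned long flags;
            u8 imr;

            raw_spin_lock_irqsave(&t7l66xb->lock, flags);
            imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
            imr &= ~(1 << (data->irq - t7l66xb->irq_base));
            tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
            raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
    }
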
226 .name = "t7l66xb",
237 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
240 irq_base = t7l66xb->irq_base;
244 irq_set_chip_data(irq, t7l66xb);
247 irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
248 irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
253 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
256 irq_base = t7l66xb->irq_base;
258 irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);
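
Lines 237-248 map each demuxed source onto a Linux IRQ (chip data plus the irq_chip whose .name appears at line 226) and then install the chained handler on the falling-edge upstream line; lines 253-258 undo that. A hedged sketch; the number of sub-IRQs, the irq_chip variable name and the level flow handler are assumptions:

    /* Sketch of the IRQ attach/detach helpers. */
    static void t7l66xb_attach_irq(struct platform_device *dev)
    {
            struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
            unsigned int irq, irq_base = t7l66xb->irq_base;

            for (irq = irq_base; irq < irq_base + 8; irq++) {       /* assumed: 8 sources */
                    irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
                    irq_set_chip_data(irq, t7l66xb);
            }

            irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
            irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
    }

    static void t7l66xb_detach_irq(struct platform_device *dev)
    {
            struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
            unsigned int irq, irq_base = t7l66xb->irq_base;

            irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);

            for (irq = irq_base; irq < irq_base + 8; irq++)
                    irq_set_chip_data(irq, NULL);
    }
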
271 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
276 clk_disable_unprepare(t7l66xb->clk48m);
283 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
287 ret = clk_prepare_enable(t7l66xb->clk48m);
294 tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
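
Suspend (lines 271-276) just gates the 48 MHz clock; resume (lines 283-294) re-enables it and re-programs the tmio MMC core, since the SCR block loses that setup while unclocked. A hedged sketch of the resume side; the callback form, the error handling and the base-address argument are assumptions:

    /* Sketch of the resume callback; error handling and pdata hooks are assumed. */
    static int t7l66xb_resume(struct platform_device *dev)
    {
            struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
            int ret;

            ret = clk_prepare_enable(t7l66xb->clk48m);
            if (ret)
                    return ret;

            /* Re-initialise the MMC core behind SCR + 0x200; the base argument is assumed. */
            tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
                                 dev->resource[0].start & 0xfffe);

            return 0;
    }
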
309 struct t7l66xb *t7l66xb;
320 t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL);
321 if (!t7l66xb)
324 raw_spin_lock_init(&t7l66xb->lock);
326 platform_set_drvdata(dev, t7l66xb);
330 t7l66xb->irq = ret;
334 t7l66xb->irq_base = pdata->irq_base;
336 t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K");
337 if (IS_ERR(t7l66xb->clk32k)) {
338 ret = PTR_ERR(t7l66xb->clk32k);
342 t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M");
343 if (IS_ERR(t7l66xb->clk48m)) {
344 ret = PTR_ERR(t7l66xb->clk48m);
348 rscr = &t7l66xb->rscr;
349 rscr->name = "t7l66xb-core";
358 t7l66xb->scr = ioremap(rscr->start, resource_size(rscr));
359 if (!t7l66xb->scr) {
364 ret = clk_prepare_enable(t7l66xb->clk48m);
372 tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR);
375 dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID),
376 (unsigned long)iomem->start, t7l66xb->irq);
385 iomem, t7l66xb->irq_base, NULL);
391 clk_disable_unprepare(t7l66xb->clk48m);
393 iounmap(t7l66xb->scr);
395 release_resource(&t7l66xb->rscr);
397 clk_put(t7l66xb->clk48m);
399 clk_put(t7l66xb->clk32k);
402 kfree(t7l66xb);
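
Lines 309-402 span the probe routine: allocate the state, look up the upstream IRQ and the two clocks, claim and ioremap the SCR window, enable the 48 MHz clock, mask all but one interrupt source (the 0xbf write at line 372), hook up the IRQ demux and register the MFD cells; lines 391-402 show the unwind order on failure. A hedged reconstruction of that sequence; the error labels, the 0xff window size, the cell table name and the printk format are assumptions:

    /* Hedged reconstruction of probe; names marked "assumed" are not in the listing. */
    static int t7l66xb_probe(struct platform_device *dev)
    {
            struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
            struct t7l66xb *t7l66xb;
            struct resource *iomem, *rscr;
            int ret;

            iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
            if (!iomem || !pdata)
                    return -EINVAL;

            t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL);
            if (!t7l66xb)
                    return -ENOMEM;

            raw_spin_lock_init(&t7l66xb->lock);
            platform_set_drvdata(dev, t7l66xb);

            ret = platform_get_irq(dev, 0);
            if (ret < 0)
                    goto err_free;
            t7l66xb->irq = ret;
            t7l66xb->irq_base = pdata->irq_base;

            t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K");
            if (IS_ERR(t7l66xb->clk32k)) {
                    ret = PTR_ERR(t7l66xb->clk32k);
                    goto err_free;
            }

            t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M");
            if (IS_ERR(t7l66xb->clk48m)) {
                    ret = PTR_ERR(t7l66xb->clk48m);
                    goto err_put_clk32k;
            }

            /* Claim the SCR sub-range inside the device's MMIO window and map it. */
            rscr = &t7l66xb->rscr;
            rscr->name = "t7l66xb-core";
            rscr->start = iomem->start;
            rscr->end = iomem->start + 0xff;        /* assumed SCR window size */
            rscr->flags = IORESOURCE_MEM;

            ret = request_resource(iomem, rscr);
            if (ret)
                    goto err_put_clk48m;

            t7l66xb->scr = ioremap(rscr->start, resource_size(rscr));
            if (!t7l66xb->scr) {
                    ret = -ENOMEM;
                    goto err_release;
            }

            ret = clk_prepare_enable(t7l66xb->clk48m);
            if (ret)
                    goto err_unmap;

            /* 0xbf leaves only bit 6 unmasked (1 = masked, as in the mask callback above). */
            tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR);

            printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n",
                   dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID),
                   (unsigned long)iomem->start, t7l66xb->irq);

            t7l66xb_attach_irq(dev);

            ret = mfd_add_devices(&dev->dev, dev->id, t7l66xb_cells,   /* assumed cell table */
                                  ARRAY_SIZE(t7l66xb_cells), iomem,
                                  t7l66xb->irq_base, NULL);
            if (!ret)
                    return 0;

            t7l66xb_detach_irq(dev);
            clk_disable_unprepare(t7l66xb->clk48m);
    err_unmap:
            iounmap(t7l66xb->scr);
    err_release:
            release_resource(&t7l66xb->rscr);
    err_put_clk48m:
            clk_put(t7l66xb->clk48m);
    err_put_clk32k:
            clk_put(t7l66xb->clk32k);
    err_free:
            kfree(t7l66xb);
            return ret;
    }
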
408 struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
410 clk_disable_unprepare(t7l66xb->clk48m);
411 clk_put(t7l66xb->clk48m);
412 clk_disable_unprepare(t7l66xb->clk32k);
413 clk_put(t7l66xb->clk32k);
415 iounmap(t7l66xb->scr);
416 release_resource(&t7l66xb->rscr);
418 kfree(t7l66xb);
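
The remove path (lines 408-418) tears down in the same order as the probe-failure unwind: stop and release both clocks, unmap and release the SCR window, free the state. A short hedged sketch; steps that never reference the t7l66xb state pointer (unregistering the MFD cells, detaching the IRQs, any pdata disable hook) would not show up in this listing and are assumed:

    /* Sketch of remove; the first two calls are assumed, the rest mirror lines 408-418. */
    static int t7l66xb_remove(struct platform_device *dev)
    {
            struct t7l66xb *t7l66xb = platform_get_drvdata(dev);

            mfd_remove_devices(&dev->dev);          /* assumed: unregister the child cells */
            t7l66xb_detach_irq(dev);                /* assumed: mirror of the attach above */

            clk_disable_unprepare(t7l66xb->clk48m);
            clk_put(t7l66xb->clk48m);
            clk_disable_unprepare(t7l66xb->clk32k);
            clk_put(t7l66xb->clk32k);

            iounmap(t7l66xb->scr);
            release_resource(&t7l66xb->rscr);

            kfree(t7l66xb);

            return 0;
    }
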
425 .name = "t7l66xb",
440 MODULE_ALIAS("platform:t7l66xb");