Lines matching refs: ucb

36  *	@ucb: UCB1x00 structure describing chip
50 void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
54 spin_lock_irqsave(&ucb->io_lock, flags);
55 ucb->io_dir |= out;
56 ucb->io_dir &= ~in;
58 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
59 spin_unlock_irqrestore(&ucb->io_lock, flags);
64 * @ucb: UCB1x00 structure describing chip
78 void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
82 spin_lock_irqsave(&ucb->io_lock, flags);
83 ucb->io_out |= set;
84 ucb->io_out &= ~clear;
86 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
87 spin_unlock_irqrestore(&ucb->io_lock, flags);
92 * @ucb: UCB1x00 structure describing chip
102 unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
104 return ucb1x00_reg_read(ucb, UCB_IO_DATA);
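Lines 50, 78 and 102 together form the chip's IO helper API: ucb1x00_io_set_dir() switches pins between input and output, ucb1x00_io_write() sets and clears output bits under io_lock, and ucb1x00_io_read() returns the raw UCB_IO_DATA register. A minimal usage sketch follows, assuming the caller already holds a struct ucb1x00 from the core and that pin 0 is free; the mask is purely illustrative.

#include <linux/mfd/ucb1x00.h>

/* Sketch only: drive UCB1x00 IO pin 0 as an output, then read the pin
 * levels back.  ucb1x00_enable()/ucb1x00_disable() bracket the register
 * accesses, mirroring what the gpiochip callbacks below do. */
static unsigned int example_pulse_pin0(struct ucb1x00 *ucb)
{
	unsigned int levels;

	ucb1x00_enable(ucb);
	ucb1x00_io_set_dir(ucb, 0, 1 << 0);	/* no new inputs, pin 0 output */
	ucb1x00_io_write(ucb, 1 << 0, 0);	/* set pin 0, clear nothing */
	levels = ucb1x00_io_read(ucb);		/* raw UCB_IO_DATA contents */
	ucb1x00_disable(ucb);

	return levels;
}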
109 struct ucb1x00 *ucb = gpiochip_get_data(chip);
112 spin_lock_irqsave(&ucb->io_lock, flags);
114 ucb->io_out |= 1 << offset;
116 ucb->io_out &= ~(1 << offset);
118 ucb1x00_enable(ucb);
119 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
120 ucb1x00_disable(ucb);
121 spin_unlock_irqrestore(&ucb->io_lock, flags);
126 struct ucb1x00 *ucb = gpiochip_get_data(chip);
129 ucb1x00_enable(ucb);
130 val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
131 ucb1x00_disable(ucb);
138 struct ucb1x00 *ucb = gpiochip_get_data(chip);
141 spin_lock_irqsave(&ucb->io_lock, flags);
142 ucb->io_dir &= ~(1 << offset);
143 ucb1x00_enable(ucb);
144 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
145 ucb1x00_disable(ucb);
146 spin_unlock_irqrestore(&ucb->io_lock, flags);
154 struct ucb1x00 *ucb = gpiochip_get_data(chip);
158 spin_lock_irqsave(&ucb->io_lock, flags);
159 old = ucb->io_out;
161 ucb->io_out |= mask;
163 ucb->io_out &= ~mask;
165 ucb1x00_enable(ucb);
166 if (old != ucb->io_out)
167 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
169 if (!(ucb->io_dir & mask)) {
170 ucb->io_dir |= mask;
171 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
173 ucb1x00_disable(ucb);
174 spin_unlock_irqrestore(&ucb->io_lock, flags);
181 struct ucb1x00 *ucb = gpiochip_get_data(chip);
183 return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
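Line 183 is the gpiochip to_irq hook: each IO pin maps to a Linux interrupt at ucb->irq_base + offset, within the block of sixteen descriptors allocated at line 545 below. A hedged sketch of a sub-driver requesting one of those interrupts directly; the offset 0, the handler and the cookie are hypothetical, and real users would pick one of the UCB_IRQ_* source numbers from ucb1x00.h.

#include <linux/interrupt.h>
#include <linux/mfd/ucb1x00.h>

/* Sketch only: request one of the interrupts the core allocates at
 * ucb->irq_base.  The chip's irq_chip supports edge types (see the
 * set_type handler at line 338 onwards), so trigger flags apply. */
static int example_request_irq(struct ucb1x00 *ucb, irq_handler_t handler,
			       void *cookie)
{
	return request_irq(ucb->irq_base + 0, handler,
			   IRQF_TRIGGER_RISING, "ucb1x00-example", cookie);
}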
197 * @ucb: UCB1x00 structure describing chip
211 void ucb1x00_adc_enable(struct ucb1x00 *ucb)
213 mutex_lock(&ucb->adc_mutex);
215 ucb->adc_cr |= UCB_ADC_ENA;
217 ucb1x00_enable(ucb);
218 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
223 * @ucb: UCB1x00 structure describing chip
237 unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
244 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
245 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);
248 val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
261 * @ucb: UCB1x00 structure describing chip
265 void ucb1x00_adc_disable(struct ucb1x00 *ucb)
267 ucb->adc_cr &= ~UCB_ADC_ENA;
268 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
269 ucb1x00_disable(ucb);
271 mutex_unlock(&ucb->adc_mutex);
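Lines 211 through 271 are the ADC API: ucb1x00_adc_enable() takes adc_mutex and powers the converter, ucb1x00_adc_read() runs a conversion on one channel (optionally hardware-synchronised) and returns the result, and ucb1x00_adc_disable() powers down and drops the mutex. A minimal sketch of one conversion, assuming the UCB_ADC_INP_AD0 channel and UCB_NOSYNC mode constants from ucb1x00.h.

#include <linux/mfd/ucb1x00.h>

/* Sketch only: a single polled conversion on ADC input 0.  The enable/
 * disable pair must bracket every ucb1x00_adc_read() call, since enable
 * is what acquires adc_mutex. */
static unsigned int example_read_ad0(struct ucb1x00 *ucb)
{
	unsigned int val;

	ucb1x00_adc_enable(ucb);
	val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);
	ucb1x00_adc_disable(ucb);

	return val;
}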
284 struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
287 ucb1x00_enable(ucb);
288 isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
289 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
290 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
294 generic_handle_irq(ucb->irq_base + i);
295 ucb1x00_disable(ucb);
298 static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
300 ucb1x00_enable(ucb);
301 if (ucb->irq_ris_enbl & mask)
302 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
303 ucb->irq_mask);
304 if (ucb->irq_fal_enbl & mask)
305 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
306 ucb->irq_mask);
307 ucb1x00_disable(ucb);
316 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
317 unsigned mask = 1 << (data->irq - ucb->irq_base);
319 raw_spin_lock(&ucb->irq_lock);
320 ucb->irq_mask &= ~mask;
321 ucb1x00_irq_update(ucb, mask);
322 raw_spin_unlock(&ucb->irq_lock);
327 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
328 unsigned mask = 1 << (data->irq - ucb->irq_base);
330 raw_spin_lock(&ucb->irq_lock);
331 ucb->irq_mask |= mask;
332 ucb1x00_irq_update(ucb, mask);
333 raw_spin_unlock(&ucb->irq_lock);
338 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
339 unsigned mask = 1 << (data->irq - ucb->irq_base);
341 raw_spin_lock(&ucb->irq_lock);
343 ucb->irq_ris_enbl |= mask;
345 ucb->irq_ris_enbl &= ~mask;
348 ucb->irq_fal_enbl |= mask;
350 ucb->irq_fal_enbl &= ~mask;
351 if (ucb->irq_mask & mask) {
352 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
353 ucb->irq_mask);
354 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
355 ucb->irq_mask);
357 raw_spin_unlock(&ucb->irq_lock);
364 struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
365 struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
366 unsigned mask = 1 << (data->irq - ucb->irq_base);
371 raw_spin_lock(&ucb->irq_lock);
373 ucb->irq_wake |= mask;
375 ucb->irq_wake &= ~mask;
376 raw_spin_unlock(&ucb->irq_lock);
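Lines 364 through 376 are the irq_chip set_wake hook: it records the wake mask that the suspend/resume paths at the end of the listing program into UCB_IE_RIS/UCB_IE_FAL, and it only succeeds when the platform data marks the part wakeup-capable. A sub-driver reaches it through the generic wake API; a sketch, with the interrupt offset again hypothetical.

#include <linux/interrupt.h>
#include <linux/mfd/ucb1x00.h>

/* Sketch only: flag one UCB1x00 interrupt as a wakeup source; this ends
 * up in ucb1x00_irq_set_wake() above and is refused unless the platform
 * declared the chip wakeup-capable. */
static int example_enable_wake(struct ucb1x00 *ucb)
{
	return irq_set_irq_wake(ucb->irq_base + 0, 1);
}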
390 static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
399 dev->ucb = ucb;
408 list_add_tail(&dev->dev_node, &ucb->devs);
441 static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
450 ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
451 ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
452 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
453 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
458 ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
459 ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);
464 while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
465 ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);
470 ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
471 ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
472 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
473 ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
483 struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);
484 kfree(ucb);
496 struct ucb1x00 *ucb;
513 ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
515 if (!ucb)
518 device_initialize(&ucb->dev);
519 ucb->dev.class = &ucb1x00_class;
520 ucb->dev.parent = &mcp->attached_device;
521 dev_set_name(&ucb->dev, "ucb1x00");
523 raw_spin_lock_init(&ucb->irq_lock);
524 spin_lock_init(&ucb->io_lock);
525 mutex_init(&ucb->adc_mutex);
527 ucb->id = id;
528 ucb->mcp = mcp;
530 ret = device_add(&ucb->dev);
534 ucb1x00_enable(ucb);
535 ucb->irq = ucb1x00_detect_irq(ucb);
536 ucb1x00_disable(ucb);
537 if (!ucb->irq) {
538 dev_err(&ucb->dev, "IRQ probe failed\n");
543 ucb->gpio.base = -1;
545 ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
546 if (ucb->irq_base < 0) {
547 dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
548 ucb->irq_base);
549 ret = ucb->irq_base;
554 unsigned irq = ucb->irq_base + i;
557 irq_set_chip_data(irq, ucb);
561 irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
562 irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);
565 ucb->gpio.label = dev_name(&ucb->dev);
566 ucb->gpio.parent = &ucb->dev;
567 ucb->gpio.owner = THIS_MODULE;
568 ucb->gpio.base = pdata->gpio_base;
569 ucb->gpio.ngpio = 10;
570 ucb->gpio.set = ucb1x00_gpio_set;
571 ucb->gpio.get = ucb1x00_gpio_get;
572 ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
573 ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
574 ucb->gpio.to_irq = ucb1x00_to_irq;
575 ret = gpiochip_add_data(&ucb->gpio, ucb);
579 dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
581 mcp_set_drvdata(mcp, ucb);
584 device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);
586 INIT_LIST_HEAD(&ucb->devs);
588 list_add_tail(&ucb->node, &ucb1x00_devices);
590 ucb1x00_add_dev(ucb, drv);
597 irq_set_chained_handler(ucb->irq, NULL);
599 if (ucb->irq_base > 0)
600 irq_free_descs(ucb->irq_base, 16);
602 device_del(&ucb->dev);
604 put_device(&ucb->dev);
614 struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
618 list_del(&ucb->node);
619 list_for_each_safe(l, n, &ucb->devs) {
625 if (ucb->gpio.base != -1)
626 gpiochip_remove(&ucb->gpio);
628 irq_set_chained_handler(ucb->irq, NULL);
629 irq_free_descs(ucb->irq_base, 16);
630 device_unregister(&ucb->dev);
638 struct ucb1x00 *ucb;
643 list_for_each_entry(ucb, &ucb1x00_devices, node) {
644 ucb1x00_add_dev(ucb, drv);
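Lines 638 through 644 are the driver-registration path: a newly registered ucb1x00_driver is walked over every known chip and bound by ucb1x00_add_dev() (line 390), which allocates a ucb1x00_dev whose ->ucb points at the chip. A sketch of a minimal sub-driver, assuming the add/remove callbacks and the ucb1x00_register_driver()/ucb1x00_unregister_driver() entry points declared in ucb1x00.h; all example_* names are hypothetical.

#include <linux/module.h>
#include <linux/mfd/ucb1x00.h>

static int example_add(struct ucb1x00_dev *dev)
{
	/* dev->ucb is the chip this instance was bound to */
	dev_info(&dev->ucb->dev, "example sub-driver bound\n");
	return 0;
}

static void example_remove(struct ucb1x00_dev *dev)
{
}

static struct ucb1x00_driver example_driver = {
	.add	= example_add,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return ucb1x00_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	ucb1x00_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");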
666 struct ucb1x00 *ucb = dev_get_drvdata(dev);
670 list_for_each_entry(udev, &ucb->devs, dev_node) {
676 if (ucb->irq_wake) {
679 raw_spin_lock_irqsave(&ucb->irq_lock, flags);
680 ucb1x00_enable(ucb);
681 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
682 ucb->irq_wake);
683 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
684 ucb->irq_wake);
685 ucb1x00_disable(ucb);
686 raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
688 enable_irq_wake(ucb->irq);
698 struct ucb1x00 *ucb = dev_get_drvdata(dev);
701 if (!ucb->irq_wake && pdata && pdata->reset)
704 ucb1x00_enable(ucb);
705 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
706 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
708 if (ucb->irq_wake) {
711 raw_spin_lock_irqsave(&ucb->irq_lock, flags);
712 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
713 ucb->irq_mask);
714 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
715 ucb->irq_mask);
716 raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);
718 disable_irq_wake(ucb->irq);
720 ucb1x00_disable(ucb);
723 list_for_each_entry(udev, &ucb->devs, dev_node) {