Lines matching refs:trig (references to the identifier "trig" in drivers/iio/industrialio-trigger.c)
52 struct iio_trigger *trig = to_iio_trigger(dev);
53 return sprintf(buf, "%s\n", trig->name);
118 int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig)
120 if (!indio_dev || !trig)
126 indio_dev->trig = iio_trigger_get(trig);
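A minimal usage sketch (not part of this file): a driver whose device only ever works with its own trigger can pin the association after registering both objects, so a later write to current_trigger is rejected. The wrapper name is hypothetical.

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>

    /* Hedged sketch: hard-wire indio_dev to an already-registered trigger;
     * the core takes its own reference via iio_trigger_get() as seen above. */
    static int example_pin_own_trigger(struct iio_dev *indio_dev,
                                       struct iio_trigger *trig)
    {
            return iio_trigger_set_immutable(indio_dev, trig);
    }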
148 struct iio_trigger *trig = NULL, *iter;
153 trig = iter;
154 iio_trigger_get(trig);
159 return trig;
162 void iio_trigger_poll(struct iio_trigger *trig)
166 if (!atomic_read(&trig->use_count)) {
167 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
170 if (trig->subirqs[i].enabled)
171 generic_handle_irq(trig->subirq_base + i);
173 iio_trigger_notify_done(trig);
186 void iio_trigger_poll_chained(struct iio_trigger *trig)
190 if (!atomic_read(&trig->use_count)) {
191 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
194 if (trig->subirqs[i].enabled)
195 handle_nested_irq(trig->subirq_base + i);
197 iio_trigger_notify_done(trig);
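For context, a hedged sketch of how a trigger provider typically feeds these two entry points (driver-side code, not from this file): iio_trigger_poll() is meant for hard-IRQ context and fans out through generic_handle_irq(), while iio_trigger_poll_chained() is for thread context and uses handle_nested_irq(). The handler names are hypothetical; passing the trigger itself as the IRQ cookie mirrors the in-tree helper iio_trigger_generic_data_rdy_poll().

    #include <linux/interrupt.h>
    #include <linux/iio/trigger.h>

    /* Hard-IRQ path: safe to call directly from the top-half handler. */
    static irqreturn_t example_trig_hard_handler(int irq, void *private)
    {
            iio_trigger_poll(private);
            return IRQ_HANDLED;
    }

    /* Threaded path: use when the poll is issued from nested/thread context. */
    static irqreturn_t example_trig_thread_handler(int irq, void *private)
    {
            iio_trigger_poll_chained(private);
            return IRQ_HANDLED;
    }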
203 void iio_trigger_notify_done(struct iio_trigger *trig)
205 if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
206 trig->ops->try_reenable)
207 if (trig->ops->try_reenable(trig))
209 iio_trigger_poll(trig);
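A hedged sketch of the consumer side (not from this file): each poll-function bottom half is expected to call iio_trigger_notify_done() when it finishes, which is what drives use_count back to zero and, via try_reenable(), a possible immediate re-poll. The handler name is hypothetical and the actual sample read is elided.

    #include <linux/interrupt.h>
    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>
    #include <linux/iio/trigger_consumer.h>

    /* Hedged sketch of a triggered-buffer bottom half. */
    static irqreturn_t example_buffer_handler(int irq, void *p)
    {
            struct iio_poll_func *pf = p;
            struct iio_dev *indio_dev = pf->indio_dev;

            /* ... read the sample and push it to the buffer here ... */

            iio_trigger_notify_done(indio_dev->trig);
            return IRQ_HANDLED;
    }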
214 static int iio_trigger_get_irq(struct iio_trigger *trig)
217 mutex_lock(&trig->pool_lock);
218 ret = bitmap_find_free_region(trig->pool,
221 mutex_unlock(&trig->pool_lock);
223 ret += trig->subirq_base;
228 static void iio_trigger_put_irq(struct iio_trigger *trig, int irq)
230 mutex_lock(&trig->pool_lock);
231 clear_bit(irq - trig->subirq_base, trig->pool);
232 mutex_unlock(&trig->pool_lock);
242 int iio_trigger_attach_poll_func(struct iio_trigger *trig,
247 = bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
253 pf->irq = iio_trigger_get_irq(trig);
256 trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
268 if (trig->ops && trig->ops->set_trigger_state && notinuse) {
269 ret = trig->ops->set_trigger_state(trig, true);
279 if (pf->indio_dev->dev.parent == trig->dev.parent)
280 trig->attached_own_device = true;
287 iio_trigger_put_irq(trig, pf->irq);
293 int iio_trigger_detach_poll_func(struct iio_trigger *trig,
298 = (bitmap_weight(trig->pool,
301 if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
302 ret = trig->ops->set_trigger_state(trig, false);
306 if (pf->indio_dev->dev.parent == trig->dev.parent)
307 trig->attached_own_device = false;
308 iio_trigger_put_irq(trig, pf->irq);
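Drivers normally never call the attach/detach pair above themselves; the core does it when a triggered buffer is enabled or disabled. A hedged sketch of the usual way a driver ends up with a poll function at all, via the triggered-buffer helper (the function name and NULL setup ops are assumptions; example_buffer_handler is the hypothetical bottom half sketched earlier):

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger_consumer.h>
    #include <linux/iio/triggered_buffer.h>

    /* Hedged sketch: allocate the poll function whose irq the core later
     * attaches to / detaches from the current trigger. */
    static int example_setup_buffer(struct iio_dev *indio_dev)
    {
            return iio_triggered_buffer_setup(indio_dev,
                                              iio_pollfunc_store_time, /* top half */
                                              example_buffer_handler,  /* bottom half */
                                              NULL);
    }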
379 if (indio_dev->trig)
380 return sprintf(buf, "%s\n", indio_dev->trig->name);
404 struct iio_trigger *oldtrig = indio_dev->trig;
405 struct iio_trigger *trig;
419 trig = iio_trigger_acquire_by_name(buf);
420 if (oldtrig == trig) {
425 if (trig && indio_dev->info->validate_trigger) {
426 ret = indio_dev->info->validate_trigger(indio_dev, trig);
431 if (trig && trig->ops && trig->ops->validate_device) {
432 ret = trig->ops->validate_device(trig, indio_dev);
437 indio_dev->trig = trig;
445 if (indio_dev->trig) {
447 iio_trigger_attach_poll_func(indio_dev->trig,
454 if (trig)
455 iio_trigger_put(trig);
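The validate_trigger() hook consulted at line 426 gives the device a veto when userspace writes current_trigger. A hedged sketch of such a callback for a hypothetical driver that only accepts the trigger it registered itself (struct example_state and its trig field are assumptions):

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>

    struct example_state {
            struct iio_trigger *trig;       /* trigger this driver registered */
    };

    /* Hedged sketch: reject any trigger other than the driver's own. */
    static int example_validate_trigger(struct iio_dev *indio_dev,
                                        struct iio_trigger *trig)
    {
            struct example_state *st = iio_priv(indio_dev);

            return st->trig == trig ? 0 : -EINVAL;
    }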
475 struct iio_trigger *trig = to_iio_trigger(device);
478 if (trig->subirq_base) {
480 irq_modify_status(trig->subirq_base + i,
483 irq_set_chip(trig->subirq_base + i,
485 irq_set_handler(trig->subirq_base + i,
489 irq_free_descs(trig->subirq_base,
492 kfree(trig->name);
493 kfree(trig);
504 struct iio_trigger *trig
507 trig->subirqs[d->irq - trig->subirq_base].enabled = false;
513 struct iio_trigger *trig
516 trig->subirqs[d->irq - trig->subirq_base].enabled = true;
522 struct iio_trigger *trig;
525 trig = kzalloc(sizeof *trig, GFP_KERNEL);
526 if (!trig)
529 trig->dev.type = &iio_trig_type;
530 trig->dev.bus = &iio_bus_type;
531 device_initialize(&trig->dev);
533 mutex_init(&trig->pool_lock);
534 trig->subirq_base = irq_alloc_descs(-1, 0,
537 if (trig->subirq_base < 0)
540 trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
541 if (trig->name == NULL)
544 trig->subirq_chip.name = trig->name;
545 trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
546 trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
548 irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
549 irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
550 irq_modify_status(trig->subirq_base + i,
554 return trig;
557 irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
559 kfree(trig);
565 struct iio_trigger *trig;
569 trig = viio_trigger_alloc(fmt, vargs);
572 return trig;
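A hedged sketch of the caller's side of this allocation path, assuming the older iio_trigger_alloc(fmt, ...) signature that matches the viio_trigger_alloc(fmt, vargs) call above (newer kernels add a parent device argument). The name format, ops and parent handling are driver-specific assumptions.

    #include <linux/err.h>
    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>

    /* Hedged sketch: allocate, set up and register a trigger for indio_dev. */
    static struct iio_trigger *
    example_alloc_trigger(struct iio_dev *indio_dev, struct device *parent,
                          const struct iio_trigger_ops *ops)
    {
            struct iio_trigger *trig;
            int ret;

            trig = iio_trigger_alloc("%s-dev%d", indio_dev->name, indio_dev->id);
            if (!trig)
                    return ERR_PTR(-ENOMEM);

            trig->dev.parent = parent;
            trig->ops = ops;
            iio_trigger_set_drvdata(trig, indio_dev);

            ret = iio_trigger_register(trig);
            if (ret) {
                    iio_trigger_free(trig);
                    return ERR_PTR(ret);
            }
            return trig;
    }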
576 void iio_trigger_free(struct iio_trigger *trig)
578 if (trig)
579 put_device(&trig->dev);
605 struct iio_trigger **ptr, *trig;
615 trig = viio_trigger_alloc(fmt, vargs);
617 if (trig) {
618 *ptr = trig;
624 return trig;
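The device-managed variant wraps the same allocation; a hedged sketch of its use follows, again assuming the older devm_iio_trigger_alloc(dev, fmt, ...) signature. Everything except the two devm_* calls and iio_trigger_set_drvdata() is an assumption.

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>

    /* Hedged sketch: devm-managed allocation and registration. */
    static int example_devm_trigger(struct device *dev, struct iio_dev *indio_dev,
                                    const struct iio_trigger_ops *ops)
    {
            struct iio_trigger *trig;

            trig = devm_iio_trigger_alloc(dev, "%s-trig%d",
                                          indio_dev->name, indio_dev->id);
            if (!trig)
                    return -ENOMEM;

            trig->ops = ops;
            iio_trigger_set_drvdata(trig, indio_dev);

            return devm_iio_trigger_register(dev, trig);
    }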
671 return indio_dev->trig->attached_own_device;
678 * @trig: The IIO trigger to check
687 int iio_trigger_validate_own_device(struct iio_trigger *trig,
690 if (indio_dev->dev.parent != trig->dev.parent)
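A hedged sketch of how a trigger provider typically consumes this helper: plugging it into validate_device restricts the trigger to IIO devices that share its parent. example_set_trigger_state is a hypothetical callback standing in for the driver's real enable/disable logic.

    #include <linux/iio/iio.h>
    #include <linux/iio/trigger.h>

    /* Hedged sketch of a provider's trigger ops. */
    static int example_set_trigger_state(struct iio_trigger *trig, bool state)
    {
            /* Enable or disable the hardware data-ready source here. */
            return 0;
    }

    static const struct iio_trigger_ops example_trigger_ops = {
            .set_trigger_state = example_set_trigger_state,
            .validate_device = iio_trigger_validate_own_device,
    };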
705 if (indio_dev->trig)
706 iio_trigger_put(indio_dev->trig);