Lines matching refs:inst
57 #define for_each_handle_rcu(qh, inst) \
58 list_for_each_entry_rcu(qh, &inst->handles, list, \
61 #define for_each_instance(idx, inst, kdev) \
62 for (idx = 0, inst = kdev->instances; \
64 idx++, inst = knav_queue_idx_to_inst(kdev, idx))
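
The continuation lines of both iterator macros are not shown because they do not mention "inst". A plausible completion, for orientation only; the lockdep condition and the loop bound are assumptions, not taken from the listing:

    /* walk every open handle of a queue instance; callers hold rcu_read_lock() */
    #define for_each_handle_rcu(qh, inst) \
        list_for_each_entry_rcu(qh, &inst->handles, list, \
                                lockdep_is_held(&knav_dev_lock))

    /* walk every queue instance managed by a knav device */
    #define for_each_instance(idx, inst, kdev) \
        for (idx = 0, inst = kdev->instances; \
             idx < (kdev)->num_queues_in_use; \
             idx++, inst = knav_queue_idx_to_inst(kdev, idx))
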
82 * @inst: - qmss queue instance like accumulator
84 void knav_queue_notify(struct knav_queue_inst *inst)
88 if (!inst)
92 for_each_handle_rcu(qh, inst) {
106 struct knav_queue_inst *inst = _instdata;
108 knav_queue_notify(inst);
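
Lines 82-108 show knav_queue_notify() fanning an interrupt or accumulator event out to every handle open on the instance, and the interrupt handler simply forwarding its dev_id cookie to it. A minimal sketch of that fan-out, assuming each handle carries a notifier_fn/notifier_fn_arg pair and a notifier_enabled count (those field names are inferred, not shown in the listing):

    void knav_queue_notify(struct knav_queue_inst *inst)
    {
        struct knav_queue *qh;

        if (!inst)
            return;

        rcu_read_lock();
        for_each_handle_rcu(qh, inst) {
            /* only handles that enabled notification get the callback */
            if (atomic_read(&qh->notifier_enabled) <= 0)
                continue;
            if (WARN_ON(!qh->notifier_fn))
                continue;
            qh->notifier_fn(qh->notifier_fn_arg);
        }
        rcu_read_unlock();
    }

    static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
    {
        struct knav_queue_inst *inst = _instdata;

        knav_queue_notify(inst);
        return IRQ_HANDLED;
    }
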
113 struct knav_queue_inst *inst)
115 unsigned queue = inst->id - range->queue_base;
121 inst->irq_name, inst);
137 static void knav_queue_free_irq(struct knav_queue_inst *inst)
139 struct knav_range_info *range = inst->range;
140 unsigned queue = inst->id - inst->range->queue_base;
146 free_irq(irq, inst);
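
Lines 113-146 are the per-queue IRQ plumbing: the queue's offset inside its range selects the interrupt, request_irq() registers the instance itself as the cookie (matching the _instdata cast above), and knav_queue_free_irq() releases the same pairing. A sketch under the assumption that the range keeps its interrupts in an irqs[] array:

    static int knav_queue_setup_irq(struct knav_range_info *range,
                                    struct knav_queue_inst *inst)
    {
        unsigned queue = inst->id - range->queue_base;
        int ret = 0, irq;

        if (range->flags & RANGE_HAS_IRQ) {
            irq = range->irqs[queue].irq;
            ret = request_irq(irq, knav_queue_int_handler, 0,
                              inst->irq_name, inst);
            if (ret)
                return ret;
            inst->irq_num = irq;
        }
        return ret;
    }

    static void knav_queue_free_irq(struct knav_queue_inst *inst)
    {
        struct knav_range_info *range = inst->range;
        unsigned queue = inst->id - inst->range->queue_base;
        int irq;

        if (range->flags & RANGE_HAS_IRQ) {
            irq = range->irqs[queue].irq;
            free_irq(irq, inst);
        }
    }
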
150 static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
152 return !list_empty(&inst->handles);
155 static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
157 return inst->range->flags & RANGE_RESERVED;
160 static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
165 for_each_handle_rcu(tmp, inst) {
175 static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
179 (inst->range->flags & RANGE_HAS_IRQ)) {
182 (inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
185 !(inst->range->flags &
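
The helpers on lines 150-185 are the admission checks used when a queue is opened: busy means at least one handle already exists, reserved ranges are skipped during by-type allocation, shared means some existing handle was opened with KNAV_QUEUE_SHARED, and match_type maps KNAV_QUEUE_QPEND / KNAV_QUEUE_ACC / KNAV_QUEUE_GP onto the range flags visible above. A sketch of the truncated knav_queue_is_shared() body, assuming the handle's flags field holds the caller's open flags:

    static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
    {
        bool ret = false;
        struct knav_queue *tmp;

        rcu_read_lock();
        for_each_handle_rcu(tmp, inst) {
            if (tmp->flags & KNAV_QUEUE_SHARED) {
                ret = true;
                break;
            }
        }
        rcu_read_unlock();
        return ret;
    }
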
195 struct knav_queue_inst *inst;
198 for_each_instance(idx, inst, kdev) {
199 if (inst->id == id)
200 return inst;
215 static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
222 qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
233 qh->inst = inst;
234 id = inst->id - inst->qmgr->start_queue;
235 qh->reg_push = &inst->qmgr->reg_push[id];
236 qh->reg_pop = &inst->qmgr->reg_pop[id];
237 qh->reg_peek = &inst->qmgr->reg_peek[id];
240 if (!knav_queue_is_busy(inst)) {
241 struct knav_range_info *range = inst->range;
243 inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
245 ret = range->ops->open_queue(range, inst, flags);
250 list_add_tail_rcu(&qh->list, &inst->handles);
256 devm_kfree(inst->kdev->dev, qh);
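
Lines 215-256 outline __knav_queue_open(): every caller gets its own struct knav_queue handle wired to the queue manager's push/pop/peek register blocks, but only the first handle on an otherwise idle instance names the queue and asks the range driver to open the hardware side; later handles simply join the RCU list. A condensed sketch of that flow (per-handle statistics and some error handling are left out, and the open_queue guard is an assumption):

    static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
                                                const char *name, unsigned flags)
    {
        struct knav_queue *qh;
        unsigned id;
        int ret = 0;

        qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
        if (!qh)
            return ERR_PTR(-ENOMEM);

        qh->flags = flags;
        qh->inst = inst;
        id = inst->id - inst->qmgr->start_queue;
        qh->reg_push = &inst->qmgr->reg_push[id];
        qh->reg_pop = &inst->qmgr->reg_pop[id];
        qh->reg_peek = &inst->qmgr->reg_peek[id];

        /* first opener sets up the hardware queue through the range driver */
        if (!knav_queue_is_busy(inst)) {
            struct knav_range_info *range = inst->range;

            inst->name = kstrndup(name, KNAV_NAME_SIZE - 1, GFP_KERNEL);
            if (range->ops && range->ops->open_queue)
                ret = range->ops->open_queue(range, inst, flags);
            if (ret) {
                devm_kfree(inst->kdev->dev, qh);
                return ERR_PTR(ret);
            }
        }
        list_add_tail_rcu(&qh->list, &inst->handles);
        return qh;
    }
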
263 struct knav_queue_inst *inst;
269 inst = knav_queue_find_by_id(id);
270 if (!inst)
274 if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
279 (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
282 qh = __knav_queue_open(inst, name, flags);
293 struct knav_queue_inst *inst;
299 for_each_instance(idx, inst, kdev) {
300 if (knav_queue_is_reserved(inst))
302 if (!knav_queue_match_type(inst, type))
304 if (knav_queue_is_busy(inst))
306 qh = __knav_queue_open(inst, name, flags);
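
Lines 263-306 are the two front ends: knav_queue_open_by_id() refuses to share a busy queue unless both the existing handles and the new caller asked for KNAV_QUEUE_SHARED, while knav_queue_open_by_type() scans all instances for an idle, unreserved queue of the requested type. Client drivers reach both through the exported knav_queue_open(); a usage sketch (queue number and names below are made up):

    #include <linux/err.h>
    #include <linux/soc/ti/knav_qmss.h>

    static void *my_txq, *my_freeq;    /* hypothetical client handles */

    static int my_open_queues(void)
    {
        /* open a specific hardware queue by number, tolerating other users */
        my_txq = knav_queue_open("my-tx", 8704, KNAV_QUEUE_SHARED);
        if (IS_ERR(my_txq))
            return PTR_ERR(my_txq);

        /* or let the driver pick any idle general-purpose queue */
        my_freeq = knav_queue_open("my-free", KNAV_QUEUE_GP, 0);
        if (IS_ERR(my_freeq)) {
            knav_queue_close(my_txq);
            return PTR_ERR(my_freeq);
        }
        return 0;
    }
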
315 static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
317 struct knav_range_info *range = inst->range;
320 range->ops->set_notify(range, inst, enabled);
325 struct knav_queue_inst *inst = qh->inst;
337 first = (atomic_inc_return(&inst->num_notifiers) == 1);
339 knav_queue_set_notify(inst, true);
346 struct knav_queue_inst *inst = qh->inst;
353 last = (atomic_dec_return(&inst->num_notifiers) == 0);
355 knav_queue_set_notify(inst, false);
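
Lines 315-355 implement notification on/off as a reference count: each handle can enable or disable its own callback, but only the transition of the per-instance num_notifiers count between 0 and 1 actually reaches the range driver through knav_queue_set_notify(). A sketch of the enable side under that assumption (the per-handle notifier_enabled counter is inferred); the disable path mirrors it with atomic_dec_return(), as line 353 suggests:

    static void knav_queue_enable_notifier(struct knav_queue *qh)
    {
        struct knav_queue_inst *inst = qh->inst;
        bool first;

        if (WARN_ON(!qh->notifier_fn))
            return;

        /* per-handle count: only this handle's first enable continues */
        first = (atomic_inc_return(&qh->notifier_enabled) == 1);
        if (!first)
            return;

        /* per-instance count: only the first notifier arms the hardware */
        first = (atomic_inc_return(&inst->num_notifiers) == 1);
        if (first)
            knav_queue_set_notify(inst, true);
    }
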
368 if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
384 struct knav_queue_inst *inst,
390 queue = inst->id - range->queue_base;
400 struct knav_queue_inst *inst, unsigned flags)
402 return knav_queue_setup_irq(range, inst);
406 struct knav_queue_inst *inst)
408 knav_queue_free_irq(inst);
422 struct knav_queue_inst *inst = qh->inst;
425 atomic_read(&inst->desc_count);
429 struct knav_queue_inst *inst)
431 struct knav_device *kdev = inst->kdev;
440 if (!knav_queue_is_busy(inst))
444 kdev->base_id + inst->id, inst->name);
445 for_each_handle_rcu(qh, inst) {
467 struct knav_queue_inst *inst;
474 for_each_instance(idx, inst, kdev)
475 knav_queue_debug_show_instance(s, inst);
504 struct knav_queue_inst *inst = qh->inst;
505 unsigned id = inst->id - inst->qmgr->start_queue;
507 atomic_set(&inst->desc_count, 0);
508 writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
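
Lines 504-508 are essentially the whole flush operation: forget any accumulated descriptor count and write zero to the queue's push register to empty the hardware queue. Rounded out into a sketch (the function name is inferred from context):

    static int knav_queue_flush(struct knav_queue *qh)
    {
        struct knav_queue_inst *inst = qh->inst;
        unsigned id = inst->id - inst->qmgr->start_queue;

        atomic_set(&inst->desc_count, 0);
        writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
        return 0;
    }
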
553 struct knav_queue_inst *inst = qh->inst;
562 if (!knav_queue_is_busy(inst)) {
563 struct knav_range_info *range = inst->range;
566 range->ops->close_queue(range, inst);
569 devm_kfree(inst->kdev->dev, qh);
590 ret = qh->inst->kdev->base_id + qh->inst->id;
657 struct knav_queue_inst *inst = qh->inst;
662 if (inst->descs) {
663 if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
664 atomic_inc(&inst->desc_count);
667 idx = atomic_inc_return(&inst->desc_head);
669 val = inst->descs[idx];
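
The pop path on lines 657-669 distinguishes accumulated ranges from plain ones: when inst->descs is populated by the accumulator, descriptors come out of that prefetched ring, guarded by desc_count and indexed by desc_head; otherwise (not matched here) the driver reads the pop register directly. From a client's point of view the exported knav_queue_pop() just returns descriptor DMA addresses until the queue is empty; a hypothetical receive drain:

    #include <linux/soc/ti/knav_qmss.h>

    /* notifier callback registered for an RX queue (names are made up) */
    static void my_rx_notify(void *arg)
    {
        void *q = arg;              /* handle returned by knav_queue_open() */
        dma_addr_t dma;
        unsigned int size;

        /* knav_queue_pop() returns 0 once the queue runs dry */
        while ((dma = knav_queue_pop(q, &size))) {
            /* translate dma back to the descriptor and process it ... */
        }
    }
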
1683 struct knav_queue_inst *inst,
1687 inst->qmgr = knav_find_qmgr(id);
1688 if (!inst->qmgr)
1691 INIT_LIST_HEAD(&inst->handles);
1692 inst->kdev = kdev;
1693 inst->range = range;
1694 inst->irq_num = -1;
1695 inst->id = id;
1697 inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
1700 return range->ops->init_queue(range, inst);
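
The final group, lines 1683-1700, is per-instance initialization at probe time: bind the instance to its queue manager, reset its bookkeeping, build an IRQ name, and hand off to the range driver's init_queue hook. A sketch of the surrounding function; the function name, the "hwqueue-%d" format string, and the error codes are assumptions reconstructed around the listed lines:

    static int knav_queue_init_queue(struct knav_device *kdev,
                                     struct knav_range_info *range,
                                     struct knav_queue_inst *inst,
                                     unsigned id)
    {
        char irq_name[KNAV_NAME_SIZE];

        inst->qmgr = knav_find_qmgr(id);
        if (!inst->qmgr)
            return -ENODEV;              /* assumed error return */

        INIT_LIST_HEAD(&inst->handles);
        inst->kdev = kdev;
        inst->range = range;
        inst->irq_num = -1;
        inst->id = id;

        scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);   /* assumed format */
        inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);
        if (!inst->irq_name)
            return -ENOMEM;

        if (range->ops && range->ops->init_queue)
            return range->ops->init_queue(range, inst);
        return 0;
    }
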