Lines matching refs: fq
49 iovad->fq = NULL;
59 return !!iovad->fq;
71 free_percpu(iovad->fq);
73 iovad->fq = NULL;
95 struct iova_fq *fq;
97 fq = per_cpu_ptr(queue, cpu);
98 fq->head = 0;
99 fq->tail = 0;
101 spin_lock_init(&fq->lock);
106 iovad->fq = queue;
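The matches at lines 95-106 show each per-CPU queue being reset to an empty ring, with its lock initialised, before the allocation is installed as iovad->fq. A minimal userspace sketch of that layout and reset, assuming a hypothetical demo_fq struct with a pthread mutex standing in for the per-queue spinlock (all names here are illustrative, not the kernel's), could look like this:

#include <pthread.h>
#include <stdint.h>

#define DEMO_FQ_SIZE 256                 /* ring capacity; stands in for IOVA_FQ_SIZE */

struct demo_fq_entry {
	unsigned long iova_pfn;          /* base PFN of the deferred IOVA range */
	unsigned long pages;             /* length of the range in pages */
	void *data;                      /* cookie handed to the entry destructor */
	uint64_t counter;                /* flush counter sampled at queueing time */
};

struct demo_fq {
	struct demo_fq_entry entries[DEMO_FQ_SIZE];
	unsigned int head;               /* index of the oldest queued entry */
	unsigned int tail;               /* index of the next free slot */
	pthread_mutex_t lock;            /* stands in for the per-queue spinlock */
};

/* Mirrors the reset above: an empty ring (head == tail) with a usable lock. */
static void demo_fq_init(struct demo_fq *fq)
{
	fq->head = 0;
	fq->tail = 0;
	pthread_mutex_init(&fq->lock, NULL);
}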
459 #define fq_ring_for_each(i, fq) \
460 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
462 static inline bool fq_full(struct iova_fq *fq)
464 assert_spin_locked(&fq->lock);
465 return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
468 static inline unsigned fq_ring_add(struct iova_fq *fq)
470 unsigned idx = fq->tail;
472 assert_spin_locked(&fq->lock);
474 fq->tail = (idx + 1) % IOVA_FQ_SIZE;
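The ring itself, at lines 459-474, is just two indices advanced modulo IOVA_FQ_SIZE: iteration runs from head until it meets tail, the queue counts as full when advancing tail would make it collide with head, and fq_ring_add hands back the slot the tail used to occupy. Reusing the demo_fq definitions from the sketch above, the same arithmetic might be written as:

/* Walk every queued entry from oldest (head) up to, but not including, tail. */
#define demo_fq_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % DEMO_FQ_SIZE)

/* One slot is always left unused so that head == tail means "empty", not
 * "full": the ring is full when the tail sits one step behind the head. */
static int demo_fq_full(struct demo_fq *fq)
{
	return ((fq->tail + 1) % DEMO_FQ_SIZE) == fq->head;
}

/* Claim the slot at the current tail and advance the tail; the caller is
 * expected to have checked demo_fq_full() while holding the lock. */
static unsigned int demo_fq_add(struct demo_fq *fq)
{
	unsigned int idx = fq->tail;

	fq->tail = (idx + 1) % DEMO_FQ_SIZE;
	return idx;
}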
479 static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
484 assert_spin_locked(&fq->lock);
486 fq_ring_for_each(idx, fq) {
488 if (fq->entries[idx].counter >= counter)
492 iovad->entry_dtor(fq->entries[idx].data);
495 fq->entries[idx].iova_pfn,
496 fq->entries[idx].pages);
498 fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
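fq_ring_free (lines 479-498) drains from the head but stops at the first entry whose stored counter has not yet been passed by the flush counter, since the flush covering that entry has not completed. A hedged sketch of that drain, built on the demo_fq pieces above, with flush_done standing in for the count of completed flushes and entry_dtor for the per-entry destructor (both parameters are assumptions made for illustration):

/* Free every entry queued before the last completed flush (counter strictly
 * below flush_done), advancing head past it; entries queued at or after that
 * flush are left for a later pass. Caller must hold fq->lock. */
static void demo_fq_ring_free(struct demo_fq *fq, uint64_t flush_done,
			      void (*entry_dtor)(void *data))
{
	unsigned int idx;

	demo_fq_for_each(idx, fq) {
		if (fq->entries[idx].counter >= flush_done)
			break;

		if (entry_dtor)
			entry_dtor(fq->entries[idx].data);

		/* The real code also returns the IOVA range to the allocator
		 * here, using entries[idx].iova_pfn and entries[idx].pages. */
		fq->head = (fq->head + 1) % DEMO_FQ_SIZE;
	}
}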
522 struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
525 fq_ring_for_each(idx, fq)
526 iovad->entry_dtor(fq->entries[idx].data);
540 struct iova_fq *fq;
542 fq = per_cpu_ptr(iovad->fq, cpu);
543 spin_lock_irqsave(&fq->lock, flags);
544 fq_ring_free(iovad, fq);
545 spin_unlock_irqrestore(&fq->lock, flags);
553 struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
557 spin_lock_irqsave(&fq->lock, flags);
564 fq_ring_free(iovad, fq);
566 if (fq_full(fq)) {
568 fq_ring_free(iovad, fq);
571 idx = fq_ring_add(fq);
573 fq->entries[idx].iova_pfn = pfn;
574 fq->entries[idx].pages = pages;
575 fq->entries[idx].data = data;
576 fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
578 spin_unlock_irqrestore(&fq->lock, flags);
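queue_iova (lines 553-578) ties the pieces together: with the lock held it first reclaims anything an earlier flush has already covered, forces a flush and drains again if the ring is still full, then records the new range in the slot returned by fq_ring_add, stamped with the flush counter read at queueing time. A compact userspace analogue, reusing the sketches above, with a demo_force_flush() stub in place of the real IOTLB flush (the stub and the plain global counters are assumptions, not the kernel's mechanism):

static uint64_t demo_flush_start_cnt;    /* flushes started; sampled at queue time */
static uint64_t demo_flush_finish_cnt;   /* flushes completed; gates the drain */

/* Stand-in for the real IOTLB flush: here it completes immediately. */
static void demo_force_flush(void)
{
	demo_flush_start_cnt++;
	demo_flush_finish_cnt++;
}

static void demo_queue_iova(struct demo_fq *fq, unsigned long pfn,
			    unsigned long pages, void *data,
			    void (*entry_dtor)(void *data))
{
	unsigned int idx;

	pthread_mutex_lock(&fq->lock);

	/* Reclaim whatever earlier flushes have already made safe to free. */
	demo_fq_ring_free(fq, demo_flush_finish_cnt, entry_dtor);

	/* No room left: force a flush now and drain again to make space. */
	if (demo_fq_full(fq)) {
		demo_force_flush();
		demo_fq_ring_free(fq, demo_flush_finish_cnt, entry_dtor);
	}

	idx = demo_fq_add(fq);
	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].data     = data;
	fq->entries[idx].counter  = demo_flush_start_cnt;

	pthread_mutex_unlock(&fq->lock);
}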