Lines matching defs:llq — occurrences of llq, the struct arm_smmu_ll_queue low-level producer/consumer queue state, in the Arm SMMUv3 driver.
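
Most of the matches below manipulate llq->prod and llq->cons through the Q_IDX(), Q_WRP() and Q_OVF() helpers (e.g. lines 169-173, 363, 536-545). As orientation, here is a hedged, standalone sketch of the index encoding those helpers appear to assume. struct ll_queue_sketch and the lower-case q_idx/q_wrp/q_ovf/q_empty/q_full names are stand-ins invented for this sketch, not the driver's definitions; only the field names (prod, cons, val, max_n_shift) are taken from the listing, and the exact kernel layout (padding, atomic view) is an assumption.

/*
 * Hedged sketch, not the driver's verbatim code: a producer/consumer word
 * packs an entry index in the low bits, a wrap bit just above it, and an
 * overflow flag in the top bit.
 */
#include <stdbool.h>
#include <stdint.h>

struct ll_queue_sketch {
	union {
		uint64_t val;            /* prod and cons snapshotted as one word */
		struct {
			uint32_t prod;   /* index | wrap | overflow */
			uint32_t cons;   /* index | wrap | overflow */
		};
	};
	uint32_t max_n_shift;            /* log2(number of queue entries) */
};

/* Low bits: position of the entry inside the ring. */
static inline uint32_t q_idx(const struct ll_queue_sketch *q, uint32_t p)
{
	return p & ((1u << q->max_n_shift) - 1);
}

/* Next bit up: wrap flag, toggling each time the index rolls over. */
static inline uint32_t q_wrp(const struct ll_queue_sketch *q, uint32_t p)
{
	return p & (1u << q->max_n_shift);
}

/* Top bit: overflow flag (assumed; used on the event/PRI queues). */
static inline uint32_t q_ovf(uint32_t p)
{
	return p & (1u << 31);
}

/* Empty: index and wrap bit both agree.  Full: index agrees, wrap differs. */
static inline bool q_empty(const struct ll_queue_sketch *q)
{
	return q_idx(q, q->prod) == q_idx(q, q->cons) &&
	       q_wrp(q, q->prod) == q_wrp(q, q->cons);
}

static inline bool q_full(const struct ll_queue_sketch *q)
{
	return q_idx(q, q->prod) == q_idx(q, q->cons) &&
	       q_wrp(q, q->prod) != q_wrp(q, q->cons);
}

Keeping one extra wrap bit above the index is what lets the empty and full cases be told apart without sacrificing a queue slot, which is why nearly every match below masks prod/cons with Q_IDX() and Q_WRP() together.
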
156 writel_relaxed(q->llq.cons, q->cons_reg);
167 struct arm_smmu_ll_queue *llq = &q->llq;
169 if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
172 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
173 Q_IDX(llq, llq->cons);
189 if (Q_OVF(prod) != Q_OVF(q->llq.prod))
192 q->llq.prod = prod;
247 if (queue_empty(&q->llq))
250 queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
251 queue_inc_cons(&q->llq);
363 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
531 struct arm_smmu_ll_queue llq = {
532 .max_n_shift = cmdq->q.llq.max_n_shift,
536 ewidx = BIT_WORD(Q_IDX(&llq, eprod));
537 ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
539 while (llq.prod != eprod) {
544 swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
545 sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
565 valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
569 llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
589 struct arm_smmu_ll_queue *llq)
601 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
603 llq->val = READ_ONCE(cmdq->q.llq.val);
609 llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
610 if (!queue_full(llq))
624 struct arm_smmu_ll_queue *llq)
629 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
639 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
644 * Wait until the SMMU cons index passes llq->prod.
648 struct arm_smmu_ll_queue *llq)
652 u32 prod = llq->prod;
656 llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
658 if (queue_consumed(llq, prod))
671 * cmdq->q.llq.cons. Roughly speaking:
691 llq->cons = readl(cmdq->q.cons_reg);
698 struct arm_smmu_ll_queue *llq)
701 return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
703 return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
710 struct arm_smmu_ll_queue llq = {
711 .max_n_shift = cmdq->q.llq.max_n_shift,
718 prod = queue_inc_prod_n(&llq, i);
747 struct arm_smmu_ll_queue llq = {
748 .max_n_shift = cmdq->q.llq.max_n_shift,
749 }, head = llq;
754 llq.val = READ_ONCE(cmdq->q.llq.val);
758 while (!queue_has_space(&llq, n + sync)) {
760 if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
765 head.cons = llq.cons;
766 head.prod = queue_inc_prod_n(&llq, n + sync) |
769 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
770 if (old == llq.val)
773 llq.val = old;
775 owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
777 llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
783 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
785 prod = queue_inc_prod_n(&llq, n);
800 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
805 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
809 &cmdq->q.llq.atomic.prod);
817 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
835 llq.prod = queue_inc_prod_n(&llq, n);
836 ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
840 llq.prod,
847 * reader, in which case we can safely update cmdq->q.llq.cons
850 WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
1371 struct arm_smmu_ll_queue *llq = &q->llq;
1392 } while (!queue_empty(llq));
1441 struct arm_smmu_ll_queue *llq = &q->llq;
1450 } while (!queue_empty(llq));
2615 qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
2621 q->llq.max_n_shift--;
2633 1 << q->llq.max_n_shift, name);
2642 q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
2644 q->llq.prod = q->llq.cons = 0;
2658 unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
3068 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3069 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3096 writel_relaxed(smmu->evtq.q.llq.prod,
3098 writel_relaxed(smmu->evtq.q.llq.cons,
3113 writel_relaxed(smmu->priq.q.llq.prod,
3115 writel_relaxed(smmu->priq.q.llq.cons,
3272 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
3274 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
3286 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
3288 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,
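
Several matches above advance the producer by n entries at once (queue_inc_prod_n at 569, 639, 718, 766, 785 and 835). Below is a hedged sketch of how such an increment can preserve the wrap and overflow bits described earlier; q_inc_prod_n and the small demo are illustrative stand-ins inferred from the listing, not the kernel's verbatim helper.

/*
 * Hedged sketch: add n within the index+wrap bits so the wrap bit is
 * carried naturally, then splice the old overflow flag back in.
 */
#include <stdint.h>
#include <stdio.h>

struct ll_queue_sketch {
	uint32_t prod;
	uint32_t cons;
	uint32_t max_n_shift;   /* log2(number of entries) */
};

static uint32_t q_idx(const struct ll_queue_sketch *q, uint32_t p)
{
	return p & ((1u << q->max_n_shift) - 1);
}

static uint32_t q_wrp(const struct ll_queue_sketch *q, uint32_t p)
{
	return p & (1u << q->max_n_shift);
}

static uint32_t q_ovf(uint32_t p)
{
	return p & (1u << 31);
}

/* Return the producer word advanced by n entries, overflow flag untouched. */
static uint32_t q_inc_prod_n(const struct ll_queue_sketch *q, uint32_t n)
{
	uint32_t next = (q_wrp(q, q->prod) | q_idx(q, q->prod)) + n;

	return q_ovf(q->prod) | q_wrp(q, next) | q_idx(q, next);
}

int main(void)
{
	struct ll_queue_sketch q = { .prod = 0, .cons = 0, .max_n_shift = 3 }; /* 8 entries */

	/* Advancing by 10 in an 8-entry ring lands on index 2 with the wrap bit set. */
	q.prod = q_inc_prod_n(&q, 10);
	printf("idx=%u wrap=%s\n", q_idx(&q, q.prod), q_wrp(&q, q.prod) ? "yes" : "no");
	return 0;
}

The demo prints "idx=2 wrap=yes": the addition overflows the 3-bit index into the wrap bit, which is exactly the state the empty/full checks in the earlier sketch rely on.
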