Lines Matching defs:llq
146 writel_relaxed(q->llq.cons, q->cons_reg);
157 struct arm_smmu_ll_queue *llq = &q->llq;
159 if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
162 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
163 Q_IDX(llq, llq->cons);
179 if (Q_OVF(prod) != Q_OVF(q->llq.prod))
182 q->llq.prod = prod;
237 if (queue_empty(&q->llq))
240 queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords);
241 queue_inc_cons(&q->llq);
365 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
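The Q_IDX/Q_WRP/Q_OVF uses above (the lines numbered 159-163 and 365) hint at how prod and cons are encoded in struct arm_smmu_ll_queue. Below is a minimal user-space sketch of that encoding, assuming the low max_n_shift bits hold the ring index, bit max_n_shift is the wrap flag and bit 31 an overflow flag; the names and macro bodies are illustrative, not copied from the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ll_queue {                 /* stand-in for struct arm_smmu_ll_queue */
	uint32_t prod;
	uint32_t cons;
	uint32_t max_n_shift;     /* log2 of the number of entries */
};

#define OVF_FLAG	(1U << 31)                              /* cf. Q_OVF */
#define q_idx(q, p)	((p) & ((1U << (q)->max_n_shift) - 1))  /* cf. Q_IDX */
#define q_wrp(q, p)	((p) & (1U << (q)->max_n_shift))        /* cf. Q_WRP */

static bool queue_empty_sketch(const struct ll_queue *q)
{
	/* empty: indices match and both sides are on the same lap */
	return q_idx(q, q->prod) == q_idx(q, q->cons) &&
	       q_wrp(q, q->prod) == q_wrp(q, q->cons);
}

static bool queue_full_sketch(const struct ll_queue *q)
{
	/* full: indices match but the wrap bits differ by one lap */
	return q_idx(q, q->prod) == q_idx(q, q->cons) &&
	       q_wrp(q, q->prod) != q_wrp(q, q->cons);
}

int main(void)
{
	struct ll_queue q = { .prod = 0, .cons = 0, .max_n_shift = 3 };

	q.prod = 1U << q.max_n_shift;	/* index 0 again, wrap bit now set */
	printf("empty=%d full=%d\n",
	       queue_empty_sketch(&q), queue_full_sketch(&q));	/* 0 1 */
	return 0;
}

Under that reading, the lines numbered 159-163 above are merely copying the overflow flag seen in prod back into cons while keeping cons's own wrap bit and index.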
536 struct arm_smmu_ll_queue llq = {
537 .max_n_shift = cmdq->q.llq.max_n_shift,
541 ewidx = BIT_WORD(Q_IDX(&llq, eprod));
542 ebidx = Q_IDX(&llq, eprod) % BITS_PER_LONG;
544 while (llq.prod != eprod) {
549 swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
550 sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
570 valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
574 llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
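The expression on the line numbered 570, (ULONG_MAX + !!Q_WRP(...)) & mask, is a branchless select: unsigned overflow turns the sum into 0 when the wrap bit is set and leaves all ones when it is clear, so the result is either mask or 0. A tiny stand-alone sketch of that trick (the function name is made up for illustration):

#include <assert.h>
#include <limits.h>

/* returns mask when flag == 0, 0 when flag != 0, with no branch */
static unsigned long mask_if_wrap_clear(unsigned long flag, unsigned long mask)
{
	return (ULONG_MAX + !!flag) & mask;
}

int main(void)
{
	assert(mask_if_wrap_clear(0, 0xf0UL) == 0xf0UL);  /* wrap clear */
	assert(mask_if_wrap_clear(1UL << 4, 0xf0UL) == 0); /* wrap set  */
	return 0;
}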
594 struct arm_smmu_ll_queue *llq)
606 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
608 llq->val = READ_ONCE(cmdq->q.llq.val);
614 llq->val = READ_ONCE(cmdq->q.llq.val);
615 if (!queue_full(llq))
629 struct arm_smmu_ll_queue *llq)
634 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
644 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
649 * Wait until the SMMU cons index passes llq->prod.
653 struct arm_smmu_ll_queue *llq)
657 u32 prod = llq->prod;
661 llq->val = READ_ONCE(cmdq->q.llq.val);
663 if (queue_consumed(llq, prod))
676 * cmdq->q.llq.cons. Roughly speaking:
696 llq->cons = readl(cmdq->q.cons_reg);
703 struct arm_smmu_ll_queue *llq)
706 return __arm_smmu_cmdq_poll_until_msi(smmu, llq);
708 return __arm_smmu_cmdq_poll_until_consumed(smmu, llq);
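The lines numbered 649-663 poll until the hardware's cons index has passed a previously captured prod. A hedged sketch of the wrap-aware comparison behind such a check is below; the macro bodies are assumptions mirroring the Q_IDX/Q_WRP uses earlier in this listing, not the driver's definitions.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SHIFT	4
#define IDX(p)	((p) & ((1u << SHIFT) - 1))
#define WRP(p)	((p) & (1u << SHIFT))

/* has cons moved strictly past prod, allowing for one wrap? */
static bool consumed_sketch(uint32_t cons, uint32_t prod)
{
	return (WRP(cons) == WRP(prod) && IDX(cons) > IDX(prod)) ||
	       (WRP(cons) != WRP(prod) && IDX(cons) <= IDX(prod));
}

int main(void)
{
	/* cons at index 2 on the next lap, prod at index 10: consumed */
	assert(consumed_sketch((1u << SHIFT) | 2, 10));
	/* cons at index 5 on the same lap as prod at 10: not yet */
	assert(!consumed_sketch(5, 10));
	return 0;
}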
715 struct arm_smmu_ll_queue llq = {
716 .max_n_shift = cmdq->q.llq.max_n_shift,
723 prod = queue_inc_prod_n(&llq, i);
752 struct arm_smmu_ll_queue llq, head;
755 llq.max_n_shift = cmdq->q.llq.max_n_shift;
759 llq.val = READ_ONCE(cmdq->q.llq.val);
763 while (!queue_has_space(&llq, n + sync)) {
765 if (arm_smmu_cmdq_poll_until_not_full(smmu, &llq))
770 head.cons = llq.cons;
771 head.prod = queue_inc_prod_n(&llq, n + sync) |
774 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
775 if (old == llq.val)
778 llq.val = old;
780 owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
782 llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
788 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
790 prod = queue_inc_prod_n(&llq, n);
805 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
810 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
814 &cmdq->q.llq.atomic.prod);
822 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
840 llq.prod = queue_inc_prod_n(&llq, n);
841 ret = arm_smmu_cmdq_poll_until_sync(smmu, &llq);
845 llq.prod,
852 * reader, in which case we can safely update cmdq->q.llq.cons
855 WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
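The lines numbered 752-790 show the interesting part of batch submission: space is claimed with cmpxchg_relaxed() on cmdq->q.llq.val, a single 64-bit word packing prod and cons, so several CPUs can reserve command slots without taking a lock. The sketch below models only that reservation step with C11 atomics in user space; the owner flag, valid map and sync handling visible on the surrounding lines are left out, and all names are invented for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT	8			/* 256-entry ring for the example */
#define NENTS	(1u << SHIFT)
#define IDX(p)	((p) & (NENTS - 1))
#define WRP(p)	((p) & NENTS)

static _Atomic uint64_t llq_val;	/* cons in the high half, prod low */

static uint32_t space(uint32_t prod, uint32_t cons)
{
	/* free entries, taking the wrap bit into account */
	if (WRP(prod) == WRP(cons))
		return NENTS - (IDX(prod) - IDX(cons));
	return IDX(cons) - IDX(prod);
}

static uint32_t inc_prod_n(uint32_t prod, uint32_t n)
{
	uint32_t next = IDX(prod) + n;

	/* toggle the wrap bit when the index runs past the end */
	return (WRP(prod) ^ (next & NENTS)) | (next & (NENTS - 1));
}

/* try to claim n entries; on success *out_prod is where our slots start */
static bool try_reserve(uint32_t n, uint32_t *out_prod)
{
	uint64_t old = atomic_load_explicit(&llq_val, memory_order_relaxed);

	for (;;) {
		uint32_t prod = (uint32_t)old;
		uint32_t cons = (uint32_t)(old >> 32);
		uint64_t newval;

		if (space(prod, cons) < n)
			return false;		/* caller must wait/retry */

		newval = ((uint64_t)cons << 32) | inc_prod_n(prod, n);
		if (atomic_compare_exchange_weak_explicit(&llq_val, &old,
							  newval,
							  memory_order_relaxed,
							  memory_order_relaxed)) {
			*out_prod = prod;
			return true;
		}
		/* old was refreshed by the failed CAS; try again */
	}
}

int main(void)
{
	uint32_t start;

	if (try_reserve(4, &start))
		printf("claimed 4 entries starting at index %u\n", IDX(start));
	return 0;
}

Once a range is claimed this way, the later lines (805-822) suggest the entries are published via the valid map and the owner eventually moves the hardware prod pointer for the whole batch; none of that is modelled here.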
1568 struct arm_smmu_ll_queue *llq = &q->llq;
1595 } while (!queue_empty(llq));
1644 struct arm_smmu_ll_queue *llq = &q->llq;
1653 } while (!queue_empty(llq));
2900 qsz = ((1 << q->llq.max_n_shift) * dwords) << 3;
2906 q->llq.max_n_shift--;
2918 1 << q->llq.max_n_shift, name);
2927 q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
2929 q->llq.prod = q->llq.cons = 0;
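The lines numbered 2900-2929 size each queue as (1 << max_n_shift) entries of ent_dwords 64-bit words (the << 3 converts dwords to bytes) and, if the allocation fails, shrink the ring by decrementing max_n_shift and retrying. A rough user-space model of that pattern follows, with calloc() standing in for the driver's DMA allocator and all names invented for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct queue_sketch {
	uint64_t *base;
	uint32_t max_n_shift;	/* log2 of the number of entries */
	uint32_t ent_dwords;	/* 64-bit words per entry */
};

static int init_one_queue_sketch(struct queue_sketch *q)
{
	size_t qsz;

	do {
		/* entries * dwords-per-entry * 8 bytes per dword */
		qsz = (((size_t)1 << q->max_n_shift) * q->ent_dwords) << 3;
		q->base = calloc(1, qsz);
		if (q->base)
			break;
		q->max_n_shift--;	/* fall back to a smaller ring */
	} while (q->max_n_shift);

	if (!q->base)
		return -1;

	printf("allocated %zu-byte queue, %u entries\n",
	       qsz, 1u << q->max_n_shift);
	return 0;
}

int main(void)
{
	struct queue_sketch q = { .max_n_shift = 8, .ent_dwords = 2 };

	return init_one_queue_sketch(&q);
}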
2936 unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
3340 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3341 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3366 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
3367 writel_relaxed(smmu->evtq.q.llq.cons, smmu->page1 + ARM_SMMU_EVTQ_CONS);
3381 writel_relaxed(smmu->priq.q.llq.prod,
3383 writel_relaxed(smmu->priq.q.llq.cons,
3581 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
3583 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
3595 smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT,
3597 smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT,