Lines matching refs:prod in the Linux SMMUv3 driver (arm-smmu-v3.c); each hit is prefixed with its line number in the source file.
106 u32 space, prod, cons;
108 prod = Q_IDX(q, q->prod);
111 if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
112 space = (1 << q->max_n_shift) - (prod - cons);
114 space = cons - prod;
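The prod and cons words in these fragments pack an index and a wrap bit into a single u32. Below is a minimal standalone model of the space calculation at 106-114, with toy IDX/WRP macros standing in for the driver's Q_IDX()/Q_WRP() and a made-up queue size:

	#include <stdbool.h>
	#include <stdint.h>

	#define SHIFT 4                             /* toy queue: 16 entries */
	#define IDX(p) ((p) & ((1u << SHIFT) - 1))  /* stands in for Q_IDX() */
	#define WRP(p) ((p) & (1u << SHIFT))        /* stands in for Q_WRP() */

	static bool has_space(uint32_t prod, uint32_t cons, uint32_t n)
	{
		if (WRP(prod) == WRP(cons))
			/* Same lap: everything prod has not yet claimed is free. */
			return (1u << SHIFT) - (IDX(prod) - IDX(cons)) >= n;
		/* prod is one lap ahead: only the gap up to cons is free. */
		return IDX(cons) - IDX(prod) >= n;
	}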
121 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
122 Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
127 return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
128 Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
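queue_full() and queue_empty() at 121-128 are where that wrap bit earns its keep: with a bare index, prod == cons cannot distinguish a completely full queue from an empty one. Continuing the toy model (same IDX/WRP macros as above):

	static bool full(uint32_t prod, uint32_t cons)
	{
		/* Indexes meet with prod a lap ahead: every slot is occupied. */
		return IDX(prod) == IDX(cons) && WRP(prod) != WRP(cons);
	}

	static bool empty(uint32_t prod, uint32_t cons)
	{
		/* Indexes meet on the same lap: nothing left to consume. */
		return IDX(prod) == IDX(cons) && WRP(prod) == WRP(cons);
	}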
131 static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
133 return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
134 (Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
135 ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
136 (Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
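queue_consumed() at 131-136 asks whether cons has moved past a prod snapshot, and must cope with cons being one lap ahead. In the toy encoding with SHIFT == 4, cons == 0x12 (lap 1, index 2) has consumed prod == 0x0E (lap 0, index 14): unequal wrap bits together with IDX(cons) <= IDX(prod) mean cons already passed index 14 before wrapping.

	static bool consumed(uint32_t cons, uint32_t prod)
	{
		/* Same lap: cons must be strictly past prod's index. */
		return (WRP(cons) == WRP(prod) && IDX(cons) > IDX(prod)) ||
		       /* cons a lap ahead: it passed prod's slot before wrapping. */
		       (WRP(cons) != WRP(prod) && IDX(cons) <= IDX(prod));
	}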
159 if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
162 llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
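The fragment at 159-162 keeps a third field in sync: the top bit of these words is the queue overflow flag, which the hardware sets in PROD when it has to drop entries and which software acknowledges by mirroring it into CONS. A sketch of that merge, assuming an OVF() macro for bit 31 alongside the toy helpers above:

	#define OVF(p) ((p) & (1u << 31))   /* stands in for Q_OVF() */

	static uint32_t ack_overflow(uint32_t prod, uint32_t cons)
	{
		/* Adopt prod's overflow flag; leave cons's wrap and index alone. */
		return OVF(prod) | WRP(cons) | IDX(cons);
	}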
169 u32 prod;
175 * prod has indeed moved.
177 prod = readl(q->prod_reg);
179 if (Q_OVF(prod) != Q_OVF(q->llq.prod))
182 q->llq.prod = prod;
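The plain readl() at 177 (rather than readl_relaxed()) is deliberate: as the comment fragment at 175 explains, the read of the PROD register must be ordered before any subsequent reads of queue memory, so the CPU cannot speculatively load an entry before knowing that prod really moved. A mismatch in the overflow bit at 179 is reported to the caller as an overflow, meaning the hardware dropped entries since the last sync.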
188 u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
189 return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
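queue_inc_prod_n() at 188-189 is the arithmetic that makes the wrap bit work: the addition runs over the combined WRP|IDX value, so a carry out of the index field flips the wrap bit naturally, and the untouched overflow flag is OR'd back on top. In the toy encoding:

	static uint32_t inc_prod_n(uint32_t prod, int n)
	{
		uint32_t p = (WRP(prod) | IDX(prod)) + n; /* carry toggles the wrap bit */
		return OVF(prod) | WRP(p) | IDX(p);       /* higher carries are masked off */
	}

	/* e.g. with SHIFT == 4, inc_prod_n(0x0F, 1) == 0x10: index 15 wraps to
	 * index 0 and the wrap bit toggles. */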
354 struct arm_smmu_queue *q, u32 prod)
365 ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
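Line 365 aims the CMD_SYNC completion MSI at the queue entry itself: base_dma plus the entry's byte offset within the ring. That choice is what lets the MSI polling path further down spin on the command's own memory instead of the CONS register.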
502 * itself, but is responsible for advancing the hardware prod pointer when
518 * d. Advance the hardware prod pointer.
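The two comment fragments at 502 and 518 belong to the driver's description of its lock-free command-queue insertion: each CPU atomically claims a range of slots, writes its commands there, and flips the slots' valid bits in a bitmap; the CPU whose claim found the owned flag clear becomes the owner and, once every claimed slot is valid, publishes the whole batch by advancing the hardware prod pointer. A much simplified sketch of just the claiming step, written with C11 atomics instead of the kernel's cmpxchg helpers, reusing inc_prod_n() from above and an OWNED_FLAG modelled loosely on CMDQ_PROD_OWNED_FLAG:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define OWNED_FLAG (1u << 30)   /* models CMDQ_PROD_OWNED_FLAG */

	/* Claim n slots; returns the pre-claim prod. Whoever saw the flag clear
	 * is the owner and must eventually publish everyone's commands. */
	static uint32_t claim_slots(_Atomic uint32_t *prod, int n, bool *owner)
	{
		uint32_t old = atomic_load_explicit(prod, memory_order_relaxed);
		uint32_t new;

		do {
			/* The real driver also checks queue_has_space() here and
			 * waits for cons to move when the queue is full. */
			new = inc_prod_n(old & ~OWNED_FLAG, n) | OWNED_FLAG;
		} while (!atomic_compare_exchange_weak_explicit(prod, &old, new,
								memory_order_relaxed,
								memory_order_relaxed));

		*owner = !(old & OWNED_FLAG); /* flag was clear: we own this batch */
		return old & ~OWNED_FLAG;
	}

Non-owners stop there: they write their commands and set their valid bits, and only the owner ever touches the PROD register, which keeps the hardware's view of prod monotonic.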
538 .prod = sprod,
544 while (llq.prod != eprod) {
549 swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
550 sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
570 valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
574 llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
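The bitmap walk at 538-574 sets (or polls) up to one long's worth of valid bits per iteration. The cryptic line 570 encodes the driver's convention that a slot's valid bit is the inverse of the current wrap bit, so a zero-initialised map reads as invalid and every entry automatically becomes invalid again once prod wraps; unsigned overflow picks the polarity to wait for:

	#include <limits.h>

	static unsigned long expect_valid(unsigned long wrap, unsigned long mask)
	{
		/* wrap clear: ULONG_MAX + 0, expect the masked bits set.
		 * wrap set:   ULONG_MAX + 1 overflows to 0, expect them clear. */
		return (ULONG_MAX + !!wrap) & mask;
	}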
634 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
644 llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
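Lines 634-644 are the MSI flavour of CMD_SYNC completion: the SMMU's write-back (to the msiaddr computed at 365) clears the first 32-bit word of the CMD_SYNC entry, so the CPU spins on that word in queue memory. On success cons is advanced one slot past the sync; on timeout it is left equal to prod.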
649 * Wait until the SMMU cons index passes llq->prod.
657 u32 prod = llq->prod;
663 if (queue_consumed(llq, prod))
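Lines 649-663 are the fallback when MSIs are unavailable: the caller snapshots prod at 657 and then polls the hardware CONS register until queue_consumed() (from 131) reports that cons has moved past that snapshot.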
712 u32 prod, int n)
717 .prod = prod,
723 prod = queue_inc_prod_n(&llq, i);
724 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
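The write loop at 712-724 places command i at prod + i via the wrap-aware increment, so a claimed batch can straddle the end of the ring without any special casing.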
748 u32 prod;
771 head.prod = queue_inc_prod_n(&llq, n + sync) |
780 owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
781 head.prod &= ~CMDQ_PROD_OWNED_FLAG;
782 llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
788 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
790 prod = queue_inc_prod_n(&llq, n);
791 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
792 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
805 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
810 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
813 prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
814 &cmdq->q.llq.atomic.prod);
815 prod &= ~CMDQ_PROD_OWNED_FLAG;
822 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
825 * d. Advance the hardware prod pointer
828 writel_relaxed(prod, cmdq->q.prod_reg);
833 * race to update prod and potentially move it backwards.
835 atomic_set_release(&cmdq->owner_prod, prod);
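The ordering dance at 810-835 is the owner handoff: each owner first waits at 810 for owner_prod to reach the start of its own claim, writes the hardware PROD register at 828, and only then publishes its value with the release at 835. As the comment at 833 says, updating owner_prod before the register write would let the next owner race ahead and move the hardware prod pointer backwards.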
840 llq.prod = queue_inc_prod_n(&llq, n);
845 llq.prod,
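840-845 is the CMD_SYNC wait and its failure path: llq.prod is advanced past the n regular commands to the sync's own slot, and on timeout that expected value is dumped alongside the live hardware PROD and CONS registers in a ratelimited error message.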
2929 q->llq.prod = q->llq.cons = 0;
3340 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3366 writel_relaxed(smmu->evtq.q.llq.prod, smmu->page1 + ARM_SMMU_EVTQ_PROD);
3381 writel_relaxed(smmu->priq.q.llq.prod,
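The final hits are initialisation: 2929 zeroes the shadow prod/cons when a queue is set up, and 3340, 3366 and 3381 copy each shadow prod into the CMDQ, EVTQ and PRIQ PROD registers during device reset, so hardware and software agree on the state of every queue before it is enabled.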