Lines Matching refs:prod

116 	u32 space, prod, cons;
118 	prod = Q_IDX(q, q->prod);
121 	if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons))
122 		space = (1 << q->max_n_shift) - (prod - cons);
124 		space = cons - prod;
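These matches are from the Linux SMMUv3 driver's queue bookkeeping: prod and cons each pack a queue index in the low bits with a wrap bit directly above it (Q_IDX/Q_WRP), so the space calculation at 121-124 has exactly two cases. A minimal userspace model of that arithmetic, assuming an 8-entry queue (SHIFT, IDX and WRP are illustrative stand-ins, not the driver's macros):

#include <assert.h>
#include <stdint.h>

#define SHIFT	3			/* 8-entry queue for the example */
#define IDX(p)	((p) & ((1u << SHIFT) - 1))
#define WRP(p)	((p) & (1u << SHIFT))	/* wrap bit just above the index */

static uint32_t queue_space(uint32_t prod, uint32_t cons)
{
	/* Same lap: everything prod has claimed ahead of cons is in use. */
	if (WRP(prod) == WRP(cons))
		return (1u << SHIFT) - (IDX(prod) - IDX(cons));
	/* prod is one lap ahead: only the slots behind cons are still free. */
	return IDX(cons) - IDX(prod);
}

int main(void)
{
	assert(queue_space(0x0, 0x0) == 8);	/* empty */
	assert(queue_space(0x8, 0x0) == 0);	/* same index, wrap differs: full */
	assert(queue_space(0xa, 0x3) == 1);	/* wrapped prod at 2, cons at 3 */
	return 0;
}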
131 	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
132 	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
137 	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
138 	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
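The two predicates at 131-138 (the driver's queue_full() and queue_empty()) resolve the classic ring-buffer ambiguity: when the indices are equal, only the wrap bits tell full from empty. Restated with the same illustrative macros as the sketch above:

/* Equal indices mean a boundary state; the wrap bits pick which one. */
static int ring_full(uint32_t prod, uint32_t cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) != WRP(cons);
}

static int ring_empty(uint32_t prod, uint32_t cons)
{
	return IDX(prod) == IDX(cons) && WRP(prod) == WRP(cons);
}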
141 static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod)
143 	return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) &&
144 		(Q_IDX(q, q->cons) > Q_IDX(q, prod))) ||
145 	       ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) &&
146 		(Q_IDX(q, q->cons) <= Q_IDX(q, prod)));
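queue_consumed() (141-146) asks whether cons has already moved past a given prod snapshot. The asymmetric comparisons are the point: strictly greater on the same lap, but less-than-or-equal when cons is a lap ahead, because then the slot at prod was eaten on the previous pass. In the same illustrative model:

static int consumed(uint32_t cons, uint32_t prod)
{
	return (WRP(cons) == WRP(prod) && IDX(cons) >  IDX(prod)) ||
	       (WRP(cons) != WRP(prod) && IDX(cons) <= IDX(prod));
}

/* e.g. consumed(0x9, 0x5): cons wrapped and sits at index 1, so slot 5 of
 * the previous lap is long gone -> true. */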
169 	if (likely(Q_OVF(llq->prod) == Q_OVF(llq->cons)))
172 	llq->cons = Q_OVF(llq->prod) | Q_WRP(llq, llq->cons) |
179 	u32 prod;
185 	 * prod has indeed moved.
187 	prod = readl(q->prod_reg);
189 	if (Q_OVF(prod) != Q_OVF(q->llq.prod))
192 	q->llq.prod = prod;
198 	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n;
199 	return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
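The fragments at 169-192 involve a third field: an overflow flag in the top bit of prod (Q_OVF), which hardware sets on queue overflow and software acknowledges by propagating it into cons. queue_inc_prod_n (198-199) must advance the index without disturbing that flag, so it adds n to the WRP|IDX pair, letting the carry out of the index flip the wrap bit, and then reassembles. Extending the earlier macros with an illustrative OVF bit:

#define OVF(p)	((p) & (1u << 31))	/* overflow flag in the top bit */

static uint32_t inc_prod_n(uint32_t prod, uint32_t n)
{
	/* The carry out of IDX toggles WRP for free; OVF rides along. */
	uint32_t p = (WRP(prod) | IDX(prod)) + n;

	return OVF(prod) | WRP(p) | IDX(p);
}

/* e.g. inc_prod_n(0x7, 1) == 0x8: index 7 -> 0 and the wrap bit flips. */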
351 					 u32 prod)
363 		ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) *
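Line 363 wires up CMD_SYNC completion by MSI: the interrupt's write is aimed at the CMD_SYNC's own slot in the command queue, i.e. base_dma plus the slot's byte offset (entries are ent_dwords 64-bit words each, hence the * 8). A sketch of just that computation, reusing IDX from above; the helper name is made up:

static uint64_t sync_msi_addr(uint64_t base_dma, uint32_t prod,
			      uint32_t ent_dwords)
{
	/* Byte offset of slot IDX(prod), with ent_dwords u64s per entry. */
	return base_dma + (uint64_t)IDX(prod) * ent_dwords * 8;
}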
497  * itself, but is responsible for advancing the hardware prod pointer when
513  * d. Advance the hardware prod pointer.
533 		.prod		= sprod,
539 	while (llq.prod != eprod) {
544 		swidx = BIT_WORD(Q_IDX(&llq, llq.prod));
545 		sbidx = Q_IDX(&llq, llq.prod) % BITS_PER_LONG;
565 			valid = (ULONG_MAX + !!Q_WRP(&llq, llq.prod)) & mask;
569 		llq.prod = queue_inc_prod_n(&llq, limit - sbidx);
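The fragments at 533-569 walk the valid bitmap behind the set/poll helpers called at 800 and 817 below, with one bit per queue slot. Writers toggle their slots' bits with XOR after filling them, so "valid" means the bit equals the inverse of the current wrap bit, and a zero-initialised map starts out all-invalid. Line 565 is a branchless encoding of the expected pattern: ULONG_MAX + 1 overflows to 0, so the poller expects all ones under the mask on an even lap and all zeroes on an odd one. A userspace model of both halves (illustrative, not the driver's atomic_long_* helpers):

#include <limits.h>
#include <stdatomic.h>

/* Writer: after copying commands in, toggle the batch's bits. */
static void set_valid(atomic_ulong *word, unsigned long mask)
{
	atomic_fetch_xor(word, mask);
}

/* Poller: wait until every slot under mask shows this lap's pattern
 * (mask itself when wrap == 0, zero when wrap == 1). */
static void poll_valid(atomic_ulong *word, unsigned long mask, int wrap)
{
	unsigned long valid = (ULONG_MAX + (unsigned long)!!wrap) & mask;

	while ((atomic_load_explicit(word, memory_order_relaxed) & mask) != valid)
		;
}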
629 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
639 	llq->cons = ret ? llq->prod : queue_inc_prod_n(llq, 1);
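Lines 629-639 are the pay-off of the msiaddr trick: the MSI polling path just watches the first 32-bit word of the CMD_SYNC entry, which the SMMU's completion write zeroes, and on success cons is bumped one slot past the sync. A rough analogue of the wait, assuming a bare spin where the kernel uses smp_cond_load_relaxed plus a timeout:

static void wait_msi_writeback(const volatile uint32_t *cmd)
{
	/* The device's completion write clears this word. */
	while (*cmd)
		;
}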
644  * Wait until the SMMU cons index passes llq->prod.
652 	u32 prod = llq->prod;
658 		if (queue_consumed(llq, prod))
707 					u32 prod, int n)
712 		.prod		= prod,
718 		prod = queue_inc_prod_n(&llq, i);
719 		queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
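arm_smmu_cmdq_write_entries (707-719) copies a batch into the slots reserved earlier; recomputing prod per entry through queue_inc_prod_n is what lets a batch straddle the end of the ring safely. A model reusing inc_prod_n and IDX from the sketches above (ENT_DWORDS and the queue layout are illustrative):

#include <string.h>

#define ENT_DWORDS	2	/* illustrative entry size in u64 words */

static void write_entries(uint64_t *queue, const uint64_t *cmds, int n,
			  uint32_t prod)
{
	for (int i = 0; i < n; i++) {
		uint32_t p = inc_prod_n(prod, i);	/* wraps mid-batch */

		memcpy(&queue[IDX(p) * ENT_DWORDS], &cmds[i * ENT_DWORDS],
		       ENT_DWORDS * sizeof(uint64_t));
	}
}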
743 	u32 prod;
766 		head.prod = queue_inc_prod_n(&llq, n + sync) |
775 	owner = !(llq.prod & CMDQ_PROD_OWNED_FLAG);
776 	head.prod &= ~CMDQ_PROD_OWNED_FLAG;
777 	llq.prod &= ~CMDQ_PROD_OWNED_FLAG;
783 	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
785 		prod = queue_inc_prod_n(&llq, n);
786 		arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, prod);
787 		queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
800 	arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
805 		atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
808 		prod = atomic_fetch_andnot_relaxed(CMDQ_PROD_OWNED_FLAG,
809 						   &cmdq->q.llq.atomic.prod);
810 		prod &= ~CMDQ_PROD_OWNED_FLAG;
817 		arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
820 		 * d. Advance the hardware prod pointer
823 		writel_relaxed(prod, cmdq->q.prod_reg);
828 		 * race to update prod and potentially move it backwards.
830 		atomic_set_release(&cmdq->owner_prod, prod);
835 		llq.prod = queue_inc_prod_n(&llq, n);
840 						    llq.prod,
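Lines 743-840 are the core of arm_smmu_cmdq_issue_cmdlist: CPUs reserve slots by cmpxchg'ing the shared prod word, and whichever CPU observed the OWNED flag clear (775) becomes the owner that later publishes everyone's entries with a single register write (823). Condensed to its atomics, the owner's steps a-e look roughly like this; OWNED_FLAG's bit position, write_prod_reg() and the elided valid-map poll are all illustrative:

#include <stdatomic.h>
#include <stdint.h>

#define OWNED_FLAG	(1u << 30)		/* illustrative position */

extern void write_prod_reg(uint32_t prod);	/* hypothetical MMIO write */

static void owner_publish(atomic_uint *owner_prod, atomic_uint *shared_prod,
			  uint32_t my_prod)
{
	/* a. Wait for the previous owner to hand over at our base prod. */
	while (atomic_load_explicit(owner_prod, memory_order_relaxed) != my_prod)
		;

	/* b. Clear OWNED so later arrivals start a new batch; the value
	 * read back marks the end of everything gathered into ours. */
	uint32_t end = atomic_fetch_and(shared_prod, ~OWNED_FLAG) & ~OWNED_FLAG;

	/* c. Poll the valid map from my_prod to end here, waiting for the
	 * other contributors' entries to land in the queue. */

	/* d. One register write publishes the whole batch to the device. */
	write_prod_reg(end);

	/* e. Release ownership only after the hardware write, so the next
	 * owner cannot race prod backwards. */
	atomic_store_explicit(owner_prod, end, memory_order_release);
}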
2644 	q->llq.prod = q->llq.cons = 0;
3068 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3096 	writel_relaxed(smmu->evtq.q.llq.prod,
3113 	writel_relaxed(smmu->priq.q.llq.prod,