Lines Matching defs:cmdq

353 struct arm_smmu_queue *q = &smmu->cmdq.q;
381 struct arm_smmu_queue *q = &smmu->cmdq.q;
411 * not to touch any of the shadow cmdq state.
439 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
449 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
453 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
454 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
457 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
459 (void)atomic_dec_return_release(&cmdq->lock);
462 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
464 if (atomic_read(&cmdq->lock) == 1)
467 arm_smmu_cmdq_shared_unlock(cmdq);
471 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \
475 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
481 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \
483 atomic_set_release(&cmdq->lock, 0); \
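
The matches at 439-483 are the command queue's combined shared/exclusive lock: a single atomic counter where non-negative values count shared holders and INT_MIN marks an exclusive holder. The sketch below models that scheme in userspace with C11 atomics; the names (cmdq_lock_t, cmdq_shared_lock, ...) are illustrative rather than the driver's, the IRQ save/restore of the exclusive macros is dropped, and the kernel's relaxed/acquire/release orderings are only approximated.

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-in for the atomic_t lock embedded in struct arm_smmu_cmdq. */
    typedef struct {
        atomic_int lock;    /* >= 0: number of shared holders, INT_MIN: exclusive */
    } cmdq_lock_t;

    static void cmdq_shared_lock(cmdq_lock_t *l)
    {
        int val;

        /*
         * Fast path: just increment. If an exclusive holder is present the
         * counter is INT_MIN, so the stray increment keeps it negative and
         * is discarded when the exclusive holder stores 0 on unlock.
         */
        if (atomic_fetch_add_explicit(&l->lock, 1, memory_order_relaxed) >= 0)
            return;

        /* Slow path: wait for the exclusive holder, then join via cmpxchg. */
        do {
            do {
                val = atomic_load_explicit(&l->lock, memory_order_relaxed);
            } while (val < 0);
        } while (!atomic_compare_exchange_weak_explicit(&l->lock, &val, val + 1,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
    }

    static void cmdq_shared_unlock(cmdq_lock_t *l)
    {
        atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);
    }

    /* Succeeds only when nobody holds the lock at all: 0 -> INT_MIN. */
    static bool cmdq_exclusive_trylock(cmdq_lock_t *l)
    {
        int expected = 0;

        return atomic_compare_exchange_strong_explicit(&l->lock, &expected,
                                                       INT_MIN,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    static void cmdq_exclusive_unlock(cmdq_lock_t *l)
    {
        atomic_store_explicit(&l->lock, 0, memory_order_release);
    }

The tryunlock helper at 462 refuses to drop the last shared reference, so the caller can safely update cmdq->q.llq.cons before releasing the lock; the sketch omits that variant.
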
527 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
532 .max_n_shift = cmdq->q.llq.max_n_shift,
547 ptr = &cmdq->valid_map[swidx];
574 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
577 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
581 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
584 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
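
The valid-map matches at 527-584 maintain a shadow bitmap with one bit per queue slot, where an entry's "valid" phase is the inverse of the producer's wrap bit: writers publish their slots by flipping bits, and the CPU holding the doorbell polls the same range until every bit has reached the expected phase. Below is a minimal per-bit sketch of that idea, assuming a hypothetical 256-entry queue; the driver works a word at a time with atomic_long_xor() and GENMASK() rather than bit by bit.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define Q_SHIFT   8u                    /* hypothetical: 256-entry queue */
    #define Q_ENTRIES (1u << Q_SHIFT)
    #define Q_IDX(p)  ((p) & (Q_ENTRIES - 1))
    #define Q_WRP(p)  ((p) & Q_ENTRIES)     /* wrap bit sits just above the index */

    static atomic_bool valid_map[Q_ENTRIES];

    /* Publish every slot in [sprod, eprod): the valid phase is the inverse of
     * the wrap bit, so the map never needs clearing between laps. */
    static void cmdq_set_valid_map(uint32_t sprod, uint32_t eprod)
    {
        for (uint32_t p = sprod; p != eprod; p = (p + 1) & (2 * Q_ENTRIES - 1)) {
            bool phase = !Q_WRP(p);

            atomic_store_explicit(&valid_map[Q_IDX(p)], phase,
                                  memory_order_release);
        }
    }

    /* Owner side: wait until every slot in [sprod, eprod) has been published. */
    static void cmdq_poll_valid_map(uint32_t sprod, uint32_t eprod)
    {
        for (uint32_t p = sprod; p != eprod; p = (p + 1) & (2 * Q_ENTRIES - 1)) {
            bool phase = !Q_WRP(p);

            while (atomic_load_explicit(&valid_map[Q_IDX(p)],
                                        memory_order_acquire) != phase)
                ;   /* another CPU is still writing its commands */
        }
    }

Tying the phase to the wrap bit is what lets a zero-initialised map start out "all invalid" and then alternate meaning on every lap without ever being cleared.
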
593 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
597 * Try to update our copy of cons by grabbing exclusive cmdq access. If
600 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
601 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
602 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
603 llq->val = READ_ONCE(cmdq->q.llq.val);
609 llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
621 * Must be called with the cmdq lock held in some capacity.
628 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
629 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
645 * Must be called with the cmdq lock held in some capacity.
651 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
656 llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
671 * cmdq->q.llq.cons. Roughly speaking:
691 llq->cons = readl(cmdq->q.cons_reg);
706 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
711 .max_n_shift = cmdq->q.llq.max_n_shift,
719 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
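
arm_smmu_cmdq_write_entries() (matches 706-719) simply copies each 16-byte command into consecutive ring slots, letting Q_ENT() handle the wraparound. A minimal stand-alone version, again assuming a hypothetical 256-entry ring:

    #include <stdint.h>
    #include <string.h>

    #define CMDQ_ENT_DWORDS 2u              /* one SMMUv3 command = two 64-bit words */
    #define Q_SHIFT         8u              /* hypothetical: 256-entry ring */
    #define Q_ENTRIES       (1u << Q_SHIFT)
    #define Q_IDX(p)        ((p) & (Q_ENTRIES - 1))

    /* Copy n commands into consecutive slots starting at prod; masking the
     * index gives the wraparound that Q_ENT() provides in the driver. */
    static void cmdq_write_entries(uint64_t *ring, const uint64_t *cmds,
                                   uint32_t prod, int n)
    {
        for (int i = 0; i < n; i++) {
            uint64_t *slot = &ring[(size_t)Q_IDX(prod + i) * CMDQ_ENT_DWORDS];

            memcpy(slot, &cmds[i * CMDQ_ENT_DWORDS],
                   CMDQ_ENT_DWORDS * sizeof(uint64_t));
        }
    }
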
746 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
748 .max_n_shift = cmdq->q.llq.max_n_shift,
754 llq.val = READ_ONCE(cmdq->q.llq.val);
769 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
783 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
787 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
792 * We achieve that by taking the cmdq lock as shared before
795 arm_smmu_cmdq_shared_lock(cmdq);
800 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
805 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
809 &cmdq->q.llq.atomic.prod);
817 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
823 writel_relaxed(prod, cmdq->q.prod_reg);
830 atomic_set_release(&cmdq->owner_prod, prod);
841 readl_relaxed(cmdq->q.prod_reg),
842 readl_relaxed(cmdq->q.cons_reg));
846 * Try to unlock the cmdq lock. This will fail if we're the last
847 * reader, in which case we can safely update cmdq->q.llq.cons
849 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
850 WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
851 arm_smmu_cmdq_shared_unlock(cmdq);
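
The matches at 746-851 are the lock-free batch submission path: every CPU reserves its slots with a single cmpxchg on the combined 64-bit {prod, cons} shadow, and whichever CPU flips the OWNED flag from clear to set becomes the owner that later publishes prod to the hardware on behalf of all piggybacked batches. A simplified reservation-only sketch, assuming a hypothetical 256-entry queue and an illustrative {prod, cons} packing, and eliding the cons refresh done when the queue is full:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define Q_SHIFT          8u
    #define Q_ENTRIES        (1u << Q_SHIFT)
    #define Q_IDX(p)         ((p) & (Q_ENTRIES - 1))
    #define Q_WRP(p)         ((p) & Q_ENTRIES)
    #define PROD_OWNED_FLAG  (1u << 31)     /* stands in for CMDQ_PROD_OWNED_FLAG */

    /* Illustrative packing of the 64-bit shadow: prod low, cons high. */
    #define LLQ_PROD(v)      ((uint32_t)(v))
    #define LLQ_CONS(v)      ((uint32_t)((v) >> 32))
    #define LLQ_VAL(p, c)    (((uint64_t)(c) << 32) | (p))

    static _Atomic uint64_t llq_val;        /* shared shadow of {prod, cons} */

    static bool queue_has_space(uint32_t prod, uint32_t cons, uint32_t n)
    {
        uint32_t space;

        if (Q_WRP(prod) == Q_WRP(cons))
            space = Q_ENTRIES - (Q_IDX(prod) - Q_IDX(cons));
        else
            space = Q_IDX(cons) - Q_IDX(prod);

        return space >= n;
    }

    /*
     * Reserve n slots. Returns the first reserved prod value and reports
     * whether this CPU became the owner, i.e. the one that must eventually
     * publish prod to the hardware for every batch that piggybacked on it.
     */
    static uint32_t cmdq_reserve(uint32_t n, bool *owner)
    {
        uint64_t old = atomic_load_explicit(&llq_val, memory_order_relaxed);
        uint64_t newval;
        uint32_t prod, cons, head;

        do {
            prod = LLQ_PROD(old);
            cons = LLQ_CONS(old);

            while (!queue_has_space(prod & ~PROD_OWNED_FLAG, cons, n)) {
                /* The driver refreshes cons from hardware here; elided. */
                old  = atomic_load_explicit(&llq_val, memory_order_relaxed);
                prod = LLQ_PROD(old);
                cons = LLQ_CONS(old);
            }

            /* Advance prod (index + wrap bit) by n and claim ownership. */
            head = (((prod & ~PROD_OWNED_FLAG) + n) & (2 * Q_ENTRIES - 1)) |
                   PROD_OWNED_FLAG;
            newval = LLQ_VAL(head, cons);
        } while (!atomic_compare_exchange_weak_explicit(&llq_val, &old, newval,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));

        *owner = !(prod & PROD_OWNED_FLAG); /* we set the flag; was it clear? */
        return prod & ~PROD_OWNED_FLAG;
    }

After the reservation, non-owners write their commands, mark them in the valid map and take the lock as shared (792-800); the owner waits for the previous owner via owner_prod (805), polls the valid map, writes the merged prod to the prod register (823) and hands ownership on with atomic_set_release (830), which is what the remaining matches up to 851 implement.
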
2657 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
2658 unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
2661 atomic_set(&cmdq->owner_prod, 0);
2662 atomic_set(&cmdq->lock, 0);
2666 dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
2669 cmdq->valid_map = bitmap;
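
The init matches at 2657-2669 zero the owner_prod counter and the lock word and allocate the valid map with one bit per queue entry. A cut-down userspace equivalent (struct toy_cmdq and its fields are illustrative, not the driver's layout):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Cut-down command queue state covering just the fields initialised at
     * 2657-2669; the layout is illustrative, not the driver's. */
    struct toy_cmdq {
        unsigned int      max_n_shift;  /* log2 of the number of entries */
        _Atomic uint32_t  owner_prod;
        atomic_int        lock;
        unsigned long    *valid_map;    /* one bit per queue entry */
    };

    static int toy_cmdq_init(struct toy_cmdq *cmdq)
    {
        unsigned int nents = 1u << cmdq->max_n_shift;
        size_t nwords = (nents + 8 * sizeof(unsigned long) - 1) /
                        (8 * sizeof(unsigned long));

        atomic_store(&cmdq->owner_prod, 0);
        atomic_store(&cmdq->lock, 0);

        /* Zeroed map == "invalid" for the first lap, like bitmap_zalloc(). */
        cmdq->valid_map = calloc(nwords, sizeof(unsigned long));
        if (!cmdq->valid_map)
            return -ENOMEM;

        return 0;
    }
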
2680 /* cmdq */
2681 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
2683 "cmdq");
2904 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
3067 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
3068 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3069 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3272 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
3274 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
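
The matches at 3272-3274 size the queue: take the log2 entry count the hardware advertises (IDR1.CMDQS), clamp it to the driver's maximum, and refuse queues too small to ever hold a full command batch. A small sketch of that check; the CMDQ_MAX_SZ_SHIFT and CMDQ_BATCH_ENTRIES values below are placeholders, not the driver's constants:

    #include <stdint.h>
    #include <stdio.h>

    #define CMDQ_MAX_SZ_SHIFT  8u    /* placeholder for the driver's cap */
    #define CMDQ_BATCH_ENTRIES 64u   /* placeholder batch size */

    /* Clamp the hardware-advertised log2 queue size (IDR1.CMDQS in the driver)
     * and reject queues too small to ever hold one full command batch. */
    static int pick_cmdq_shift(uint32_t hw_cmdqs, uint32_t *max_n_shift)
    {
        uint32_t shift = hw_cmdqs < CMDQ_MAX_SZ_SHIFT ? hw_cmdqs
                                                      : CMDQ_MAX_SZ_SHIFT;

        if ((1u << shift) <= CMDQ_BATCH_ENTRIES) {
            fprintf(stderr, "command queue too small (%u entries)\n",
                    1u << shift);
            return -1;
        }

        *max_n_shift = shift;
        return 0;
    }
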