Lines Matching defs:cmdq

350 	return &smmu->cmdq;
414 * not to touch any of the shadow cmdq state.
429 __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);
444 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
454 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
458 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
459 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
462 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
464 (void)atomic_dec_return_release(&cmdq->lock);
467 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
469 if (atomic_read(&cmdq->lock) == 1)
472 arm_smmu_cmdq_shared_unlock(cmdq);
476 #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags) \
480 __ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN); \
486 #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags) \
488 atomic_set_release(&cmdq->lock, 0); \
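
The fragments from source lines 444-488 implement the command-queue lock as a single atomic counter that doubles as a reader/writer lock: non-negative values count shared holders, INT_MIN marks an exclusive holder, and a stray increment from a contending sharer is harmless because the value stays negative until the exclusive holder releases it by storing zero. Below is a minimal standalone model of that scheme using C11 atomics; the type and function names are invented for illustration and the memory ordering is simplified relative to the driver's relaxed/acquire/release mix.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical standalone model: lock >= 0 counts shared holders,
 * lock == INT_MIN means one exclusive holder. */
struct cmdq_lock {
        atomic_int lock;
};

static void shared_lock(struct cmdq_lock *l)
{
        int val;

        /*
         * Fast path: just increment. If the old value was non-negative we
         * now hold a shared reference. If it was negative (an exclusive
         * holder is present), the stray increment is harmless: the value
         * stays negative and the exclusive unlock below resets it to zero.
         */
        if (atomic_fetch_add_explicit(&l->lock, 1, memory_order_acquire) >= 0)
                return;

        /* Slow path: wait for the exclusive holder to go away, then try to
         * register our shared reference with a compare-and-swap. */
        do {
                do {
                        val = atomic_load_explicit(&l->lock,
                                                   memory_order_relaxed);
                } while (val < 0);
        } while (!atomic_compare_exchange_weak_explicit(&l->lock, &val, val + 1,
                                                        memory_order_acquire,
                                                        memory_order_relaxed));
}

static void shared_unlock(struct cmdq_lock *l)
{
        atomic_fetch_sub_explicit(&l->lock, 1, memory_order_release);
}

static bool shared_tryunlock(struct cmdq_lock *l)
{
        /* Mirrors the "last holder" check at source line 469: if we are the
         * only shared holder, refuse, so the caller can do extra work (e.g.
         * publish its cons value) before dropping the lock for real. */
        if (atomic_load_explicit(&l->lock, memory_order_relaxed) == 1)
                return false;

        shared_unlock(l);
        return true;
}

static bool exclusive_trylock(struct cmdq_lock *l)
{
        int expected = 0;

        /* Exclusive access is only granted when nobody holds the lock. */
        return atomic_compare_exchange_strong_explicit(&l->lock, &expected,
                                                       INT_MIN,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
}

static void exclusive_unlock(struct cmdq_lock *l)
{
        /* A plain store also discards any stray increments made by sharers
         * that lost the fast path while we held the lock exclusively. */
        atomic_store_explicit(&l->lock, 0, memory_order_release);
}
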
532 static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
537 .max_n_shift = cmdq->q.llq.max_n_shift,
552 ptr = &cmdq->valid_map[swidx];
579 static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
582 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
586 static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
589 __arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
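
Source lines 532-589 drive a per-slot valid bitmap: a slot's valid bit is expected to be the inverse of the wrap bit for the pass that wrote it, so producers flip the bits covering their slots once the commands are in place, and the batch owner polls until every bit in a range has flipped before letting the hardware see the new PROD. The sketch below is a deliberately simplified per-slot model (the driver works on whole bitmap words with masks); the queue size and all names are assumptions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical, simplified per-slot model. A prod value carries the slot
 * index in its low bits and a wrap bit just above them; a slot's valid flag
 * is expected to equal the inverse of the wrap bit for the pass that wrote
 * it, so a zero-initialised map means "nothing valid yet" and the flags
 * change meaning on every wrap.
 */
#define SHIFT   8                       /* assumed: queue has 1 << SHIFT slots */
#define NENTS   (1u << SHIFT)
#define IDX(p)  ((p) & (NENTS - 1))
#define WRAP(p) (!!((p) & NENTS))

static atomic_bool valid_map[NENTS];

/* Producer side: publish the slots in [sprod, eprod). */
static void set_valid_map(uint32_t sprod, uint32_t eprod)
{
        for (uint32_t p = sprod; p != eprod; p = (p + 1) & (2 * NENTS - 1))
                /* This slot is now valid for the current pass. */
                atomic_store_explicit(&valid_map[IDX(p)], !WRAP(p),
                                      memory_order_release);
}

/* Owner side: wait until every slot in [sprod, eprod) has been published. */
static void poll_valid_map(uint32_t sprod, uint32_t eprod)
{
        for (uint32_t p = sprod; p != eprod; p = (p + 1) & (2 * NENTS - 1))
                while (atomic_load_explicit(&valid_map[IDX(p)],
                                            memory_order_acquire) != !WRAP(p))
                        ;       /* spin; the driver uses a relaxed cond-read */
}
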
598 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
602 * Try to update our copy of cons by grabbing exclusive cmdq access. If
605 if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
606 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
607 arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
608 llq->val = READ_ONCE(cmdq->q.llq.val);
614 llq->val = READ_ONCE(cmdq->q.llq.val);
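
Source lines 598-614 show how a CPU waits for queue space: it tries to take the lock exclusively so it can refresh the software copy of CONS from the hardware register, and if that fails it simply spins on the software copy, trusting whoever holds the lock to update it. A rough standalone model, with invented names and an atomic variable standing in for the CMDQ_CONS register:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SHIFT   8                       /* assumed queue size */
#define NENTS   (1u << SHIFT)
#define IDX(p)  ((p) & (NENTS - 1))
#define WRAP(p) (!!((p) & NENTS))

static atomic_uint hw_cons_reg;         /* stand-in for the CMDQ_CONS register */
static atomic_uint shadow_cons;         /* software copy shared by all CPUs */
static atomic_int  cmdq_lock;           /* 0 free, INT_MIN exclusively held;
                                           shared holders would make it
                                           positive (see lock model above) */

static bool queue_full(uint32_t prod, uint32_t cons)
{
        /* Full when the indices match but the wrap bits differ. */
        return IDX(prod) == IDX(cons) && WRAP(prod) != WRAP(cons);
}

static void poll_until_not_full(uint32_t prod)
{
        int expected = 0;

        if (atomic_compare_exchange_strong_explicit(&cmdq_lock, &expected,
                                                    INT_MIN,
                                                    memory_order_acquire,
                                                    memory_order_relaxed)) {
                /* We won the race: refresh the shared shadow copy once. */
                atomic_store_explicit(&shadow_cons,
                                      atomic_load_explicit(&hw_cons_reg,
                                                           memory_order_relaxed),
                                      memory_order_relaxed);
                atomic_store_explicit(&cmdq_lock, 0, memory_order_release);
                return;
        }

        /* Somebody else holds the lock: wait for space to show up in the
         * shadow copy. The driver bounds this with a timeout. */
        while (queue_full(prod, atomic_load_explicit(&shadow_cons,
                                                     memory_order_relaxed)))
                ;
}
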
626 * Must be called with the cmdq lock held in some capacity.
633 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
634 u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
650 * Must be called with the cmdq lock held in some capacity.
656 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
661 llq->val = READ_ONCE(cmdq->q.llq.val);
676 * cmdq->q.llq.cons. Roughly speaking:
696 llq->cons = readl(cmdq->q.cons_reg);
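
Source lines 626-696 poll for completion of a CMD_SYNC, either via its MSI write-back or by watching the consumer index. The test at the heart of the CONS-based variant has to account for the wrap bit; below is a hypothetical version of that check (queue size assumed, and relying on the producer-side space check to keep cons within one queue's distance of the sync):

#include <stdbool.h>
#include <stdint.h>

#define SHIFT   8
#define NENTS   (1u << SHIFT)
#define IDX(p)  ((p) & (NENTS - 1))
#define WRAP(p) (!!((p) & NENTS))

/* A sync written at position 'prod' is done once cons has moved past it. */
static bool queue_consumed(uint32_t cons, uint32_t prod)
{
        /*
         * Same wrap bit: consumed once the cons index has gone beyond the
         * sync's index. Different wrap bit: cons has wrapped relative to the
         * sync's pass, so it has already passed the sync as long as its
         * index has not yet reached the sync's index again.
         */
        return (WRAP(cons) == WRAP(prod) && IDX(cons) > IDX(prod)) ||
               (WRAP(cons) != WRAP(prod) && IDX(cons) <= IDX(prod));
}
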
711 static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
716 .max_n_shift = cmdq->q.llq.max_n_shift,
724 queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
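
arm_smmu_cmdq_write_entries (source lines 711-724) copies a caller's batch into the ring one entry at a time, masking each successive prod value down to a slot index so the copy wraps naturally. A small illustrative version follows; the queue size is an assumption and a plain array stands in for the DMA-visible queue memory:

#include <stdint.h>
#include <string.h>

#define SHIFT           8                       /* assumed log2(queue entries) */
#define NENTS           (1u << SHIFT)
#define ENT_DWORDS      2                       /* command entries are two 64-bit words */
#define IDX(p)          ((p) & (NENTS - 1))

static uint64_t queue_base[NENTS * ENT_DWORDS]; /* stand-in for the queue memory */

static void write_entries(const uint64_t *cmds, uint32_t prod, int n)
{
        for (int i = 0; i < n; i++) {
                const uint64_t *cmd = &cmds[i * ENT_DWORDS];
                /* Incrementing prod carries into the wrap bit when the index
                 * overflows; only IDX() selects the slot actually written. */
                uint32_t p = (prod + i) & ((NENTS << 1) - 1);

                memcpy(&queue_base[IDX(p) * ENT_DWORDS], cmd,
                       ENT_DWORDS * sizeof(uint64_t));
        }
}
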
751 struct arm_smmu_cmdq *cmdq = arm_smmu_get_cmdq(smmu);
755 llq.max_n_shift = cmdq->q.llq.max_n_shift;
759 llq.val = READ_ONCE(cmdq->q.llq.val);
774 old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
788 arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
791 arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod);
792 queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
797 * We achieve that by taking the cmdq lock as shared before
800 arm_smmu_cmdq_shared_lock(cmdq);
805 arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
810 atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
814 &cmdq->q.llq.atomic.prod);
822 arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
828 writel_relaxed(prod, cmdq->q.prod_reg);
835 atomic_set_release(&cmdq->owner_prod, prod);
846 readl_relaxed(cmdq->q.prod_reg),
847 readl_relaxed(cmdq->q.cons_reg));
851 * Try to unlock the cmdq lock. This will fail if we're the last
852 * reader, in which case we can safely update cmdq->q.llq.cons
854 if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
855 WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
856 arm_smmu_cmdq_shared_unlock(cmdq);
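
The issue path at source lines 751-856 is lock-free on the fast path: every CPU races a single 64-bit cmpxchg over the {prod, cons} snapshot to reserve space for its batch, and whichever CPU finds the OWNED flag clear in the value it displaced becomes the owner that later publishes the final PROD to the hardware. The sketch below models only that reservation/election step; the field layout, constants and names are simplified stand-ins for the driver's arm_smmu_ll_queue.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SHIFT           8
#define NENTS           (1u << SHIFT)
#define IDX(p)          ((p) & (NENTS - 1))
#define WRAP(p)         ((p) & NENTS)
#define OWNED_FLAG      (1u << 31)
#define PROD_MASK       (~OWNED_FLAG)

static _Atomic uint64_t llq_val;        /* high 32 bits: prod, low 32 bits: cons */

static bool queue_has_space(uint32_t prod, uint32_t cons, uint32_t n)
{
        uint32_t space;

        if (WRAP(prod) == WRAP(cons))
                space = NENTS - (IDX(prod) - IDX(cons));
        else
                space = IDX(cons) - IDX(prod);

        return space >= n;
}

/*
 * Reserve n slots; returns the prod at which the caller may write and sets
 * *owner if this caller was elected to publish the batch to the hardware.
 */
static uint32_t reserve_slots(uint32_t n, bool *owner)
{
        uint64_t old = atomic_load_explicit(&llq_val, memory_order_relaxed);

        for (;;) {
                uint32_t prod = (uint32_t)(old >> 32);
                uint32_t cons = (uint32_t)old;
                uint32_t head;
                uint64_t newval;

                if (!queue_has_space(prod & PROD_MASK, cons, n)) {
                        /* The driver refreshes cons from hardware and
                         * retries; this model just re-reads the snapshot. */
                        old = atomic_load_explicit(&llq_val,
                                                   memory_order_relaxed);
                        continue;
                }

                head = (((prod & PROD_MASK) + n) & ((NENTS << 1) - 1)) |
                       OWNED_FLAG;
                newval = ((uint64_t)head << 32) | cons;

                if (atomic_compare_exchange_weak_explicit(&llq_val, &old,
                                                          newval,
                                                          memory_order_relaxed,
                                                          memory_order_relaxed)) {
                        /* If the prod we displaced had no OWNED flag, we were
                         * elected owner for this run of batches. */
                        *owner = !(prod & OWNED_FLAG);
                        return prod & PROD_MASK;
                }
                /* CAS failure refreshed 'old'; loop and retry. */
        }
}

After reserving space, every CPU writes its commands and optional sync (lines 788-792), marks its slots in the valid map and takes the shared lock (lines 797-805); the owner then waits for owner_prod to reach its own base prod, gathers any prod updates queued behind it, polls the valid map up to the final prod, and only then writes the PROD register and releases ownership (lines 810-835).
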
2935 struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
2936 unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
2938 atomic_set(&cmdq->owner_prod, 0);
2939 atomic_set(&cmdq->lock, 0);
2941 cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
2943 if (!cmdq->valid_map)
2953 /* cmdq */
2954 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, smmu->base,
2956 CMDQ_ENT_DWORDS, "cmdq");
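
arm_smmu_cmdq_init (source lines 2935-2943) resets owner_prod and the lock and allocates the valid bitmap, one bit per queue entry, zeroed so every slot starts out invalid. A trivial equivalent outside the kernel, with calloc standing in for devm_bitmap_zalloc:

#include <stdlib.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))

/* One bit per command-queue entry, zero-initialised ("invalid"). */
static unsigned long *alloc_valid_map(unsigned int max_n_shift)
{
        size_t nents = 1ul << max_n_shift;
        size_t nwords = (nents + BITS_PER_LONG - 1) / BITS_PER_LONG;

        return calloc(nwords, sizeof(unsigned long));
}
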
3184 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
3339 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
3340 writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
3341 writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
3581 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
3583 if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
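
Source lines 3581-3583 size the queue: max_n_shift is the smaller of the driver's cap (CMDQ_MAX_SZ_SHIFT) and the order advertised by the hardware, and the device is rejected if a full command batch plus its trailing sync would not fit. A hypothetical standalone version of that check, with made-up constants in place of CMDQ_MAX_SZ_SHIFT and CMDQ_BATCH_ENTRIES:

#include <stdint.h>
#include <stdio.h>

#define MAX_SZ_SHIFT    8               /* assumed driver cap */
#define BATCH_ENTRIES   64              /* assumed maximum batch size */

static int cmdq_size_shift(uint32_t hw_cmdq_shift, uint32_t *out_shift)
{
        uint32_t shift = hw_cmdq_shift < MAX_SZ_SHIFT ? hw_cmdq_shift
                                                      : MAX_SZ_SHIFT;
        uint32_t batch_shift = 0;

        while ((1u << (batch_shift + 1)) <= BATCH_ENTRIES)
                batch_shift++;          /* ilog2(BATCH_ENTRIES) */

        /* Batches are never split, so one batch plus a sync must fit. */
        if (shift <= batch_shift) {
                fprintf(stderr, "command queue too small for one batch + sync\n");
                return -1;
        }

        *out_shift = shift;
        return 0;
}
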