// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

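/*
 * Upper bound on scatterlist entries advertised to the block layer when the
 * DMA map layer (e.g. an IOMMU) can merge entries back into what the
 * controller supports. The value is a pragmatic cap rather than a hardware
 * limit.
 */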
#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

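/*
 * Called on CQE request completion (see mmc_blk_cqe_complete_rq() in
 * block.c) so that a flush held back behind an in-flight DCMD can be
 * re-dispatched once the DCMD slot frees up.
 */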
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

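/*
 * Illustrative only: the real consumer is mmc_blk_mq_issue_rq() in block.c,
 * which branches on the returned type roughly like this:
 *
 *	switch (mmc_issue_type(mq, req)) {
 *	case MMC_ISSUE_SYNC:	issue the request and wait for completion
 *	case MMC_ISSUE_DCMD:	queue as a CQE direct command
 *	case MMC_ISSUE_ASYNC:	queue to the hardware and return
 *	}
 */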
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

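	/*
	 * As understood from the cqhci implementation: ->cqe_timeout()
	 * returns nonzero if the request is still in flight (so the block
	 * layer timer is reset) and zero if it has already completed.
	 */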
	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (host->cqe_enabled && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
	if (mmc_can_secure_erase_trim(card))
		blk_queue_max_secure_erase_sectors(q, max_discard);
	if (mmc_can_trim(card) && card->erased_byte == 0)
		blk_queue_max_write_zeroes_sectors(q, max_discard);
}

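/*
 * Example regime (numbers illustrative): a controller limited to 64 segments
 * can still be advertised to the block layer with 512 segments when an IOMMU
 * merges the mapped scatterlist back under the controller's limit.
 */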
static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = set->driver_data;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For the MMC host software queue, cap the number of requests
		 * in flight to avoid long latency.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

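	/*
	 * RQF_DONTPREP marks a request that has been dispatched before, so
	 * the retry counter is reset only on first prepare; requeued
	 * requests keep their accumulated retry count.
	 */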
	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (host->cqe_enabled) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeds it calls
	 * blk_queue_virt_boundary(), in which case the MMC core must not
	 * also call blk_queue_max_segment_size().
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
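 *
 * Illustrative caller sketch (simplified; block.c is the real consumer):
 *
 *	disk = mmc_init_queue(mq, card);
 *	if (IS_ERR(disk))
 *		return PTR_ERR(disk);
 *	...configure @disk, then call device_add_disk()...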
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	struct gendisk *disk;
	int ret;

	mq->card = card;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (host->cqe_enabled && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, so
	 * host->can_dma_map_merge must be set first for
	 * mmc_get_max_segments() to return the right max_segs.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ERR_PTR(ret);

	disk = blk_mq_alloc_disk(&mq->tag_set, mq);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&mq->tag_set);
		return disk;
	}
	mq->queue = disk->queue;

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return disk;
}

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	/*
	 * If the recovery completes the last (and only remaining) request in
	 * the queue, and the card has been removed, we could end up here with
	 * the recovery not quite finished yet, so cancel it.
	 */
	cancel_work_sync(&mq->recovery_work);

	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
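/*
 * Illustrative use (simplified from mmc_blk_data_prep() in block.c):
 *
 *	brq->data.sg = mqrq->sg;
 *	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 */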
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}