// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

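/*
 * When the DMA layer can merge scatterlist entries (see mmc_merge_capable()
 * below), requests are not limited by the controller's own max_segs, so a
 * generous, somewhat arbitrary cap is used instead.
 */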
#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;

	mq->cqe_busy &= ~MMC_CQE_QUEUE_FULL;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}
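
/*
 * Example (hypothetical host driver): a CQE-capable controller that also
 * supports direct commands would advertise both capabilities at probe
 * time, e.g.:
 *
 *	host->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
 */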

static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !mq->use_cqe || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (mq->use_cqe && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = SECTOR_SIZE;
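	/*
	 * Worked example (illustrative values only): pref_erase = 1024
	 * sectors gives discard_granularity = 1024 << 9 = 512 KiB; were
	 * pref_erase to exceed max_discard, the granularity would fall
	 * back to a single 512-byte sector as above.
	 */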
	if (mmc_can_secure_erase_trim(card))
		blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
}

static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}
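
/*
 * Note: with device-side merging available (host->can_dma_map_merge), the
 * per-request scatterlist is sized for MMC_DMA_MAP_MERGE_SEGMENTS entries
 * rather than the controller's own host->max_segs, on the expectation that
 * the DMA layer merges entries down to what the controller can handle.
 */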

/**
 * __mmc_init_request() - initialize the MMC-specific per-request data
 * @mq: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	int ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		/*
		 * For MMC host software queue, we only allow 2 requests in
		 * flight to avoid a long latency.
		 */
		if (host->hsq_enabled && mq->in_flight[issue_type] > 2) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (mq->use_cqe) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned block_size = 512;

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
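	/*
	 * Worked example (illustrative values only): a host advertising
	 * max_blk_count = 65535 and max_req_size = 524288 (512 KiB) gets
	 * max_hw_sectors = min(65535, 524288 / 512) = 1024 sectors.
	 */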
	if (host->can_dma_map_merge)
		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
							mmc_dev(host)),
		     "merging was advertised but not possible");
	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
		block_size = card->ext_csd.data_sector_size;
		WARN_ON(block_size != 512 && block_size != 4096);
	}

	blk_queue_logical_block_size(mq->queue, block_size);
	/*
	 * When blk_queue_can_use_dma_map_merging() succeeds, it calls
	 * blk_queue_virt_boundary(), in which case we must not also call
	 * blk_queue_max_segment_size().
	 */
	if (!host->can_dma_map_merge)
		blk_queue_max_segment_size(mq->queue,
			round_down(host->max_seg_size, block_size));

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}
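
/*
 * Example (hypothetical host driver): a controller behind an IOMMU that can
 * merge scatterlist entries would opt in at probe time, e.g.:
 *
 *	host->caps2 |= MMC_CAP2_MERGE_CAPABLE;
 *
 * Whether merging is actually used also depends on host->max_segs and on
 * dma_get_merge_boundary(); see mmc_init_queue() below.
 */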

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int ret;

	mq->card = card;
	mq->use_cqe = host->cqe_enabled;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (mq->use_cqe && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;
	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set beforehand so that
	 * mmc_get_max_segments() returns the right number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);

	mq->queue->queuedata = mq;
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);
	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);
	return ret;
}
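
/*
 * Typical usage (sketch): the MMC block driver calls mmc_init_queue() when
 * it creates a disk for a card, and mmc_cleanup_queue() when tearing it
 * down, roughly:
 *
 *	ret = mmc_init_queue(&md->queue, card);
 *	if (ret)
 *		goto out;
 *	...
 *	mmc_cleanup_queue(&md->queue);
 *
 * "md" here stands in for the block driver's per-device data.
 */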

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}
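
/*
 * mmc_queue_suspend() and mmc_queue_resume() are expected to be used as a
 * pair around card suspend/resume, e.g. (sketch):
 *
 *	mmc_queue_suspend(mq);	// quiesce and drain before suspending
 *	...			// suspend/resume the card
 *	mmc_queue_resume(mq);	// allow dispatch again
 */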

void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	blk_cleanup_queue(q);
	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}
/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
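
/*
 * Example caller (sketch): the block driver maps the request into the
 * per-request scatterlist before handing it to the host, along the lines
 * of:
 *
 *	data->sg = mqrq->sg;
 *	data->sg_len = mmc_queue_map_sg(mq, mqrq);
 *
 * where "data" stands in for the struct mmc_data being prepared.
 */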