Lines matching defs:hsq
18 struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
19 struct mmc_host *mmc = hsq->mmc;
21 mmc->ops->request(mmc, hsq->mrq);
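These matches appear to come from the Linux kernel's MMC host software queue, drivers/mmc/host/mmc_hsq.c. Lines 18-21 are the retry worker, which re-issues a request from sleepable workqueue context after an atomic dispatch reported the card busy. A sketch of the full handler, reconstructed around the matched lines (exact source may differ by kernel version):

static void mmc_hsq_retry_handler(struct work_struct *work)
{
        struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
        struct mmc_host *mmc = hsq->mmc;

        /* Re-dispatch the held request from non-atomic context. */
        mmc->ops->request(mmc, hsq->mrq);
}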
24 static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
26 struct mmc_host *mmc = hsq->mmc;
31 spin_lock_irqsave(&hsq->lock, flags);
34 if (hsq->mrq || hsq->recovery_halt) {
35 spin_unlock_irqrestore(&hsq->lock, flags);
40 if (!hsq->qcnt || !hsq->enabled) {
41 spin_unlock_irqrestore(&hsq->lock, flags);
45 slot = &hsq->slot[hsq->next_tag];
46 hsq->mrq = slot->mrq;
47 hsq->qcnt--;
49 spin_unlock_irqrestore(&hsq->lock, flags);
52 ret = mmc->ops->request_atomic(mmc, hsq->mrq);
54 mmc->ops->request(mmc, hsq->mrq);
66 schedule_work(&hsq->retry_work);
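Lines 24-66 are the dispatcher. Under hsq->lock it bails if a request is already in flight, recovery has halted the queue, nothing is queued, or the queue is disabled; otherwise it claims the slot indexed by next_tag and issues it, preferring the host's atomic hook. A hedged reconstruction, assuming the upstream fallback-and-retry behavior:

static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct hsq_slot *slot;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hsq->lock, flags);

        /* A request is already in flight, or recovery halted us. */
        if (hsq->mrq || hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Nothing queued, or the software queue is disabled. */
        if (!hsq->qcnt || !hsq->enabled) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        slot = &hsq->slot[hsq->next_tag];
        hsq->mrq = slot->mrq;
        hsq->qcnt--;

        spin_unlock_irqrestore(&hsq->lock, flags);

        if (mmc->ops->request_atomic)
                ret = mmc->ops->request_atomic(mmc, hsq->mrq);
        else
                mmc->ops->request(mmc, hsq->mrq);

        /*
         * -EBUSY from request_atomic() means the card is busy now;
         * retry later from non-atomic context via the retry worker.
         */
        if (ret == -EBUSY)
                schedule_work(&hsq->retry_work);
}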
71 static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
80 hsq->next_tag = HSQ_INVALID_TAG;
81 hsq->tail_tag = HSQ_INVALID_TAG;
85 tag = hsq->tag_slot[hsq->next_tag];
86 hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
87 hsq->next_tag = tag;
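Lines 71-87 advance the dispatch order. tag_slot[] acts as a singly linked list of tags: tag_slot[t] holds the tag queued after t, with next_tag and tail_tag as head and tail. Sketch:

static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
        int tag;

        /* Queue drained: invalidate both head and tail. */
        if (!remains) {
                hsq->next_tag = HSQ_INVALID_TAG;
                hsq->tail_tag = HSQ_INVALID_TAG;
                return;
        }

        /* Advance the head to the tag linked after the completed one. */
        tag = hsq->tag_slot[hsq->next_tag];
        hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
        hsq->next_tag = tag;
}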
90 static void mmc_hsq_post_request(struct mmc_hsq *hsq)
95 spin_lock_irqsave(&hsq->lock, flags);
97 remains = hsq->qcnt;
98 hsq->mrq = NULL;
101 mmc_hsq_update_next_tag(hsq, remains);
103 if (hsq->waiting_for_idle && !remains) {
104 hsq->waiting_for_idle = false;
105 wake_up(&hsq->wait_queue);
109 if (hsq->recovery_halt) {
110 spin_unlock_irqrestore(&hsq->lock, flags);
114 spin_unlock_irqrestore(&hsq->lock, flags);
121 mmc_hsq_pump_requests(hsq);
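Lines 90-121 run after a request completes: clear the in-flight pointer, pick the next tag, wake any idle waiter once the queue drains, and, unless recovery halted the queue, pump the next request. A reconstruction under the same assumptions:

static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
        unsigned long flags;
        int remains;

        spin_lock_irqsave(&hsq->lock, flags);

        remains = hsq->qcnt;
        hsq->mrq = NULL;

        /* Pick the next tag to dispatch. */
        mmc_hsq_update_next_tag(hsq, remains);

        /* Wake up anyone waiting for the queue to drain. */
        if (hsq->waiting_for_idle && !remains) {
                hsq->waiting_for_idle = false;
                wake_up(&hsq->wait_queue);
        }

        /* Do not pump new requests in recovery mode. */
        if (hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&hsq->lock, flags);

        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}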
134 struct mmc_hsq *hsq = mmc->cqe_private;
137 spin_lock_irqsave(&hsq->lock, flags);
139 if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
140 spin_unlock_irqrestore(&hsq->lock, flags);
147 hsq->slot[hsq->next_tag].mrq = NULL;
149 spin_unlock_irqrestore(&hsq->lock, flags);
151 mmc_cqe_request_done(mmc, hsq->mrq);
153 mmc_hsq_post_request(hsq);
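Lines 134-153 look like mmc_hsq_finalize_request(), the completion path host drivers call from their IRQ handler: it verifies the completed mrq is the one in flight, frees the slot, reports completion via mmc_cqe_request_done(), and chains into post-request handling. Sketch; the bool return and exact signature are assumptions based on the upstream API:

bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Only finalize the request we actually dispatched. */
        if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return false;
        }

        /* Clear the completed slot to make room for a new request. */
        hsq->slot[hsq->next_tag].mrq = NULL;

        spin_unlock_irqrestore(&hsq->lock, flags);

        mmc_cqe_request_done(mmc, hsq->mrq);

        mmc_hsq_post_request(hsq);

        return true;
}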
161 struct mmc_hsq *hsq = mmc->cqe_private;
164 spin_lock_irqsave(&hsq->lock, flags);
166 hsq->recovery_halt = true;
168 spin_unlock_irqrestore(&hsq->lock, flags);
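Lines 161-168 set the recovery flag under the lock; since both the queueing and pumping paths check it, this stops new dispatches. Presumably the cqe_recovery_start hook:

static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Halt dispatching; pump and queue paths check this flag. */
        hsq->recovery_halt = true;

        spin_unlock_irqrestore(&hsq->lock, flags);
}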
173 struct mmc_hsq *hsq = mmc->cqe_private;
176 spin_lock_irq(&hsq->lock);
178 hsq->recovery_halt = false;
179 remains = hsq->qcnt;
181 spin_unlock_irq(&hsq->lock);
188 mmc_hsq_pump_requests(hsq);
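Lines 173-188 clear the flag and restart the pump if requests piled up during recovery. Sketch:

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int remains;

        spin_lock_irq(&hsq->lock);

        hsq->recovery_halt = false;
        remains = hsq->qcnt;

        spin_unlock_irq(&hsq->lock);

        /* Resume pumping whatever queued up during recovery. */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}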
193 struct mmc_hsq *hsq = mmc->cqe_private;
196 spin_lock_irq(&hsq->lock);
198 if (!hsq->enabled) {
199 spin_unlock_irq(&hsq->lock);
204 if (hsq->recovery_halt) {
205 spin_unlock_irq(&hsq->lock);
209 hsq->slot[tag].mrq = mrq;
215 if (hsq->next_tag == HSQ_INVALID_TAG) {
216 hsq->next_tag = tag;
217 hsq->tail_tag = tag;
218 hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
220 hsq->tag_slot[hsq->tail_tag] = tag;
221 hsq->tail_tag = tag;
224 hsq->qcnt++;
226 spin_unlock_irq(&hsq->lock);
228 mmc_hsq_pump_requests(hsq);
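Lines 193-228 are the enqueue path: store the mrq in its tag's slot, link the tag into the tag_slot list (head and tail when the list is empty, after the tail otherwise), bump the count, and kick the pump. A reconstruction, assuming the tag comes from mrq->tag and the upstream error codes:

static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int tag = mrq->tag;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -ESHUTDOWN;
        }

        /* Refuse new requests while recovery is running. */
        if (hsq->recovery_halt) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->slot[tag].mrq = mrq;

        /*
         * Empty list: this tag becomes both head and tail.
         * Otherwise link it after the current tail.
         */
        if (hsq->next_tag == HSQ_INVALID_TAG) {
                hsq->next_tag = tag;
                hsq->tail_tag = tag;
                hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
        } else {
                hsq->tag_slot[hsq->tail_tag] = tag;
                hsq->tail_tag = tag;
        }

        hsq->qcnt++;

        spin_unlock_irq(&hsq->lock);

        mmc_hsq_pump_requests(hsq);

        return 0;
}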
239 static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
243 spin_lock_irq(&hsq->lock);
245 is_idle = (!hsq->mrq && !hsq->qcnt) ||
246 hsq->recovery_halt;
248 *ret = hsq->recovery_halt ? -EBUSY : 0;
249 hsq->waiting_for_idle = !is_idle;
251 spin_unlock_irq(&hsq->lock);
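Lines 239-251 define idleness: nothing in flight and nothing queued, or recovery halted. The helper also records whether a waiter is pending, which is what the post-request path wakes on. Sketch:

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
        bool is_idle;

        spin_lock_irq(&hsq->lock);

        is_idle = (!hsq->mrq && !hsq->qcnt) ||
                   hsq->recovery_halt;

        /* Report -EBUSY when "idle" only because recovery halted us. */
        *ret = hsq->recovery_halt ? -EBUSY : 0;
        hsq->waiting_for_idle = !is_idle;

        spin_unlock_irq(&hsq->lock);

        return is_idle;
}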
258 struct mmc_hsq *hsq = mmc->cqe_private;
261 wait_event(hsq->wait_queue,
262 mmc_hsq_queue_is_idle(hsq, &ret));
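Lines 258-262 block until the helper reports idle, then propagate its status. Sketch:

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int ret;

        wait_event(hsq->wait_queue,
                   mmc_hsq_queue_is_idle(hsq, &ret));

        return ret;
}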
269 struct mmc_hsq *hsq = mmc->cqe_private;
273 spin_lock_irq(&hsq->lock);
275 if (!hsq->enabled) {
276 spin_unlock_irq(&hsq->lock);
280 spin_unlock_irq(&hsq->lock);
282 ret = wait_event_timeout(hsq->wait_queue,
283 mmc_hsq_queue_is_idle(hsq, &ret),
290 spin_lock_irq(&hsq->lock);
292 hsq->enabled = false;
294 spin_unlock_irq(&hsq->lock);
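Lines 269-294 disable the queue after draining it, with a bounded wait. The timeout value and warning below are assumptions modeled on the upstream code; note that wait_event_timeout() returning 0 means the condition never became true:

static void mmc_hsq_disable(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        u32 timeout = 500;      /* ms; assumed value */
        int ret;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return;
        }

        spin_unlock_irq(&hsq->lock);

        /* Drain the queue before disabling; give up after the timeout. */
        ret = wait_event_timeout(hsq->wait_queue,
                                 mmc_hsq_queue_is_idle(hsq, &ret),
                                 msecs_to_jiffies(timeout));
        if (ret == 0) {
                pr_warn("could not stop mmc software queue\n");
                return;
        }

        spin_lock_irq(&hsq->lock);

        hsq->enabled = false;

        spin_unlock_irq(&hsq->lock);
}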
299 struct mmc_hsq *hsq = mmc->cqe_private;
301 spin_lock_irq(&hsq->lock);
303 if (hsq->enabled) {
304 spin_unlock_irq(&hsq->lock);
308 hsq->enabled = true;
310 spin_unlock_irq(&hsq->lock);
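Lines 299-310 flip the enabled flag on, failing if the queue is already enabled. The unused card parameter is an assumption matching the cqe_enable hook signature in struct mmc_cqe_ops:

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct mmc_hsq *hsq = mmc->cqe_private;

        spin_lock_irq(&hsq->lock);

        if (hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->enabled = true;

        spin_unlock_irq(&hsq->lock);

        return 0;
}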
325 int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
328 hsq->num_slots = HSQ_NUM_SLOTS;
329 hsq->next_tag = HSQ_INVALID_TAG;
330 hsq->tail_tag = HSQ_INVALID_TAG;
332 hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
334 if (!hsq->slot)
337 hsq->mmc = mmc;
338 hsq->mmc->cqe_private = hsq;
342 hsq->tag_slot[i] = HSQ_INVALID_TAG;
344 INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
345 spin_lock_init(&hsq->lock);
346 init_waitqueue_head(&hsq->wait_queue);
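Lines 325-346 initialize the queue: allocate the slot array, invalidate every tag_slot entry, and wire up the retry worker, lock, and wait queue. The cqe_ops assignment below is an assumption; the matches skip the lines where upstream installs its struct mmc_cqe_ops table:

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
        int i;

        hsq->num_slots = HSQ_NUM_SLOTS;
        hsq->next_tag = HSQ_INVALID_TAG;
        hsq->tail_tag = HSQ_INVALID_TAG;

        hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
                                 sizeof(struct hsq_slot), GFP_KERNEL);
        if (!hsq->slot)
                return -ENOMEM;

        hsq->mmc = mmc;
        hsq->mmc->cqe_private = hsq;
        mmc->cqe_ops = &mmc_hsq_ops;    /* assumed: ops table not in matches */

        for (i = 0; i < HSQ_NUM_SLOTS; i++)
                hsq->tag_slot[i] = HSQ_INVALID_TAG;

        INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
        spin_lock_init(&hsq->lock);
        init_waitqueue_head(&hsq->wait_queue);

        return 0;
}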