Lines Matching defs:hsq (MMC host software queue; the fragments below are matched lines from the Linux kernel's mmc_hsq driver, shown with their original file line numbers)
21 struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
22 struct mmc_host *mmc = hsq->mmc;
24 mmc->ops->request(mmc, hsq->mrq);
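The three matches above (lines 21-24) sit inside the retry worker that line 354 later registers with INIT_WORK(). A minimal sketch of the whole handler; only the numbered lines are confirmed by the listing, the signature and braces are inferred from the container_of() idiom:

static void mmc_hsq_retry_handler(struct work_struct *work)
{
        struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
        struct mmc_host *mmc = hsq->mmc;

        /* Re-issue the in-flight request from process context. */
        mmc->ops->request(mmc, hsq->mrq);
}

Running this from a workqueue lets the host driver sleep, which the atomic dispatch path below cannot.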
27 static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
29 struct mmc_host *mmc = hsq->mmc;
34 spin_lock_irqsave(&hsq->lock, flags);
37 if (hsq->mrq || hsq->recovery_halt) {
38 spin_unlock_irqrestore(&hsq->lock, flags);
43 if (!hsq->qcnt || !hsq->enabled) {
44 spin_unlock_irqrestore(&hsq->lock, flags);
48 slot = &hsq->slot[hsq->next_tag];
49 hsq->mrq = slot->mrq;
50 hsq->qcnt--;
52 spin_unlock_irqrestore(&hsq->lock, flags);
55 ret = mmc->ops->request_atomic(mmc, hsq->mrq);
57 mmc->ops->request(mmc, hsq->mrq);
69 schedule_work(&hsq->retry_work);
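Lines 27-69 are the dispatch routine. The gap between lines 55 and 57 implies an if/else that prefers the host's request_atomic() hook and falls back to request(), and line 69 hands off to the retry worker, presumably when the atomic path returns -EBUSY. A sketch under those assumptions (local declarations and early-return bodies are inferred):

static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
        struct mmc_host *mmc = hsq->mmc;
        struct hsq_slot *slot;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&hsq->lock, flags);

        /* A request is already in flight, or recovery is running. */
        if (hsq->mrq || hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Nothing queued, or the software queue is disabled. */
        if (!hsq->qcnt || !hsq->enabled) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        /* Claim the next candidate slot as the in-flight request. */
        slot = &hsq->slot[hsq->next_tag];
        hsq->mrq = slot->mrq;
        hsq->qcnt--;

        spin_unlock_irqrestore(&hsq->lock, flags);

        /* Prefer the atomic hook when the host implements it (assumed). */
        if (mmc->ops->request_atomic)
                ret = mmc->ops->request_atomic(mmc, hsq->mrq);
        else
                mmc->ops->request(mmc, hsq->mrq);

        /* A busy card cannot be served atomically; retry in the worker. */
        if (ret == -EBUSY)
                schedule_work(&hsq->retry_work);
}

Dropping the lock before calling into the host looks deliberate: the completion path takes the same lock, possibly from the host's interrupt handler.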
74 static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
84 hsq->next_tag = HSQ_INVALID_TAG;
92 if (++hsq->next_tag != HSQ_INVALID_TAG) {
93 slot = &hsq->slot[hsq->next_tag];
100 slot = &hsq->slot[tag];
108 hsq->next_tag = tag;
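Lines 74-108 choose which slot to dispatch next. The matched lines outline the policy: with nothing queued the tag goes invalid (line 84); otherwise try the slot right after the current tag (lines 92-93); failing that, scan all slots (lines 100, 108). A sketch, assuming the occupancy test is slot->mrq and that HSQ_INVALID_TAG equals HSQ_NUM_SLOTS so the pre-increment check also rejects wraparound:

static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
        struct hsq_slot *slot;
        int tag;

        /* Queue drained: no candidate to dispatch. */
        if (!remains) {
                hsq->next_tag = HSQ_INVALID_TAG;
                return;
        }

        /* Fast path: the next sequential slot already holds a request. */
        if (++hsq->next_tag != HSQ_INVALID_TAG) {
                slot = &hsq->slot[hsq->next_tag];
                if (slot->mrq)
                        return;
        }

        /* Slow path: scan every slot for a pending request. */
        for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
                slot = &hsq->slot[tag];
                if (slot->mrq)
                        break;
        }

        /* No occupied slot found: mark the queue empty. */
        if (tag == HSQ_NUM_SLOTS)
                tag = HSQ_INVALID_TAG;

        hsq->next_tag = tag;
}

No locking here: the caller at lines 116-122 invokes this with hsq->lock already held.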
111 static void mmc_hsq_post_request(struct mmc_hsq *hsq)
116 spin_lock_irqsave(&hsq->lock, flags);
118 remains = hsq->qcnt;
119 hsq->mrq = NULL;
122 mmc_hsq_update_next_tag(hsq, remains);
124 if (hsq->waiting_for_idle && !remains) {
125 hsq->waiting_for_idle = false;
126 wake_up(&hsq->wait_queue);
130 if (hsq->recovery_halt) {
131 spin_unlock_irqrestore(&hsq->lock, flags);
135 spin_unlock_irqrestore(&hsq->lock, flags);
142 mmc_hsq_pump_requests(hsq);
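Lines 111-142 are the completion bookkeeping: under the lock the in-flight pointer is cleared, the next tag recomputed, and any wait-for-idle sleeper woken once the queue drains; dispatch restarts outside the lock unless recovery halted the queue. A sketch; the remains > 0 guard before the re-pump at line 142 is an assumption:

static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
        unsigned long flags;
        int remains;

        spin_lock_irqsave(&hsq->lock, flags);

        remains = hsq->qcnt;
        hsq->mrq = NULL;

        /* Pick the slot to dispatch next. */
        mmc_hsq_update_next_tag(hsq, remains);

        /* Wake anyone blocked waiting for the queue to drain. */
        if (hsq->waiting_for_idle && !remains) {
                hsq->waiting_for_idle = false;
                wake_up(&hsq->wait_queue);
        }

        /* Recovery is running: leave dispatch halted. */
        if (hsq->recovery_halt) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return;
        }

        spin_unlock_irqrestore(&hsq->lock, flags);

        /* Keep the controller busy with the next queued request. */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}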
155 struct mmc_hsq *hsq = mmc->cqe_private;
158 spin_lock_irqsave(&hsq->lock, flags);
160 if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
161 spin_unlock_irqrestore(&hsq->lock, flags);
168 hsq->slot[hsq->next_tag].mrq = NULL;
170 spin_unlock_irqrestore(&hsq->lock, flags);
172 mmc_cqe_request_done(mmc, hsq->mrq);
174 mmc_hsq_post_request(hsq);
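Lines 155-174 belong to the hook a host driver calls when the hardware signals completion; in mainline this entry point is named mmc_hsq_finalize_request() and returns bool so the caller can fall back to its normal completion path for requests the queue does not own. The early unlock-and-return at line 161 matches that shape; the signature and return values are otherwise assumed:

bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Not enabled, nothing in flight, or a foreign request. */
        if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
                spin_unlock_irqrestore(&hsq->lock, flags);
                return false;
        }

        /* Release the completed slot for reuse. */
        hsq->slot[hsq->next_tag].mrq = NULL;

        spin_unlock_irqrestore(&hsq->lock, flags);

        /* Complete toward the block layer, then restart dispatch. */
        mmc_cqe_request_done(mmc, hsq->mrq);

        mmc_hsq_post_request(hsq);

        return true;
}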
182 struct mmc_hsq *hsq = mmc->cqe_private;
185 spin_lock_irqsave(&hsq->lock, flags);
187 hsq->recovery_halt = true;
189 spin_unlock_irqrestore(&hsq->lock, flags);
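Lines 182-189 are essentially the whole recovery-start callback: it only raises the flag that the dispatch and enqueue paths check. A sketch, with the name assumed from the cqe recovery_start operation:

static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        unsigned long flags;

        spin_lock_irqsave(&hsq->lock, flags);

        /* Gate mmc_hsq_pump_requests() and new submissions. */
        hsq->recovery_halt = true;

        spin_unlock_irqrestore(&hsq->lock, flags);
}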
194 struct mmc_hsq *hsq = mmc->cqe_private;
197 spin_lock_irq(&hsq->lock);
199 hsq->recovery_halt = false;
200 remains = hsq->qcnt;
202 spin_unlock_irq(&hsq->lock);
209 mmc_hsq_pump_requests(hsq);
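Lines 194-209 undo the halt and, per line 209, restart dispatch; the remains > 0 guard is an assumption. A sketch (name assumed from the cqe recovery_finish operation):

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int remains;

        spin_lock_irq(&hsq->lock);

        hsq->recovery_halt = false;
        remains = hsq->qcnt;

        spin_unlock_irq(&hsq->lock);

        /* Requests queued during recovery still need dispatching. */
        if (remains > 0)
                mmc_hsq_pump_requests(hsq);
}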
214 struct mmc_hsq *hsq = mmc->cqe_private;
217 spin_lock_irq(&hsq->lock);
219 if (!hsq->enabled) {
220 spin_unlock_irq(&hsq->lock);
225 if (hsq->recovery_halt) {
226 spin_unlock_irq(&hsq->lock);
230 hsq->slot[tag].mrq = mrq;
236 if (hsq->next_tag == HSQ_INVALID_TAG)
237 hsq->next_tag = tag;
239 hsq->qcnt++;
241 spin_unlock_irq(&hsq->lock);
243 mmc_hsq_pump_requests(hsq);
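Lines 214-243 are the enqueue path, i.e. the queue's cqe_request callback. The tag source (mrq->tag, assigned by the block layer) and the two error codes are assumptions:

static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int tag = mrq->tag;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -ESHUTDOWN;
        }

        /* Refuse new work while recovery is in progress. */
        if (hsq->recovery_halt) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->slot[tag].mrq = mrq;

        /* Adopt this tag if no candidate is currently selected. */
        if (hsq->next_tag == HSQ_INVALID_TAG)
                hsq->next_tag = tag;

        hsq->qcnt++;

        spin_unlock_irq(&hsq->lock);

        /* Dispatch immediately if the controller is idle. */
        mmc_hsq_pump_requests(hsq);

        return 0;
}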
254 static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
258 spin_lock_irq(&hsq->lock);
260 is_idle = (!hsq->mrq && !hsq->qcnt) ||
261 hsq->recovery_halt;
263 *ret = hsq->recovery_halt ? -EBUSY : 0;
264 hsq->waiting_for_idle = !is_idle;
266 spin_unlock_irq(&hsq->lock);
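Lines 254-266 give this predicate almost in full; only the is_idle declaration and the return are inferred. It doubles as a side-effecting helper: it records whether a waiter is parked and reports recovery as -EBUSY through *ret:

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
        bool is_idle;

        spin_lock_irq(&hsq->lock);

        /*
         * Idle when nothing is in flight or queued; recovery also
         * counts as idle so waiters can wake and observe the error.
         */
        is_idle = (!hsq->mrq && !hsq->qcnt) ||
                   hsq->recovery_halt;

        *ret = hsq->recovery_halt ? -EBUSY : 0;
        hsq->waiting_for_idle = !is_idle;

        spin_unlock_irq(&hsq->lock);

        return is_idle;
}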
273 struct mmc_hsq *hsq = mmc->cqe_private;
276 wait_event(hsq->wait_queue,
277 mmc_hsq_queue_is_idle(hsq, &ret));
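Lines 273-277 show the uninterruptible wait built on that predicate; the enclosing function (the cqe wait_for_idle callback, name assumed) reduces to:

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        int ret;

        /*
         * Sleep until the predicate reports idle; mmc_hsq_post_request()
         * wakes hsq->wait_queue once the queue drains.
         */
        wait_event(hsq->wait_queue,
                   mmc_hsq_queue_is_idle(hsq, &ret));

        return ret;
}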
284 struct mmc_hsq *hsq = mmc->cqe_private;
288 spin_lock_irq(&hsq->lock);
290 if (!hsq->enabled) {
291 spin_unlock_irq(&hsq->lock);
295 spin_unlock_irq(&hsq->lock);
297 ret = wait_event_timeout(hsq->wait_queue,
298 mmc_hsq_queue_is_idle(hsq, &ret),
305 spin_lock_irq(&hsq->lock);
307 hsq->enabled = false;
309 spin_unlock_irq(&hsq->lock);
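Lines 284-309 disable the queue: bail out if already disabled, wait with a timeout (lines 297-298) for in-flight work to drain, then clear the flag. The timeout length and the failure handling between lines 298 and 305 are assumptions:

static void mmc_hsq_disable(struct mmc_host *mmc)
{
        struct mmc_hsq *hsq = mmc->cqe_private;
        u32 timeout = 500;      /* ms; the actual value is assumed */
        int ret;

        spin_lock_irq(&hsq->lock);

        if (!hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return;
        }

        spin_unlock_irq(&hsq->lock);

        /* Drain in-flight and queued requests before disabling. */
        ret = wait_event_timeout(hsq->wait_queue,
                                 mmc_hsq_queue_is_idle(hsq, &ret),
                                 msecs_to_jiffies(timeout));
        if (ret == 0) {
                pr_warn("could not stop mmc software queue\n");
                return;
        }

        spin_lock_irq(&hsq->lock);

        hsq->enabled = false;

        spin_unlock_irq(&hsq->lock);
}

Note the double duty of ret, which lines 297-298 confirm: wait_event_timeout() overwrites whatever the predicate stored, so only the zero/non-zero timeout result survives.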
314 struct mmc_hsq *hsq = mmc->cqe_private;
316 spin_lock_irq(&hsq->lock);
318 if (hsq->enabled) {
319 spin_unlock_irq(&hsq->lock);
323 hsq->enabled = true;
325 spin_unlock_irq(&hsq->lock);
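Lines 314-325 are the mirror-image enable; the card parameter comes from the cqe_enable prototype, and the -EBUSY on double enable is an assumption:

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct mmc_hsq *hsq = mmc->cqe_private;

        spin_lock_irq(&hsq->lock);

        /* Already enabled: report busy rather than re-arming. */
        if (hsq->enabled) {
                spin_unlock_irq(&hsq->lock);
                return -EBUSY;
        }

        hsq->enabled = true;

        spin_unlock_irq(&hsq->lock);

        return 0;
}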
340 int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
342 hsq->num_slots = HSQ_NUM_SLOTS;
343 hsq->next_tag = HSQ_INVALID_TAG;
345 hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
347 if (!hsq->slot)
350 hsq->mmc = mmc;
351 hsq->mmc->cqe_private = hsq;
354 INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
355 spin_lock_init(&hsq->lock);
356 init_waitqueue_head(&hsq->wait_queue);
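Lines 340-356 cover nearly all of the init routine; the matches stop at the devm_kcalloc() continuation, so the element size, the final return, and the hookup of mmc->cqe_ops are inferred:

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
        hsq->num_slots = HSQ_NUM_SLOTS;
        hsq->next_tag = HSQ_INVALID_TAG;

        /* One slot per tag, freed automatically with the device. */
        hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
                                 sizeof(struct hsq_slot), GFP_KERNEL);
        if (!hsq->slot)
                return -ENOMEM;

        hsq->mmc = mmc;
        hsq->mmc->cqe_private = hsq;
        /* Pointing mmc->cqe_ops at the hsq operations table is assumed. */

        INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
        spin_lock_init(&hsq->lock);
        init_waitqueue_head(&hsq->wait_queue);

        return 0;
}

A host driver would allocate a struct mmc_hsq during probe and call mmc_hsq_init(hsq, mmc) before registering the host.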