// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

/*
 * Request tags from the block layer index directly into hsq->slot[], so
 * HSQ_NUM_SLOTS itself can serve as the "no request pending" marker.
 */
#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS

/*
 * Resubmit the current request in non-atomic context when request_atomic()
 * returned -EBUSY, since the card may need time to leave the busy state.
 */
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns -EBUSY, the card may be busy now, so we
	 * should switch to non-atomic context to try again for this unusual
	 * case, to avoid time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if yes, we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise we should iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}

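/*
 * Worked example (illustrative, not from the original source): suppose the
 * request with tag 10 just completed and at least one request remains. The
 * following slot, slot[11], is probed first; if slot[11].mrq is NULL, all
 * slots are scanned from 0 and the first occupied one (say slot[3]) becomes
 * hsq->next_tag. If no slot is occupied, next_tag is set to HSQ_INVALID_TAG.
 */
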
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request into the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the slot of the current completed request to make room for a
	 * new request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

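/*
 * Usage sketch (illustrative; my_host_request_done is a hypothetical host
 * driver callback, modelled on how a host driver is expected to hook its
 * request-done path):
 *
 *	static void my_host_request_done(struct mmc_host *mmc,
 *					 struct mmc_request *mrq)
 *	{
 *		// First let HSQ finalize the request if it owns it.
 *		if (mmc_hsq_finalize_request(mmc, mrq))
 *			return;
 *
 *		// Not a software-queue request; complete it normally.
 *		mmc_request_done(mmc, mrq);
 *	}
 */
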
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request's tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

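/*
 * Usage sketch (illustrative; my_probe and the surrounding variables are
 * hypothetical, modelled on how a host driver is expected to wire up HSQ
 * at probe time):
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct mmc_hsq *hsq;
 *		...
 *		hsq = devm_kzalloc(&pdev->dev, sizeof(*hsq), GFP_KERNEL);
 *		if (!hsq)
 *			return -ENOMEM;
 *
 *		// Installs mmc_hsq_ops as mmc->cqe_ops and allocates slots.
 *		ret = mmc_hsq_init(hsq, mmc);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */
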
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

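/*
 * Usage sketch (illustrative; my_runtime_suspend/my_runtime_resume are
 * hypothetical host driver PM callbacks): quiesce the software queue before
 * powering the controller down, and re-enable it once power is restored.
 *
 *	static int my_runtime_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		// Waits for the queue to drain, then marks it disabled.
 *		mmc_hsq_suspend(mmc);
 *		// ...power down the controller...
 *		return 0;
 *	}
 *
 *	static int my_runtime_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		// ...power up the controller...
 *		return mmc_hsq_resume(mmc);
 *	}
 */
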
MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");