// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

#define HSQ_NUM_SLOTS	64
#define HSQ_INVALID_TAG	HSQ_NUM_SLOTS
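
/*
 * Retry work: re-issue hsq->mrq from process context after the host's
 * ->request_atomic() callback returned -EBUSY.
 */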
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}
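
/*
 * Dispatch the request at hsq->next_tag to the host controller, unless a
 * request is already in flight, the queue is empty or hsq is disabled.
 */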
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns BUSY, the card may be busy now, so
	 * retry this unusual case from non-atomic context to avoid
	 * time-consuming operations in the atomic context.
	 *
	 * Note: other error cases only trigger a warning, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}
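
/*
 * Select the tag of the next request to dispatch, or HSQ_INVALID_TAG if the
 * software queue is empty.
 */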
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	struct hsq_slot *slot;
	int tag;

	/*
	 * If there are no remaining requests in the software queue, set an
	 * invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		return;
	}

	/*
	 * Increase the next tag and check if the corresponding request is
	 * available; if so, we have found a candidate request.
	 */
	if (++hsq->next_tag != HSQ_INVALID_TAG) {
		slot = &hsq->slot[hsq->next_tag];
		if (slot->mrq)
			return;
	}

	/* Otherwise iterate over all slots to find an available tag. */
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		slot = &hsq->slot[tag];
		if (slot->mrq)
			break;
	}

	if (tag == HSQ_NUM_SLOTS)
		tag = HSQ_INVALID_TAG;

	hsq->next_tag = tag;
}
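
/*
 * Post-completion bookkeeping: clear the in-flight request, pick the next
 * tag, wake up an idle waiter if the queue drained, and pump the next
 * request unless we are in recovery mode.
 */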
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request that needs to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the current completed slot request to make room for a new
	 * request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);
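
/* Halt the software queue while the core performs error recovery. */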
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}
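
/* Leave recovery mode and resume pumping any requests queued meanwhile. */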
static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump new requests if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}
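
/*
 * ->cqe_request() hook: place the request into its slot and try to dispatch
 * it immediately.
 */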
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag to the current request tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG)
		hsq->next_tag = tag;

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}
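
/* ->cqe_post_req() hook: let the host driver clean up the completed request. */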
static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}
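
/*
 * Report whether the queue is idle (nothing in flight and nothing queued) or
 * halted for recovery, and record whether someone is waiting for idle.
 */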
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}
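
/* ->cqe_wait_for_idle() hook: block until the software queue drains. */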
static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}
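
/* Wait for the queue to drain, bounded by a timeout, then mark hsq disabled. */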
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}
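
/* ->cqe_enable() hook: mark the software queue as enabled. */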
static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}
static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};
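
/*
 * Allocate the slot array and wire the software queue into the host
 * controller through the cqe_ops interface.
 */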
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);
void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);
int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);
MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");