// SPDX-License-Identifier: GPL-2.0
/*
 * MMC software queue support based on command queue interfaces
 *
 * Copyright (C) 2019 Linaro, Inc.
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/module.h>

#include "mmc_hsq.h"

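/*
 * Worker used to retry the current request in non-atomic (process) context
 * when the host's ->request_atomic() callback reported the card as busy.
 */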
static void mmc_hsq_retry_handler(struct work_struct *work)
{
	struct mmc_hsq *hsq = container_of(work, struct mmc_hsq, retry_work);
	struct mmc_host *mmc = hsq->mmc;

	mmc->ops->request(mmc, hsq->mrq);
}

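/*
 * Pick the software queue depth for the pending workload: stay at
 * HSQ_NORMAL_DEPTH unless at least two queued 4KB write requests are found,
 * in which case switch to HSQ_PERFORMANCE_DEPTH (presumably to keep small
 * write bursts better fed).
 */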
static void mmc_hsq_modify_threshold(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct mmc_request *mrq;
	unsigned int tag, need_change = 0;

	mmc->hsq_depth = HSQ_NORMAL_DEPTH;
	for (tag = 0; tag < HSQ_NUM_SLOTS; tag++) {
		mrq = hsq->slot[tag].mrq;
		if (mrq && mrq->data &&
		    (mrq->data->blksz * mrq->data->blocks == 4096) &&
		    (mrq->data->flags & MMC_DATA_WRITE) &&
		    (++need_change == 2)) {
			mmc->hsq_depth = HSQ_PERFORMANCE_DEPTH;
			break;
		}
	}
}

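/*
 * Dispatch the request at the head of the software queue to the host
 * controller, unless a request is already in flight, the queue is empty or
 * disabled, or recovery is in progress.
 */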
static void mmc_hsq_pump_requests(struct mmc_hsq *hsq)
{
	struct mmc_host *mmc = hsq->mmc;
	struct hsq_slot *slot;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hsq->lock, flags);

	/* Make sure we are not already running a request now */
	if (hsq->mrq || hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	/* Make sure there are remaining requests that need to be pumped */
	if (!hsq->qcnt || !hsq->enabled) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	mmc_hsq_modify_threshold(hsq);

	slot = &hsq->slot[hsq->next_tag];
	hsq->mrq = slot->mrq;
	hsq->qcnt--;

	spin_unlock_irqrestore(&hsq->lock, flags);

	if (mmc->ops->request_atomic)
		ret = mmc->ops->request_atomic(mmc, hsq->mrq);
	else
		mmc->ops->request(mmc, hsq->mrq);

	/*
	 * If request_atomic() returns BUSY, the card may be busy now, so
	 * switch to a non-atomic context and try again for this unusual
	 * case, to avoid time-consuming operations in the atomic context.
	 *
	 * Note: we just give a warning for other error cases, since the host
	 * driver will handle them.
	 */
	if (ret == -EBUSY)
		schedule_work(&hsq->retry_work);
	else
		WARN_ON_ONCE(ret);
}

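/*
 * Advance hsq->next_tag to the tag chained behind it in hsq->tag_slot[], or
 * mark both next_tag and tail_tag invalid when no requests remain queued.
 */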
static void mmc_hsq_update_next_tag(struct mmc_hsq *hsq, int remains)
{
	int tag;

	/*
	 * If there are no remaining requests in the software queue, then set
	 * an invalid tag.
	 */
	if (!remains) {
		hsq->next_tag = HSQ_INVALID_TAG;
		hsq->tail_tag = HSQ_INVALID_TAG;
		return;
	}

	tag = hsq->tag_slot[hsq->next_tag];
	hsq->tag_slot[hsq->next_tag] = HSQ_INVALID_TAG;
	hsq->next_tag = tag;
}

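/*
 * Per-request completion bookkeeping: clear the in-flight request, pick the
 * next tag, wake anyone waiting for the queue to go idle and, unless halted
 * for recovery, pump the next queued request.
 */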
static void mmc_hsq_post_request(struct mmc_hsq *hsq)
{
	unsigned long flags;
	int remains;

	spin_lock_irqsave(&hsq->lock, flags);

	remains = hsq->qcnt;
	hsq->mrq = NULL;

	/* Update the next available tag to be queued. */
	mmc_hsq_update_next_tag(hsq, remains);

	if (hsq->waiting_for_idle && !remains) {
		hsq->waiting_for_idle = false;
		wake_up(&hsq->wait_queue);
	}

	/* Do not pump new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return;
	}

	spin_unlock_irqrestore(&hsq->lock, flags);

	/*
	 * Try to pump a new request to the host controller as fast as
	 * possible, after completing the previous request.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

/**
 * mmc_hsq_finalize_request - finalize one request if the request is done
 * @mmc: the host controller
 * @mrq: the request to be finalized
 *
 * Return true if we finalized the corresponding request in the software
 * queue, otherwise return false.
 */
bool mmc_hsq_finalize_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	if (!hsq->enabled || !hsq->mrq || hsq->mrq != mrq) {
		spin_unlock_irqrestore(&hsq->lock, flags);
		return false;
	}

	/*
	 * Clear the completed slot's request to make room for a new request.
	 */
	hsq->slot[hsq->next_tag].mrq = NULL;

	spin_unlock_irqrestore(&hsq->lock, flags);

	mmc_cqe_request_done(mmc, hsq->mrq);

	mmc_hsq_post_request(hsq);

	return true;
}
EXPORT_SYMBOL_GPL(mmc_hsq_finalize_request);

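/*
 * Recovery hooks: ->cqe_recovery_start() halts pumping of new requests while
 * the core recovers from an error; ->cqe_recovery_finish() lifts the halt and
 * restarts the queue if requests are still pending.
 */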
static void mmc_hsq_recovery_start(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	unsigned long flags;

	spin_lock_irqsave(&hsq->lock, flags);

	hsq->recovery_halt = true;

	spin_unlock_irqrestore(&hsq->lock, flags);
}

static void mmc_hsq_recovery_finish(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int remains;

	spin_lock_irq(&hsq->lock);

	hsq->recovery_halt = false;
	remains = hsq->qcnt;

	spin_unlock_irq(&hsq->lock);

	/*
	 * Try to pump a new request if there are requests pending in the
	 * software queue after finishing recovery.
	 */
	if (remains > 0)
		mmc_hsq_pump_requests(hsq);
}

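/*
 * ->cqe_request() hook: place the request into the slot indexed by mrq->tag,
 * link the tag into the dispatch chain and kick the pump.
 */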
static int mmc_hsq_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int tag = mrq->tag;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -ESHUTDOWN;
	}

	/* Do not queue any new requests in recovery mode. */
	if (hsq->recovery_halt) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->slot[tag].mrq = mrq;

	/*
	 * Set the next tag as the current request tag if there is no
	 * available next tag.
	 */
	if (hsq->next_tag == HSQ_INVALID_TAG) {
		hsq->next_tag = tag;
		hsq->tail_tag = tag;
		hsq->tag_slot[hsq->tail_tag] = HSQ_INVALID_TAG;
	} else {
		hsq->tag_slot[hsq->tail_tag] = tag;
		hsq->tail_tag = tag;
	}

	hsq->qcnt++;

	spin_unlock_irq(&hsq->lock);

	mmc_hsq_pump_requests(hsq);

	return 0;
}

static void mmc_hsq_post_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, mrq, 0);
}

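/*
 * Check, under the queue lock, whether the queue can be considered idle: no
 * request in flight and nothing queued, or the queue halted for recovery (in
 * which case -EBUSY is reported through @ret).
 */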
static bool mmc_hsq_queue_is_idle(struct mmc_hsq *hsq, int *ret)
{
	bool is_idle;

	spin_lock_irq(&hsq->lock);

	is_idle = (!hsq->mrq && !hsq->qcnt) ||
		hsq->recovery_halt;

	*ret = hsq->recovery_halt ? -EBUSY : 0;
	hsq->waiting_for_idle = !is_idle;

	spin_unlock_irq(&hsq->lock);

	return is_idle;
}

static int mmc_hsq_wait_for_idle(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	int ret;

	wait_event(hsq->wait_queue,
		   mmc_hsq_queue_is_idle(hsq, &ret));

	return ret;
}

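/*
 * ->cqe_disable() hook: wait (with a timeout) for the queue to drain before
 * marking it disabled; also used directly by mmc_hsq_suspend().
 */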
static void mmc_hsq_disable(struct mmc_host *mmc)
{
	struct mmc_hsq *hsq = mmc->cqe_private;
	u32 timeout = 500;
	int ret;

	spin_lock_irq(&hsq->lock);

	if (!hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return;
	}

	spin_unlock_irq(&hsq->lock);

	ret = wait_event_timeout(hsq->wait_queue,
				 mmc_hsq_queue_is_idle(hsq, &ret),
				 msecs_to_jiffies(timeout));
	if (ret == 0) {
		pr_warn("could not stop mmc software queue\n");
		return;
	}

	spin_lock_irq(&hsq->lock);

	hsq->enabled = false;

	spin_unlock_irq(&hsq->lock);
}

static int mmc_hsq_enable(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmc_hsq *hsq = mmc->cqe_private;

	spin_lock_irq(&hsq->lock);

	if (hsq->enabled) {
		spin_unlock_irq(&hsq->lock);
		return -EBUSY;
	}

	hsq->enabled = true;

	spin_unlock_irq(&hsq->lock);

	return 0;
}

static const struct mmc_cqe_ops mmc_hsq_ops = {
	.cqe_enable = mmc_hsq_enable,
	.cqe_disable = mmc_hsq_disable,
	.cqe_request = mmc_hsq_request,
	.cqe_post_req = mmc_hsq_post_req,
	.cqe_wait_for_idle = mmc_hsq_wait_for_idle,
	.cqe_recovery_start = mmc_hsq_recovery_start,
	.cqe_recovery_finish = mmc_hsq_recovery_finish,
};

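/*
 * Allocate the slot array and hook this software queue into the host as its
 * cqe implementation; intended to be called by host drivers that want HSQ
 * support.
 */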
int mmc_hsq_init(struct mmc_hsq *hsq, struct mmc_host *mmc)
{
	int i;

	hsq->num_slots = HSQ_NUM_SLOTS;
	hsq->next_tag = HSQ_INVALID_TAG;
	hsq->tail_tag = HSQ_INVALID_TAG;

	hsq->slot = devm_kcalloc(mmc_dev(mmc), hsq->num_slots,
				 sizeof(struct hsq_slot), GFP_KERNEL);
	if (!hsq->slot)
		return -ENOMEM;

	hsq->mmc = mmc;
	hsq->mmc->cqe_private = hsq;
	mmc->cqe_ops = &mmc_hsq_ops;
	mmc->hsq_depth = HSQ_NORMAL_DEPTH;

	for (i = 0; i < HSQ_NUM_SLOTS; i++)
		hsq->tag_slot[i] = HSQ_INVALID_TAG;

	INIT_WORK(&hsq->retry_work, mmc_hsq_retry_handler);
	spin_lock_init(&hsq->lock);
	init_waitqueue_head(&hsq->wait_queue);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_hsq_init);

void mmc_hsq_suspend(struct mmc_host *mmc)
{
	mmc_hsq_disable(mmc);
}
EXPORT_SYMBOL_GPL(mmc_hsq_suspend);

int mmc_hsq_resume(struct mmc_host *mmc)
{
	return mmc_hsq_enable(mmc, NULL);
}
EXPORT_SYMBOL_GPL(mmc_hsq_resume);

MODULE_DESCRIPTION("MMC Host Software Queue support");
MODULE_LICENSE("GPL v2");