// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/sdio_irq.c
 *
 * Author:	Nicolas Pitre
 * Created:	June 18, 2007
 * Copyright:	MontaVista Software Inc.
 *
 * Copyright 2008 Pierre Ossman
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_ops.h"
#include "core.h"
#include "card.h"

static int sdio_get_pending_irqs(struct mmc_host *host, u8 *pending)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, pending);
	if (ret) {
		pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
			 mmc_card_id(card), ret);
		return ret;
	}

	if (*pending && mmc_card_broken_irq_polling(card) &&
	    !(host->caps & MMC_CAP_SDIO_IRQ)) {
		unsigned char dummy;

		/* A fake interrupt could be created when we poll SDIO_CCCR_INTx
		 * register with a Marvell SD8797 card. A dummy CMD52 read to
		 * function 0 register 0xff can avoid this.
		 */
		mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
	}

	return 0;
}

static int process_sdio_pending_irqs(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int i, ret, count;
	bool sdio_irq_pending = host->sdio_irq_pending;
	unsigned char pending;
	struct sdio_func *func;

	/* Don't process SDIO IRQs if the card is suspended. */
	if (mmc_card_suspended(card))
		return 0;

	/* Clear the flag to indicate that we have processed the IRQ. */
	host->sdio_irq_pending = false;

	/*
	 * Optimization: if there is only 1 function interrupt registered
	 * and we know an IRQ was signaled, then call the irq handler
	 * directly.  Otherwise do the full probe.
	 */
	func = card->sdio_single_irq;
	if (func && sdio_irq_pending) {
		func->irq_handler(func);
		return 1;
	}

	ret = sdio_get_pending_irqs(host, &pending);
	if (ret)
		return ret;

	count = 0;
	for (i = 1; i <= 7; i++) {
		if (pending & (1 << i)) {
			func = card->sdio_func[i - 1];
			if (!func) {
				pr_warn("%s: pending IRQ for non-existent function\n",
					mmc_card_id(card));
				ret = -EINVAL;
			} else if (func->irq_handler) {
				func->irq_handler(func);
				count++;
			} else {
				pr_warn("%s: pending IRQ with no handler\n",
					sdio_func_id(func));
				ret = -EINVAL;
			}
		}
	}

	if (count)
		return count;

	return ret;
}

static void sdio_run_irqs(struct mmc_host *host)
{
	mmc_claim_host(host);
	if (host->sdio_irqs) {
		process_sdio_pending_irqs(host);
		if (!host->sdio_irq_pending)
			host->ops->ack_sdio_irq(host);
	}
	mmc_release_host(host);
}

void sdio_irq_work(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, sdio_irq_work.work);

	sdio_run_irqs(host);
}

void sdio_signal_irq(struct mmc_host *host)
{
	host->sdio_irq_pending = true;
	queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);
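
/*
 * Illustrative sketch, not part of the original file: how a host
 * controller driver that sets MMC_CAP2_SDIO_IRQ_NOTHREAD is assumed to
 * hook into sdio_signal_irq() and the .ack_sdio_irq callback.  The names
 * my_host_irq and my_ack_sdio_irq are hypothetical; a real driver also
 * needs <linux/interrupt.h> and its own register accessors.
 */
#if 0	/* example only, never compiled */
static irqreturn_t my_host_irq(int irq, void *dev_id)
{
	struct mmc_host *mmc = dev_id;

	/* ... mask the card interrupt in the controller here ... */

	/* Hand the event to the core; sdio_irq_work() will then run the
	 * registered SDIO function handlers from a workqueue. */
	sdio_signal_irq(mmc);

	return IRQ_HANDLED;
}

static void my_ack_sdio_irq(struct mmc_host *mmc)
{
	/* Installed as mmc->ops->ack_sdio_irq; called from sdio_run_irqs()
	 * once no IRQ is left pending, so re-enable (unmask) the card
	 * interrupt in the controller here. */
}
#endif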

static int sdio_irq_thread(void *_host)
{
	struct mmc_host *host = _host;
	unsigned long period, idle_period;
	int ret;

	sched_set_fifo_low(current);

	/*
	 * We want to allow SDIO cards to work even on non SDIO aware
	 * hosts.  One thing that a non SDIO aware host cannot do is
	 * asynchronous notification of pending SDIO card interrupts,
	 * hence we poll for them in that case.
	 */
	idle_period = msecs_to_jiffies(10);
	period = (host->caps & MMC_CAP_SDIO_IRQ) ?
		MAX_SCHEDULE_TIMEOUT : idle_period;

	pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
		 mmc_hostname(host), period);

	do {
		/*
		 * We claim the host here on the drivers' behalf for a
		 * couple of reasons:
		 *
		 * 1) it is already needed to retrieve the CCCR_INTx;
		 * 2) we want the driver(s) to clear the IRQ condition ASAP;
		 * 3) we need to control the abort condition locally.
		 *
		 * Just like traditional hard IRQ handlers, we expect SDIO
		 * IRQ handlers to be quick and to the point, so that the
		 * holding of the host lock does not cover too much work
		 * that doesn't require that lock to be held.
		 */
		ret = __mmc_claim_host(host, NULL,
				       &host->sdio_irq_thread_abort);
		if (ret)
			break;
		ret = process_sdio_pending_irqs(host);
		mmc_release_host(host);

		/*
		 * Give other threads a chance to run in the presence of
		 * errors.
		 */
		if (ret < 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule_timeout(HZ);
			set_current_state(TASK_RUNNING);
		}

		/*
		 * Adaptive polling frequency based on the assumption
		 * that an interrupt will be closely followed by more.
		 * This has a substantial benefit for network devices.
		 */
		if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
			if (ret > 0)
				period /= 2;
			else {
				period++;
				if (period > idle_period)
					period = idle_period;
			}
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (host->caps & MMC_CAP_SDIO_IRQ)
			host->ops->enable_sdio_irq(host, 1);
		if (!kthread_should_stop())
			schedule_timeout(period);
		set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	if (host->caps & MMC_CAP_SDIO_IRQ)
		host->ops->enable_sdio_irq(host, 0);

	pr_debug("%s: IRQ thread exiting with code %d\n",
		 mmc_hostname(host), ret);

	return ret;
}

static int sdio_card_irq_get(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (!host->sdio_irqs++) {
		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
			atomic_set(&host->sdio_irq_thread_abort, 0);
			host->sdio_irq_thread =
				kthread_run(sdio_irq_thread, host,
					    "ksdioirqd/%s", mmc_hostname(host));
			if (IS_ERR(host->sdio_irq_thread)) {
				int err = PTR_ERR(host->sdio_irq_thread);

				host->sdio_irqs--;
				return err;
			}
		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
			host->ops->enable_sdio_irq(host, 1);
		}
	}

	return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
	struct mmc_host *host = card->host;

	WARN_ON(!host->claimed);

	if (host->sdio_irqs < 1)
		return -EINVAL;

	if (!--host->sdio_irqs) {
		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
			atomic_set(&host->sdio_irq_thread_abort, 1);
			kthread_stop(host->sdio_irq_thread);
		} else if (host->caps & MMC_CAP_SDIO_IRQ) {
			host->ops->enable_sdio_irq(host, 0);
		}
	}

	return 0;
}

/* If there is only 1 function interrupt registered, set sdio_single_irq */
static void sdio_single_irq_set(struct mmc_card *card)
{
	struct sdio_func *func;
	int i;

	card->sdio_single_irq = NULL;
	if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
	    card->host->sdio_irqs == 1) {
		for (i = 0; i < card->sdio_funcs; i++) {
			func = card->sdio_func[i];
			if (func && func->irq_handler) {
				card->sdio_single_irq = func;
				break;
			}
		}
	}
}

/**
 *	sdio_claim_irq - claim the IRQ for an SDIO function
 *	@func: SDIO function
 *	@handler: IRQ handler callback
 *
 *	Claim and activate the IRQ for the given SDIO function. The provided
 *	handler will be called when that IRQ is asserted.  The host is always
 *	claimed already when the handler is called, so the handler should not
 *	call sdio_claim_host() or sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
	int ret;
	unsigned char reg;

	if (!func)
		return -EINVAL;

	pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		pr_debug("SDIO: IRQ for %s already in use.\n",
			 sdio_func_id(func));
		return -EBUSY;
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg |= 1 << func->num;

	reg |= 1; /* Master interrupt enable */

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	func->irq_handler = handler;
	ret = sdio_card_irq_get(func->card);
	if (ret)
		func->irq_handler = NULL;
	sdio_single_irq_set(func->card);

	return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);
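
/*
 * Illustrative sketch, not part of the original file: how an SDIO
 * function driver is assumed to use sdio_claim_irq().  The probe
 * callback, handler and MY_INT_STATUS register are hypothetical and
 * error handling is trimmed.
 */
#if 0	/* example only, never compiled */
#define MY_INT_STATUS	0x08	/* hypothetical function register */

static void my_func_irq(struct sdio_func *func)
{
	int err;
	u8 status;

	/* The host is already claimed here, so no sdio_claim_host(). */
	status = sdio_readb(func, MY_INT_STATUS, &err);
	if (!err && status)
		sdio_writeb(func, status, MY_INT_STATUS, NULL);	/* ack */
}

static int my_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
	int ret;

	/* sdio_claim_irq() must be called with the host claimed. */
	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (!ret)
		ret = sdio_claim_irq(func, my_func_irq);
	sdio_release_host(func);

	return ret;
}
#endif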

/**
 *	sdio_release_irq - release the IRQ for an SDIO function
 *	@func: SDIO function
 *
 *	Disable and release the IRQ for the given SDIO function.
 */
int sdio_release_irq(struct sdio_func *func)
{
	int ret;
	unsigned char reg;

	if (!func)
		return -EINVAL;

	pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));

	if (func->irq_handler) {
		func->irq_handler = NULL;
		sdio_card_irq_put(func->card);
		sdio_single_irq_set(func->card);
	}

	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
	if (ret)
		return ret;

	reg &= ~(1 << func->num);

	/* Disable master interrupt with the last function interrupt */
	if (!(reg & 0xFE))
		reg = 0;

	ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);
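
/*
 * Illustrative sketch, not part of the original file: the matching
 * teardown for the hypothetical probe example above.  sdio_release_irq()
 * must likewise be called with the host claimed.
 */
#if 0	/* example only, never compiled */
static void my_remove(struct sdio_func *func)
{
	sdio_claim_host(func);
	sdio_release_irq(func);
	sdio_disable_func(func);
	sdio_release_host(func);
}
#endif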