// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/core/sdio_irq.c
 *
 * Author:      Nicolas Pitre
 * Created:     June 18, 2007
 * Copyright:   MontaVista Software Inc.
 *
 * Copyright 2008 Pierre Ossman
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/wait.h>
#include <linux/delay.h>

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

#include "sdio_ops.h"
#include "core.h"
#include "card.h"

static int process_sdio_pending_irqs(struct mmc_host *host)
{
        struct mmc_card *card = host->card;
        int i, ret, count;
        unsigned char pending;
        struct sdio_func *func;

        /*
         * Optimization: if there is only one function interrupt registered
         * and we know an IRQ was signaled, call the irq handler directly.
         * Otherwise do the full probe.
         */
        func = card->sdio_single_irq;
        if (func && host->sdio_irq_pending) {
                func->irq_handler(func);
                return 1;
        }

        ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);
        if (ret) {
                pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",
                         mmc_card_id(card), ret);
                return ret;
        }

        if (pending && mmc_card_broken_irq_polling(card) &&
            !(host->caps & MMC_CAP_SDIO_IRQ)) {
                unsigned char dummy;

                /*
                 * A fake interrupt can be created when we poll the
                 * SDIO_CCCR_INTx register with a Marvell SD8797 card.
                 * A dummy CMD52 read to function 0 register 0xff avoids this.
                 */
                mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy);
        }

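        /*
         * Bits 1..7 of the CCCR interrupt pending register correspond to
         * functions 1..7 (bit 0 is not a function interrupt), so walk the
         * set bits and dispatch to each function's registered handler.
         */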
        count = 0;
        for (i = 1; i <= 7; i++) {
                if (pending & (1 << i)) {
                        func = card->sdio_func[i - 1];
                        if (!func) {
                                pr_warn("%s: pending IRQ for non-existent function\n",
                                        mmc_card_id(card));
                                ret = -EINVAL;
                        } else if (func->irq_handler) {
                                func->irq_handler(func);
                                count++;
                        } else {
                                pr_warn("%s: pending IRQ with no handler\n",
                                        sdio_func_id(func));
                                ret = -EINVAL;
                        }
                }
        }

        if (count)
                return count;

        return ret;
}

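/*
 * Claim the host and process any pending SDIO function interrupts,
 * acknowledging them through the host's ->ack_sdio_irq() callback when
 * one is provided.  Typically used by host drivers that dispatch SDIO
 * IRQ handling themselves rather than relying on the polling thread.
 */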
void sdio_run_irqs(struct mmc_host *host)
{
        mmc_claim_host(host);
        if (host->sdio_irqs) {
                host->sdio_irq_pending = true;
                process_sdio_pending_irqs(host);
                if (host->ops->ack_sdio_irq)
                        host->ops->ack_sdio_irq(host);
        }
        mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);

void sdio_irq_work(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, sdio_irq_work.work);

        sdio_run_irqs(host);
}

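/*
 * Typically called from a host controller driver when the card signals
 * an SDIO interrupt; the actual processing is deferred to sdio_irq_work()
 * on the system workqueue.
 */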
void sdio_signal_irq(struct mmc_host *host)
{
        queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
}
EXPORT_SYMBOL_GPL(sdio_signal_irq);

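/*
 * Per-host IRQ worker thread, started when the first function IRQ is
 * claimed (see sdio_card_irq_get()).  On hosts with MMC_CAP_SDIO_IRQ it
 * sleeps until the controller signals a card interrupt; otherwise it
 * polls the card at an adaptive interval.
 */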
static int sdio_irq_thread(void *_host)
{
        struct mmc_host *host = _host;
        struct sched_param param = { .sched_priority = 1 };
        unsigned long period, idle_period;
        int ret;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /*
         * We want to allow SDIO cards to work even on non-SDIO-aware
         * hosts.  One thing a non-SDIO host cannot do is asynchronous
         * notification of pending SDIO card interrupts, hence we poll
         * for them in that case.
         */
        idle_period = msecs_to_jiffies(10);
        period = (host->caps & MMC_CAP_SDIO_IRQ) ?
                MAX_SCHEDULE_TIMEOUT : idle_period;

        pr_debug("%s: IRQ thread started (poll period = %lu jiffies)\n",
                 mmc_hostname(host), period);

        do {
                /*
                 * We claim the host here on the drivers' behalf for a
                 * couple of reasons:
                 *
                 * 1) it is already needed to retrieve the CCCR_INTx;
                 * 2) we want the driver(s) to clear the IRQ condition ASAP;
                 * 3) we need to control the abort condition locally.
                 *
                 * Just like traditional hard IRQ handlers, we expect SDIO
                 * IRQ handlers to be quick and to the point, so that the
                 * holding of the host lock does not cover too much work
                 * that doesn't require that lock to be held.
                 */
                ret = __mmc_claim_host(host, NULL,
                                       &host->sdio_irq_thread_abort);
                if (ret)
                        break;
                ret = process_sdio_pending_irqs(host);
                host->sdio_irq_pending = false;
                mmc_release_host(host);

                /*
                 * Give other threads a chance to run in the presence of
                 * errors.
                 */
                if (ret < 0) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule_timeout(HZ);
                        set_current_state(TASK_RUNNING);
                }

                /*
                 * Adaptive polling frequency based on the assumption
                 * that an interrupt will be closely followed by more.
                 * This has a substantial benefit for network devices.
                 */
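                /*
                 * For example, with HZ=1000 the idle period is 10 jiffies
                 * (10 ms): every iteration that services an interrupt
                 * halves the current period, while quiet iterations grow
                 * it by one jiffy until it is back at the idle period.
                 */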
                if (!(host->caps & MMC_CAP_SDIO_IRQ)) {
                        if (ret > 0)
                                period /= 2;
                        else {
                                period++;
                                if (period > idle_period)
                                        period = idle_period;
                        }
                }

                set_current_state(TASK_INTERRUPTIBLE);
                if (host->caps & MMC_CAP_SDIO_IRQ)
                        host->ops->enable_sdio_irq(host, 1);
                if (!kthread_should_stop())
                        schedule_timeout(period);
                set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        if (host->caps & MMC_CAP_SDIO_IRQ)
                host->ops->enable_sdio_irq(host, 0);

        pr_debug("%s: IRQ thread exiting with code %d\n",
                 mmc_hostname(host), ret);

        return ret;
}

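/*
 * sdio_card_irq_get() / sdio_card_irq_put() keep a per-host count of
 * claimed function IRQs.  The first claim starts the SDIO IRQ thread, or,
 * on MMC_CAP2_SDIO_IRQ_NOTHREAD hosts, simply enables the controller's
 * SDIO IRQ; the last release undoes it again.
 */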
static int sdio_card_irq_get(struct mmc_card *card)
{
        struct mmc_host *host = card->host;

        WARN_ON(!host->claimed);

        if (!host->sdio_irqs++) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
                        atomic_set(&host->sdio_irq_thread_abort, 0);
                        host->sdio_irq_thread =
                                kthread_run(sdio_irq_thread, host,
                                            "ksdioirqd/%s", mmc_hostname(host));
                        if (IS_ERR(host->sdio_irq_thread)) {
                                int err = PTR_ERR(host->sdio_irq_thread);
                                host->sdio_irqs--;
                                return err;
                        }
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
                        host->ops->enable_sdio_irq(host, 1);
                }
        }

        return 0;
}

static int sdio_card_irq_put(struct mmc_card *card)
{
        struct mmc_host *host = card->host;

        WARN_ON(!host->claimed);

        if (host->sdio_irqs < 1)
                return -EINVAL;

        if (!--host->sdio_irqs) {
                if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
                        atomic_set(&host->sdio_irq_thread_abort, 1);
                        kthread_stop(host->sdio_irq_thread);
                } else if (host->caps & MMC_CAP_SDIO_IRQ) {
                        host->ops->enable_sdio_irq(host, 0);
                }
        }

        return 0;
}

/*
 * If only one function has an IRQ handler registered, cache it in
 * sdio_single_irq so process_sdio_pending_irqs() can skip the CCCR probe.
 */
static void sdio_single_irq_set(struct mmc_card *card)
{
        struct sdio_func *func;
        int i;

        card->sdio_single_irq = NULL;
        if ((card->host->caps & MMC_CAP_SDIO_IRQ) &&
            card->host->sdio_irqs == 1)
                for (i = 0; i < card->sdio_funcs; i++) {
                        func = card->sdio_func[i];
                        if (func && func->irq_handler) {
                                card->sdio_single_irq = func;
                                break;
                        }
                }
}

/**
 * sdio_claim_irq - claim the IRQ for an SDIO function
 * @func: SDIO function
 * @handler: IRQ handler callback
 *
 * Claim and activate the IRQ for the given SDIO function. The provided
 * handler will be called when that IRQ is asserted.  The host is always
 * claimed already when the handler is called, so the handler should not
 * call sdio_claim_host() or sdio_release_host().
 */
int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)
{
        int ret;
        unsigned char reg;

        if (!func)
                return -EINVAL;

        pr_debug("SDIO: Enabling IRQ for %s...\n", sdio_func_id(func));

        if (func->irq_handler) {
                pr_debug("SDIO: IRQ for %s already in use.\n", sdio_func_id(func));
                return -EBUSY;
        }

        ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
        if (ret)
                return ret;

        reg |= 1 << func->num;

        reg |= 1; /* Master interrupt enable */

        ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
        if (ret)
                return ret;

        func->irq_handler = handler;
        ret = sdio_card_irq_get(func->card);
        if (ret)
                func->irq_handler = NULL;
        sdio_single_irq_set(func->card);

        return ret;
}
EXPORT_SYMBOL_GPL(sdio_claim_irq);
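
/*
 * Example (illustrative sketch only, not part of this file): a function
 * driver would typically claim its IRQ from its probe path while holding
 * the host.  The handler name, driver data and work item below are
 * hypothetical:
 *
 *	static void my_sdio_irq(struct sdio_func *func)
 *	{
 *		struct my_dev *dev = sdio_get_drvdata(func);
 *
 *		// The host is already claimed here, so keep it short:
 *		// just hand off further processing.
 *		schedule_work(&dev->rx_work);
 *	}
 *
 *	sdio_claim_host(func);
 *	ret = sdio_claim_irq(func, my_sdio_irq);
 *	sdio_release_host(func);
 */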

/**
 * sdio_release_irq - release the IRQ for an SDIO function
 * @func: SDIO function
 *
 * Disable and release the IRQ for the given SDIO function.
 */
int sdio_release_irq(struct sdio_func *func)
{
        int ret;
        unsigned char reg;

        if (!func)
                return -EINVAL;

        pr_debug("SDIO: Disabling IRQ for %s...\n", sdio_func_id(func));

        if (func->irq_handler) {
                func->irq_handler = NULL;
                sdio_card_irq_put(func->card);
                sdio_single_irq_set(func->card);
        }

        ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, &reg);
        if (ret)
                return ret;

        reg &= ~(1 << func->num);

        /* Disable the master interrupt along with the last function interrupt */
        if (!(reg & 0xFE))
                reg = 0;

        ret = mmc_io_rw_direct(func->card, 1, 0, SDIO_CCCR_IENx, reg, NULL);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL_GPL(sdio_release_irq);
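
/*
 * Example (illustrative sketch only): the counterpart in a function
 * driver's remove path, again with the host claimed around the call:
 *
 *	sdio_claim_host(func);
 *	sdio_release_irq(func);
 *	sdio_release_host(func);
 */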