// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 */

#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
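
/*
 * Illustrative userspace usage (a minimal sketch, not part of this driver;
 * the device path, mode, clock and transfer contents below are only
 * examples):
 *
 *	int fd = open("/dev/spidev0.0", O_RDWR);
 *	uint8_t mode = SPI_MODE_0;
 *	ioctl(fd, SPI_IOC_WR_MODE, &mode);
 *
 *	uint8_t tx[4] = { 0x9f }, rx[4] = { 0 };
 *	struct spi_ioc_transfer xfer = {
 *		.tx_buf = (uintptr_t)tx,
 *		.rx_buf = (uintptr_t)rx,
 *		.len = sizeof(tx),
 *		.speed_hz = 1000000,
 *	};
 *	ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
 *
 * Plain read()/write() on the same fd perform half-duplex transfers using
 * the current device setup.
 */
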
#define SPIDEV_MAJOR			153	/* assigned */
#define N_SPI_MINORS			32	/* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);

static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);

/* Bit masks for spi_device.mode management.  Note that incorrect
 * values for some of these bits can cause *lots* of trouble for other
 * devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *	is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK		(SPI_MODE_X_MASK | SPI_CS_HIGH \
				| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
				| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
				| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
				| SPI_RX_QUAD | SPI_RX_OCTAL \
				| SPI_RX_CPHA_FLIP | SPI_3WIRE_HIZ \
				| SPI_MOSI_IDLE_LOW)

struct spidev_data {
	dev_t			devt;
	struct mutex		spi_lock;
	struct spi_device	*spi;
	struct list_head	device_entry;

	/* TX/RX buffers are NULL unless this device is open (users > 0) */
	struct mutex		buf_lock;
	unsigned		users;
	u8			*tx_buffer;
	u8			*rx_buffer;
	u32			speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
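
/*
 * Illustrative note: since bufsiz is a module parameter, the bounce-buffer
 * size can be raised at load time to allow larger single messages, e.g.
 * (assuming the driver is built as a module):
 *
 *	modprobe spidev bufsiz=65536
 */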

/*-------------------------------------------------------------------------*/

static ssize_t
spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
{
	ssize_t status;

	status = spi_sync(spi, message);
	if (status == 0)
		status = message->actual_length;

	return status;
}

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
	ssize_t status;
	struct spi_device *spi;

	mutex_lock(&spidev->spi_lock);
	spi = spidev->spi;

	if (spi == NULL)
		status = -ESHUTDOWN;
	else
		status = spidev_sync_unlocked(spi, message);

	mutex_unlock(&spidev->spi_lock);
	return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer	t = {
			.tx_buf		= spidev->tx_buffer,
			.len		= len,
			.speed_hz	= spidev->speed_hz,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
	struct spi_transfer	t = {
			.rx_buf		= spidev->rx_buffer,
			.len		= len,
			.speed_hz	= spidev->speed_hz,
		};
	struct spi_message	m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	ssize_t			status;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	status = spidev_sync_read(spidev, count);
	if (status > 0) {
		unsigned long	missing;

		missing = copy_to_user(buf, spidev->rx_buffer, status);
		if (missing == status)
			status = -EFAULT;
		else
			status = status - missing;
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	ssize_t			status;
	unsigned long		missing;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	missing = copy_from_user(spidev->tx_buffer, buf, count);
	if (missing == 0)
		status = spidev_sync_write(spidev, count);
	else
		status = -EFAULT;
	mutex_unlock(&spidev->buf_lock);

	return status;
}

static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message	msg;
	struct spi_transfer	*k_xfers;
	struct spi_transfer	*k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned		n, total, tx_total, rx_total;
	u8			*tx_buf, *rx_buf;
	int			status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		/* Ensure that subsequent allocations from rx_buf/tx_buf will
		 * also meet the DMA alignment requirements.
		 */
		unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_DMA_MINALIGN);

		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error.  Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += len_aligned;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			rx_buf += len_aligned;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += len_aligned;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			if (copy_from_user(tx_buf, (const u8 __user *)
						(uintptr_t) u_tmp->tx_buf,
					u_tmp->len))
				goto done;
			tx_buf += len_aligned;
		}

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay.value = u_tmp->delay_usecs;
		k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
		k_tmp->speed_hz = u_tmp->speed_hz;
		k_tmp->word_delay.value = u_tmp->word_delay_usecs;
		k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev,
			"  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
			k_tmp->len,
			k_tmp->rx_buf ? "rx " : "",
			k_tmp->tx_buf ? "tx " : "",
			k_tmp->cs_change ? "cs " : "",
			k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
			k_tmp->delay.value,
			k_tmp->word_delay.value,
			k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync_unlocked(spidev->spi, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (copy_to_user((u8 __user *)
					(uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}

static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
		unsigned *n_ioc)
{
	u32	tmp;

	/* Check type, command number and direction */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
			|| _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
			|| _IOC_DIR(cmd) != _IOC_WRITE)
		return ERR_PTR(-ENOTTY);

	tmp = _IOC_SIZE(cmd);
	if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
		return ERR_PTR(-EINVAL);
	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
	if (*n_ioc == 0)
		return NULL;

	/* copy into scratch area */
	return memdup_user(u_ioc, tmp);
}
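
/*
 * SPI_IOC_MESSAGE(N) encodes N * sizeof(struct spi_ioc_transfer) in the
 * ioctl size field, which is how the transfer count is recovered above.
 * A userspace sketch (illustrative only; the buffer names and lengths are
 * assumptions):
 *
 *	struct spi_ioc_transfer xfers[2] = {
 *		{ .tx_buf = (uintptr_t)cmd_buf,  .len = 1 },
 *		{ .rx_buf = (uintptr_t)resp_buf, .len = 4 },
 *	};
 *	ioctl(fd, SPI_IOC_MESSAGE(2), xfers);
 */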

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int			retval = 0;
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	struct spi_controller	*ctlr;
	u32			tmp;
	unsigned		n_ioc;
	struct spi_ioc_transfer	*ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
		return -ENOTTY;

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	mutex_lock(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	if (spi == NULL) {
		mutex_unlock(&spidev->spi_lock);
		return -ESHUTDOWN;
	}

	ctlr = spi->controller;

	/* use the buffer lock here for triple duty:
	 *  - prevent I/O (from us) so calling spi_setup() is safe;
	 *  - prevent concurrent SPI_IOC_WR_* from morphing
	 *    data fields while SPI_IOC_RD_* reads them;
	 *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
	 */
	mutex_lock(&spidev->buf_lock);

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
	case SPI_IOC_RD_MODE32:
		tmp = spi->mode & SPI_MODE_MASK;

		if (ctlr->use_gpio_descriptors && spi_get_csgpiod(spi, 0))
			tmp &= ~SPI_CS_HIGH;

		if (cmd == SPI_IOC_RD_MODE)
			retval = put_user(tmp, (__u8 __user *)arg);
		else
			retval = put_user(tmp, (__u32 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
		break;

	/* write requests */
	case SPI_IOC_WR_MODE:
	case SPI_IOC_WR_MODE32:
		if (cmd == SPI_IOC_WR_MODE)
			retval = get_user(tmp, (u8 __user *)arg);
		else
			retval = get_user(tmp, (u32 __user *)arg);
		if (retval == 0) {
			u32	save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			if (ctlr->use_gpio_descriptors && spi_get_csgpiod(spi, 0))
				tmp |= SPI_CS_HIGH;

			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = tmp & SPI_MODE_USER_MASK;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u32	save = spi->mode;

			if (tmp)
				spi->mode |= SPI_LSB_FIRST;
			else
				spi->mode &= ~SPI_LSB_FIRST;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;
			else
				dev_dbg(&spi->dev, "%csb first\n",
						tmp ? 'l' : 'm');
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->bits_per_word = save;
			else
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ: {
		u32 save;

		retval = get_user(tmp, (__u32 __user *)arg);
		if (retval)
			break;
		if (tmp == 0) {
			retval = -EINVAL;
			break;
		}

		save = spi->max_speed_hz;

		spi->max_speed_hz = tmp;
		retval = spi_setup(spi);
		if (retval == 0) {
			spidev->speed_hz = tmp;
			dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
		}

		spi->max_speed_hz = save;
		break;
	}
	default:
		/* segmented and/or full-duplex I/O request */
		/* Check message and copy into scratch area */
		ioc = spidev_get_ioc_message(cmd,
				(struct spi_ioc_transfer __user *)arg, &n_ioc);
		if (IS_ERR(ioc)) {
			retval = PTR_ERR(ioc);
			break;
		}
		if (!ioc)
			break;	/* n_ioc is also 0 */

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	mutex_unlock(&spidev->spi_lock);
	return retval;
}

#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct spi_ioc_transfer __user	*u_ioc;
	int				retval = 0;
	struct spidev_data		*spidev;
	struct spi_device		*spi;
	unsigned			n_ioc, n;
	struct spi_ioc_transfer		*ioc;

	u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	mutex_lock(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	if (spi == NULL) {
		mutex_unlock(&spidev->spi_lock);
		return -ESHUTDOWN;
	}

	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
	mutex_lock(&spidev->buf_lock);

	/* Check message and copy into scratch area */
	ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
	if (IS_ERR(ioc)) {
		retval = PTR_ERR(ioc);
		goto done;
	}
	if (!ioc)
		goto done;	/* n_ioc is also 0 */

	/* Convert buffer pointers */
	for (n = 0; n < n_ioc; n++) {
		ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
		ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
	}

	/* translate to spi_message, execute */
	retval = spidev_message(spidev, ioc, n_ioc);
	kfree(ioc);

done:
	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	mutex_unlock(&spidev->spi_lock);
	return retval;
}

static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
			&& _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
			&& _IOC_DIR(cmd) == _IOC_WRITE)
		return spidev_compat_ioc_message(filp, cmd, arg);

	return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev = NULL, *iter;
	int			status = -ENXIO;

	mutex_lock(&device_list_lock);

	list_for_each_entry(iter, &device_list, device_entry) {
		if (iter->devt == inode->i_rdev) {
			status = 0;
			spidev = iter;
			break;
		}
	}

	if (!spidev) {
		pr_debug("spidev: nothing for minor %d\n", iminor(inode));
		goto err_find_dev;
	}

	if (!spidev->tx_buffer) {
		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->tx_buffer) {
			status = -ENOMEM;
			goto err_find_dev;
		}
	}

	if (!spidev->rx_buffer) {
		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->rx_buffer) {
			status = -ENOMEM;
			goto err_alloc_rx_buf;
		}
	}

	spidev->users++;
	filp->private_data = spidev;
	stream_open(inode, filp);

	mutex_unlock(&device_list_lock);
	return 0;

err_alloc_rx_buf:
	kfree(spidev->tx_buffer);
	spidev->tx_buffer = NULL;
err_find_dev:
	mutex_unlock(&device_list_lock);
	return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;
	int			dofree;

	mutex_lock(&device_list_lock);
	spidev = filp->private_data;
	filp->private_data = NULL;

	mutex_lock(&spidev->spi_lock);
	/* ... after we unbound from the underlying device? */
	dofree = (spidev->spi == NULL);
	mutex_unlock(&spidev->spi_lock);

	/* last close? */
	spidev->users--;
	if (!spidev->users) {

		kfree(spidev->tx_buffer);
		spidev->tx_buffer = NULL;

		kfree(spidev->rx_buffer);
		spidev->rx_buffer = NULL;

		if (dofree)
			kfree(spidev);
		else
			spidev->speed_hz = spidev->spi->max_speed_hz;
	}
#ifdef CONFIG_SPI_SLAVE
	if (!dofree)
		spi_target_abort(spidev->spi);
#endif
	mutex_unlock(&device_list_lock);

	return 0;
}

static const struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static const struct class spidev_class = {
	.name = "spidev",
};

/*
 * The spi device ids are expected to match the device names of the
 * spidev_dt_ids array below.  Both arrays are kept in the same ordering.
 */
static const struct spi_device_id spidev_spi_ids[] = {
	{ .name = /* cisco */ "spi-petra" },
	{ .name = /* dh */ "dhcom-board" },
	{ .name = /* elgin */ "jg10309-01" },
	{ .name = /* gocontroll */ "moduline-module-slot" },
	{ .name = /* lineartechnology */ "ltc2488" },
	{ .name = /* lwn */ "bk4" },
	{ .name = /* lwn */ "bk4-spi" },
	{ .name = /* menlo */ "m53cpld" },
	{ .name = /* micron */ "spi-authenta" },
	{ .name = /* rohm */ "bh2228fv" },
	{ .name = /* rohm */ "dh2228fv" },
	{ .name = /* semtech */ "sx1301" },
	{ .name = /* silabs */ "em3581" },
	{ .name = /* silabs */ "si3210" },
	{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);

/*
 * spidev should never be referenced in DT without a specific compatible
 * string; it is a Linux implementation detail rather than a description
 * of the hardware.
 */
static int spidev_of_check(struct device *dev)
{
	if (device_property_match_string(dev, "compatible", "spidev") < 0)
		return 0;

	dev_err(dev, "spidev listed directly in DT is not supported\n");
	return -EINVAL;
}

static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "cisco,spi-petra", .data = &spidev_of_check },
	{ .compatible = "dh,dhcom-board", .data = &spidev_of_check },
	{ .compatible = "elgin,jg10309-01", .data = &spidev_of_check },
	{ .compatible = "gocontroll,moduline-module-slot", .data = &spidev_of_check },
	{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
	{ .compatible = "lwn,bk4", .data = &spidev_of_check },
	{ .compatible = "lwn,bk4-spi", .data = &spidev_of_check },
	{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
	{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
	{ .compatible = "rohm,bh2228fv", .data = &spidev_of_check },
	{ .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
	{ .compatible = "semtech,sx1301", .data = &spidev_of_check },
	{ .compatible = "silabs,em3581", .data = &spidev_of_check },
	{ .compatible = "silabs,si3210", .data = &spidev_of_check },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
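
/*
 * Illustrative devicetree fragment (an assumption about the user's board,
 * not part of this driver): bind spidev through one of the compatibles in
 * the table above rather than a bare "spidev" node, which spidev_of_check()
 * rejects.
 *
 *	&spi0 {
 *		user-io@0 {
 *			compatible = "lwn,bk4";
 *			reg = <0>;
 *			spi-max-frequency = <1000000>;
 *		};
 *	};
 */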

/* Dummy SPI devices not to be used in production systems */
static int spidev_acpi_check(struct device *dev)
{
	dev_warn(dev, "do not use this driver in production systems!\n");
	return 0;
}

static const struct acpi_device_id spidev_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing. Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also use
	 * a proper driver instead of poking directly to the SPI bus.
	 */
	{ "SPT0001", (kernel_ulong_t)&spidev_acpi_check },
	{ "SPT0002", (kernel_ulong_t)&spidev_acpi_check },
	{ "SPT0003", (kernel_ulong_t)&spidev_acpi_check },
	{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
	int (*match)(struct device *dev);
	struct spidev_data	*spidev;
	int			status;
	unsigned long		minor;

	match = device_get_match_data(&spi->dev);
	if (match) {
		status = match(&spi->dev);
		if (status)
			return status;
	}

	/* Allocate driver data */
	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
	if (!spidev)
		return -ENOMEM;

	/* Initialize the driver data */
	spidev->spi = spi;
	mutex_init(&spidev->spi_lock);
	mutex_init(&spidev->buf_lock);

	INIT_LIST_HEAD(&spidev->device_entry);

	/* If we can allocate a minor number, hook up this device.
	 * Reusing minors is fine so long as udev or mdev is working.
	 */
	mutex_lock(&device_list_lock);
	minor = find_first_zero_bit(minors, N_SPI_MINORS);
	if (minor < N_SPI_MINORS) {
		struct device *dev;

		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
		dev = device_create(&spidev_class, &spi->dev, spidev->devt,
				    spidev, "spidev%d.%d",
				    spi->controller->bus_num, spi_get_chipselect(spi, 0));
		status = PTR_ERR_OR_ZERO(dev);
	} else {
		dev_dbg(&spi->dev, "no minor number available!\n");
		status = -ENODEV;
	}
	if (status == 0) {
		set_bit(minor, minors);
		list_add(&spidev->device_entry, &device_list);
	}
	mutex_unlock(&device_list_lock);

	spidev->speed_hz = spi->max_speed_hz;

	if (status == 0)
		spi_set_drvdata(spi, spidev);
	else
		kfree(spidev);

	return status;
}

static void spidev_remove(struct spi_device *spi)
{
	struct spidev_data	*spidev = spi_get_drvdata(spi);

	/* prevent new opens */
	mutex_lock(&device_list_lock);
	/* make sure ops on existing fds can abort cleanly */
	mutex_lock(&spidev->spi_lock);
	spidev->spi = NULL;
	mutex_unlock(&spidev->spi_lock);

	list_del(&spidev->device_entry);
	device_destroy(&spidev_class, spidev->devt);
	clear_bit(MINOR(spidev->devt), minors);
	if (spidev->users == 0)
		kfree(spidev);
	mutex_unlock(&device_list_lock);
}

static struct spi_driver spidev_spi_driver = {
	.driver = {
		.name =		"spidev",
		.of_match_table = spidev_dt_ids,
		.acpi_match_table = spidev_acpi_ids,
	},
	.probe =	spidev_probe,
	.remove =	spidev_remove,
	.id_table =	spidev_spi_ids,

	/* NOTE:  suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller.  The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
	int status;

	/* Claim our 256 reserved device numbers.  Then register a class
	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
	 * the driver which manages those device numbers.
	 */
	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
	if (status < 0)
		return status;

	status = class_register(&spidev_class);
	if (status) {
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
		return status;
	}

	status = spi_register_driver(&spidev_spi_driver);
	if (status < 0) {
		class_unregister(&spidev_class);
		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
	}
	return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi_driver);
	class_unregister(&spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");