]>
Commit | Line | Data |
---|---|---|
b445bfcb | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
787f4889 MB |
2 | // SPI init/core code |
3 | // | |
4 | // Copyright (C) 2005 David Brownell | |
5 | // Copyright (C) 2008 Secret Lab Technologies Ltd. | |
8ae12a0d | 6 | |
edf6a864 | 7 | #include <linux/acpi.h> |
8ae12a0d | 8 | #include <linux/cache.h> |
edf6a864 AS |
9 | #include <linux/clk/clk-conf.h> |
10 | #include <linux/delay.h> | |
11 | #include <linux/device.h> | |
99adef31 | 12 | #include <linux/dmaengine.h> |
edf6a864 AS |
13 | #include <linux/dma-mapping.h> |
14 | #include <linux/export.h> | |
15 | #include <linux/gpio/consumer.h> | |
16 | #include <linux/highmem.h> | |
17 | #include <linux/idr.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/ioport.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/kthread.h> | |
22 | #include <linux/mod_devicetable.h> | |
94040828 | 23 | #include <linux/mutex.h> |
2b7a32f7 | 24 | #include <linux/of_device.h> |
d57a4282 | 25 | #include <linux/of_irq.h> |
edf6a864 AS |
26 | #include <linux/percpu.h> |
27 | #include <linux/platform_data/x86/apple.h> | |
f48c767c | 28 | #include <linux/pm_domain.h> |
edf6a864 | 29 | #include <linux/pm_runtime.h> |
826cf175 | 30 | #include <linux/property.h> |
edf6a864 | 31 | #include <linux/ptp_clock_kernel.h> |
8bd75c77 | 32 | #include <linux/sched/rt.h> |
edf6a864 | 33 | #include <linux/slab.h> |
700a2819 | 34 | #include <linux/spi/offload/types.h> |
edf6a864 AS |
35 | #include <linux/spi/spi.h> |
36 | #include <linux/spi/spi-mem.h> | |
ae7e81c0 | 37 | #include <uapi/linux/sched/types.h> |
8ae12a0d | 38 | |
56ec1978 MB |
39 | #define CREATE_TRACE_POINTS |
40 | #include <trace/events/spi.h> | |
ca1438dc AB |
41 | EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start); |
42 | EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop); | |
9b61e302 | 43 | |
46336966 BB |
44 | #include "internals.h" |
45 | ||
91ce208d | 46 | static DEFINE_IDR(spi_controller_idr); |
56ec1978 | 47 | |
8ae12a0d DB |
48 | static void spidev_release(struct device *dev) |
49 | { | |
0ffa0285 | 50 | struct spi_device *spi = to_spi_device(dev); |
8ae12a0d | 51 | |
8caab75f | 52 | spi_controller_put(spi->controller); |
5039563e | 53 | kfree(spi->driver_override); |
6598b91b | 54 | free_percpu(spi->pcpu_statistics); |
07a389fe | 55 | kfree(spi); |
8ae12a0d DB |
56 | } |
57 | ||
58 | static ssize_t | |
59 | modalias_show(struct device *dev, struct device_attribute *a, char *buf) | |
60 | { | |
61 | const struct spi_device *spi = to_spi_device(dev); | |
8c4ff6d0 ZR |
62 | int len; |
63 | ||
64 | len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); | |
65 | if (len != -ENODEV) | |
66 | return len; | |
8ae12a0d | 67 | |
f2daa466 | 68 | return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias); |
8ae12a0d | 69 | } |
aa7da564 | 70 | static DEVICE_ATTR_RO(modalias); |
8ae12a0d | 71 | |
5039563e TP |
72 | static ssize_t driver_override_store(struct device *dev, |
73 | struct device_attribute *a, | |
74 | const char *buf, size_t count) | |
75 | { | |
76 | struct spi_device *spi = to_spi_device(dev); | |
19368f0f | 77 | int ret; |
5039563e | 78 | |
19368f0f KK |
79 | ret = driver_set_override(dev, &spi->driver_override, buf, count); |
80 | if (ret) | |
81 | return ret; | |
5039563e TP |
82 | |
83 | return count; | |
84 | } | |
85 | ||
86 | static ssize_t driver_override_show(struct device *dev, | |
87 | struct device_attribute *a, char *buf) | |
88 | { | |
89 | const struct spi_device *spi = to_spi_device(dev); | |
90 | ssize_t len; | |
91 | ||
92 | device_lock(dev); | |
f2daa466 | 93 | len = sysfs_emit(buf, "%s\n", spi->driver_override ? : ""); |
5039563e TP |
94 | device_unlock(dev); |
95 | return len; | |
96 | } | |
97 | static DEVICE_ATTR_RW(driver_override); | |
98 | ||
d501cc4c | 99 | static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev) |
6598b91b DJ |
100 | { |
101 | struct spi_statistics __percpu *pcpu_stats; | |
102 | ||
103 | if (dev) | |
104 | pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics); | |
105 | else | |
106 | pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL); | |
107 | ||
108 | if (pcpu_stats) { | |
109 | int cpu; | |
110 | ||
111 | for_each_possible_cpu(cpu) { | |
112 | struct spi_statistics *stat; | |
113 | ||
114 | stat = per_cpu_ptr(pcpu_stats, cpu); | |
115 | u64_stats_init(&stat->syncp); | |
116 | } | |
117 | } | |
118 | return pcpu_stats; | |
119 | } | |
120 | ||
fc12d4bb GU |
121 | static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat, |
122 | char *buf, size_t offset) | |
123 | { | |
124 | u64 val = 0; | |
125 | int i; | |
126 | ||
127 | for_each_possible_cpu(i) { | |
128 | const struct spi_statistics *pcpu_stats; | |
129 | u64_stats_t *field; | |
130 | unsigned int start; | |
131 | u64 inc; | |
132 | ||
133 | pcpu_stats = per_cpu_ptr(stat, i); | |
134 | field = (void *)pcpu_stats + offset; | |
135 | do { | |
136 | start = u64_stats_fetch_begin(&pcpu_stats->syncp); | |
137 | inc = u64_stats_read(field); | |
138 | } while (u64_stats_fetch_retry(&pcpu_stats->syncp, start)); | |
139 | val += inc; | |
140 | } | |
141 | return sysfs_emit(buf, "%llu\n", val); | |
142 | } | |
6598b91b | 143 | |
eca2ebc7 | 144 | #define SPI_STATISTICS_ATTRS(field, file) \ |
8caab75f GU |
145 | static ssize_t spi_controller_##field##_show(struct device *dev, \ |
146 | struct device_attribute *attr, \ | |
147 | char *buf) \ | |
eca2ebc7 | 148 | { \ |
8caab75f GU |
149 | struct spi_controller *ctlr = container_of(dev, \ |
150 | struct spi_controller, dev); \ | |
6598b91b | 151 | return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \ |
eca2ebc7 | 152 | } \ |
8caab75f | 153 | static struct device_attribute dev_attr_spi_controller_##field = { \ |
ad25c92e | 154 | .attr = { .name = file, .mode = 0444 }, \ |
8caab75f | 155 | .show = spi_controller_##field##_show, \ |
eca2ebc7 MS |
156 | }; \ |
157 | static ssize_t spi_device_##field##_show(struct device *dev, \ | |
158 | struct device_attribute *attr, \ | |
159 | char *buf) \ | |
160 | { \ | |
d1eba93b | 161 | struct spi_device *spi = to_spi_device(dev); \ |
6598b91b | 162 | return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \ |
eca2ebc7 MS |
163 | } \ |
164 | static struct device_attribute dev_attr_spi_device_##field = { \ | |
ad25c92e | 165 | .attr = { .name = file, .mode = 0444 }, \ |
eca2ebc7 MS |
166 | .show = spi_device_##field##_show, \ |
167 | } | |
168 | ||
6598b91b | 169 | #define SPI_STATISTICS_SHOW_NAME(name, file, field) \ |
d501cc4c | 170 | static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \ |
eca2ebc7 MS |
171 | char *buf) \ |
172 | { \ | |
fc12d4bb GU |
173 | return spi_emit_pcpu_stats(stat, buf, \ |
174 | offsetof(struct spi_statistics, field)); \ | |
eca2ebc7 MS |
175 | } \ |
176 | SPI_STATISTICS_ATTRS(name, file) | |
177 | ||
6598b91b | 178 | #define SPI_STATISTICS_SHOW(field) \ |
eca2ebc7 | 179 | SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \ |
6598b91b | 180 | field) |
eca2ebc7 | 181 | |
6598b91b DJ |
182 | SPI_STATISTICS_SHOW(messages); |
183 | SPI_STATISTICS_SHOW(transfers); | |
184 | SPI_STATISTICS_SHOW(errors); | |
185 | SPI_STATISTICS_SHOW(timedout); | |
eca2ebc7 | 186 | |
6598b91b DJ |
187 | SPI_STATISTICS_SHOW(spi_sync); |
188 | SPI_STATISTICS_SHOW(spi_sync_immediate); | |
189 | SPI_STATISTICS_SHOW(spi_async); | |
eca2ebc7 | 190 | |
6598b91b DJ |
191 | SPI_STATISTICS_SHOW(bytes); |
192 | SPI_STATISTICS_SHOW(bytes_rx); | |
193 | SPI_STATISTICS_SHOW(bytes_tx); | |
eca2ebc7 | 194 | |
6b7bc061 MS |
195 | #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \ |
196 | SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \ | |
197 | "transfer_bytes_histo_" number, \ | |
6598b91b | 198 | transfer_bytes_histo[index]) |
6b7bc061 MS |
199 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1"); |
200 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3"); | |
201 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7"); | |
202 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15"); | |
203 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31"); | |
204 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63"); | |
205 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127"); | |
206 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255"); | |
207 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511"); | |
208 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023"); | |
209 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047"); | |
210 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095"); | |
211 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191"); | |
212 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383"); | |
213 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767"); | |
214 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); | |
215 | SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); | |
216 | ||
6598b91b | 217 | SPI_STATISTICS_SHOW(transfers_split_maxsize); |
d9f12122 | 218 | |
aa7da564 GKH |
219 | static struct attribute *spi_dev_attrs[] = { |
220 | &dev_attr_modalias.attr, | |
5039563e | 221 | &dev_attr_driver_override.attr, |
aa7da564 | 222 | NULL, |
8ae12a0d | 223 | }; |
eca2ebc7 MS |
224 | |
225 | static const struct attribute_group spi_dev_group = { | |
226 | .attrs = spi_dev_attrs, | |
227 | }; | |
228 | ||
229 | static struct attribute *spi_device_statistics_attrs[] = { | |
230 | &dev_attr_spi_device_messages.attr, | |
231 | &dev_attr_spi_device_transfers.attr, | |
232 | &dev_attr_spi_device_errors.attr, | |
233 | &dev_attr_spi_device_timedout.attr, | |
234 | &dev_attr_spi_device_spi_sync.attr, | |
235 | &dev_attr_spi_device_spi_sync_immediate.attr, | |
236 | &dev_attr_spi_device_spi_async.attr, | |
237 | &dev_attr_spi_device_bytes.attr, | |
238 | &dev_attr_spi_device_bytes_rx.attr, | |
239 | &dev_attr_spi_device_bytes_tx.attr, | |
6b7bc061 MS |
240 | &dev_attr_spi_device_transfer_bytes_histo0.attr, |
241 | &dev_attr_spi_device_transfer_bytes_histo1.attr, | |
242 | &dev_attr_spi_device_transfer_bytes_histo2.attr, | |
243 | &dev_attr_spi_device_transfer_bytes_histo3.attr, | |
244 | &dev_attr_spi_device_transfer_bytes_histo4.attr, | |
245 | &dev_attr_spi_device_transfer_bytes_histo5.attr, | |
246 | &dev_attr_spi_device_transfer_bytes_histo6.attr, | |
247 | &dev_attr_spi_device_transfer_bytes_histo7.attr, | |
248 | &dev_attr_spi_device_transfer_bytes_histo8.attr, | |
249 | &dev_attr_spi_device_transfer_bytes_histo9.attr, | |
250 | &dev_attr_spi_device_transfer_bytes_histo10.attr, | |
251 | &dev_attr_spi_device_transfer_bytes_histo11.attr, | |
252 | &dev_attr_spi_device_transfer_bytes_histo12.attr, | |
253 | &dev_attr_spi_device_transfer_bytes_histo13.attr, | |
254 | &dev_attr_spi_device_transfer_bytes_histo14.attr, | |
255 | &dev_attr_spi_device_transfer_bytes_histo15.attr, | |
256 | &dev_attr_spi_device_transfer_bytes_histo16.attr, | |
d9f12122 | 257 | &dev_attr_spi_device_transfers_split_maxsize.attr, |
eca2ebc7 MS |
258 | NULL, |
259 | }; | |
260 | ||
261 | static const struct attribute_group spi_device_statistics_group = { | |
262 | .name = "statistics", | |
263 | .attrs = spi_device_statistics_attrs, | |
264 | }; | |
265 | ||
266 | static const struct attribute_group *spi_dev_groups[] = { | |
267 | &spi_dev_group, | |
268 | &spi_device_statistics_group, | |
269 | NULL, | |
270 | }; | |
271 | ||
8caab75f GU |
272 | static struct attribute *spi_controller_statistics_attrs[] = { |
273 | &dev_attr_spi_controller_messages.attr, | |
274 | &dev_attr_spi_controller_transfers.attr, | |
275 | &dev_attr_spi_controller_errors.attr, | |
276 | &dev_attr_spi_controller_timedout.attr, | |
277 | &dev_attr_spi_controller_spi_sync.attr, | |
278 | &dev_attr_spi_controller_spi_sync_immediate.attr, | |
279 | &dev_attr_spi_controller_spi_async.attr, | |
280 | &dev_attr_spi_controller_bytes.attr, | |
281 | &dev_attr_spi_controller_bytes_rx.attr, | |
282 | &dev_attr_spi_controller_bytes_tx.attr, | |
283 | &dev_attr_spi_controller_transfer_bytes_histo0.attr, | |
284 | &dev_attr_spi_controller_transfer_bytes_histo1.attr, | |
285 | &dev_attr_spi_controller_transfer_bytes_histo2.attr, | |
286 | &dev_attr_spi_controller_transfer_bytes_histo3.attr, | |
287 | &dev_attr_spi_controller_transfer_bytes_histo4.attr, | |
288 | &dev_attr_spi_controller_transfer_bytes_histo5.attr, | |
289 | &dev_attr_spi_controller_transfer_bytes_histo6.attr, | |
290 | &dev_attr_spi_controller_transfer_bytes_histo7.attr, | |
291 | &dev_attr_spi_controller_transfer_bytes_histo8.attr, | |
292 | &dev_attr_spi_controller_transfer_bytes_histo9.attr, | |
293 | &dev_attr_spi_controller_transfer_bytes_histo10.attr, | |
294 | &dev_attr_spi_controller_transfer_bytes_histo11.attr, | |
295 | &dev_attr_spi_controller_transfer_bytes_histo12.attr, | |
296 | &dev_attr_spi_controller_transfer_bytes_histo13.attr, | |
297 | &dev_attr_spi_controller_transfer_bytes_histo14.attr, | |
298 | &dev_attr_spi_controller_transfer_bytes_histo15.attr, | |
299 | &dev_attr_spi_controller_transfer_bytes_histo16.attr, | |
300 | &dev_attr_spi_controller_transfers_split_maxsize.attr, | |
eca2ebc7 MS |
301 | NULL, |
302 | }; | |
303 | ||
8caab75f | 304 | static const struct attribute_group spi_controller_statistics_group = { |
eca2ebc7 | 305 | .name = "statistics", |
8caab75f | 306 | .attrs = spi_controller_statistics_attrs, |
eca2ebc7 MS |
307 | }; |
308 | ||
91ce208d | 309 | static const struct attribute_group *spi_controller_groups[] = { |
8caab75f | 310 | &spi_controller_statistics_group, |
eca2ebc7 MS |
311 | NULL, |
312 | }; | |
313 | ||
d501cc4c | 314 | static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats, |
da21fde0 | 315 | struct spi_transfer *xfer, |
52267fe8 | 316 | struct spi_message *msg) |
eca2ebc7 | 317 | { |
6b7bc061 | 318 | int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1; |
67b9d641 | 319 | struct spi_statistics *stats; |
6b7bc061 MS |
320 | |
321 | if (l2len < 0) | |
322 | l2len = 0; | |
eca2ebc7 | 323 | |
67b9d641 DJ |
324 | get_cpu(); |
325 | stats = this_cpu_ptr(pcpu_stats); | |
6598b91b | 326 | u64_stats_update_begin(&stats->syncp); |
eca2ebc7 | 327 | |
6598b91b DJ |
328 | u64_stats_inc(&stats->transfers); |
329 | u64_stats_inc(&stats->transfer_bytes_histo[l2len]); | |
eca2ebc7 | 330 | |
6598b91b | 331 | u64_stats_add(&stats->bytes, xfer->len); |
52267fe8 | 332 | if (spi_valid_txbuf(msg, xfer)) |
6598b91b | 333 | u64_stats_add(&stats->bytes_tx, xfer->len); |
52267fe8 | 334 | if (spi_valid_rxbuf(msg, xfer)) |
6598b91b | 335 | u64_stats_add(&stats->bytes_rx, xfer->len); |
eca2ebc7 | 336 | |
6598b91b | 337 | u64_stats_update_end(&stats->syncp); |
67b9d641 | 338 | put_cpu(); |
eca2ebc7 | 339 | } |
8ae12a0d | 340 | |
350de7ce AS |
341 | /* |
342 | * modalias support makes "modprobe $MODALIAS" new-style hotplug work, | |
8ae12a0d DB |
343 | * and the sysfs version makes coldplug work too. |
344 | */ | |
3f076575 | 345 | static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name) |
75368bf6 AV |
346 | { |
347 | while (id->name[0]) { | |
3f076575 | 348 | if (!strcmp(name, id->name)) |
75368bf6 AV |
349 | return id; |
350 | id++; | |
351 | } | |
352 | return NULL; | |
353 | } | |
354 | ||
355 | const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) | |
356 | { | |
357 | const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver); | |
358 | ||
3f076575 | 359 | return spi_match_id(sdrv->id_table, sdev->modalias); |
75368bf6 AV |
360 | } |
361 | EXPORT_SYMBOL_GPL(spi_get_device_id); | |
362 | ||
aea672d0 AS |
363 | const void *spi_get_device_match_data(const struct spi_device *sdev) |
364 | { | |
365 | const void *match; | |
366 | ||
367 | match = device_get_match_data(&sdev->dev); | |
368 | if (match) | |
369 | return match; | |
370 | ||
371 | return (const void *)spi_get_device_id(sdev)->driver_data; | |
372 | } | |
373 | EXPORT_SYMBOL_GPL(spi_get_device_match_data); | |
374 | ||
d69d8048 | 375 | static int spi_match_device(struct device *dev, const struct device_driver *drv) |
8ae12a0d DB |
376 | { |
377 | const struct spi_device *spi = to_spi_device(dev); | |
75368bf6 AV |
378 | const struct spi_driver *sdrv = to_spi_driver(drv); |
379 | ||
5039563e TP |
380 | /* Check override first, and if set, only use the named driver */ |
381 | if (spi->driver_override) | |
382 | return strcmp(spi->driver_override, drv->name) == 0; | |
383 | ||
2b7a32f7 SA |
384 | /* Attempt an OF style match */ |
385 | if (of_driver_match_device(dev, drv)) | |
386 | return 1; | |
387 | ||
64bee4d2 MW |
388 | /* Then try ACPI */ |
389 | if (acpi_driver_match_device(dev, drv)) | |
390 | return 1; | |
391 | ||
75368bf6 | 392 | if (sdrv->id_table) |
3f076575 | 393 | return !!spi_match_id(sdrv->id_table, spi->modalias); |
8ae12a0d | 394 | |
35f74fca | 395 | return strcmp(spi->modalias, drv->name) == 0; |
8ae12a0d DB |
396 | } |
397 | ||
2a81ada3 | 398 | static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env) |
8ae12a0d DB |
399 | { |
400 | const struct spi_device *spi = to_spi_device(dev); | |
8c4ff6d0 ZR |
401 | int rc; |
402 | ||
403 | rc = acpi_device_uevent_modalias(dev, env); | |
404 | if (rc != -ENODEV) | |
405 | return rc; | |
8ae12a0d | 406 | |
2856670f | 407 | return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias); |
8ae12a0d DB |
408 | } |
409 | ||
9db34ee6 | 410 | static int spi_probe(struct device *dev) |
b885244e DB |
411 | { |
412 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); | |
44af7927 | 413 | struct spi_device *spi = to_spi_device(dev); |
b6ffe0e6 | 414 | struct fwnode_handle *fwnode = dev_fwnode(dev); |
33cf00e5 MW |
415 | int ret; |
416 | ||
86be408b SN |
417 | ret = of_clk_set_defaults(dev->of_node, false); |
418 | if (ret) | |
419 | return ret; | |
420 | ||
0020c9d2 | 421 | if (is_of_node(fwnode)) |
44af7927 | 422 | spi->irq = of_irq_get(dev->of_node, 0); |
0020c9d2 | 423 | else if (is_acpi_device_node(fwnode) && spi->irq < 0) |
b6ffe0e6 | 424 | spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0); |
0020c9d2 AS |
425 | if (spi->irq == -EPROBE_DEFER) |
426 | return dev_err_probe(dev, spi->irq, "Failed to get irq\n"); | |
427 | if (spi->irq < 0) | |
428 | spi->irq = 0; | |
d24cfee7 | 429 | |
676e7c25 | 430 | ret = dev_pm_domain_attach(dev, true); |
71f277a7 UH |
431 | if (ret) |
432 | return ret; | |
433 | ||
440408db UKK |
434 | if (sdrv->probe) { |
435 | ret = sdrv->probe(spi); | |
436 | if (ret) | |
437 | dev_pm_domain_detach(dev, true); | |
438 | } | |
b885244e | 439 | |
33cf00e5 | 440 | return ret; |
b885244e DB |
441 | } |
442 | ||
fc7a6209 | 443 | static void spi_remove(struct device *dev) |
b885244e DB |
444 | { |
445 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); | |
33cf00e5 | 446 | |
a0386bba UKK |
447 | if (sdrv->remove) |
448 | sdrv->remove(to_spi_device(dev)); | |
7795d475 | 449 | |
676e7c25 | 450 | dev_pm_domain_detach(dev, true); |
b885244e DB |
451 | } |
452 | ||
9db34ee6 | 453 | static void spi_shutdown(struct device *dev) |
b885244e | 454 | { |
a6f483b2 MS |
455 | if (dev->driver) { |
456 | const struct spi_driver *sdrv = to_spi_driver(dev->driver); | |
b885244e | 457 | |
a6f483b2 MS |
458 | if (sdrv->shutdown) |
459 | sdrv->shutdown(to_spi_device(dev)); | |
460 | } | |
b885244e DB |
461 | } |
462 | ||
6df534cc | 463 | const struct bus_type spi_bus_type = { |
9db34ee6 UKK |
464 | .name = "spi", |
465 | .dev_groups = spi_dev_groups, | |
466 | .match = spi_match_device, | |
467 | .uevent = spi_uevent, | |
468 | .probe = spi_probe, | |
469 | .remove = spi_remove, | |
470 | .shutdown = spi_shutdown, | |
471 | }; | |
472 | EXPORT_SYMBOL_GPL(spi_bus_type); | |
473 | ||
33e34dc6 | 474 | /** |
ca5d2485 | 475 | * __spi_register_driver - register a SPI driver |
88c9321d | 476 | * @owner: owner module of the driver to register |
33e34dc6 DB |
477 | * @sdrv: the driver to register |
478 | * Context: can sleep | |
97d56dc6 JMC |
479 | * |
480 | * Return: zero on success, else a negative error code. | |
33e34dc6 | 481 | */ |
ca5d2485 | 482 | int __spi_register_driver(struct module *owner, struct spi_driver *sdrv) |
b885244e | 483 | { |
ca5d2485 | 484 | sdrv->driver.owner = owner; |
b885244e | 485 | sdrv->driver.bus = &spi_bus_type; |
5fa6863b MB |
486 | |
487 | /* | |
488 | * For Really Good Reasons we use spi: modaliases not of: | |
489 | * modaliases for DT so module autoloading won't work if we | |
490 | * don't have a spi_device_id as well as a compatible string. | |
491 | */ | |
492 | if (sdrv->driver.of_match_table) { | |
493 | const struct of_device_id *of_id; | |
494 | ||
495 | for (of_id = sdrv->driver.of_match_table; of_id->compatible[0]; | |
496 | of_id++) { | |
497 | const char *of_name; | |
498 | ||
499 | /* Strip off any vendor prefix */ | |
500 | of_name = strnchr(of_id->compatible, | |
501 | sizeof(of_id->compatible), ','); | |
502 | if (of_name) | |
503 | of_name++; | |
504 | else | |
505 | of_name = of_id->compatible; | |
506 | ||
507 | if (sdrv->id_table) { | |
508 | const struct spi_device_id *spi_id; | |
509 | ||
3f076575 | 510 | spi_id = spi_match_id(sdrv->id_table, of_name); |
b79332ef | 511 | if (spi_id) |
5fa6863b MB |
512 | continue; |
513 | } else { | |
514 | if (strcmp(sdrv->driver.name, of_name) == 0) | |
515 | continue; | |
516 | } | |
517 | ||
518 | pr_warn("SPI driver %s has no spi_device_id for %s\n", | |
519 | sdrv->driver.name, of_id->compatible); | |
520 | } | |
521 | } | |
522 | ||
b885244e DB |
523 | return driver_register(&sdrv->driver); |
524 | } | |
ca5d2485 | 525 | EXPORT_SYMBOL_GPL(__spi_register_driver); |
b885244e | 526 | |
8ae12a0d DB |
527 | /*-------------------------------------------------------------------------*/ |
528 | ||
350de7ce AS |
529 | /* |
530 | * SPI devices should normally not be created by SPI device drivers; that | |
8caab75f | 531 | * would make them board-specific. Similarly with SPI controller drivers. |
8ae12a0d DB |
532 | * Device registration normally goes into like arch/.../mach.../board-YYY.c |
533 | * with other readonly (flashable) information about mainboard devices. | |
534 | */ | |
535 | ||
536 | struct boardinfo { | |
537 | struct list_head list; | |
2b9603a0 | 538 | struct spi_board_info board_info; |
8ae12a0d DB |
539 | }; |
540 | ||
541 | static LIST_HEAD(board_list); | |
8caab75f | 542 | static LIST_HEAD(spi_controller_list); |
2b9603a0 FT |
543 | |
544 | /* | |
be73e323 | 545 | * Used to protect add/del operation for board_info list and |
350de7ce AS |
546 | * spi_controller list, and their matching process also used |
547 | * to protect object of type struct idr. | |
2b9603a0 | 548 | */ |
94040828 | 549 | static DEFINE_MUTEX(board_lock); |
8ae12a0d | 550 | |
dc87c98e GL |
551 | /** |
552 | * spi_alloc_device - Allocate a new SPI device | |
8caab75f | 553 | * @ctlr: Controller to which device is connected |
dc87c98e GL |
554 | * Context: can sleep |
555 | * | |
556 | * Allows a driver to allocate and initialize a spi_device without | |
557 | * registering it immediately. This allows a driver to directly | |
558 | * fill the spi_device with device parameters before calling | |
559 | * spi_add_device() on it. | |
560 | * | |
561 | * Caller is responsible to call spi_add_device() on the returned | |
8caab75f | 562 | * spi_device structure to add it to the SPI controller. If the caller |
dc87c98e GL |
563 | * needs to discard the spi_device without adding it, then it should |
564 | * call spi_dev_put() on it. | |
565 | * | |
97d56dc6 | 566 | * Return: a pointer to the new device, or NULL. |
dc87c98e | 567 | */ |
e3dc1399 | 568 | struct spi_device *spi_alloc_device(struct spi_controller *ctlr) |
dc87c98e GL |
569 | { |
570 | struct spi_device *spi; | |
dc87c98e | 571 | |
8caab75f | 572 | if (!spi_controller_get(ctlr)) |
dc87c98e GL |
573 | return NULL; |
574 | ||
5fe5f05e | 575 | spi = kzalloc(sizeof(*spi), GFP_KERNEL); |
dc87c98e | 576 | if (!spi) { |
8caab75f | 577 | spi_controller_put(ctlr); |
dc87c98e GL |
578 | return NULL; |
579 | } | |
580 | ||
6598b91b DJ |
581 | spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL); |
582 | if (!spi->pcpu_statistics) { | |
583 | kfree(spi); | |
584 | spi_controller_put(ctlr); | |
585 | return NULL; | |
586 | } | |
587 | ||
620d269f | 588 | spi->controller = ctlr; |
8caab75f | 589 | spi->dev.parent = &ctlr->dev; |
dc87c98e GL |
590 | spi->dev.bus = &spi_bus_type; |
591 | spi->dev.release = spidev_release; | |
ea235786 | 592 | spi->mode = ctlr->buswidth_override_bits; |
eca2ebc7 | 593 | |
dc87c98e GL |
594 | device_initialize(&spi->dev); |
595 | return spi; | |
596 | } | |
e3dc1399 | 597 | EXPORT_SYMBOL_GPL(spi_alloc_device); |
dc87c98e | 598 | |
e13ac47b JN |
599 | static void spi_dev_set_name(struct spi_device *spi) |
600 | { | |
8a101146 CK |
601 | struct device *dev = &spi->dev; |
602 | struct fwnode_handle *fwnode = dev_fwnode(dev); | |
e13ac47b | 603 | |
8a101146 CK |
604 | if (is_acpi_device_node(fwnode)) { |
605 | dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode))); | |
e13ac47b JN |
606 | return; |
607 | } | |
608 | ||
ed892118 CK |
609 | if (is_software_node(fwnode)) { |
610 | dev_set_name(dev, "spi-%pfwP", fwnode); | |
e13ac47b JN |
611 | return; |
612 | } | |
613 | ||
8caab75f | 614 | dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev), |
303feb3c | 615 | spi_get_chipselect(spi, 0)); |
e13ac47b JN |
616 | } |
617 | ||
be84be4a AS |
618 | /* |
619 | * Zero(0) is a valid physical CS value and can be located at any | |
620 | * logical CS in the spi->chip_select[]. If all the physical CS | |
621 | * are initialized to 0 then It would be difficult to differentiate | |
622 | * between a valid physical CS 0 & an unused logical CS whose physical | |
623 | * CS can be 0. As a solution to this issue initialize all the CS to -1. | |
624 | * Now all the unused logical CS will have -1 physical CS value & can be | |
625 | * ignored while performing physical CS validity checks. | |
626 | */ | |
627 | #define SPI_INVALID_CS ((s8)-1) | |
628 | ||
629 | static inline bool is_valid_cs(s8 chip_select) | |
630 | { | |
631 | return chip_select != SPI_INVALID_CS; | |
632 | } | |
633 | ||
9086d0f2 AS |
634 | static inline int spi_dev_check_cs(struct device *dev, |
635 | struct spi_device *spi, u8 idx, | |
636 | struct spi_device *new_spi, u8 new_idx) | |
637 | { | |
638 | u8 cs, cs_new; | |
639 | u8 idx_new; | |
640 | ||
641 | cs = spi_get_chipselect(spi, idx); | |
642 | for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) { | |
643 | cs_new = spi_get_chipselect(new_spi, idx_new); | |
be84be4a | 644 | if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) { |
9086d0f2 AS |
645 | dev_err(dev, "chipselect %u already in use\n", cs_new); |
646 | return -EBUSY; | |
647 | } | |
648 | } | |
649 | return 0; | |
650 | } | |
651 | ||
b6fb8d3a MW |
652 | static int spi_dev_check(struct device *dev, void *data) |
653 | { | |
654 | struct spi_device *spi = to_spi_device(dev); | |
655 | struct spi_device *new_spi = data; | |
9086d0f2 | 656 | int status, idx; |
4d8ff6b0 AKM |
657 | |
658 | if (spi->controller == new_spi->controller) { | |
659 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { | |
9086d0f2 AS |
660 | status = spi_dev_check_cs(dev, spi, idx, new_spi, 0); |
661 | if (status) | |
662 | return status; | |
4d8ff6b0 AKM |
663 | } |
664 | } | |
b6fb8d3a MW |
665 | return 0; |
666 | } | |
667 | ||
c7299fea SK |
668 | static void spi_cleanup(struct spi_device *spi) |
669 | { | |
670 | if (spi->controller->cleanup) | |
671 | spi->controller->cleanup(spi); | |
672 | } | |
673 | ||
0c79378c | 674 | static int __spi_add_device(struct spi_device *spi) |
dc87c98e | 675 | { |
8caab75f GU |
676 | struct spi_controller *ctlr = spi->controller; |
677 | struct device *dev = ctlr->dev.parent; | |
9086d0f2 AS |
678 | int status, idx; |
679 | u8 cs; | |
4d8ff6b0 AKM |
680 | |
681 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { | |
682 | /* Chipselects are numbered 0..max; validate. */ | |
683 | cs = spi_get_chipselect(spi, idx); | |
be84be4a | 684 | if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) { |
4d8ff6b0 AKM |
685 | dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx), |
686 | ctlr->num_chipselect); | |
687 | return -EINVAL; | |
688 | } | |
689 | } | |
dc87c98e | 690 | |
4d8ff6b0 AKM |
691 | /* |
692 | * Make sure that multiple logical CS doesn't map to the same physical CS. | |
693 | * For example, spi->chip_select[0] != spi->chip_select[1] and so on. | |
694 | */ | |
2c1b7bbe AKM |
695 | if (!spi_controller_is_target(ctlr)) { |
696 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { | |
697 | status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1); | |
698 | if (status) | |
699 | return status; | |
700 | } | |
36124dea AS |
701 | } |
702 | ||
703 | /* Set the bus ID string */ | |
704 | spi_dev_set_name(spi); | |
705 | ||
6bfb15f3 UKK |
706 | /* |
707 | * We need to make sure there's no other device with this | |
708 | * chipselect **BEFORE** we call setup(), else we'll trash | |
709 | * its configuration. | |
710 | */ | |
b6fb8d3a | 711 | status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); |
4d8ff6b0 | 712 | if (status) |
0c79378c | 713 | return status; |
e48880e0 | 714 | |
ddf75be4 LW |
715 | /* Controller may unregister concurrently */ |
716 | if (IS_ENABLED(CONFIG_SPI_DYNAMIC) && | |
717 | !device_is_registered(&ctlr->dev)) { | |
0c79378c | 718 | return -ENODEV; |
ddf75be4 LW |
719 | } |
720 | ||
4d8ff6b0 AKM |
721 | if (ctlr->cs_gpiods) { |
722 | u8 cs; | |
723 | ||
724 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) { | |
725 | cs = spi_get_chipselect(spi, idx); | |
be84be4a | 726 | if (is_valid_cs(cs)) |
4d8ff6b0 AKM |
727 | spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]); |
728 | } | |
729 | } | |
74317984 | 730 | |
350de7ce AS |
731 | /* |
732 | * Drivers may modify this initial i/o setup, but will | |
e48880e0 DB |
733 | * normally rely on the device being setup. Devices |
734 | * using SPI_CS_HIGH can't coexist well otherwise... | |
735 | */ | |
7d077197 | 736 | status = spi_setup(spi); |
dc87c98e | 737 | if (status < 0) { |
eb288a1f LW |
738 | dev_err(dev, "can't setup %s, status %d\n", |
739 | dev_name(&spi->dev), status); | |
0c79378c | 740 | return status; |
dc87c98e GL |
741 | } |
742 | ||
e48880e0 | 743 | /* Device may be bound to an active driver when this returns */ |
dc87c98e | 744 | status = device_add(&spi->dev); |
c7299fea | 745 | if (status < 0) { |
eb288a1f LW |
746 | dev_err(dev, "can't add %s, status %d\n", |
747 | dev_name(&spi->dev), status); | |
c7299fea SK |
748 | spi_cleanup(spi); |
749 | } else { | |
35f74fca | 750 | dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); |
c7299fea | 751 | } |
dc87c98e | 752 | |
0c79378c SR |
753 | return status; |
754 | } | |
755 | ||
756 | /** | |
757 | * spi_add_device - Add spi_device allocated with spi_alloc_device | |
758 | * @spi: spi_device to register | |
759 | * | |
760 | * Companion function to spi_alloc_device. Devices allocated with | |
702ca026 | 761 | * spi_alloc_device can be added onto the SPI bus with this function. |
0c79378c SR |
762 | * |
763 | * Return: 0 on success; negative errno on failure | |
764 | */ | |
e3dc1399 | 765 | int spi_add_device(struct spi_device *spi) |
0c79378c SR |
766 | { |
767 | struct spi_controller *ctlr = spi->controller; | |
0c79378c SR |
768 | int status; |
769 | ||
4d8ff6b0 AKM |
770 | /* Set the bus ID string */ |
771 | spi_dev_set_name(spi); | |
772 | ||
6098475d | 773 | mutex_lock(&ctlr->add_lock); |
0c79378c | 774 | status = __spi_add_device(spi); |
6098475d | 775 | mutex_unlock(&ctlr->add_lock); |
e48880e0 | 776 | return status; |
dc87c98e | 777 | } |
e3dc1399 | 778 | EXPORT_SYMBOL_GPL(spi_add_device); |
8ae12a0d | 779 | |
5ee91605 AS |
780 | static void spi_set_all_cs_unused(struct spi_device *spi) |
781 | { | |
782 | u8 idx; | |
783 | ||
5ee91605 | 784 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) |
be84be4a | 785 | spi_set_chipselect(spi, idx, SPI_INVALID_CS); |
5ee91605 AS |
786 | } |
787 | ||
33e34dc6 DB |
788 | /** |
789 | * spi_new_device - instantiate one new SPI device | |
8caab75f | 790 | * @ctlr: Controller to which device is connected |
33e34dc6 DB |
791 | * @chip: Describes the SPI device |
792 | * Context: can sleep | |
793 | * | |
794 | * On typical mainboards, this is purely internal; and it's not needed | |
8ae12a0d DB |
795 | * after board init creates the hard-wired devices. Some development |
796 | * platforms may not be able to use spi_register_board_info though, and | |
797 | * this is exported so that for example a USB or parport based adapter | |
798 | * driver could add devices (which it would learn about out-of-band). | |
082c8cb4 | 799 | * |
97d56dc6 | 800 | * Return: the new device, or NULL. |
8ae12a0d | 801 | */ |
8caab75f | 802 | struct spi_device *spi_new_device(struct spi_controller *ctlr, |
e9d5a461 | 803 | struct spi_board_info *chip) |
8ae12a0d DB |
804 | { |
805 | struct spi_device *proxy; | |
8ae12a0d DB |
806 | int status; |
807 | ||
350de7ce AS |
808 | /* |
809 | * NOTE: caller did any chip->bus_num checks necessary. | |
082c8cb4 DB |
810 | * |
811 | * Also, unless we change the return value convention to use | |
812 | * error-or-pointer (not NULL-or-pointer), troubleshootability | |
813 | * suggests syslogged diagnostics are best here (ugh). | |
814 | */ | |
815 | ||
8caab75f | 816 | proxy = spi_alloc_device(ctlr); |
dc87c98e | 817 | if (!proxy) |
8ae12a0d DB |
818 | return NULL; |
819 | ||
102eb975 GL |
820 | WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); |
821 | ||
5ee91605 AS |
822 | /* Use provided chip-select for proxy device */ |
823 | spi_set_all_cs_unused(proxy); | |
303feb3c | 824 | spi_set_chipselect(proxy, 0, chip->chip_select); |
5ee91605 | 825 | |
8ae12a0d | 826 | proxy->max_speed_hz = chip->max_speed_hz; |
980a01c9 | 827 | proxy->mode = chip->mode; |
8ae12a0d | 828 | proxy->irq = chip->irq; |
51e99de5 | 829 | strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias)); |
8ae12a0d DB |
830 | proxy->dev.platform_data = (void *) chip->platform_data; |
831 | proxy->controller_data = chip->controller_data; | |
832 | proxy->controller_state = NULL; | |
4d8ff6b0 | 833 | /* |
bb409962 AS |
834 | * By default spi->chip_select[0] will hold the physical CS number, |
835 | * so set bit 0 in spi->cs_index_mask. | |
4d8ff6b0 | 836 | */ |
bb409962 | 837 | proxy->cs_index_mask = BIT(0); |
8ae12a0d | 838 | |
47afc77b HK |
839 | if (chip->swnode) { |
840 | status = device_add_software_node(&proxy->dev, chip->swnode); | |
826cf175 | 841 | if (status) { |
9d902c2a | 842 | dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n", |
826cf175 DT |
843 | chip->modalias, status); |
844 | goto err_dev_put; | |
845 | } | |
8ae12a0d DB |
846 | } |
847 | ||
826cf175 DT |
848 | status = spi_add_device(proxy); |
849 | if (status < 0) | |
df41a5da | 850 | goto err_dev_put; |
826cf175 | 851 | |
8ae12a0d | 852 | return proxy; |
826cf175 | 853 | |
826cf175 | 854 | err_dev_put: |
df41a5da | 855 | device_remove_software_node(&proxy->dev); |
826cf175 DT |
856 | spi_dev_put(proxy); |
857 | return NULL; | |
8ae12a0d DB |
858 | } |
859 | EXPORT_SYMBOL_GPL(spi_new_device); | |
860 | ||
3b1884c2 GU |
861 | /** |
862 | * spi_unregister_device - unregister a single SPI device | |
863 | * @spi: spi_device to unregister | |
864 | * | |
865 | * Start making the passed SPI device vanish. Normally this would be handled | |
8caab75f | 866 | * by spi_unregister_controller(). |
3b1884c2 GU |
867 | */ |
868 | void spi_unregister_device(struct spi_device *spi) | |
869 | { | |
b6ffe0e6 AS |
870 | struct fwnode_handle *fwnode; |
871 | ||
bd6c1644 GU |
872 | if (!spi) |
873 | return; | |
874 | ||
b6ffe0e6 AS |
875 | fwnode = dev_fwnode(&spi->dev); |
876 | if (is_of_node(fwnode)) { | |
877 | of_node_clear_flag(to_of_node(fwnode), OF_POPULATED); | |
878 | of_node_put(to_of_node(fwnode)); | |
879 | } else if (is_acpi_device_node(fwnode)) { | |
880 | acpi_device_clear_enumerated(to_acpi_device_node(fwnode)); | |
8324147f | 881 | } |
47afc77b | 882 | device_remove_software_node(&spi->dev); |
27e7db56 SK |
883 | device_del(&spi->dev); |
884 | spi_cleanup(spi); | |
885 | put_device(&spi->dev); | |
3b1884c2 GU |
886 | } |
887 | EXPORT_SYMBOL_GPL(spi_unregister_device); | |
888 | ||
8caab75f GU |
889 | static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr, |
890 | struct spi_board_info *bi) | |
2b9603a0 FT |
891 | { |
892 | struct spi_device *dev; | |
893 | ||
8caab75f | 894 | if (ctlr->bus_num != bi->bus_num) |
2b9603a0 FT |
895 | return; |
896 | ||
8caab75f | 897 | dev = spi_new_device(ctlr, bi); |
2b9603a0 | 898 | if (!dev) |
8caab75f | 899 | dev_err(ctlr->dev.parent, "can't create new device for %s\n", |
2b9603a0 FT |
900 | bi->modalias); |
901 | } | |
902 | ||
33e34dc6 DB |
903 | /** |
904 | * spi_register_board_info - register SPI devices for a given board | |
905 | * @info: array of chip descriptors | |
906 | * @n: how many descriptors are provided | |
907 | * Context: can sleep | |
908 | * | |
8ae12a0d DB |
909 | * Board-specific early init code calls this (probably during arch_initcall) |
910 | * with segments of the SPI device table. Any device nodes are created later, | |
911 | * after the relevant parent SPI controller (bus_num) is defined. We keep | |
912 | * this table of devices forever, so that reloading a controller driver will | |
913 | * not make Linux forget about these hard-wired devices. | |
914 | * | |
915 | * Other code can also call this, e.g. a particular add-on board might provide | |
916 | * SPI devices through its expansion connector, so code initializing that board | |
917 | * would naturally declare its SPI devices. | |
918 | * | |
919 | * The board info passed can safely be __initdata ... but be careful of | |
920 | * any embedded pointers (platform_data, etc), they're copied as-is. | |
97d56dc6 JMC |
921 | * |
922 | * Return: zero on success, else a negative error code. | |
8ae12a0d | 923 | */ |
fd4a319b | 924 | int spi_register_board_info(struct spi_board_info const *info, unsigned n) |
8ae12a0d | 925 | { |
2b9603a0 FT |
926 | struct boardinfo *bi; |
927 | int i; | |
8ae12a0d | 928 | |
c7908a37 | 929 | if (!n) |
f974cf57 | 930 | return 0; |
c7908a37 | 931 | |
f9bdb7fd | 932 | bi = kcalloc(n, sizeof(*bi), GFP_KERNEL); |
8ae12a0d DB |
933 | if (!bi) |
934 | return -ENOMEM; | |
8ae12a0d | 935 | |
2b9603a0 | 936 | for (i = 0; i < n; i++, bi++, info++) { |
8caab75f | 937 | struct spi_controller *ctlr; |
8ae12a0d | 938 | |
2b9603a0 | 939 | memcpy(&bi->board_info, info, sizeof(*info)); |
826cf175 | 940 | |
2b9603a0 FT |
941 | mutex_lock(&board_lock); |
942 | list_add_tail(&bi->list, &board_list); | |
8caab75f GU |
943 | list_for_each_entry(ctlr, &spi_controller_list, list) |
944 | spi_match_controller_to_boardinfo(ctlr, | |
945 | &bi->board_info); | |
2b9603a0 | 946 | mutex_unlock(&board_lock); |
8ae12a0d | 947 | } |
2b9603a0 FT |
948 | |
949 | return 0; | |
8ae12a0d DB |
950 | } |
951 | ||
952 | /*-------------------------------------------------------------------------*/ | |
953 | ||
fb51601b UKK |
954 | /* Core methods for SPI resource management */ |
955 | ||
956 | /** | |
957 | * spi_res_alloc - allocate a spi resource that is life-cycle managed | |
958 | * during the processing of a spi_message while using | |
959 | * spi_transfer_one | |
702ca026 | 960 | * @spi: the SPI device for which we allocate memory |
fb51601b UKK |
961 | * @release: the release code to execute for this resource |
962 | * @size: size to alloc and return | |
963 | * @gfp: GFP allocation flags | |
964 | * | |
965 | * Return: the pointer to the allocated data | |
966 | * | |
967 | * This may get enhanced in the future to allocate from a memory pool | |
968 | * of the @spi_device or @spi_controller to avoid repeated allocations. | |
969 | */ | |
da21fde0 UKK |
970 | static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release, |
971 | size_t size, gfp_t gfp) | |
fb51601b UKK |
972 | { |
973 | struct spi_res *sres; | |
974 | ||
975 | sres = kzalloc(sizeof(*sres) + size, gfp); | |
976 | if (!sres) | |
977 | return NULL; | |
978 | ||
979 | INIT_LIST_HEAD(&sres->entry); | |
980 | sres->release = release; | |
981 | ||
982 | return sres->data; | |
983 | } | |
fb51601b UKK |
984 | |
985 | /** | |
702ca026 | 986 | * spi_res_free - free an SPI resource |
fb51601b | 987 | * @res: pointer to the custom data of a resource |
fb51601b | 988 | */ |
da21fde0 | 989 | static void spi_res_free(void *res) |
fb51601b UKK |
990 | { |
991 | struct spi_res *sres = container_of(res, struct spi_res, data); | |
992 | ||
fb51601b UKK |
993 | WARN_ON(!list_empty(&sres->entry)); |
994 | kfree(sres); | |
995 | } | |
fb51601b UKK |
996 | |
997 | /** | |
998 | * spi_res_add - add a spi_res to the spi_message | |
702ca026 | 999 | * @message: the SPI message |
fb51601b UKK |
1000 | * @res: the spi_resource |
1001 | */ | |
da21fde0 | 1002 | static void spi_res_add(struct spi_message *message, void *res) |
fb51601b UKK |
1003 | { |
1004 | struct spi_res *sres = container_of(res, struct spi_res, data); | |
1005 | ||
1006 | WARN_ON(!list_empty(&sres->entry)); | |
1007 | list_add_tail(&sres->entry, &message->resources); | |
1008 | } | |
fb51601b UKK |
1009 | |
1010 | /** | |
702ca026 | 1011 | * spi_res_release - release all SPI resources for this message |
fb51601b UKK |
1012 | * @ctlr: the @spi_controller |
1013 | * @message: the @spi_message | |
1014 | */ | |
da21fde0 | 1015 | static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) |
fb51601b UKK |
1016 | { |
1017 | struct spi_res *res, *tmp; | |
1018 | ||
1019 | list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) { | |
1020 | if (res->release) | |
1021 | res->release(ctlr, message, res->data); | |
1022 | ||
1023 | list_del(&res->entry); | |
1024 | ||
1025 | kfree(res); | |
1026 | } | |
1027 | } | |
fb51601b UKK |
1028 | |
1029 | /*-------------------------------------------------------------------------*/ | |
d707530b AS |
1030 | #define spi_for_each_valid_cs(spi, idx) \ |
1031 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) \ | |
1032 | if (!(spi->cs_index_mask & BIT(idx))) {} else | |
1033 | ||
4d8ff6b0 AKM |
1034 | static inline bool spi_is_last_cs(struct spi_device *spi) |
1035 | { | |
1036 | u8 idx; | |
1037 | bool last = false; | |
1038 | ||
d707530b AS |
1039 | spi_for_each_valid_cs(spi, idx) { |
1040 | if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx)) | |
1041 | last = true; | |
4d8ff6b0 AKM |
1042 | } |
1043 | return last; | |
1044 | } | |
1045 | ||
e81582c0 AS |
1046 | static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate) |
1047 | { | |
1048 | /* | |
1049 | * Historically ACPI has no means of the GPIO polarity and | |
1050 | * thus the SPISerialBus() resource defines it on the per-chip | |
1051 | * basis. In order to avoid a chain of negations, the GPIO | |
1052 | * polarity is considered being Active High. Even for the cases | |
1053 | * when _DSD() is involved (in the updated versions of ACPI) | |
1054 | * the GPIO CS polarity must be defined Active High to avoid | |
1055 | * ambiguity. That's why we use enable, that takes SPI_CS_HIGH | |
1056 | * into account. | |
1057 | */ | |
b6ffe0e6 | 1058 | if (is_acpi_device_node(dev_fwnode(&spi->dev))) |
e81582c0 AS |
1059 | gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable); |
1060 | else | |
1061 | /* Polarity handled by GPIO library */ | |
1062 | gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate); | |
1063 | ||
1064 | if (activate) | |
1065 | spi_delay_exec(&spi->cs_setup, NULL); | |
1066 | else | |
1067 | spi_delay_exec(&spi->cs_inactive, NULL); | |
1068 | } | |
fb51601b | 1069 | |
d347b4aa | 1070 | static void spi_set_cs(struct spi_device *spi, bool enable, bool force) |
b158935f | 1071 | { |
86527bcb | 1072 | bool activate = enable; |
4d8ff6b0 | 1073 | u8 idx; |
25093bde | 1074 | |
d40f0b6f DA |
1075 | /* |
1076 | * Avoid calling into the driver (or doing delays) if the chip select | |
1077 | * isn't actually changing from the last time this was called. | |
1078 | */ | |
1f1d979f AS |
1079 | if (!force && (enable == spi_is_last_cs(spi)) && |
1080 | (spi->controller->last_cs_index_mask == spi->cs_index_mask) && | |
d40f0b6f DA |
1081 | (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) |
1082 | return; | |
1083 | ||
5cb4e1f3 AS |
1084 | trace_spi_set_cs(spi, activate); |
1085 | ||
4d8ff6b0 AKM |
1086 | spi->controller->last_cs_index_mask = spi->cs_index_mask; |
1087 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) | |
be84be4a | 1088 | spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS; |
d40f0b6f | 1089 | |
1f1d979f AS |
1090 | spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; |
1091 | if (spi->controller->last_cs_mode_high) | |
b158935f MB |
1092 | enable = !enable; |
1093 | ||
aa0162dc JG |
1094 | /* |
1095 | * Handle chip select delays for GPIO based CS or controllers without | |
1096 | * programmable chip select timing. | |
1097 | */ | |
1098 | if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate) | |
1099 | spi_delay_exec(&spi->cs_hold, NULL); | |
4d8ff6b0 | 1100 | |
aa0162dc | 1101 | if (spi_is_csgpiod(spi)) { |
f3186dd8 | 1102 | if (!(spi->mode & SPI_NO_CS)) { |
d707530b AS |
1103 | spi_for_each_valid_cs(spi, idx) { |
1104 | if (spi_get_csgpiod(spi, idx)) | |
e81582c0 | 1105 | spi_toggle_csgpiod(spi, idx, enable, activate); |
4d8ff6b0 | 1106 | } |
f3186dd8 | 1107 | } |
91ce208d | 1108 | /* Some SPI controllers need both GPIO CS & ->set_cs() */ |
82238d2c | 1109 | if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) && |
8caab75f GU |
1110 | spi->controller->set_cs) |
1111 | spi->controller->set_cs(spi, !enable); | |
1112 | } else if (spi->controller->set_cs) { | |
1113 | spi->controller->set_cs(spi, !enable); | |
8eee6b9d | 1114 | } |
aa0162dc JG |
1115 | |
1116 | if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) { | |
1117 | if (activate) | |
1118 | spi_delay_exec(&spi->cs_setup, NULL); | |
1119 | else | |
1120 | spi_delay_exec(&spi->cs_inactive, NULL); | |
1121 | } | |
b158935f MB |
1122 | } |
1123 | ||
2de440f5 | 1124 | #ifdef CONFIG_HAS_DMA |
0c17ba73 VW |
1125 | static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev, |
1126 | struct sg_table *sgt, void *buf, size_t len, | |
1127 | enum dma_data_direction dir, unsigned long attrs) | |
6ad45a27 MB |
1128 | { |
1129 | const bool vmalloced_buf = is_vmalloc_addr(buf); | |
df88e91b | 1130 | unsigned int max_seg_size = dma_get_max_seg_size(dev); |
b1b8153c V |
1131 | #ifdef CONFIG_HIGHMEM |
1132 | const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE && | |
1133 | (unsigned long)buf < (PKMAP_BASE + | |
1134 | (LAST_PKMAP * PAGE_SIZE))); | |
1135 | #else | |
1136 | const bool kmap_buf = false; | |
1137 | #endif | |
65598c13 AG |
1138 | int desc_len; |
1139 | int sgs; | |
6ad45a27 | 1140 | struct page *vm_page; |
8dd4a016 | 1141 | struct scatterlist *sg; |
6ad45a27 MB |
1142 | void *sg_buf; |
1143 | size_t min; | |
1144 | int i, ret; | |
1145 | ||
b1b8153c | 1146 | if (vmalloced_buf || kmap_buf) { |
ebc4cb43 | 1147 | desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE); |
65598c13 | 1148 | sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); |
0569a88f | 1149 | } else if (virt_addr_valid(buf)) { |
ebc4cb43 | 1150 | desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len); |
65598c13 | 1151 | sgs = DIV_ROUND_UP(len, desc_len); |
0569a88f V |
1152 | } else { |
1153 | return -EINVAL; | |
65598c13 AG |
1154 | } |
1155 | ||
6ad45a27 MB |
1156 | ret = sg_alloc_table(sgt, sgs, GFP_KERNEL); |
1157 | if (ret != 0) | |
1158 | return ret; | |
1159 | ||
8dd4a016 | 1160 | sg = &sgt->sgl[0]; |
6ad45a27 | 1161 | for (i = 0; i < sgs; i++) { |
6ad45a27 | 1162 | |
b1b8153c | 1163 | if (vmalloced_buf || kmap_buf) { |
ce99319a MC |
1164 | /* |
1165 | * Next scatterlist entry size is the minimum between | |
1166 | * the desc_len and the remaining buffer length that | |
1167 | * fits in a page. | |
1168 | */ | |
1169 | min = min_t(size_t, desc_len, | |
1170 | min_t(size_t, len, | |
1171 | PAGE_SIZE - offset_in_page(buf))); | |
b1b8153c V |
1172 | if (vmalloced_buf) |
1173 | vm_page = vmalloc_to_page(buf); | |
1174 | else | |
1175 | vm_page = kmap_to_page(buf); | |
6ad45a27 MB |
1176 | if (!vm_page) { |
1177 | sg_free_table(sgt); | |
1178 | return -ENOMEM; | |
1179 | } | |
8dd4a016 | 1180 | sg_set_page(sg, vm_page, |
c1aefbdd | 1181 | min, offset_in_page(buf)); |
6ad45a27 | 1182 | } else { |
65598c13 | 1183 | min = min_t(size_t, len, desc_len); |
6ad45a27 | 1184 | sg_buf = buf; |
8dd4a016 | 1185 | sg_set_buf(sg, sg_buf, min); |
6ad45a27 MB |
1186 | } |
1187 | ||
6ad45a27 MB |
1188 | buf += min; |
1189 | len -= min; | |
8dd4a016 | 1190 | sg = sg_next(sg); |
6ad45a27 MB |
1191 | } |
1192 | ||
0c17ba73 | 1193 | ret = dma_map_sgtable(dev, sgt, dir, attrs); |
6ad45a27 MB |
1194 | if (ret < 0) { |
1195 | sg_free_table(sgt); | |
1196 | return ret; | |
1197 | } | |
1198 | ||
6ad45a27 MB |
1199 | return 0; |
1200 | } | |
1201 | ||
0c17ba73 VW |
1202 | int spi_map_buf(struct spi_controller *ctlr, struct device *dev, |
1203 | struct sg_table *sgt, void *buf, size_t len, | |
1204 | enum dma_data_direction dir) | |
1205 | { | |
1206 | return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0); | |
1207 | } | |
1208 | ||
1209 | static void spi_unmap_buf_attrs(struct spi_controller *ctlr, | |
1210 | struct device *dev, struct sg_table *sgt, | |
1211 | enum dma_data_direction dir, | |
1212 | unsigned long attrs) | |
6ad45a27 | 1213 | { |
8cc3bad9 AS |
1214 | dma_unmap_sgtable(dev, sgt, dir, attrs); |
1215 | sg_free_table(sgt); | |
1216 | sgt->orig_nents = 0; | |
1217 | sgt->nents = 0; | |
6ad45a27 MB |
1218 | } |
1219 | ||
0c17ba73 VW |
1220 | void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev, |
1221 | struct sg_table *sgt, enum dma_data_direction dir) | |
1222 | { | |
1223 | spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0); | |
1224 | } | |
1225 | ||
8caab75f | 1226 | static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) |
99adef31 | 1227 | { |
99adef31 MB |
1228 | struct device *tx_dev, *rx_dev; |
1229 | struct spi_transfer *xfer; | |
6ad45a27 | 1230 | int ret; |
3a2eba9b | 1231 | |
8caab75f | 1232 | if (!ctlr->can_dma) |
99adef31 MB |
1233 | return 0; |
1234 | ||
8caab75f GU |
1235 | if (ctlr->dma_tx) |
1236 | tx_dev = ctlr->dma_tx->device->dev; | |
b470e10e VK |
1237 | else if (ctlr->dma_map_dev) |
1238 | tx_dev = ctlr->dma_map_dev; | |
c37f45b5 | 1239 | else |
8caab75f | 1240 | tx_dev = ctlr->dev.parent; |
c37f45b5 | 1241 | |
8caab75f GU |
1242 | if (ctlr->dma_rx) |
1243 | rx_dev = ctlr->dma_rx->device->dev; | |
b470e10e VK |
1244 | else if (ctlr->dma_map_dev) |
1245 | rx_dev = ctlr->dma_map_dev; | |
c37f45b5 | 1246 | else |
8caab75f | 1247 | rx_dev = ctlr->dev.parent; |
99adef31 | 1248 | |
9f788ba4 | 1249 | ret = -ENOMSG; |
99adef31 | 1250 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
0c17ba73 VW |
1251 | /* The sync is done before each transfer. */ |
1252 | unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; | |
1253 | ||
8caab75f | 1254 | if (!ctlr->can_dma(ctlr, msg->spi, xfer)) |
99adef31 MB |
1255 | continue; |
1256 | ||
1257 | if (xfer->tx_buf != NULL) { | |
0c17ba73 VW |
1258 | ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg, |
1259 | (void *)xfer->tx_buf, | |
1260 | xfer->len, DMA_TO_DEVICE, | |
1261 | attrs); | |
6ad45a27 MB |
1262 | if (ret != 0) |
1263 | return ret; | |
e289df82 AS |
1264 | |
1265 | xfer->tx_sg_mapped = true; | |
99adef31 MB |
1266 | } |
1267 | ||
1268 | if (xfer->rx_buf != NULL) { | |
0c17ba73 VW |
1269 | ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, |
1270 | xfer->rx_buf, xfer->len, | |
1271 | DMA_FROM_DEVICE, attrs); | |
6ad45a27 | 1272 | if (ret != 0) { |
0c17ba73 VW |
1273 | spi_unmap_buf_attrs(ctlr, tx_dev, |
1274 | &xfer->tx_sg, DMA_TO_DEVICE, | |
1275 | attrs); | |
1276 | ||
6ad45a27 | 1277 | return ret; |
99adef31 | 1278 | } |
e289df82 AS |
1279 | |
1280 | xfer->rx_sg_mapped = true; | |
99adef31 MB |
1281 | } |
1282 | } | |
9f788ba4 AS |
1283 | /* No transfer has been mapped, bail out with success */ |
1284 | if (ret) | |
1285 | return 0; | |
99adef31 | 1286 | |
f25723dc VW |
1287 | ctlr->cur_rx_dma_dev = rx_dev; |
1288 | ctlr->cur_tx_dma_dev = tx_dev; | |
99adef31 MB |
1289 | |
1290 | return 0; | |
1291 | } | |
1292 | ||
8caab75f | 1293 | static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg) |
99adef31 | 1294 | { |
f25723dc VW |
1295 | struct device *rx_dev = ctlr->cur_rx_dma_dev; |
1296 | struct device *tx_dev = ctlr->cur_tx_dma_dev; | |
99adef31 | 1297 | struct spi_transfer *xfer; |
99adef31 | 1298 | |
99adef31 | 1299 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
0c17ba73 VW |
1300 | /* The sync has already been done after each transfer. */ |
1301 | unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC; | |
1302 | ||
e289df82 AS |
1303 | if (xfer->rx_sg_mapped) |
1304 | spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, | |
1305 | DMA_FROM_DEVICE, attrs); | |
1306 | xfer->rx_sg_mapped = false; | |
99adef31 | 1307 | |
e289df82 AS |
1308 | if (xfer->tx_sg_mapped) |
1309 | spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg, | |
1310 | DMA_TO_DEVICE, attrs); | |
1311 | xfer->tx_sg_mapped = false; | |
99adef31 MB |
1312 | } |
1313 | ||
1314 | return 0; | |
1315 | } | |
0c17ba73 | 1316 | |
e289df82 | 1317 | static void spi_dma_sync_for_device(struct spi_controller *ctlr, |
0c17ba73 VW |
1318 | struct spi_transfer *xfer) |
1319 | { | |
1320 | struct device *rx_dev = ctlr->cur_rx_dma_dev; | |
1321 | struct device *tx_dev = ctlr->cur_tx_dma_dev; | |
1322 | ||
e289df82 AS |
1323 | if (xfer->tx_sg_mapped) |
1324 | dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); | |
1325 | if (xfer->rx_sg_mapped) | |
1326 | dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); | |
0c17ba73 VW |
1327 | } |
1328 | ||
e289df82 | 1329 | static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, |
0c17ba73 VW |
1330 | struct spi_transfer *xfer) |
1331 | { | |
1332 | struct device *rx_dev = ctlr->cur_rx_dma_dev; | |
1333 | struct device *tx_dev = ctlr->cur_tx_dma_dev; | |
1334 | ||
e289df82 AS |
1335 | if (xfer->rx_sg_mapped) |
1336 | dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE); | |
1337 | if (xfer->tx_sg_mapped) | |
1338 | dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE); | |
0c17ba73 | 1339 | } |
2de440f5 | 1340 | #else /* !CONFIG_HAS_DMA */ |
8caab75f | 1341 | static inline int __spi_map_msg(struct spi_controller *ctlr, |
2de440f5 GU |
1342 | struct spi_message *msg) |
1343 | { | |
1344 | return 0; | |
1345 | } | |
1346 | ||
8caab75f | 1347 | static inline int __spi_unmap_msg(struct spi_controller *ctlr, |
4b786458 | 1348 | struct spi_message *msg) |
2de440f5 GU |
1349 | { |
1350 | return 0; | |
1351 | } | |
0c17ba73 VW |
1352 | |
1353 | static void spi_dma_sync_for_device(struct spi_controller *ctrl, | |
1354 | struct spi_transfer *xfer) | |
1355 | { | |
1356 | } | |
1357 | ||
1358 | static void spi_dma_sync_for_cpu(struct spi_controller *ctrl, | |
1359 | struct spi_transfer *xfer) | |
1360 | { | |
1361 | } | |
2de440f5 GU |
1362 | #endif /* !CONFIG_HAS_DMA */ |
1363 | ||
8caab75f | 1364 | static inline int spi_unmap_msg(struct spi_controller *ctlr, |
4b786458 MS |
1365 | struct spi_message *msg) |
1366 | { | |
1367 | struct spi_transfer *xfer; | |
1368 | ||
1369 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | |
1370 | /* | |
1371 | * Restore the original value of tx_buf or rx_buf if they are | |
1372 | * NULL. | |
1373 | */ | |
8caab75f | 1374 | if (xfer->tx_buf == ctlr->dummy_tx) |
4b786458 | 1375 | xfer->tx_buf = NULL; |
8caab75f | 1376 | if (xfer->rx_buf == ctlr->dummy_rx) |
4b786458 MS |
1377 | xfer->rx_buf = NULL; |
1378 | } | |
1379 | ||
8caab75f | 1380 | return __spi_unmap_msg(ctlr, msg); |
4b786458 MS |
1381 | } |
1382 | ||
8caab75f | 1383 | static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg) |
2de440f5 GU |
1384 | { |
1385 | struct spi_transfer *xfer; | |
1386 | void *tmp; | |
1387 | unsigned int max_tx, max_rx; | |
1388 | ||
aee67fe8 | 1389 | if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) |
1390 | && !(msg->spi->mode & SPI_3WIRE)) { | |
2de440f5 GU |
1391 | max_tx = 0; |
1392 | max_rx = 0; | |
1393 | ||
1394 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | |
8caab75f | 1395 | if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) && |
2de440f5 GU |
1396 | !xfer->tx_buf) |
1397 | max_tx = max(xfer->len, max_tx); | |
8caab75f | 1398 | if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) && |
2de440f5 GU |
1399 | !xfer->rx_buf) |
1400 | max_rx = max(xfer->len, max_rx); | |
1401 | } | |
1402 | ||
1403 | if (max_tx) { | |
8caab75f | 1404 | tmp = krealloc(ctlr->dummy_tx, max_tx, |
b00bab9d | 1405 | GFP_KERNEL | GFP_DMA | __GFP_ZERO); |
2de440f5 GU |
1406 | if (!tmp) |
1407 | return -ENOMEM; | |
8caab75f | 1408 | ctlr->dummy_tx = tmp; |
2de440f5 GU |
1409 | } |
1410 | ||
1411 | if (max_rx) { | |
8caab75f | 1412 | tmp = krealloc(ctlr->dummy_rx, max_rx, |
2de440f5 GU |
1413 | GFP_KERNEL | GFP_DMA); |
1414 | if (!tmp) | |
1415 | return -ENOMEM; | |
8caab75f | 1416 | ctlr->dummy_rx = tmp; |
2de440f5 GU |
1417 | } |
1418 | ||
1419 | if (max_tx || max_rx) { | |
1420 | list_for_each_entry(xfer, &msg->transfers, | |
1421 | transfer_list) { | |
5442dcaa CL |
1422 | if (!xfer->len) |
1423 | continue; | |
2de440f5 | 1424 | if (!xfer->tx_buf) |
8caab75f | 1425 | xfer->tx_buf = ctlr->dummy_tx; |
2de440f5 | 1426 | if (!xfer->rx_buf) |
8caab75f | 1427 | xfer->rx_buf = ctlr->dummy_rx; |
2de440f5 GU |
1428 | } |
1429 | } | |
1430 | } | |
1431 | ||
8caab75f | 1432 | return __spi_map_msg(ctlr, msg); |
2de440f5 | 1433 | } |
99adef31 | 1434 | |
810923f3 LR |
1435 | static int spi_transfer_wait(struct spi_controller *ctlr, |
1436 | struct spi_message *msg, | |
1437 | struct spi_transfer *xfer) | |
1438 | { | |
d501cc4c DJ |
1439 | struct spi_statistics __percpu *statm = ctlr->pcpu_statistics; |
1440 | struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics; | |
6170d077 | 1441 | u32 speed_hz = xfer->speed_hz; |
49686df5 | 1442 | unsigned long long ms; |
810923f3 | 1443 | |
1e0cc8d0 | 1444 | if (spi_controller_is_target(ctlr)) { |
810923f3 LR |
1445 | if (wait_for_completion_interruptible(&ctlr->xfer_completion)) { |
1446 | dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n"); | |
1447 | return -EINTR; | |
1448 | } | |
1449 | } else { | |
6170d077 XY |
1450 | if (!speed_hz) |
1451 | speed_hz = 100000; | |
1452 | ||
86b8bff7 AS |
1453 | /* |
1454 | * For each byte we wait for 8 cycles of the SPI clock. | |
1455 | * Since speed is defined in Hz and we want milliseconds, | |
1456 | * use respective multiplier, but before the division, | |
1457 | * otherwise we may get 0 for short transfers. | |
1458 | */ | |
1459 | ms = 8LL * MSEC_PER_SEC * xfer->len; | |
6170d077 | 1460 | do_div(ms, speed_hz); |
810923f3 | 1461 | |
86b8bff7 AS |
1462 | /* |
1463 | * Increase it twice and add 200 ms tolerance, use | |
1464 | * predefined maximum in case of overflow. | |
1465 | */ | |
1466 | ms += ms + 200; | |
810923f3 LR |
1467 | if (ms > UINT_MAX) |
1468 | ms = UINT_MAX; | |
1469 | ||
1470 | ms = wait_for_completion_timeout(&ctlr->xfer_completion, | |
1471 | msecs_to_jiffies(ms)); | |
1472 | ||
1473 | if (ms == 0) { | |
1474 | SPI_STATISTICS_INCREMENT_FIELD(statm, timedout); | |
1475 | SPI_STATISTICS_INCREMENT_FIELD(stats, timedout); | |
1476 | dev_err(&msg->spi->dev, | |
1477 | "SPI transfer timed out\n"); | |
1478 | return -ETIMEDOUT; | |
1479 | } | |
39cefd85 NC |
1480 | |
1481 | if (xfer->error & SPI_TRANS_FAIL_IO) | |
1482 | return -EIO; | |
810923f3 LR |
1483 | } |
1484 | ||
1485 | return 0; | |
1486 | } | |
1487 | ||
0ff2de8b MS |
1488 | static void _spi_transfer_delay_ns(u32 ns) |
1489 | { | |
1490 | if (!ns) | |
1491 | return; | |
86b8bff7 | 1492 | if (ns <= NSEC_PER_USEC) { |
0ff2de8b MS |
1493 | ndelay(ns); |
1494 | } else { | |
86b8bff7 | 1495 | u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC); |
0ff2de8b | 1496 | |
215705db | 1497 | fsleep(us); |
0ff2de8b MS |
1498 | } |
1499 | } | |
1500 | ||
3984d39b | 1501 | int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer) |
0ff2de8b | 1502 | { |
b2c98153 AA |
1503 | u32 delay = _delay->value; |
1504 | u32 unit = _delay->unit; | |
d5864e5b | 1505 | u32 hz; |
0ff2de8b | 1506 | |
b2c98153 AA |
1507 | if (!delay) |
1508 | return 0; | |
0ff2de8b MS |
1509 | |
1510 | switch (unit) { | |
1511 | case SPI_DELAY_UNIT_USECS: | |
86b8bff7 | 1512 | delay *= NSEC_PER_USEC; |
0ff2de8b | 1513 | break; |
86b8bff7 AS |
1514 | case SPI_DELAY_UNIT_NSECS: |
1515 | /* Nothing to do here */ | |
0ff2de8b | 1516 | break; |
d5864e5b | 1517 | case SPI_DELAY_UNIT_SCK: |
95c8222f | 1518 | /* Clock cycles need to be obtained from spi_transfer */ |
b2c98153 AA |
1519 | if (!xfer) |
1520 | return -EINVAL; | |
86b8bff7 AS |
1521 | /* |
1522 | * If there is unknown effective speed, approximate it | |
702ca026 | 1523 | * by underestimating with half of the requested Hz. |
d5864e5b MS |
1524 | */ |
1525 | hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2; | |
b2c98153 AA |
1526 | if (!hz) |
1527 | return -EINVAL; | |
86b8bff7 AS |
1528 | |
1529 | /* Convert delay to nanoseconds */ | |
1530 | delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz); | |
d5864e5b | 1531 | break; |
0ff2de8b | 1532 | default: |
b2c98153 AA |
1533 | return -EINVAL; |
1534 | } | |
1535 | ||
1536 | return delay; | |
1537 | } | |
3984d39b | 1538 | EXPORT_SYMBOL_GPL(spi_delay_to_ns); |
b2c98153 AA |
1539 | |
1540 | int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer) | |
1541 | { | |
1542 | int delay; | |
1543 | ||
8fede89f MB |
1544 | might_sleep(); |
1545 | ||
b2c98153 AA |
1546 | if (!_delay) |
1547 | return -EINVAL; | |
1548 | ||
3984d39b | 1549 | delay = spi_delay_to_ns(_delay, xfer); |
b2c98153 AA |
1550 | if (delay < 0) |
1551 | return delay; | |
1552 | ||
1553 | _spi_transfer_delay_ns(delay); | |
1554 | ||
1555 | return 0; | |
1556 | } | |
1557 | EXPORT_SYMBOL_GPL(spi_delay_exec); | |
1558 | ||
0ff2de8b MS |
1559 | static void _spi_transfer_cs_change_delay(struct spi_message *msg, |
1560 | struct spi_transfer *xfer) | |
1561 | { | |
86b8bff7 | 1562 | u32 default_delay_ns = 10 * NSEC_PER_USEC; |
329f0dac AA |
1563 | u32 delay = xfer->cs_change_delay.value; |
1564 | u32 unit = xfer->cs_change_delay.unit; | |
1565 | int ret; | |
0ff2de8b | 1566 | |
95c8222f | 1567 | /* Return early on "fast" mode - for everything but USECS */ |
6b3f236a AA |
1568 | if (!delay) { |
1569 | if (unit == SPI_DELAY_UNIT_USECS) | |
86b8bff7 | 1570 | _spi_transfer_delay_ns(default_delay_ns); |
0ff2de8b | 1571 | return; |
6b3f236a | 1572 | } |
0ff2de8b | 1573 | |
329f0dac AA |
1574 | ret = spi_delay_exec(&xfer->cs_change_delay, xfer); |
1575 | if (ret) { | |
0ff2de8b | 1576 | dev_err_once(&msg->spi->dev, |
86b8bff7 AS |
1577 | "Use of unsupported delay unit %i, using default of %luus\n", |
1578 | unit, default_delay_ns / NSEC_PER_USEC); | |
1579 | _spi_transfer_delay_ns(default_delay_ns); | |
0ff2de8b | 1580 | } |
0ff2de8b MS |
1581 | } |
1582 | ||
6e80133a WZ |
1583 | void spi_transfer_cs_change_delay_exec(struct spi_message *msg, |
1584 | struct spi_transfer *xfer) | |
1585 | { | |
1586 | _spi_transfer_cs_change_delay(msg, xfer); | |
1587 | } | |
1588 | EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec); | |
1589 | ||
b158935f MB |
1590 | /* |
1591 | * spi_transfer_one_message - Default implementation of transfer_one_message() | |
1592 | * | |
1593 | * This is a standard implementation of transfer_one_message() for | |
8ba811a7 | 1594 | * drivers which implement a transfer_one() operation. It provides |
b158935f MB |
1595 | * standard handling of delays and chip select management. |
1596 | */ | |
8caab75f | 1597 | static int spi_transfer_one_message(struct spi_controller *ctlr, |
b158935f MB |
1598 | struct spi_message *msg) |
1599 | { | |
1600 | struct spi_transfer *xfer; | |
b158935f MB |
1601 | bool keep_cs = false; |
1602 | int ret = 0; | |
d501cc4c DJ |
1603 | struct spi_statistics __percpu *statm = ctlr->pcpu_statistics; |
1604 | struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics; | |
b158935f | 1605 | |
5e0531f6 CL |
1606 | xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list); |
1607 | spi_set_cs(msg->spi, !xfer->cs_off, false); | |
b158935f | 1608 | |
eca2ebc7 MS |
1609 | SPI_STATISTICS_INCREMENT_FIELD(statm, messages); |
1610 | SPI_STATISTICS_INCREMENT_FIELD(stats, messages); | |
1611 | ||
b158935f MB |
1612 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1613 | trace_spi_transfer_start(msg, xfer); | |
1614 | ||
52267fe8 DL |
1615 | spi_statistics_add_transfer_stats(statm, xfer, msg); |
1616 | spi_statistics_add_transfer_stats(stats, xfer, msg); | |
eca2ebc7 | 1617 | |
b42faeee VO |
1618 | if (!ctlr->ptp_sts_supported) { |
1619 | xfer->ptp_sts_word_pre = 0; | |
1620 | ptp_read_system_prets(xfer->ptp_sts); | |
1621 | } | |
1622 | ||
b3063203 | 1623 | if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) { |
8caab75f | 1624 | reinit_completion(&ctlr->xfer_completion); |
b158935f | 1625 | |
809b1b04 | 1626 | fallback_pio: |
e289df82 | 1627 | spi_dma_sync_for_device(ctlr, xfer); |
8caab75f | 1628 | ret = ctlr->transfer_one(ctlr, msg->spi, xfer); |
38ec10f6 | 1629 | if (ret < 0) { |
e289df82 | 1630 | spi_dma_sync_for_cpu(ctlr, xfer); |
0c17ba73 | 1631 | |
e289df82 AS |
1632 | if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) && |
1633 | (xfer->error & SPI_TRANS_FAIL_NO_START)) { | |
809b1b04 RG |
1634 | __spi_unmap_msg(ctlr, msg); |
1635 | ctlr->fallback = true; | |
1636 | xfer->error &= ~SPI_TRANS_FAIL_NO_START; | |
1637 | goto fallback_pio; | |
1638 | } | |
1639 | ||
eca2ebc7 MS |
1640 | SPI_STATISTICS_INCREMENT_FIELD(statm, |
1641 | errors); | |
1642 | SPI_STATISTICS_INCREMENT_FIELD(stats, | |
1643 | errors); | |
38ec10f6 MB |
1644 | dev_err(&msg->spi->dev, |
1645 | "SPI transfer failed: %d\n", ret); | |
1646 | goto out; | |
1647 | } | |
b158935f | 1648 | |
d57e7960 MB |
1649 | if (ret > 0) { |
1650 | ret = spi_transfer_wait(ctlr, msg, xfer); | |
1651 | if (ret < 0) | |
1652 | msg->status = ret; | |
1653 | } | |
0c17ba73 | 1654 | |
e289df82 | 1655 | spi_dma_sync_for_cpu(ctlr, xfer); |
38ec10f6 MB |
1656 | } else { |
1657 | if (xfer->len) | |
1658 | dev_err(&msg->spi->dev, | |
1659 | "Bufferless transfer has length %u\n", | |
1660 | xfer->len); | |
13a42798 | 1661 | } |
b158935f | 1662 | |
b42faeee VO |
1663 | if (!ctlr->ptp_sts_supported) { |
1664 | ptp_read_system_postts(xfer->ptp_sts); | |
1665 | xfer->ptp_sts_word_post = xfer->len; | |
1666 | } | |
1667 | ||
b158935f MB |
1668 | trace_spi_transfer_stop(msg, xfer); |
1669 | ||
1670 | if (msg->status != -EINPROGRESS) | |
1671 | goto out; | |
1672 | ||
bebcfd27 | 1673 | spi_transfer_delay_exec(xfer); |
b158935f MB |
1674 | |
1675 | if (xfer->cs_change) { | |
1676 | if (list_is_last(&xfer->transfer_list, | |
1677 | &msg->transfers)) { | |
1678 | keep_cs = true; | |
1679 | } else { | |
5e0531f6 CL |
1680 | if (!xfer->cs_off) |
1681 | spi_set_cs(msg->spi, false, false); | |
0ff2de8b | 1682 | _spi_transfer_cs_change_delay(msg, xfer); |
5e0531f6 CL |
1683 | if (!list_next_entry(xfer, transfer_list)->cs_off) |
1684 | spi_set_cs(msg->spi, true, false); | |
b158935f | 1685 | } |
5e0531f6 CL |
1686 | } else if (!list_is_last(&xfer->transfer_list, &msg->transfers) && |
1687 | xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) { | |
1688 | spi_set_cs(msg->spi, xfer->cs_off, false); | |
b158935f MB |
1689 | } |
1690 | ||
1691 | msg->actual_length += xfer->len; | |
1692 | } | |
1693 | ||
1694 | out: | |
1695 | if (ret != 0 || !keep_cs) | |
d347b4aa | 1696 | spi_set_cs(msg->spi, false, false); |
b158935f MB |
1697 | |
1698 | if (msg->status == -EINPROGRESS) | |
1699 | msg->status = ret; | |
1700 | ||
8caab75f GU |
1701 | if (msg->status && ctlr->handle_err) |
1702 | ctlr->handle_err(ctlr, msg); | |
b716c4ff | 1703 | |
0ed56252 MB |
1704 | spi_finalize_current_message(ctlr); |
1705 | ||
b158935f MB |
1706 | return ret; |
1707 | } | |
1708 | ||
1709 | /** | |
1710 | * spi_finalize_current_transfer - report completion of a transfer | |
8caab75f | 1711 | * @ctlr: the controller reporting completion |
b158935f MB |
1712 | * |
1713 | * Called by SPI drivers using the core transfer_one_message() | |
1714 | * implementation to notify it that the current interrupt driven | |
9e8f4882 | 1715 | * transfer has finished and the next one may be scheduled. |
b158935f | 1716 | */ |
8caab75f | 1717 | void spi_finalize_current_transfer(struct spi_controller *ctlr) |
b158935f | 1718 | { |
8caab75f | 1719 | complete(&ctlr->xfer_completion); |
b158935f MB |
1720 | } |
1721 | EXPORT_SYMBOL_GPL(spi_finalize_current_transfer); | |
1722 | ||
e1268597 MB |
1723 | static void spi_idle_runtime_pm(struct spi_controller *ctlr) |
1724 | { | |
1725 | if (ctlr->auto_runtime_pm) { | |
1726 | pm_runtime_mark_last_busy(ctlr->dev.parent); | |
1727 | pm_runtime_put_autosuspend(ctlr->dev.parent); | |
1728 | } | |
1729 | } | |
1730 | ||
ae7d2346 DJ |
1731 | static int __spi_pump_transfer_message(struct spi_controller *ctlr, |
1732 | struct spi_message *msg, bool was_busy) | |
1733 | { | |
1734 | struct spi_transfer *xfer; | |
1735 | int ret; | |
1736 | ||
1737 | if (!was_busy && ctlr->auto_runtime_pm) { | |
1738 | ret = pm_runtime_get_sync(ctlr->dev.parent); | |
1739 | if (ret < 0) { | |
1740 | pm_runtime_put_noidle(ctlr->dev.parent); | |
1741 | dev_err(&ctlr->dev, "Failed to power device: %d\n", | |
1742 | ret); | |
8c2ae772 DL |
1743 | |
1744 | msg->status = ret; | |
1745 | spi_finalize_current_message(ctlr); | |
1746 | ||
ae7d2346 DJ |
1747 | return ret; |
1748 | } | |
1749 | } | |
1750 | ||
1751 | if (!was_busy) | |
1752 | trace_spi_controller_busy(ctlr); | |
1753 | ||
1754 | if (!was_busy && ctlr->prepare_transfer_hardware) { | |
1755 | ret = ctlr->prepare_transfer_hardware(ctlr); | |
1756 | if (ret) { | |
1757 | dev_err(&ctlr->dev, | |
1758 | "failed to prepare transfer hardware: %d\n", | |
1759 | ret); | |
1760 | ||
1761 | if (ctlr->auto_runtime_pm) | |
1762 | pm_runtime_put(ctlr->dev.parent); | |
1763 | ||
1764 | msg->status = ret; | |
1765 | spi_finalize_current_message(ctlr); | |
1766 | ||
1767 | return ret; | |
1768 | } | |
1769 | } | |
1770 | ||
1771 | trace_spi_message_start(msg); | |
1772 | ||
1773 | if (ctlr->prepare_message) { | |
1774 | ret = ctlr->prepare_message(ctlr, msg); | |
1775 | if (ret) { | |
1776 | dev_err(&ctlr->dev, "failed to prepare message: %d\n", | |
1777 | ret); | |
1778 | msg->status = ret; | |
1779 | spi_finalize_current_message(ctlr); | |
1780 | return ret; | |
1781 | } | |
1782 | msg->prepared = true; | |
1783 | } | |
1784 | ||
1785 | ret = spi_map_msg(ctlr, msg); | |
1786 | if (ret) { | |
1787 | msg->status = ret; | |
1788 | spi_finalize_current_message(ctlr); | |
1789 | return ret; | |
1790 | } | |
1791 | ||
1792 | if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { | |
1793 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | |
1794 | xfer->ptp_sts_word_pre = 0; | |
1795 | ptp_read_system_prets(xfer->ptp_sts); | |
1796 | } | |
1797 | } | |
1798 | ||
dc302905 DJ |
1799 | /* |
1800 | * Drivers implementation of transfer_one_message() must arrange for | |
1801 | * spi_finalize_current_message() to get called. Most drivers will do | |
1802 | * this in the calling context, but some don't. For those cases, a | |
1803 | * completion is used to guarantee that this function does not return | |
1804 | * until spi_finalize_current_message() is done accessing | |
1805 | * ctlr->cur_msg. | |
1806 | * Use of the following two flags enable to opportunistically skip the | |
1807 | * use of the completion since its use involves expensive spin locks. | |
1808 | * In case of a race with the context that calls | |
1809 | * spi_finalize_current_message() the completion will always be used, | |
1810 | * due to strict ordering of these flags using barriers. | |
1811 | */ | |
1812 | WRITE_ONCE(ctlr->cur_msg_incomplete, true); | |
1813 | WRITE_ONCE(ctlr->cur_msg_need_completion, false); | |
69fa9590 | 1814 | reinit_completion(&ctlr->cur_msg_completion); |
95c8222f | 1815 | smp_wmb(); /* Make these available to spi_finalize_current_message() */ |
dc302905 | 1816 | |
ae7d2346 DJ |
1817 | ret = ctlr->transfer_one_message(ctlr, msg); |
1818 | if (ret) { | |
1819 | dev_err(&ctlr->dev, | |
1820 | "failed to transfer one message from queue\n"); | |
1821 | return ret; | |
1822 | } | |
1823 | ||
31d4c1bd DJ |
1824 | WRITE_ONCE(ctlr->cur_msg_need_completion, true); |
1825 | smp_mb(); /* See spi_finalize_current_message()... */ | |
1826 | if (READ_ONCE(ctlr->cur_msg_incomplete)) | |
1827 | wait_for_completion(&ctlr->cur_msg_completion); | |
1828 | ||
ae7d2346 DJ |
1829 | return 0; |
1830 | } | |
1831 | ||
ffbbdd21 | 1832 | /** |
702ca026 | 1833 | * __spi_pump_messages - function which processes SPI message queue |
8caab75f | 1834 | * @ctlr: controller to process queue for |
fc9e0f71 | 1835 | * @in_kthread: true if we are in the context of the message pump thread |
ffbbdd21 | 1836 | * |
702ca026 | 1837 | * This function checks if there is any SPI message in the queue that |
ffbbdd21 LW |
1838 | * needs processing and if so call out to the driver to initialize hardware |
1839 | * and transfer each message. | |
1840 | * | |
0461a414 MB |
1841 | * Note that it is called both from the kthread itself and also from |
1842 | * inside spi_sync(); the queue extraction handling at the top of the | |
1843 | * function should deal with this safely. | |
ffbbdd21 | 1844 | */ |
8caab75f | 1845 | static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) |
ffbbdd21 | 1846 | { |
d1c44c93 | 1847 | struct spi_message *msg; |
ffbbdd21 | 1848 | bool was_busy = false; |
d1c44c93 | 1849 | unsigned long flags; |
ffbbdd21 LW |
1850 | int ret; |
1851 | ||
702ca026 | 1852 | /* Take the I/O mutex */ |
c1038165 DJ |
1853 | mutex_lock(&ctlr->io_mutex); |
1854 | ||
983aee5d | 1855 | /* Lock queue */ |
8caab75f | 1856 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
983aee5d MB |
1857 | |
1858 | /* Make sure we are not already running a message */ | |
8711a2ab | 1859 | if (ctlr->cur_msg) |
c1038165 | 1860 | goto out_unlock; |
983aee5d MB |
1861 | |
1862 | /* Check if the queue is idle */ | |
8caab75f | 1863 | if (list_empty(&ctlr->queue) || !ctlr->running) { |
8711a2ab | 1864 | if (!ctlr->busy) |
c1038165 | 1865 | goto out_unlock; |
fc9e0f71 | 1866 | |
e1268597 | 1867 | /* Defer any non-atomic teardown to the thread */ |
f0125f1a | 1868 | if (!in_kthread) { |
e1268597 MB |
1869 | if (!ctlr->dummy_rx && !ctlr->dummy_tx && |
1870 | !ctlr->unprepare_transfer_hardware) { | |
1871 | spi_idle_runtime_pm(ctlr); | |
1872 | ctlr->busy = false; | |
ae7d2346 | 1873 | ctlr->queue_empty = true; |
e1268597 MB |
1874 | trace_spi_controller_idle(ctlr); |
1875 | } else { | |
1876 | kthread_queue_work(ctlr->kworker, | |
1877 | &ctlr->pump_messages); | |
1878 | } | |
c1038165 | 1879 | goto out_unlock; |
f0125f1a MB |
1880 | } |
1881 | ||
1882 | ctlr->busy = false; | |
f0125f1a MB |
1883 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
1884 | ||
1885 | kfree(ctlr->dummy_rx); | |
1886 | ctlr->dummy_rx = NULL; | |
1887 | kfree(ctlr->dummy_tx); | |
1888 | ctlr->dummy_tx = NULL; | |
1889 | if (ctlr->unprepare_transfer_hardware && | |
1890 | ctlr->unprepare_transfer_hardware(ctlr)) | |
1891 | dev_err(&ctlr->dev, | |
1892 | "failed to unprepare transfer hardware\n"); | |
e1268597 | 1893 | spi_idle_runtime_pm(ctlr); |
f0125f1a MB |
1894 | trace_spi_controller_idle(ctlr); |
1895 | ||
1896 | spin_lock_irqsave(&ctlr->queue_lock, flags); | |
ae7d2346 | 1897 | ctlr->queue_empty = true; |
c1038165 | 1898 | goto out_unlock; |
ffbbdd21 | 1899 | } |
ffbbdd21 | 1900 | |
ffbbdd21 | 1901 | /* Extract head of queue */ |
d1c44c93 VO |
1902 | msg = list_first_entry(&ctlr->queue, struct spi_message, queue); |
1903 | ctlr->cur_msg = msg; | |
ffbbdd21 | 1904 | |
d1c44c93 | 1905 | list_del_init(&msg->queue); |
8caab75f | 1906 | if (ctlr->busy) |
ffbbdd21 LW |
1907 | was_busy = true; |
1908 | else | |
8caab75f GU |
1909 | ctlr->busy = true; |
1910 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); | |
ffbbdd21 | 1911 | |
ae7d2346 | 1912 | ret = __spi_pump_transfer_message(ctlr, msg, was_busy); |
9c9c9da7 | 1913 | kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
c191543e | 1914 | |
69fa9590 DJ |
1915 | ctlr->cur_msg = NULL; |
1916 | ctlr->fallback = false; | |
1917 | ||
8caab75f | 1918 | mutex_unlock(&ctlr->io_mutex); |
62826970 MB |
1919 | |
1920 | /* Prod the scheduler in case transfer_one() was busy waiting */ | |
49023d2e JH |
1921 | if (!ret) |
1922 | cond_resched(); | |
c1038165 DJ |
1923 | return; |
1924 | ||
1925 | out_unlock: | |
8711a2ab | 1926 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
c1038165 | 1927 | mutex_unlock(&ctlr->io_mutex); |
ffbbdd21 LW |
1928 | } |
1929 | ||
fc9e0f71 MB |
1930 | /** |
1931 | * spi_pump_messages - kthread work function which processes spi message queue | |
8caab75f | 1932 | * @work: pointer to kthread work struct contained in the controller struct |
fc9e0f71 MB |
1933 | */ |
1934 | static void spi_pump_messages(struct kthread_work *work) | |
1935 | { | |
8caab75f GU |
1936 | struct spi_controller *ctlr = |
1937 | container_of(work, struct spi_controller, pump_messages); | |
fc9e0f71 | 1938 | |
8caab75f | 1939 | __spi_pump_messages(ctlr, true); |
fc9e0f71 MB |
1940 | } |
1941 | ||
b42faeee | 1942 | /** |
350de7ce | 1943 | * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp |
b42faeee VO |
1944 | * @ctlr: Pointer to the spi_controller structure of the driver |
1945 | * @xfer: Pointer to the transfer being timestamped | |
862dd2a9 | 1946 | * @progress: How many words (not bytes) have been transferred so far |
b42faeee VO |
1947 | * @irqs_off: If true, will disable IRQs and preemption for the duration of the |
1948 | * transfer, for less jitter in time measurement. Only compatible | |
1949 | * with PIO drivers. If true, must follow up with | |
1950 | * spi_take_timestamp_post or otherwise system will crash. | |
1951 | * WARNING: for fully predictable results, the CPU frequency must | |
1952 | * also be under control (governor). | |
350de7ce AS |
1953 | * |
1954 | * This is a helper for drivers to collect the beginning of the TX timestamp | |
1955 | * for the requested byte from the SPI transfer. The frequency with which this | |
1956 | * function must be called (once per word, once for the whole transfer, once | |
1957 | * per batch of words etc) is arbitrary as long as the @tx buffer offset is | |
1958 | * greater than or equal to the requested byte at the time of the call. The | |
1959 | * timestamp is only taken once, at the first such call. It is assumed that | |
1960 | * the driver advances its @tx buffer pointer monotonically. | |
b42faeee VO |
1961 | */ |
1962 | void spi_take_timestamp_pre(struct spi_controller *ctlr, | |
1963 | struct spi_transfer *xfer, | |
862dd2a9 | 1964 | size_t progress, bool irqs_off) |
b42faeee | 1965 | { |
b42faeee VO |
1966 | if (!xfer->ptp_sts) |
1967 | return; | |
1968 | ||
6a726824 | 1969 | if (xfer->timestamped) |
b42faeee VO |
1970 | return; |
1971 | ||
6a726824 | 1972 | if (progress > xfer->ptp_sts_word_pre) |
b42faeee VO |
1973 | return; |
1974 | ||
1975 | /* Capture the resolution of the timestamp */ | |
862dd2a9 | 1976 | xfer->ptp_sts_word_pre = progress; |
b42faeee | 1977 | |
b42faeee VO |
1978 | if (irqs_off) { |
1979 | local_irq_save(ctlr->irq_flags); | |
1980 | preempt_disable(); | |
1981 | } | |
1982 | ||
1983 | ptp_read_system_prets(xfer->ptp_sts); | |
1984 | } | |
1985 | EXPORT_SYMBOL_GPL(spi_take_timestamp_pre); | |
1986 | ||
1987 | /** | |
350de7ce | 1988 | * spi_take_timestamp_post - helper to collect the end of the TX timestamp |
b42faeee VO |
1989 | * @ctlr: Pointer to the spi_controller structure of the driver |
1990 | * @xfer: Pointer to the transfer being timestamped | |
862dd2a9 | 1991 | * @progress: How many words (not bytes) have been transferred so far |
b42faeee | 1992 | * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU. |
350de7ce AS |
1993 | * |
1994 | * This is a helper for drivers to collect the end of the TX timestamp for | |
1995 | * the requested byte from the SPI transfer. Can be called with an arbitrary | |
1996 | * frequency: only the first call where @tx exceeds or is equal to the | |
1997 | * requested word will be timestamped. | |
b42faeee VO |
1998 | */ |
1999 | void spi_take_timestamp_post(struct spi_controller *ctlr, | |
2000 | struct spi_transfer *xfer, | |
862dd2a9 | 2001 | size_t progress, bool irqs_off) |
b42faeee | 2002 | { |
b42faeee VO |
2003 | if (!xfer->ptp_sts) |
2004 | return; | |
2005 | ||
6a726824 | 2006 | if (xfer->timestamped) |
b42faeee VO |
2007 | return; |
2008 | ||
862dd2a9 | 2009 | if (progress < xfer->ptp_sts_word_post) |
b42faeee VO |
2010 | return; |
2011 | ||
2012 | ptp_read_system_postts(xfer->ptp_sts); | |
2013 | ||
2014 | if (irqs_off) { | |
2015 | local_irq_restore(ctlr->irq_flags); | |
2016 | preempt_enable(); | |
2017 | } | |
2018 | ||
2019 | /* Capture the resolution of the timestamp */ | |
862dd2a9 | 2020 | xfer->ptp_sts_word_post = progress; |
b42faeee | 2021 | |
9d77522b | 2022 | xfer->timestamped = 1; |
b42faeee VO |
2023 | } |
2024 | EXPORT_SYMBOL_GPL(spi_take_timestamp_post); | |
2025 | ||
924b5867 DA |
2026 | /** |
2027 | * spi_set_thread_rt - set the controller to pump at realtime priority | |
2028 | * @ctlr: controller to boost priority of | |
2029 | * | |
2030 | * This can be called because the controller requested realtime priority | |
2031 | * (by setting the ->rt value before calling spi_register_controller()) or | |
2032 | * because a device on the bus said that its transfers needed realtime | |
2033 | * priority. | |
2034 | * | |
2035 | * NOTE: at the moment if any device on a bus says it needs realtime then | |
2036 | * the thread will be at realtime priority for all transfers on that | |
2037 | * controller. If this eventually becomes a problem we may see if we can | |
2038 | * find a way to boost the priority only temporarily during relevant | |
2039 | * transfers. | |
2040 | */ | |
2041 | static void spi_set_thread_rt(struct spi_controller *ctlr) | |
ffbbdd21 | 2042 | { |
924b5867 DA |
2043 | dev_info(&ctlr->dev, |
2044 | "will run message pump with realtime priority\n"); | |
6d2b84a4 | 2045 | sched_set_fifo(ctlr->kworker->task); |
924b5867 DA |
2046 | } |
2047 | ||
2048 | static int spi_init_queue(struct spi_controller *ctlr) | |
2049 | { | |
8caab75f GU |
2050 | ctlr->running = false; |
2051 | ctlr->busy = false; | |
ae7d2346 | 2052 | ctlr->queue_empty = true; |
ffbbdd21 | 2053 | |
b04e317b | 2054 | ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev)); |
60a883d1 MS |
2055 | if (IS_ERR(ctlr->kworker)) { |
2056 | dev_err(&ctlr->dev, "failed to create message pump kworker\n"); | |
2057 | return PTR_ERR(ctlr->kworker); | |
ffbbdd21 | 2058 | } |
60a883d1 | 2059 | |
8caab75f | 2060 | kthread_init_work(&ctlr->pump_messages, spi_pump_messages); |
f0125f1a | 2061 | |
ffbbdd21 | 2062 | /* |
8caab75f | 2063 | * Controller config will indicate if this controller should run the |
ffbbdd21 LW |
2064 | * message pump with high (realtime) priority to reduce the transfer |
2065 | * latency on the bus by minimising the delay between a transfer | |
2066 | * request and the scheduling of the message pump thread. Without this | |
2067 | * setting the message pump thread will remain at default priority. | |
2068 | */ | |
924b5867 DA |
2069 | if (ctlr->rt) |
2070 | spi_set_thread_rt(ctlr); | |
ffbbdd21 LW |
2071 | |
2072 | return 0; | |
2073 | } | |
2074 | ||
2075 | /** | |
2076 | * spi_get_next_queued_message() - called by driver to check for queued | |
2077 | * messages | |
8caab75f | 2078 | * @ctlr: the controller to check for queued messages |
ffbbdd21 LW |
2079 | * |
2080 | * If there are more messages in the queue, the next message is returned from | |
2081 | * this call. | |
97d56dc6 JMC |
2082 | * |
2083 | * Return: the next message in the queue, else NULL if the queue is empty. | |
ffbbdd21 | 2084 | */ |
8caab75f | 2085 | struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr) |
ffbbdd21 LW |
2086 | { |
2087 | struct spi_message *next; | |
2088 | unsigned long flags; | |
2089 | ||
95c8222f | 2090 | /* Get a pointer to the next message, if any */ |
8caab75f GU |
2091 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
2092 | next = list_first_entry_or_null(&ctlr->queue, struct spi_message, | |
1cfd97f9 | 2093 | queue); |
8caab75f | 2094 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
ffbbdd21 LW |
2095 | |
2096 | return next; | |
2097 | } | |
2098 | EXPORT_SYMBOL_GPL(spi_get_next_queued_message); | |
2099 | ||
7b1d87af DL |
2100 | /* |
2101 | * __spi_unoptimize_message - shared implementation of spi_unoptimize_message() | |
2102 | * and spi_maybe_unoptimize_message() | |
2103 | * @msg: the message to unoptimize | |
2104 | * | |
2105 | * Peripheral drivers should use spi_unoptimize_message() and callers inside | |
2106 | * core should use spi_maybe_unoptimize_message() rather than calling this | |
2107 | * function directly. | |
2108 | * | |
2109 | * It is not valid to call this on a message that is not currently optimized. | |
2110 | */ | |
2111 | static void __spi_unoptimize_message(struct spi_message *msg) | |
2112 | { | |
2113 | struct spi_controller *ctlr = msg->spi->controller; | |
2114 | ||
2115 | if (ctlr->unoptimize_message) | |
2116 | ctlr->unoptimize_message(msg); | |
2117 | ||
fab53fea DL |
2118 | spi_res_release(ctlr, msg); |
2119 | ||
7b1d87af DL |
2120 | msg->optimized = false; |
2121 | msg->opt_state = NULL; | |
2122 | } | |
2123 | ||
2124 | /* | |
2125 | * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral | |
2126 | * @msg: the message to unoptimize | |
2127 | * | |
2128 | * This function is used to unoptimize a message if and only if it was | |
2129 | * optimized by the core (via spi_maybe_optimize_message()). | |
2130 | */ | |
2131 | static void spi_maybe_unoptimize_message(struct spi_message *msg) | |
2132 | { | |
ca52aa4c DL |
2133 | if (!msg->pre_optimized && msg->optimized && |
2134 | !msg->spi->controller->defer_optimize_message) | |
7b1d87af DL |
2135 | __spi_unoptimize_message(msg); |
2136 | } | |
2137 | ||
ffbbdd21 LW |
2138 | /** |
2139 | * spi_finalize_current_message() - the current message is complete | |
8caab75f | 2140 | * @ctlr: the controller to return the message to |
ffbbdd21 LW |
2141 | * |
2142 | * Called by the driver to notify the core that the message in the front of the | |
2143 | * queue is complete and can be removed from the queue. | |
2144 | */ | |
8caab75f | 2145 | void spi_finalize_current_message(struct spi_controller *ctlr) |
ffbbdd21 | 2146 | { |
b42faeee | 2147 | struct spi_transfer *xfer; |
ffbbdd21 | 2148 | struct spi_message *mesg; |
2841a5fc | 2149 | int ret; |
ffbbdd21 | 2150 | |
8caab75f | 2151 | mesg = ctlr->cur_msg; |
ffbbdd21 | 2152 | |
b42faeee VO |
2153 | if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) { |
2154 | list_for_each_entry(xfer, &mesg->transfers, transfer_list) { | |
2155 | ptp_read_system_postts(xfer->ptp_sts); | |
2156 | xfer->ptp_sts_word_post = xfer->len; | |
2157 | } | |
2158 | } | |
2159 | ||
6a726824 VO |
2160 | if (unlikely(ctlr->ptp_sts_supported)) |
2161 | list_for_each_entry(xfer, &mesg->transfers, transfer_list) | |
2162 | WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped); | |
f971a207 | 2163 | |
8caab75f | 2164 | spi_unmap_msg(ctlr, mesg); |
99adef31 | 2165 | |
1714582a | 2166 | if (mesg->prepared && ctlr->unprepare_message) { |
8caab75f | 2167 | ret = ctlr->unprepare_message(ctlr, mesg); |
2841a5fc | 2168 | if (ret) { |
8caab75f GU |
2169 | dev_err(&ctlr->dev, "failed to unprepare message: %d\n", |
2170 | ret); | |
2841a5fc MB |
2171 | } |
2172 | } | |
391949b6 | 2173 | |
1714582a DJ |
2174 | mesg->prepared = false; |
2175 | ||
7b1d87af DL |
2176 | spi_maybe_unoptimize_message(mesg); |
2177 | ||
dc302905 DJ |
2178 | WRITE_ONCE(ctlr->cur_msg_incomplete, false); |
2179 | smp_mb(); /* See __spi_pump_transfer_message()... */ | |
2180 | if (READ_ONCE(ctlr->cur_msg_need_completion)) | |
2181 | complete(&ctlr->cur_msg_completion); | |
8e76ef88 MS |
2182 | |
2183 | trace_spi_message_done(mesg); | |
2841a5fc | 2184 | |
ffbbdd21 LW |
2185 | mesg->state = NULL; |
2186 | if (mesg->complete) | |
2187 | mesg->complete(mesg->context); | |
2188 | } | |
2189 | EXPORT_SYMBOL_GPL(spi_finalize_current_message); | |
2190 | ||
8caab75f | 2191 | static int spi_start_queue(struct spi_controller *ctlr) |
ffbbdd21 LW |
2192 | { |
2193 | unsigned long flags; | |
2194 | ||
8caab75f | 2195 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
ffbbdd21 | 2196 | |
8caab75f GU |
2197 | if (ctlr->running || ctlr->busy) { |
2198 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); | |
ffbbdd21 LW |
2199 | return -EBUSY; |
2200 | } | |
2201 | ||
8caab75f GU |
2202 | ctlr->running = true; |
2203 | ctlr->cur_msg = NULL; | |
2204 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); | |
ffbbdd21 | 2205 | |
60a883d1 | 2206 | kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
ffbbdd21 LW |
2207 | |
2208 | return 0; | |
2209 | } | |
2210 | ||
8caab75f | 2211 | static int spi_stop_queue(struct spi_controller *ctlr) |
ffbbdd21 | 2212 | { |
a71b7845 | 2213 | unsigned int limit = 500; |
ffbbdd21 | 2214 | unsigned long flags; |
ffbbdd21 LW |
2215 | |
2216 | /* | |
2217 | * This is a bit lame, but is optimized for the common execution path. | |
8caab75f | 2218 | * A wait_queue on the ctlr->busy could be used, but then the common |
ffbbdd21 LW |
2219 | * execution path (pump_messages) would be required to call wake_up or |
2220 | * friends on every SPI message. Do this instead. | |
2221 | */ | |
a71b7845 AS |
2222 | do { |
2223 | spin_lock_irqsave(&ctlr->queue_lock, flags); | |
2224 | if (list_empty(&ctlr->queue) && !ctlr->busy) { | |
2225 | ctlr->running = false; | |
2226 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); | |
2227 | return 0; | |
2228 | } | |
8caab75f | 2229 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
f97b26b0 | 2230 | usleep_range(10000, 11000); |
a71b7845 | 2231 | } while (--limit); |
ffbbdd21 | 2232 | |
a71b7845 | 2233 | return -EBUSY; |
ffbbdd21 LW |
2234 | } |
2235 | ||
8caab75f | 2236 | static int spi_destroy_queue(struct spi_controller *ctlr) |
ffbbdd21 LW |
2237 | { |
2238 | int ret; | |
2239 | ||
8caab75f | 2240 | ret = spi_stop_queue(ctlr); |
ffbbdd21 LW |
2241 | |
2242 | /* | |
3989144f | 2243 | * kthread_flush_worker will block until all work is done. |
ffbbdd21 LW |
2244 | * If the reason that stop_queue timed out is that the work will never |
2245 | * finish, then it does no good to call flush/stop thread, so | |
2246 | * return anyway. | |
2247 | */ | |
2248 | if (ret) { | |
8caab75f | 2249 | dev_err(&ctlr->dev, "problem destroying queue\n"); |
ffbbdd21 LW |
2250 | return ret; |
2251 | } | |
2252 | ||
60a883d1 | 2253 | kthread_destroy_worker(ctlr->kworker); |
ffbbdd21 LW |
2254 | |
2255 | return 0; | |
2256 | } | |
2257 | ||
0461a414 MB |
2258 | static int __spi_queued_transfer(struct spi_device *spi, |
2259 | struct spi_message *msg, | |
2260 | bool need_pump) | |
ffbbdd21 | 2261 | { |
8caab75f | 2262 | struct spi_controller *ctlr = spi->controller; |
ffbbdd21 LW |
2263 | unsigned long flags; |
2264 | ||
8caab75f | 2265 | spin_lock_irqsave(&ctlr->queue_lock, flags); |
ffbbdd21 | 2266 | |
8caab75f GU |
2267 | if (!ctlr->running) { |
2268 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); | |
ffbbdd21 LW |
2269 | return -ESHUTDOWN; |
2270 | } | |
2271 | msg->actual_length = 0; | |
2272 | msg->status = -EINPROGRESS; | |
2273 | ||
8caab75f | 2274 | list_add_tail(&msg->queue, &ctlr->queue); |
ae7d2346 | 2275 | ctlr->queue_empty = false; |
f0125f1a | 2276 | if (!ctlr->busy && need_pump) |
60a883d1 | 2277 | kthread_queue_work(ctlr->kworker, &ctlr->pump_messages); |
ffbbdd21 | 2278 | |
8caab75f | 2279 | spin_unlock_irqrestore(&ctlr->queue_lock, flags); |
ffbbdd21 LW |
2280 | return 0; |
2281 | } | |
2282 | ||
0461a414 MB |
2283 | /** |
2284 | * spi_queued_transfer - transfer function for queued transfers | |
702ca026 AS |
2285 | * @spi: SPI device which is requesting transfer |
2286 | * @msg: SPI message which is to handled is queued to driver queue | |
97d56dc6 JMC |
2287 | * |
2288 | * Return: zero on success, else a negative error code. | |
0461a414 MB |
2289 | */ |
2290 | static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg) | |
2291 | { | |
2292 | return __spi_queued_transfer(spi, msg, true); | |
2293 | } | |
2294 | ||
8caab75f | 2295 | static int spi_controller_initialize_queue(struct spi_controller *ctlr) |
ffbbdd21 LW |
2296 | { |
2297 | int ret; | |
2298 | ||
8caab75f GU |
2299 | ctlr->transfer = spi_queued_transfer; |
2300 | if (!ctlr->transfer_one_message) | |
2301 | ctlr->transfer_one_message = spi_transfer_one_message; | |
ffbbdd21 LW |
2302 | |
2303 | /* Initialize and start queue */ | |
8caab75f | 2304 | ret = spi_init_queue(ctlr); |
ffbbdd21 | 2305 | if (ret) { |
8caab75f | 2306 | dev_err(&ctlr->dev, "problem initializing queue\n"); |
ffbbdd21 LW |
2307 | goto err_init_queue; |
2308 | } | |
8caab75f GU |
2309 | ctlr->queued = true; |
2310 | ret = spi_start_queue(ctlr); | |
ffbbdd21 | 2311 | if (ret) { |
8caab75f | 2312 | dev_err(&ctlr->dev, "problem starting queue\n"); |
ffbbdd21 LW |
2313 | goto err_start_queue; |
2314 | } | |
2315 | ||
2316 | return 0; | |
2317 | ||
2318 | err_start_queue: | |
8caab75f | 2319 | spi_destroy_queue(ctlr); |
c3676d5c | 2320 | err_init_queue: |
ffbbdd21 LW |
2321 | return ret; |
2322 | } | |
2323 | ||
988f259b BB |
2324 | /** |
2325 | * spi_flush_queue - Send all pending messages in the queue from the callers' | |
2326 | * context | |
2327 | * @ctlr: controller to process queue for | |
2328 | * | |
2329 | * This should be used when one wants to ensure all pending messages have been | |
2330 | * sent before doing something. Is used by the spi-mem code to make sure SPI | |
2331 | * memory operations do not preempt regular SPI transfers that have been queued | |
2332 | * before the spi-mem operation. | |
2333 | */ | |
2334 | void spi_flush_queue(struct spi_controller *ctlr) | |
2335 | { | |
2336 | if (ctlr->transfer == spi_queued_transfer) | |
2337 | __spi_pump_messages(ctlr, false); | |
2338 | } | |
2339 | ||
ffbbdd21 LW |
2340 | /*-------------------------------------------------------------------------*/ |
2341 | ||
7cb94361 | 2342 | #if defined(CONFIG_OF) |
f276aacf JG |
2343 | static void of_spi_parse_dt_cs_delay(struct device_node *nc, |
2344 | struct spi_delay *delay, const char *prop) | |
2345 | { | |
2346 | u32 value; | |
2347 | ||
2348 | if (!of_property_read_u32(nc, prop, &value)) { | |
2349 | if (value > U16_MAX) { | |
2350 | delay->value = DIV_ROUND_UP(value, 1000); | |
2351 | delay->unit = SPI_DELAY_UNIT_USECS; | |
2352 | } else { | |
2353 | delay->value = value; | |
2354 | delay->unit = SPI_DELAY_UNIT_NSECS; | |
2355 | } | |
2356 | } | |
2357 | } | |
2358 | ||
8caab75f | 2359 | static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, |
c2e51ac3 | 2360 | struct device_node *nc) |
aff5e3f8 | 2361 | { |
4d8ff6b0 AKM |
2362 | u32 value, cs[SPI_CS_CNT_MAX]; |
2363 | int rc, idx; | |
aff5e3f8 | 2364 | |
aff5e3f8 | 2365 | /* Mode (clock phase/polarity/etc.) */ |
e0bcb680 | 2366 | if (of_property_read_bool(nc, "spi-cpha")) |
aff5e3f8 | 2367 | spi->mode |= SPI_CPHA; |
e0bcb680 | 2368 | if (of_property_read_bool(nc, "spi-cpol")) |
aff5e3f8 | 2369 | spi->mode |= SPI_CPOL; |
e0bcb680 | 2370 | if (of_property_read_bool(nc, "spi-3wire")) |
aff5e3f8 | 2371 | spi->mode |= SPI_3WIRE; |
e0bcb680 | 2372 | if (of_property_read_bool(nc, "spi-lsb-first")) |
aff5e3f8 | 2373 | spi->mode |= SPI_LSB_FIRST; |
3e5ec1db | 2374 | if (of_property_read_bool(nc, "spi-cs-high")) |
f3186dd8 LW |
2375 | spi->mode |= SPI_CS_HIGH; |
2376 | ||
aff5e3f8 PA |
2377 | /* Device DUAL/QUAD mode */ |
2378 | if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { | |
2379 | switch (value) { | |
d962608c DB |
2380 | case 0: |
2381 | spi->mode |= SPI_NO_TX; | |
2382 | break; | |
aff5e3f8 PA |
2383 | case 1: |
2384 | break; | |
2385 | case 2: | |
2386 | spi->mode |= SPI_TX_DUAL; | |
2387 | break; | |
2388 | case 4: | |
2389 | spi->mode |= SPI_TX_QUAD; | |
2390 | break; | |
6b03061f YNG |
2391 | case 8: |
2392 | spi->mode |= SPI_TX_OCTAL; | |
2393 | break; | |
aff5e3f8 | 2394 | default: |
8caab75f | 2395 | dev_warn(&ctlr->dev, |
aff5e3f8 PA |
2396 | "spi-tx-bus-width %d not supported\n", |
2397 | value); | |
2398 | break; | |
2399 | } | |
2400 | } | |
2401 | ||
2402 | if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) { | |
2403 | switch (value) { | |
d962608c DB |
2404 | case 0: |
2405 | spi->mode |= SPI_NO_RX; | |
2406 | break; | |
aff5e3f8 PA |
2407 | case 1: |
2408 | break; | |
2409 | case 2: | |
2410 | spi->mode |= SPI_RX_DUAL; | |
2411 | break; | |
2412 | case 4: | |
2413 | spi->mode |= SPI_RX_QUAD; | |
2414 | break; | |
6b03061f YNG |
2415 | case 8: |
2416 | spi->mode |= SPI_RX_OCTAL; | |
2417 | break; | |
aff5e3f8 | 2418 | default: |
8caab75f | 2419 | dev_warn(&ctlr->dev, |
aff5e3f8 PA |
2420 | "spi-rx-bus-width %d not supported\n", |
2421 | value); | |
2422 | break; | |
2423 | } | |
2424 | } | |
2425 | ||
1e0cc8d0 | 2426 | if (spi_controller_is_target(ctlr)) { |
194276b0 | 2427 | if (!of_node_name_eq(nc, "slave")) { |
25c56c88 RH |
2428 | dev_err(&ctlr->dev, "%pOF is not called 'slave'\n", |
2429 | nc); | |
6c364062 GU |
2430 | return -EINVAL; |
2431 | } | |
2432 | return 0; | |
2433 | } | |
2434 | ||
4d8ff6b0 AKM |
2435 | if (ctlr->num_chipselect > SPI_CS_CNT_MAX) { |
2436 | dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n"); | |
2437 | return -EINVAL; | |
2438 | } | |
2439 | ||
5ee91605 | 2440 | spi_set_all_cs_unused(spi); |
4d8ff6b0 | 2441 | |
6c364062 | 2442 | /* Device address */ |
4d8ff6b0 AKM |
2443 | rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1, |
2444 | SPI_CS_CNT_MAX); | |
2445 | if (rc < 0) { | |
25c56c88 RH |
2446 | dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n", |
2447 | nc, rc); | |
6c364062 GU |
2448 | return rc; |
2449 | } | |
4d8ff6b0 AKM |
2450 | if (rc > ctlr->num_chipselect) { |
2451 | dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n", | |
2452 | nc, rc); | |
2453 | return rc; | |
2454 | } | |
270ddc23 | 2455 | if ((of_property_present(nc, "parallel-memories")) && |
4d8ff6b0 AKM |
2456 | (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) { |
2457 | dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n"); | |
2458 | return -EINVAL; | |
2459 | } | |
2460 | for (idx = 0; idx < rc; idx++) | |
2461 | spi_set_chipselect(spi, idx, cs[idx]); | |
2462 | ||
2463 | /* | |
1209c556 AS |
2464 | * By default spi->chip_select[0] will hold the physical CS number, |
2465 | * so set bit 0 in spi->cs_index_mask. | |
4d8ff6b0 | 2466 | */ |
1209c556 | 2467 | spi->cs_index_mask = BIT(0); |
6c364062 | 2468 | |
aff5e3f8 | 2469 | /* Device speed */ |
671c3bf5 CG |
2470 | if (!of_property_read_u32(nc, "spi-max-frequency", &value)) |
2471 | spi->max_speed_hz = value; | |
aff5e3f8 | 2472 | |
f276aacf JG |
2473 | /* Device CS delays */ |
2474 | of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns"); | |
5827b31d JG |
2475 | of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns"); |
2476 | of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns"); | |
33a2fde5 | 2477 | |
c2e51ac3 GU |
2478 | return 0; |
2479 | } | |
2480 | ||
2481 | static struct spi_device * | |
8caab75f | 2482 | of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc) |
c2e51ac3 GU |
2483 | { |
2484 | struct spi_device *spi; | |
2485 | int rc; | |
2486 | ||
2487 | /* Alloc an spi_device */ | |
8caab75f | 2488 | spi = spi_alloc_device(ctlr); |
c2e51ac3 | 2489 | if (!spi) { |
25c56c88 | 2490 | dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc); |
c2e51ac3 GU |
2491 | rc = -ENOMEM; |
2492 | goto err_out; | |
2493 | } | |
2494 | ||
2495 | /* Select device driver */ | |
673aa1ed MR |
2496 | rc = of_alias_from_compatible(nc, spi->modalias, |
2497 | sizeof(spi->modalias)); | |
c2e51ac3 | 2498 | if (rc < 0) { |
25c56c88 | 2499 | dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc); |
c2e51ac3 GU |
2500 | goto err_out; |
2501 | } | |
2502 | ||
8caab75f | 2503 | rc = of_spi_parse_dt(ctlr, spi, nc); |
c2e51ac3 GU |
2504 | if (rc) |
2505 | goto err_out; | |
2506 | ||
aff5e3f8 PA |
2507 | /* Store a pointer to the node in the device structure */ |
2508 | of_node_get(nc); | |
c7cc588b AS |
2509 | |
2510 | device_set_node(&spi->dev, of_fwnode_handle(nc)); | |
aff5e3f8 PA |
2511 | |
2512 | /* Register the new device */ | |
aff5e3f8 PA |
2513 | rc = spi_add_device(spi); |
2514 | if (rc) { | |
25c56c88 | 2515 | dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc); |
8324147f | 2516 | goto err_of_node_put; |
aff5e3f8 PA |
2517 | } |
2518 | ||
2519 | return spi; | |
2520 | ||
8324147f JH |
2521 | err_of_node_put: |
2522 | of_node_put(nc); | |
aff5e3f8 PA |
2523 | err_out: |
2524 | spi_dev_put(spi); | |
2525 | return ERR_PTR(rc); | |
2526 | } | |
2527 | ||
d57a4282 GL |
2528 | /** |
2529 | * of_register_spi_devices() - Register child devices onto the SPI bus | |
8caab75f | 2530 | * @ctlr: Pointer to spi_controller device |
d57a4282 | 2531 | * |
6c364062 | 2532 | * Registers an spi_device for each child node of controller node which |
91ce208d | 2533 | * represents a valid SPI target device. |
d57a4282 | 2534 | */ |
8caab75f | 2535 | static void of_register_spi_devices(struct spi_controller *ctlr) |
d57a4282 GL |
2536 | { |
2537 | struct spi_device *spi; | |
2538 | struct device_node *nc; | |
d57a4282 | 2539 | |
8caab75f | 2540 | for_each_available_child_of_node(ctlr->dev.of_node, nc) { |
bd6c1644 GU |
2541 | if (of_node_test_and_set_flag(nc, OF_POPULATED)) |
2542 | continue; | |
8caab75f | 2543 | spi = of_register_spi_device(ctlr, nc); |
e0af98a7 | 2544 | if (IS_ERR(spi)) { |
8caab75f | 2545 | dev_warn(&ctlr->dev, |
25c56c88 | 2546 | "Failed to create SPI device for %pOF\n", nc); |
e0af98a7 RR |
2547 | of_node_clear_flag(nc, OF_POPULATED); |
2548 | } | |
d57a4282 GL |
2549 | } |
2550 | } | |
2551 | #else | |
8caab75f | 2552 | static void of_register_spi_devices(struct spi_controller *ctlr) { } |
d57a4282 GL |
2553 | #endif |
2554 | ||
0c79378c SR |
2555 | /** |
2556 | * spi_new_ancillary_device() - Register ancillary SPI device | |
2557 | * @spi: Pointer to the main SPI device registering the ancillary device | |
2558 | * @chip_select: Chip Select of the ancillary device | |
2559 | * | |
2560 | * Register an ancillary SPI device; for example some chips have a chip-select | |
2561 | * for normal device usage and another one for setup/firmware upload. | |
2562 | * | |
2563 | * This may only be called from main SPI device's probe routine. | |
2564 | * | |
2565 | * Return: 0 on success; negative errno on failure | |
2566 | */ | |
2567 | struct spi_device *spi_new_ancillary_device(struct spi_device *spi, | |
2568 | u8 chip_select) | |
2569 | { | |
7b5c6a54 | 2570 | struct spi_controller *ctlr = spi->controller; |
0c79378c | 2571 | struct spi_device *ancillary; |
0f2ecc3f | 2572 | int rc; |
0c79378c SR |
2573 | |
2574 | /* Alloc an spi_device */ | |
7b5c6a54 | 2575 | ancillary = spi_alloc_device(ctlr); |
0c79378c SR |
2576 | if (!ancillary) { |
2577 | rc = -ENOMEM; | |
2578 | goto err_out; | |
2579 | } | |
2580 | ||
51e99de5 | 2581 | strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); |
0c79378c SR |
2582 | |
2583 | /* Use provided chip-select for ancillary device */ | |
5ee91605 | 2584 | spi_set_all_cs_unused(ancillary); |
303feb3c | 2585 | spi_set_chipselect(ancillary, 0, chip_select); |
0c79378c SR |
2586 | |
2587 | /* Take over SPI mode/speed from SPI main device */ | |
2588 | ancillary->max_speed_hz = spi->max_speed_hz; | |
b01d5506 | 2589 | ancillary->mode = spi->mode; |
4d8ff6b0 | 2590 | /* |
1209c556 AS |
2591 | * By default spi->chip_select[0] will hold the physical CS number, |
2592 | * so set bit 0 in spi->cs_index_mask. | |
4d8ff6b0 | 2593 | */ |
1209c556 | 2594 | ancillary->cs_index_mask = BIT(0); |
0c79378c | 2595 | |
7b5c6a54 AS |
2596 | WARN_ON(!mutex_is_locked(&ctlr->add_lock)); |
2597 | ||
0c79378c | 2598 | /* Register the new device */ |
7b5c6a54 | 2599 | rc = __spi_add_device(ancillary); |
0c79378c SR |
2600 | if (rc) { |
2601 | dev_err(&spi->dev, "failed to register ancillary device\n"); | |
2602 | goto err_out; | |
2603 | } | |
2604 | ||
2605 | return ancillary; | |
2606 | ||
2607 | err_out: | |
2608 | spi_dev_put(ancillary); | |
2609 | return ERR_PTR(rc); | |
2610 | } | |
2611 | EXPORT_SYMBOL_GPL(spi_new_ancillary_device); | |
2612 | ||
64bee4d2 | 2613 | #ifdef CONFIG_ACPI |
4c3c5954 AB |
2614 | struct acpi_spi_lookup { |
2615 | struct spi_controller *ctlr; | |
2616 | u32 max_speed_hz; | |
2617 | u32 mode; | |
2618 | int irq; | |
2619 | u8 bits_per_word; | |
2620 | u8 chip_select; | |
87e59b36 SB |
2621 | int n; |
2622 | int index; | |
4c3c5954 AB |
2623 | }; |
2624 | ||
e612af7a SB |
2625 | static int acpi_spi_count(struct acpi_resource *ares, void *data) |
2626 | { | |
2627 | struct acpi_resource_spi_serialbus *sb; | |
2628 | int *count = data; | |
2629 | ||
2630 | if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) | |
2631 | return 1; | |
2632 | ||
2633 | sb = &ares->data.spi_serial_bus; | |
2634 | if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI) | |
2635 | return 1; | |
2636 | ||
2637 | *count = *count + 1; | |
2638 | ||
2639 | return 1; | |
2640 | } | |
2641 | ||
2642 | /** | |
2643 | * acpi_spi_count_resources - Count the number of SpiSerialBus resources | |
2644 | * @adev: ACPI device | |
2645 | * | |
702ca026 | 2646 | * Return: the number of SpiSerialBus resources in the ACPI-device's |
e612af7a SB |
2647 | * resource-list; or a negative error code. |
2648 | */ | |
2649 | int acpi_spi_count_resources(struct acpi_device *adev) | |
2650 | { | |
2651 | LIST_HEAD(r); | |
2652 | int count = 0; | |
2653 | int ret; | |
2654 | ||
2655 | ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count); | |
2656 | if (ret < 0) | |
2657 | return ret; | |
2658 | ||
2659 | acpi_dev_free_resource_list(&r); | |
2660 | ||
2661 | return count; | |
2662 | } | |
2663 | EXPORT_SYMBOL_GPL(acpi_spi_count_resources); | |
2664 | ||
4c3c5954 AB |
2665 | static void acpi_spi_parse_apple_properties(struct acpi_device *dev, |
2666 | struct acpi_spi_lookup *lookup) | |
8a2e487e | 2667 | { |
8a2e487e LW |
2668 | const union acpi_object *obj; |
2669 | ||
2670 | if (!x86_apple_machine) | |
2671 | return; | |
2672 | ||
2673 | if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) | |
2674 | && obj->buffer.length >= 4) | |
4c3c5954 | 2675 | lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; |
8a2e487e LW |
2676 | |
2677 | if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) | |
2678 | && obj->buffer.length == 8) | |
4c3c5954 | 2679 | lookup->bits_per_word = *(u64 *)obj->buffer.pointer; |
8a2e487e LW |
2680 | |
2681 | if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) | |
2682 | && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) | |
4c3c5954 | 2683 | lookup->mode |= SPI_LSB_FIRST; |
8a2e487e LW |
2684 | |
2685 | if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) | |
2686 | && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) | |
4c3c5954 | 2687 | lookup->mode |= SPI_CPOL; |
8a2e487e LW |
2688 | |
2689 | if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) | |
2690 | && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) | |
4c3c5954 | 2691 | lookup->mode |= SPI_CPHA; |
8a2e487e LW |
2692 | } |
2693 | ||
64bee4d2 MW |
2694 | static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) |
2695 | { | |
4c3c5954 AB |
2696 | struct acpi_spi_lookup *lookup = data; |
2697 | struct spi_controller *ctlr = lookup->ctlr; | |
64bee4d2 MW |
2698 | |
2699 | if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { | |
2700 | struct acpi_resource_spi_serialbus *sb; | |
4c3c5954 AB |
2701 | acpi_handle parent_handle; |
2702 | acpi_status status; | |
64bee4d2 MW |
2703 | |
2704 | sb = &ares->data.spi_serial_bus; | |
2705 | if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { | |
4c3c5954 | 2706 | |
87e59b36 SB |
2707 | if (lookup->index != -1 && lookup->n++ != lookup->index) |
2708 | return 1; | |
2709 | ||
4c3c5954 AB |
2710 | status = acpi_get_handle(NULL, |
2711 | sb->resource_source.string_ptr, | |
2712 | &parent_handle); | |
2713 | ||
87e59b36 | 2714 | if (ACPI_FAILURE(status)) |
4c3c5954 AB |
2715 | return -ENODEV; |
2716 | ||
87e59b36 | 2717 | if (ctlr) { |
2d19ea9e | 2718 | if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle)) |
87e59b36 SB |
2719 | return -ENODEV; |
2720 | } else { | |
2721 | struct acpi_device *adev; | |
2722 | ||
ac2a3fee RW |
2723 | adev = acpi_fetch_acpi_dev(parent_handle); |
2724 | if (!adev) | |
87e59b36 SB |
2725 | return -ENODEV; |
2726 | ||
2727 | ctlr = acpi_spi_find_controller_by_adev(adev); | |
2728 | if (!ctlr) | |
9c22ec4a | 2729 | return -EPROBE_DEFER; |
87e59b36 SB |
2730 | |
2731 | lookup->ctlr = ctlr; | |
2732 | } | |
2733 | ||
a0a90718 MW |
2734 | /* |
2735 | * ACPI DeviceSelection numbering is handled by the | |
2736 | * host controller driver in Windows and can vary | |
2737 | * from driver to driver. In Linux we always expect | |
2738 | * 0 .. max - 1 so we need to ask the driver to | |
2739 | * translate between the two schemes. | |
2740 | */ | |
8caab75f GU |
2741 | if (ctlr->fw_translate_cs) { |
2742 | int cs = ctlr->fw_translate_cs(ctlr, | |
a0a90718 MW |
2743 | sb->device_selection); |
2744 | if (cs < 0) | |
2745 | return cs; | |
4c3c5954 | 2746 | lookup->chip_select = cs; |
a0a90718 | 2747 | } else { |
4c3c5954 | 2748 | lookup->chip_select = sb->device_selection; |
a0a90718 MW |
2749 | } |
2750 | ||
4c3c5954 | 2751 | lookup->max_speed_hz = sb->connection_speed; |
0dadde34 | 2752 | lookup->bits_per_word = sb->data_bit_length; |
64bee4d2 MW |
2753 | |
2754 | if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) | |
4c3c5954 | 2755 | lookup->mode |= SPI_CPHA; |
64bee4d2 | 2756 | if (sb->clock_polarity == ACPI_SPI_START_HIGH) |
4c3c5954 | 2757 | lookup->mode |= SPI_CPOL; |
64bee4d2 | 2758 | if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) |
4c3c5954 | 2759 | lookup->mode |= SPI_CS_HIGH; |
64bee4d2 | 2760 | } |
4c3c5954 | 2761 | } else if (lookup->irq < 0) { |
64bee4d2 MW |
2762 | struct resource r; |
2763 | ||
2764 | if (acpi_dev_resource_interrupt(ares, 0, &r)) | |
4c3c5954 | 2765 | lookup->irq = r.start; |
64bee4d2 MW |
2766 | } |
2767 | ||
2768 | /* Always tell the ACPI core to skip this resource */ | |
2769 | return 1; | |
2770 | } | |
2771 | ||
000bee0e SB |
2772 | /** |
2773 | * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information | |
2774 | * @ctlr: controller to which the spi device belongs | |
2775 | * @adev: ACPI Device for the spi device | |
87e59b36 | 2776 | * @index: Index of the spi resource inside the ACPI Node |
000bee0e | 2777 | * |
702ca026 AS |
2778 | * This should be used to allocate a new SPI device from and ACPI Device node. |
2779 | * The caller is responsible for calling spi_add_device to register the SPI device. | |
000bee0e | 2780 | * |
702ca026 | 2781 | * If ctlr is set to NULL, the Controller for the SPI device will be looked up |
87e59b36 SB |
2782 | * using the resource. |
2783 | * If index is set to -1, index is not used. | |
2784 | * Note: If index is -1, ctlr must be set. | |
2785 | * | |
000bee0e SB |
2786 | * Return: a pointer to the new device, or ERR_PTR on error. |
2787 | */ | |
2788 | struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, | |
87e59b36 SB |
2789 | struct acpi_device *adev, |
2790 | int index) | |
64bee4d2 | 2791 | { |
4c3c5954 | 2792 | acpi_handle parent_handle = NULL; |
64bee4d2 | 2793 | struct list_head resource_list; |
b28944c6 | 2794 | struct acpi_spi_lookup lookup = {}; |
64bee4d2 MW |
2795 | struct spi_device *spi; |
2796 | int ret; | |
2797 | ||
87e59b36 SB |
2798 | if (!ctlr && index == -1) |
2799 | return ERR_PTR(-EINVAL); | |
2800 | ||
4c3c5954 | 2801 | lookup.ctlr = ctlr; |
4c3c5954 | 2802 | lookup.irq = -1; |
87e59b36 SB |
2803 | lookup.index = index; |
2804 | lookup.n = 0; | |
64bee4d2 MW |
2805 | |
2806 | INIT_LIST_HEAD(&resource_list); | |
2807 | ret = acpi_dev_get_resources(adev, &resource_list, | |
4c3c5954 | 2808 | acpi_spi_add_resource, &lookup); |
64bee4d2 MW |
2809 | acpi_dev_free_resource_list(&resource_list); |
2810 | ||
4c3c5954 | 2811 | if (ret < 0) |
95c8222f | 2812 | /* Found SPI in _CRS but it points to another controller */ |
b6747f4f | 2813 | return ERR_PTR(ret); |
8a2e487e | 2814 | |
4c3c5954 | 2815 | if (!lookup.max_speed_hz && |
10e92724 | 2816 | ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) && |
2d19ea9e | 2817 | device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) { |
91ce208d | 2818 | /* Apple does not use _CRS but nested devices for SPI target devices */ |
4c3c5954 AB |
2819 | acpi_spi_parse_apple_properties(adev, &lookup); |
2820 | } | |
2821 | ||
2822 | if (!lookup.max_speed_hz) | |
000bee0e | 2823 | return ERR_PTR(-ENODEV); |
4c3c5954 | 2824 | |
87e59b36 | 2825 | spi = spi_alloc_device(lookup.ctlr); |
4c3c5954 | 2826 | if (!spi) { |
87e59b36 | 2827 | dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n", |
4c3c5954 | 2828 | dev_name(&adev->dev)); |
000bee0e | 2829 | return ERR_PTR(-ENOMEM); |
64bee4d2 MW |
2830 | } |
2831 | ||
5ee91605 AS |
2832 | spi_set_all_cs_unused(spi); |
2833 | spi_set_chipselect(spi, 0, lookup.chip_select); | |
4d8ff6b0 | 2834 | |
4c3c5954 AB |
2835 | ACPI_COMPANION_SET(&spi->dev, adev); |
2836 | spi->max_speed_hz = lookup.max_speed_hz; | |
ea235786 | 2837 | spi->mode |= lookup.mode; |
4c3c5954 AB |
2838 | spi->irq = lookup.irq; |
2839 | spi->bits_per_word = lookup.bits_per_word; | |
4d8ff6b0 | 2840 | /* |
1209c556 AS |
2841 | * By default spi->chip_select[0] will hold the physical CS number, |
2842 | * so set bit 0 in spi->cs_index_mask. | |
4d8ff6b0 | 2843 | */ |
1209c556 | 2844 | spi->cs_index_mask = BIT(0); |
4c3c5954 | 2845 | |
000bee0e SB |
2846 | return spi; |
2847 | } | |
2848 | EXPORT_SYMBOL_GPL(acpi_spi_device_alloc); | |
2849 | ||
2850 | static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, | |
2851 | struct acpi_device *adev) | |
2852 | { | |
2853 | struct spi_device *spi; | |
2854 | ||
2855 | if (acpi_bus_get_status(adev) || !adev->status.present || | |
2856 | acpi_device_enumerated(adev)) | |
2857 | return AE_OK; | |
2858 | ||
87e59b36 | 2859 | spi = acpi_spi_device_alloc(ctlr, adev, -1); |
000bee0e SB |
2860 | if (IS_ERR(spi)) { |
2861 | if (PTR_ERR(spi) == -ENOMEM) | |
2862 | return AE_NO_MEMORY; | |
2863 | else | |
2864 | return AE_OK; | |
2865 | } | |
2866 | ||
0c6543f6 DD |
2867 | acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, |
2868 | sizeof(spi->modalias)); | |
2869 | ||
7f24467f OP |
2870 | acpi_device_set_enumerated(adev); |
2871 | ||
33cf00e5 | 2872 | adev->power.flags.ignore_parent = true; |
64bee4d2 | 2873 | if (spi_add_device(spi)) { |
33cf00e5 | 2874 | adev->power.flags.ignore_parent = false; |
8caab75f | 2875 | dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n", |
64bee4d2 MW |
2876 | dev_name(&adev->dev)); |
2877 | spi_dev_put(spi); | |
2878 | } | |
2879 | ||
2880 | return AE_OK; | |
2881 | } | |
2882 | ||
7f24467f OP |
2883 | static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level, |
2884 | void *data, void **return_value) | |
2885 | { | |
7030c428 | 2886 | struct acpi_device *adev = acpi_fetch_acpi_dev(handle); |
8caab75f | 2887 | struct spi_controller *ctlr = data; |
7f24467f | 2888 | |
7030c428 | 2889 | if (!adev) |
7f24467f OP |
2890 | return AE_OK; |
2891 | ||
8caab75f | 2892 | return acpi_register_spi_device(ctlr, adev); |
7f24467f OP |
2893 | } |
2894 | ||
4c3c5954 AB |
2895 | #define SPI_ACPI_ENUMERATE_MAX_DEPTH 32 |
2896 | ||
8caab75f | 2897 | static void acpi_register_spi_devices(struct spi_controller *ctlr) |
64bee4d2 MW |
2898 | { |
2899 | acpi_status status; | |
2900 | acpi_handle handle; | |
2901 | ||
8caab75f | 2902 | handle = ACPI_HANDLE(ctlr->dev.parent); |
64bee4d2 MW |
2903 | if (!handle) |
2904 | return; | |
2905 | ||
4c3c5954 AB |
2906 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, |
2907 | SPI_ACPI_ENUMERATE_MAX_DEPTH, | |
8caab75f | 2908 | acpi_spi_add_device, NULL, ctlr, NULL); |
64bee4d2 | 2909 | if (ACPI_FAILURE(status)) |
91ce208d | 2910 | dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n"); |
64bee4d2 MW |
2911 | } |
2912 | #else | |
8caab75f | 2913 | static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {} |
64bee4d2 MW |
2914 | #endif /* CONFIG_ACPI */ |
2915 | ||
8caab75f | 2916 | static void spi_controller_release(struct device *dev) |
8ae12a0d | 2917 | { |
8caab75f | 2918 | struct spi_controller *ctlr; |
8ae12a0d | 2919 | |
8caab75f GU |
2920 | ctlr = container_of(dev, struct spi_controller, dev); |
2921 | kfree(ctlr); | |
8ae12a0d DB |
2922 | } |
2923 | ||
91ce208d | 2924 | static const struct class spi_controller_class = { |
8ae12a0d | 2925 | .name = "spi_master", |
8caab75f | 2926 | .dev_release = spi_controller_release, |
91ce208d | 2927 | .dev_groups = spi_controller_groups, |
8ae12a0d DB |
2928 | }; |
2929 | ||
6c364062 GU |
2930 | #ifdef CONFIG_SPI_SLAVE |
2931 | /** | |
91ce208d | 2932 | * spi_target_abort - abort the ongoing transfer request on an SPI target controller |
6c364062 GU |
2933 | * @spi: device used for the current transfer |
2934 | */ | |
b8d3b056 YY |
2935 | int spi_target_abort(struct spi_device *spi) |
2936 | { | |
2937 | struct spi_controller *ctlr = spi->controller; | |
2938 | ||
2939 | if (spi_controller_is_target(ctlr) && ctlr->target_abort) | |
2940 | return ctlr->target_abort(ctlr); | |
2941 | ||
2942 | return -ENOTSUPP; | |
2943 | } | |
2944 | EXPORT_SYMBOL_GPL(spi_target_abort); | |
2945 | ||
cc8b4659 GU |
2946 | static ssize_t slave_show(struct device *dev, struct device_attribute *attr, |
2947 | char *buf) | |
6c364062 | 2948 | { |
8caab75f GU |
2949 | struct spi_controller *ctlr = container_of(dev, struct spi_controller, |
2950 | dev); | |
6c364062 | 2951 | struct device *child; |
176fda56 | 2952 | int ret; |
6c364062 | 2953 | |
c21b0837 | 2954 | child = device_find_any_child(&ctlr->dev); |
176fda56 ML |
2955 | ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL); |
2956 | put_device(child); | |
2957 | ||
2958 | return ret; | |
6c364062 GU |
2959 | } |
2960 | ||
cc8b4659 GU |
2961 | static ssize_t slave_store(struct device *dev, struct device_attribute *attr, |
2962 | const char *buf, size_t count) | |
6c364062 | 2963 | { |
8caab75f GU |
2964 | struct spi_controller *ctlr = container_of(dev, struct spi_controller, |
2965 | dev); | |
6c364062 GU |
2966 | struct spi_device *spi; |
2967 | struct device *child; | |
2968 | char name[32]; | |
2969 | int rc; | |
2970 | ||
2971 | rc = sscanf(buf, "%31s", name); | |
2972 | if (rc != 1 || !name[0]) | |
2973 | return -EINVAL; | |
2974 | ||
c21b0837 | 2975 | child = device_find_any_child(&ctlr->dev); |
6c364062 | 2976 | if (child) { |
91ce208d | 2977 | /* Remove registered target device */ |
6c364062 GU |
2978 | device_unregister(child); |
2979 | put_device(child); | |
2980 | } | |
2981 | ||
2982 | if (strcmp(name, "(null)")) { | |
91ce208d | 2983 | /* Register new target device */ |
6c364062 GU |
2984 | spi = spi_alloc_device(ctlr); |
2985 | if (!spi) | |
2986 | return -ENOMEM; | |
2987 | ||
51e99de5 | 2988 | strscpy(spi->modalias, name, sizeof(spi->modalias)); |
6c364062 GU |
2989 | |
2990 | rc = spi_add_device(spi); | |
2991 | if (rc) { | |
2992 | spi_dev_put(spi); | |
2993 | return rc; | |
2994 | } | |
2995 | } | |
2996 | ||
2997 | return count; | |
2998 | } | |
2999 | ||
cc8b4659 | 3000 | static DEVICE_ATTR_RW(slave); |
6c364062 | 3001 | |
91ce208d | 3002 | static struct attribute *spi_target_attrs[] = { |
6c364062 GU |
3003 | &dev_attr_slave.attr, |
3004 | NULL, | |
3005 | }; | |
3006 | ||
91ce208d AS |
3007 | static const struct attribute_group spi_target_group = { |
3008 | .attrs = spi_target_attrs, | |
6c364062 GU |
3009 | }; |
3010 | ||
91ce208d | 3011 | static const struct attribute_group *spi_target_groups[] = { |
8caab75f | 3012 | &spi_controller_statistics_group, |
91ce208d | 3013 | &spi_target_group, |
6c364062 GU |
3014 | NULL, |
3015 | }; | |
3016 | ||
91ce208d | 3017 | static const struct class spi_target_class = { |
6c364062 | 3018 | .name = "spi_slave", |
8caab75f | 3019 | .dev_release = spi_controller_release, |
91ce208d | 3020 | .dev_groups = spi_target_groups, |
6c364062 GU |
3021 | }; |
3022 | #else | |
91ce208d | 3023 | extern struct class spi_target_class; /* dummy */ |
6c364062 | 3024 | #endif |
8ae12a0d DB |
3025 | |
3026 | /** | |
91ce208d | 3027 | * __spi_alloc_controller - allocate an SPI host or target controller |
8ae12a0d | 3028 | * @dev: the controller, possibly using the platform_bus |
33e34dc6 | 3029 | * @size: how much zeroed driver-private data to allocate; the pointer to this |
229e6af1 LW |
3030 | * memory is in the driver_data field of the returned device, accessible |
3031 | * with spi_controller_get_devdata(); the memory is cacheline aligned; | |
3032 | * drivers granting DMA access to portions of their private data need to | |
3033 | * round up @size using ALIGN(size, dma_get_cache_alignment()). | |
91ce208d AS |
3034 | * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true) |
3035 | * controller | |
33e34dc6 | 3036 | * Context: can sleep |
8ae12a0d | 3037 | * |
6c364062 | 3038 | * This call is used only by SPI controller drivers, which are the |
8ae12a0d | 3039 | * only ones directly touching chip registers. It's how they allocate |
8caab75f | 3040 | * an spi_controller structure, prior to calling spi_register_controller(). |
8ae12a0d | 3041 | * |
97d56dc6 | 3042 | * This must be called from context that can sleep. |
8ae12a0d | 3043 | * |
6c364062 | 3044 | * The caller is responsible for assigning the bus number and initializing the |
8caab75f GU |
3045 | * controller's methods before calling spi_register_controller(); and (after |
3046 | * errors adding the device) calling spi_controller_put() to prevent a memory | |
3047 | * leak. | |
97d56dc6 | 3048 | * |
6c364062 | 3049 | * Return: the SPI controller structure on success, else NULL. |
8ae12a0d | 3050 | */ |
8caab75f | 3051 | struct spi_controller *__spi_alloc_controller(struct device *dev, |
91ce208d | 3052 | unsigned int size, bool target) |
8ae12a0d | 3053 | { |
8caab75f | 3054 | struct spi_controller *ctlr; |
229e6af1 | 3055 | size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment()); |
8ae12a0d | 3056 | |
0c868461 DB |
3057 | if (!dev) |
3058 | return NULL; | |
3059 | ||
229e6af1 | 3060 | ctlr = kzalloc(size + ctlr_size, GFP_KERNEL); |
8caab75f | 3061 | if (!ctlr) |
8ae12a0d DB |
3062 | return NULL; |
3063 | ||
8caab75f | 3064 | device_initialize(&ctlr->dev); |
16a8e2fb UKK |
3065 | INIT_LIST_HEAD(&ctlr->queue); |
3066 | spin_lock_init(&ctlr->queue_lock); | |
3067 | spin_lock_init(&ctlr->bus_lock_spinlock); | |
3068 | mutex_init(&ctlr->bus_lock_mutex); | |
3069 | mutex_init(&ctlr->io_mutex); | |
3070 | mutex_init(&ctlr->add_lock); | |
8caab75f GU |
3071 | ctlr->bus_num = -1; |
3072 | ctlr->num_chipselect = 1; | |
91ce208d AS |
3073 | ctlr->target = target; |
3074 | if (IS_ENABLED(CONFIG_SPI_SLAVE) && target) | |
3075 | ctlr->dev.class = &spi_target_class; | |
6c364062 | 3076 | else |
91ce208d | 3077 | ctlr->dev.class = &spi_controller_class; |
8caab75f GU |
3078 | ctlr->dev.parent = dev; |
3079 | pm_suspend_ignore_children(&ctlr->dev, true); | |
229e6af1 | 3080 | spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size); |
8ae12a0d | 3081 | |
8caab75f | 3082 | return ctlr; |
8ae12a0d | 3083 | } |
6c364062 | 3084 | EXPORT_SYMBOL_GPL(__spi_alloc_controller); |
8ae12a0d | 3085 | |
5e844cc3 LW |
3086 | static void devm_spi_release_controller(struct device *dev, void *ctlr) |
3087 | { | |
3088 | spi_controller_put(*(struct spi_controller **)ctlr); | |
3089 | } | |
3090 | ||
3091 | /** | |
3092 | * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller() | |
3093 | * @dev: physical device of SPI controller | |
3094 | * @size: how much zeroed driver-private data to allocate | |
91ce208d | 3095 | * @target: whether to allocate an SPI host (false) or SPI target (true) controller |
5e844cc3 LW |
3096 | * Context: can sleep |
3097 | * | |
3098 | * Allocate an SPI controller and automatically release a reference on it | |
3099 | * when @dev is unbound from its driver. Drivers are thus relieved from | |
3100 | * having to call spi_controller_put(). | |
3101 | * | |
3102 | * The arguments to this function are identical to __spi_alloc_controller(). | |
3103 | * | |
3104 | * Return: the SPI controller structure on success, else NULL. | |
3105 | */ | |
3106 | struct spi_controller *__devm_spi_alloc_controller(struct device *dev, | |
3107 | unsigned int size, | |
91ce208d | 3108 | bool target) |
5e844cc3 LW |
3109 | { |
3110 | struct spi_controller **ptr, *ctlr; | |
3111 | ||
3112 | ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr), | |
3113 | GFP_KERNEL); | |
3114 | if (!ptr) | |
3115 | return NULL; | |
3116 | ||
91ce208d | 3117 | ctlr = __spi_alloc_controller(dev, size, target); |
5e844cc3 | 3118 | if (ctlr) { |
794aaf01 | 3119 | ctlr->devm_allocated = true; |
5e844cc3 LW |
3120 | *ptr = ctlr; |
3121 | devres_add(dev, ptr); | |
3122 | } else { | |
3123 | devres_free(ptr); | |
3124 | } | |
3125 | ||
3126 | return ctlr; | |
3127 | } | |
3128 | EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller); | |
3129 | ||
f3186dd8 | 3130 | /** |
91ce208d AS |
3131 | * spi_get_gpio_descs() - grab chip select GPIOs for the controller |
3132 | * @ctlr: The SPI controller to grab GPIO descriptors for | |
f3186dd8 LW |
3133 | */ |
3134 | static int spi_get_gpio_descs(struct spi_controller *ctlr) | |
3135 | { | |
3136 | int nb, i; | |
3137 | struct gpio_desc **cs; | |
3138 | struct device *dev = &ctlr->dev; | |
7d93aecd GU |
3139 | unsigned long native_cs_mask = 0; |
3140 | unsigned int num_cs_gpios = 0; | |
f3186dd8 LW |
3141 | |
3142 | nb = gpiod_count(dev, "cs"); | |
31ed8ebc AS |
3143 | if (nb < 0) { |
3144 | /* No GPIOs at all is fine, else return the error */ | |
3145 | if (nb == -ENOENT) | |
3146 | return 0; | |
f3186dd8 | 3147 | return nb; |
31ed8ebc AS |
3148 | } |
3149 | ||
3150 | ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); | |
f3186dd8 LW |
3151 | |
3152 | cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), | |
3153 | GFP_KERNEL); | |
3154 | if (!cs) | |
3155 | return -ENOMEM; | |
3156 | ctlr->cs_gpiods = cs; | |
3157 | ||
3158 | for (i = 0; i < nb; i++) { | |
3159 | /* | |
3160 | * Most chipselects are active low, the inverted | |
3161 | * semantics are handled by special quirks in gpiolib, | |
3162 | * so initializing them GPIOD_OUT_LOW here means | |
3163 | * "unasserted", in most cases this will drive the physical | |
3164 | * line high. | |
3165 | */ | |
3166 | cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, | |
3167 | GPIOD_OUT_LOW); | |
1723fdec GU |
3168 | if (IS_ERR(cs[i])) |
3169 | return PTR_ERR(cs[i]); | |
f3186dd8 LW |
3170 | |
3171 | if (cs[i]) { | |
3172 | /* | |
3173 | * If we find a CS GPIO, name it after the device and | |
3174 | * chip select line. | |
3175 | */ | |
3176 | char *gpioname; | |
3177 | ||
3178 | gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", | |
3179 | dev_name(dev), i); | |
3180 | if (!gpioname) | |
3181 | return -ENOMEM; | |
3182 | gpiod_set_consumer_name(cs[i], gpioname); | |
7d93aecd GU |
3183 | num_cs_gpios++; |
3184 | continue; | |
f3186dd8 | 3185 | } |
7d93aecd GU |
3186 | |
3187 | if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { | |
3188 | dev_err(dev, "Invalid native chip select %d\n", i); | |
3189 | return -EINVAL; | |
f3186dd8 | 3190 | } |
7d93aecd GU |
3191 | native_cs_mask |= BIT(i); |
3192 | } | |
3193 | ||
f60d7270 | 3194 | ctlr->unused_native_cs = ffs(~native_cs_mask) - 1; |
dbaca8e5 | 3195 | |
82238d2c | 3196 | if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios && |
dbaca8e5 | 3197 | ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) { |
7d93aecd GU |
3198 | dev_err(dev, "No unused native chip select available\n"); |
3199 | return -EINVAL; | |
f3186dd8 LW |
3200 | } |
3201 | ||
3202 | return 0; | |
3203 | } | |
3204 | ||
bdf3a3b5 BB |
3205 | static int spi_controller_check_ops(struct spi_controller *ctlr) |
3206 | { | |
3207 | /* | |
b5932f5c BB |
3208 | * The controller may implement only the high-level SPI-memory like |
3209 | * operations if it does not support regular SPI transfers, and this is | |
3210 | * valid use case. | |
76a85704 WZ |
3211 | * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least |
3212 | * one of the ->transfer_xxx() method be implemented. | |
bdf3a3b5 | 3213 | */ |
20064c47 | 3214 | if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) { |
76a85704 | 3215 | if (!ctlr->transfer && !ctlr->transfer_one && |
b5932f5c | 3216 | !ctlr->transfer_one_message) { |
76a85704 WZ |
3217 | return -EINVAL; |
3218 | } | |
b5932f5c | 3219 | } |
bdf3a3b5 BB |
3220 | |
3221 | return 0; | |
3222 | } | |
3223 | ||
440c4733 AS |
3224 | /* Allocate dynamic bus number using Linux idr */ |
3225 | static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end) | |
3226 | { | |
3227 | int id; | |
3228 | ||
3229 | mutex_lock(&board_lock); | |
91ce208d | 3230 | id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL); |
440c4733 AS |
3231 | mutex_unlock(&board_lock); |
3232 | if (WARN(id < 0, "couldn't get idr")) | |
3233 | return id == -ENOSPC ? -EBUSY : id; | |
3234 | ctlr->bus_num = id; | |
3235 | return 0; | |
3236 | } | |
3237 | ||
8ae12a0d | 3238 | /** |
0809a9cc YY |
3239 | * spi_register_controller - register SPI host or target controller |
3240 | * @ctlr: initialized controller, originally from spi_alloc_host() or | |
3241 | * spi_alloc_target() | |
33e34dc6 | 3242 | * Context: can sleep |
8ae12a0d | 3243 | * |
8caab75f | 3244 | * SPI controllers connect to their drivers using some non-SPI bus, |
8ae12a0d | 3245 | * such as the platform bus. The final stage of probe() in that code |
8caab75f | 3246 | * includes calling spi_register_controller() to hook up to this SPI bus glue. |
8ae12a0d DB |
3247 | * |
3248 | * SPI controllers use board specific (often SOC specific) bus numbers, | |
3249 | * and board-specific addressing for SPI devices combines those numbers | |
3250 | * with chip select numbers. Since SPI does not directly support dynamic | |
3251 | * device identification, boards need configuration tables telling which | |
3252 | * chip is at which address. | |
3253 | * | |
3254 | * This must be called from context that can sleep. It returns zero on | |
8caab75f | 3255 | * success, else a negative error code (dropping the controller's refcount). |
0c868461 | 3256 | * After a successful return, the caller is responsible for calling |
8caab75f | 3257 | * spi_unregister_controller(). |
97d56dc6 JMC |
3258 | * |
3259 | * Return: zero on success, else a negative error code. | |
8ae12a0d | 3260 | */ |
8caab75f | 3261 | int spi_register_controller(struct spi_controller *ctlr) |
8ae12a0d | 3262 | { |
8caab75f | 3263 | struct device *dev = ctlr->dev.parent; |
2b9603a0 | 3264 | struct boardinfo *bi; |
440c4733 | 3265 | int first_dynamic; |
b93318a2 | 3266 | int status; |
4d8ff6b0 | 3267 | int idx; |
8ae12a0d | 3268 | |
0c868461 DB |
3269 | if (!dev) |
3270 | return -ENODEV; | |
3271 | ||
bdf3a3b5 BB |
3272 | /* |
3273 | * Make sure all necessary hooks are implemented before registering | |
3274 | * the SPI controller. | |
3275 | */ | |
3276 | status = spi_controller_check_ops(ctlr); | |
3277 | if (status) | |
3278 | return status; | |
3279 | ||
440c4733 AS |
3280 | if (ctlr->bus_num < 0) |
3281 | ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi"); | |
04b2d03a | 3282 | if (ctlr->bus_num >= 0) { |
95c8222f | 3283 | /* Devices with a fixed bus num must check-in with the num */ |
440c4733 AS |
3284 | status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1); |
3285 | if (status) | |
3286 | return status; | |
9b61e302 | 3287 | } |
8caab75f | 3288 | if (ctlr->bus_num < 0) { |
42bdd706 LS |
3289 | first_dynamic = of_alias_get_highest_id("spi"); |
3290 | if (first_dynamic < 0) | |
3291 | first_dynamic = 0; | |
3292 | else | |
3293 | first_dynamic++; | |
3294 | ||
440c4733 AS |
3295 | status = spi_controller_id_alloc(ctlr, first_dynamic, 0); |
3296 | if (status) | |
3297 | return status; | |
8ae12a0d | 3298 | } |
8caab75f GU |
3299 | ctlr->bus_lock_flag = 0; |
3300 | init_completion(&ctlr->xfer_completion); | |
69fa9590 | 3301 | init_completion(&ctlr->cur_msg_completion); |
8caab75f GU |
3302 | if (!ctlr->max_dma_len) |
3303 | ctlr->max_dma_len = INT_MAX; | |
cf32b71e | 3304 | |
350de7ce AS |
3305 | /* |
3306 | * Register the device, then userspace will see it. | |
3307 | * Registration fails if the bus ID is in use. | |
8ae12a0d | 3308 | */ |
8caab75f | 3309 | dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num); |
0a919ae4 | 3310 | |
1e0cc8d0 | 3311 | if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) { |
f48dc6b9 LW |
3312 | status = spi_get_gpio_descs(ctlr); |
3313 | if (status) | |
3314 | goto free_bus_id; | |
3315 | /* | |
3316 | * A controller using GPIO descriptors always | |
3317 | * supports SPI_CS_HIGH if need be. | |
3318 | */ | |
3319 | ctlr->mode_bits |= SPI_CS_HIGH; | |
0a919ae4 AS |
3320 | } |
3321 | ||
f9481b08 TA |
3322 | /* |
3323 | * Even if it's just one always-selected device, there must | |
3324 | * be at least one chipselect. | |
3325 | */ | |
f9981d4f AK |
3326 | if (!ctlr->num_chipselect) { |
3327 | status = -EINVAL; | |
3328 | goto free_bus_id; | |
3329 | } | |
f9481b08 | 3330 | |
be84be4a | 3331 | /* Setting last_cs to SPI_INVALID_CS means no chip selected */ |
4d8ff6b0 | 3332 | for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) |
be84be4a | 3333 | ctlr->last_cs[idx] = SPI_INVALID_CS; |
6bb477df | 3334 | |
8caab75f | 3335 | status = device_add(&ctlr->dev); |
f9981d4f AK |
3336 | if (status < 0) |
3337 | goto free_bus_id; | |
9b61e302 | 3338 | dev_dbg(dev, "registered %s %s\n", |
1e0cc8d0 | 3339 | spi_controller_is_target(ctlr) ? "target" : "host", |
9b61e302 | 3340 | dev_name(&ctlr->dev)); |
8ae12a0d | 3341 | |
b5932f5c BB |
3342 | /* |
3343 | * If we're using a queued driver, start the queue. Note that we don't | |
3344 | * need the queueing logic if the driver is only supporting high-level | |
3345 | * memory operations. | |
3346 | */ | |
3347 | if (ctlr->transfer) { | |
8caab75f | 3348 | dev_info(dev, "controller is unqueued, this is deprecated\n"); |
b5932f5c | 3349 | } else if (ctlr->transfer_one || ctlr->transfer_one_message) { |
8caab75f | 3350 | status = spi_controller_initialize_queue(ctlr); |
ffbbdd21 | 3351 | if (status) { |
8caab75f | 3352 | device_del(&ctlr->dev); |
f9981d4f | 3353 | goto free_bus_id; |
ffbbdd21 LW |
3354 | } |
3355 | } | |
95c8222f | 3356 | /* Add statistics */ |
6598b91b DJ |
3357 | ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev); |
3358 | if (!ctlr->pcpu_statistics) { | |
3359 | dev_err(dev, "Error allocating per-cpu statistics\n"); | |
d52b095b | 3360 | status = -ENOMEM; |
6598b91b DJ |
3361 | goto destroy_queue; |
3362 | } | |
ffbbdd21 | 3363 | |
2b9603a0 | 3364 | mutex_lock(&board_lock); |
8caab75f | 3365 | list_add_tail(&ctlr->list, &spi_controller_list); |
2b9603a0 | 3366 | list_for_each_entry(bi, &board_list, list) |
8caab75f | 3367 | spi_match_controller_to_boardinfo(ctlr, &bi->board_info); |
2b9603a0 FT |
3368 | mutex_unlock(&board_lock); |
3369 | ||
64bee4d2 | 3370 | /* Register devices from the device tree and ACPI */ |
8caab75f GU |
3371 | of_register_spi_devices(ctlr); |
3372 | acpi_register_spi_devices(ctlr); | |
f9981d4f AK |
3373 | return status; |
3374 | ||
6598b91b DJ |
3375 | destroy_queue: |
3376 | spi_destroy_queue(ctlr); | |
f9981d4f AK |
3377 | free_bus_id: |
3378 | mutex_lock(&board_lock); | |
91ce208d | 3379 | idr_remove(&spi_controller_idr, ctlr->bus_num); |
f9981d4f | 3380 | mutex_unlock(&board_lock); |
8ae12a0d DB |
3381 | return status; |
3382 | } | |
8caab75f | 3383 | EXPORT_SYMBOL_GPL(spi_register_controller); |
8ae12a0d | 3384 | |
43cc5a0a | 3385 | static void devm_spi_unregister(struct device *dev, void *res) |
666d5b4c | 3386 | { |
43cc5a0a | 3387 | spi_unregister_controller(*(struct spi_controller **)res); |
666d5b4c MB |
3388 | } |
3389 | ||
3390 | /** | |
91ce208d | 3391 | * devm_spi_register_controller - register managed SPI host or target controller |
8caab75f | 3392 | * @dev: device managing SPI controller |
0809a9cc YY |
3393 | * @ctlr: initialized controller, originally from spi_alloc_host() or |
3394 | * spi_alloc_target() | |
666d5b4c MB |
3395 | * Context: can sleep |
3396 | * | |
8caab75f | 3397 | * Register a SPI device as with spi_register_controller() which will |
68b892f1 | 3398 | * automatically be unregistered and freed. |
97d56dc6 JMC |
3399 | * |
3400 | * Return: zero on success, else a negative error code. | |
666d5b4c | 3401 | */ |
8caab75f GU |
3402 | int devm_spi_register_controller(struct device *dev, |
3403 | struct spi_controller *ctlr) | |
666d5b4c | 3404 | { |
43cc5a0a | 3405 | struct spi_controller **ptr; |
666d5b4c MB |
3406 | int ret; |
3407 | ||
43cc5a0a YY |
3408 | ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL); |
3409 | if (!ptr) | |
3410 | return -ENOMEM; | |
3411 | ||
8caab75f | 3412 | ret = spi_register_controller(ctlr); |
43cc5a0a YY |
3413 | if (!ret) { |
3414 | *ptr = ctlr; | |
3415 | devres_add(dev, ptr); | |
3416 | } else { | |
3417 | devres_free(ptr); | |
3418 | } | |
666d5b4c | 3419 | |
43cc5a0a | 3420 | return ret; |
666d5b4c | 3421 | } |
8caab75f | 3422 | EXPORT_SYMBOL_GPL(devm_spi_register_controller); |
666d5b4c | 3423 | |
34860089 | 3424 | static int __unregister(struct device *dev, void *null) |
8ae12a0d | 3425 | { |
34860089 | 3426 | spi_unregister_device(to_spi_device(dev)); |
8ae12a0d DB |
3427 | return 0; |
3428 | } | |
3429 | ||
3430 | /** | |
91ce208d | 3431 | * spi_unregister_controller - unregister SPI host or target controller |
8caab75f | 3432 | * @ctlr: the controller being unregistered |
33e34dc6 | 3433 | * Context: can sleep |
8ae12a0d | 3434 | * |
8caab75f | 3435 | * This call is used only by SPI controller drivers, which are the |
8ae12a0d DB |
3436 | * only ones directly touching chip registers. |
3437 | * | |
3438 | * This must be called from context that can sleep. | |
68b892f1 JH |
3439 | * |
3440 | * Note that this function also drops a reference to the controller. | |
8ae12a0d | 3441 | */ |
8caab75f | 3442 | void spi_unregister_controller(struct spi_controller *ctlr) |
8ae12a0d | 3443 | { |
9b61e302 | 3444 | struct spi_controller *found; |
67f7b278 | 3445 | int id = ctlr->bus_num; |
89fc9a1a | 3446 | |
ddf75be4 LW |
3447 | /* Prevent addition of new devices, unregister existing ones */ |
3448 | if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) | |
6098475d | 3449 | mutex_lock(&ctlr->add_lock); |
ddf75be4 | 3450 | |
84855678 LW |
3451 | device_for_each_child(&ctlr->dev, NULL, __unregister); |
3452 | ||
9b61e302 SM |
3453 | /* First make sure that this controller was ever added */ |
3454 | mutex_lock(&board_lock); | |
91ce208d | 3455 | found = idr_find(&spi_controller_idr, id); |
9b61e302 | 3456 | mutex_unlock(&board_lock); |
8caab75f GU |
3457 | if (ctlr->queued) { |
3458 | if (spi_destroy_queue(ctlr)) | |
3459 | dev_err(&ctlr->dev, "queue remove failed\n"); | |
ffbbdd21 | 3460 | } |
2b9603a0 | 3461 | mutex_lock(&board_lock); |
8caab75f | 3462 | list_del(&ctlr->list); |
2b9603a0 FT |
3463 | mutex_unlock(&board_lock); |
3464 | ||
5e844cc3 LW |
3465 | device_del(&ctlr->dev); |
3466 | ||
95c8222f | 3467 | /* Free bus id */ |
9b61e302 | 3468 | mutex_lock(&board_lock); |
613bd1ea | 3469 | if (found == ctlr) |
91ce208d | 3470 | idr_remove(&spi_controller_idr, id); |
9b61e302 | 3471 | mutex_unlock(&board_lock); |
ddf75be4 LW |
3472 | |
3473 | if (IS_ENABLED(CONFIG_SPI_DYNAMIC)) | |
6098475d | 3474 | mutex_unlock(&ctlr->add_lock); |
6c53b45c | 3475 | |
702ca026 AS |
3476 | /* |
3477 | * Release the last reference on the controller if its driver | |
0809a9cc | 3478 | * has not yet been converted to devm_spi_alloc_host/target(). |
6c53b45c MW |
3479 | */ |
3480 | if (!ctlr->devm_allocated) | |
3481 | put_device(&ctlr->dev); | |
8ae12a0d | 3482 | } |
8caab75f | 3483 | EXPORT_SYMBOL_GPL(spi_unregister_controller); |
8ae12a0d | 3484 | |
bef4a48f MH |
3485 | static inline int __spi_check_suspended(const struct spi_controller *ctlr) |
3486 | { | |
3487 | return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0; | |
3488 | } | |
3489 | ||
3490 | static inline void __spi_mark_suspended(struct spi_controller *ctlr) | |
3491 | { | |
3492 | mutex_lock(&ctlr->bus_lock_mutex); | |
3493 | ctlr->flags |= SPI_CONTROLLER_SUSPENDED; | |
3494 | mutex_unlock(&ctlr->bus_lock_mutex); | |
3495 | } | |
3496 | ||
3497 | static inline void __spi_mark_resumed(struct spi_controller *ctlr) | |
3498 | { | |
3499 | mutex_lock(&ctlr->bus_lock_mutex); | |
3500 | ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED; | |
3501 | mutex_unlock(&ctlr->bus_lock_mutex); | |
3502 | } | |
3503 | ||
8caab75f | 3504 | int spi_controller_suspend(struct spi_controller *ctlr) |
ffbbdd21 | 3505 | { |
bef4a48f | 3506 | int ret = 0; |
ffbbdd21 | 3507 | |
8caab75f | 3508 | /* Basically no-ops for non-queued controllers */ |
bef4a48f MH |
3509 | if (ctlr->queued) { |
3510 | ret = spi_stop_queue(ctlr); | |
3511 | if (ret) | |
3512 | dev_err(&ctlr->dev, "queue stop failed\n"); | |
3513 | } | |
ffbbdd21 | 3514 | |
bef4a48f | 3515 | __spi_mark_suspended(ctlr); |
ffbbdd21 LW |
3516 | return ret; |
3517 | } | |
8caab75f | 3518 | EXPORT_SYMBOL_GPL(spi_controller_suspend); |
ffbbdd21 | 3519 | |
8caab75f | 3520 | int spi_controller_resume(struct spi_controller *ctlr) |
ffbbdd21 | 3521 | { |
bef4a48f | 3522 | int ret = 0; |
ffbbdd21 | 3523 | |
bef4a48f | 3524 | __spi_mark_resumed(ctlr); |
ffbbdd21 | 3525 | |
bef4a48f MH |
3526 | if (ctlr->queued) { |
3527 | ret = spi_start_queue(ctlr); | |
3528 | if (ret) | |
3529 | dev_err(&ctlr->dev, "queue restart failed\n"); | |
3530 | } | |
ffbbdd21 LW |
3531 | return ret; |
3532 | } | |
8caab75f | 3533 | EXPORT_SYMBOL_GPL(spi_controller_resume); |
ffbbdd21 | 3534 | |
8ae12a0d DB |
3535 | /*-------------------------------------------------------------------------*/ |
3536 | ||
523baf5a MS |
3537 | /* Core methods for spi_message alterations */ |
3538 | ||
8caab75f | 3539 | static void __spi_replace_transfers_release(struct spi_controller *ctlr, |
523baf5a MS |
3540 | struct spi_message *msg, |
3541 | void *res) | |
3542 | { | |
3543 | struct spi_replaced_transfers *rxfer = res; | |
3544 | size_t i; | |
3545 | ||
95c8222f | 3546 | /* Call extra callback if requested */ |
523baf5a | 3547 | if (rxfer->release) |
8caab75f | 3548 | rxfer->release(ctlr, msg, res); |
523baf5a | 3549 | |
95c8222f | 3550 | /* Insert replaced transfers back into the message */ |
523baf5a MS |
3551 | list_splice(&rxfer->replaced_transfers, rxfer->replaced_after); |
3552 | ||
95c8222f | 3553 | /* Remove the formerly inserted entries */ |
523baf5a MS |
3554 | for (i = 0; i < rxfer->inserted; i++) |
3555 | list_del(&rxfer->inserted_transfers[i].transfer_list); | |
3556 | } | |
3557 | ||
3558 | /** | |
3559 | * spi_replace_transfers - replace transfers with several transfers | |
3560 | * and register change with spi_message.resources | |
3561 | * @msg: the spi_message we work upon | |
3562 | * @xfer_first: the first spi_transfer we want to replace | |
3563 | * @remove: number of transfers to remove | |
3564 | * @insert: the number of transfers we want to insert instead | |
3565 | * @release: extra release code necessary in some circumstances | |
3566 | * @extradatasize: extra data to allocate (with alignment guarantees | |
3567 | * of struct @spi_transfer) | |
05885397 | 3568 | * @gfp: gfp flags |
523baf5a MS |
3569 | * |
3570 | * Returns: pointer to @spi_replaced_transfers, | |
3571 | * PTR_ERR(...) in case of errors. | |
3572 | */ | |
da21fde0 | 3573 | static struct spi_replaced_transfers *spi_replace_transfers( |
523baf5a MS |
3574 | struct spi_message *msg, |
3575 | struct spi_transfer *xfer_first, | |
3576 | size_t remove, | |
3577 | size_t insert, | |
3578 | spi_replaced_release_t release, | |
3579 | size_t extradatasize, | |
3580 | gfp_t gfp) | |
3581 | { | |
3582 | struct spi_replaced_transfers *rxfer; | |
3583 | struct spi_transfer *xfer; | |
3584 | size_t i; | |
3585 | ||
95c8222f | 3586 | /* Allocate the structure using spi_res */ |
523baf5a | 3587 | rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, |
aef97522 | 3588 | struct_size(rxfer, inserted_transfers, insert) |
523baf5a MS |
3589 | + extradatasize, |
3590 | gfp); | |
3591 | if (!rxfer) | |
3592 | return ERR_PTR(-ENOMEM); | |
3593 | ||
95c8222f | 3594 | /* The release code to invoke before running the generic release */ |
523baf5a MS |
3595 | rxfer->release = release; |
3596 | ||
95c8222f | 3597 | /* Assign extradata */ |
523baf5a MS |
3598 | if (extradatasize) |
3599 | rxfer->extradata = | |
3600 | &rxfer->inserted_transfers[insert]; | |
3601 | ||
95c8222f | 3602 | /* Init the replaced_transfers list */ |
523baf5a MS |
3603 | INIT_LIST_HEAD(&rxfer->replaced_transfers); |
3604 | ||
350de7ce AS |
3605 | /* |
3606 | * Assign the list_entry after which we should reinsert | |
523baf5a MS |
3607 | * the @replaced_transfers - it may be spi_message.messages! |
3608 | */ | |
3609 | rxfer->replaced_after = xfer_first->transfer_list.prev; | |
3610 | ||
95c8222f | 3611 | /* Remove the requested number of transfers */ |
523baf5a | 3612 | for (i = 0; i < remove; i++) { |
350de7ce AS |
3613 | /* |
3614 | * If the entry after replaced_after it is msg->transfers | |
523baf5a | 3615 | * then we have been requested to remove more transfers |
350de7ce | 3616 | * than are in the list. |
523baf5a MS |
3617 | */ |
3618 | if (rxfer->replaced_after->next == &msg->transfers) { | |
3619 | dev_err(&msg->spi->dev, | |
3620 | "requested to remove more spi_transfers than are available\n"); | |
95c8222f | 3621 | /* Insert replaced transfers back into the message */ |
523baf5a MS |
3622 | list_splice(&rxfer->replaced_transfers, |
3623 | rxfer->replaced_after); | |
3624 | ||
95c8222f | 3625 | /* Free the spi_replace_transfer structure... */ |
523baf5a MS |
3626 | spi_res_free(rxfer); |
3627 | ||
95c8222f | 3628 | /* ...and return with an error */ |
523baf5a MS |
3629 | return ERR_PTR(-EINVAL); |
3630 | } | |
3631 | ||
350de7ce AS |
3632 | /* |
3633 | * Remove the entry after replaced_after from list of | |
3634 | * transfers and add it to list of replaced_transfers. | |
523baf5a MS |
3635 | */ |
3636 | list_move_tail(rxfer->replaced_after->next, | |
3637 | &rxfer->replaced_transfers); | |
3638 | } | |
3639 | ||
350de7ce AS |
3640 | /* |
3641 | * Create copy of the given xfer with identical settings | |
3642 | * based on the first transfer to get removed. | |
523baf5a MS |
3643 | */ |
3644 | for (i = 0; i < insert; i++) { | |
95c8222f | 3645 | /* We need to run in reverse order */ |
523baf5a MS |
3646 | xfer = &rxfer->inserted_transfers[insert - 1 - i]; |
3647 | ||
95c8222f | 3648 | /* Copy all spi_transfer data */ |
523baf5a MS |
3649 | memcpy(xfer, xfer_first, sizeof(*xfer)); |
3650 | ||
95c8222f | 3651 | /* Add to list */ |
523baf5a MS |
3652 | list_add(&xfer->transfer_list, rxfer->replaced_after); |
3653 | ||
95c8222f | 3654 | /* Clear cs_change and delay for all but the last */ |
523baf5a MS |
3655 | if (i) { |
3656 | xfer->cs_change = false; | |
bebcfd27 | 3657 | xfer->delay.value = 0; |
523baf5a MS |
3658 | } |
3659 | } | |
3660 | ||
95c8222f | 3661 | /* Set up inserted... */ |
523baf5a MS |
3662 | rxfer->inserted = insert; |
3663 | ||
95c8222f | 3664 | /* ...and register it with spi_res/spi_message */ |
523baf5a MS |
3665 | spi_res_add(msg, rxfer); |
3666 | ||
3667 | return rxfer; | |
3668 | } | |
523baf5a | 3669 | |
8caab75f | 3670 | static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, |
08933418 FE |
3671 | struct spi_message *msg, |
3672 | struct spi_transfer **xferp, | |
c0c0293c | 3673 | size_t maxsize) |
d9f12122 MS |
3674 | { |
3675 | struct spi_transfer *xfer = *xferp, *xfers; | |
3676 | struct spi_replaced_transfers *srt; | |
3677 | size_t offset; | |
3678 | size_t count, i; | |
3679 | ||
95c8222f | 3680 | /* Calculate how many we have to replace */ |
d9f12122 MS |
3681 | count = DIV_ROUND_UP(xfer->len, maxsize); |
3682 | ||
95c8222f | 3683 | /* Create replacement */ |
c0c0293c | 3684 | srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL); |
657d32ef DC |
3685 | if (IS_ERR(srt)) |
3686 | return PTR_ERR(srt); | |
d9f12122 MS |
3687 | xfers = srt->inserted_transfers; |
3688 | ||
350de7ce AS |
3689 | /* |
3690 | * Now handle each of those newly inserted spi_transfers. | |
3691 | * Note that the replacements spi_transfers all are preset | |
d9f12122 MS |
3692 | * to the same values as *xferp, so tx_buf, rx_buf and len |
3693 | * are all identical (as well as most others) | |
3694 | * so we just have to fix up len and the pointers. | |
d9f12122 MS |
3695 | */ |
3696 | ||
350de7ce AS |
3697 | /* |
3698 | * The first transfer just needs the length modified, so we | |
3699 | * run it outside the loop. | |
d9f12122 | 3700 | */ |
c8dab77a | 3701 | xfers[0].len = min_t(size_t, maxsize, xfer[0].len); |
d9f12122 | 3702 | |
95c8222f | 3703 | /* All the others need rx_buf/tx_buf also set */ |
d9f12122 | 3704 | for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) { |
702ca026 | 3705 | /* Update rx_buf, tx_buf and DMA */ |
d9f12122 MS |
3706 | if (xfers[i].rx_buf) |
3707 | xfers[i].rx_buf += offset; | |
d9f12122 MS |
3708 | if (xfers[i].tx_buf) |
3709 | xfers[i].tx_buf += offset; | |
d9f12122 | 3710 | |
95c8222f | 3711 | /* Update length */ |
d9f12122 MS |
3712 | xfers[i].len = min(maxsize, xfers[i].len - offset); |
3713 | } | |
3714 | ||
350de7ce AS |
3715 | /* |
3716 | * We set up xferp to the last entry we have inserted, | |
3717 | * so that we skip those already split transfers. | |
d9f12122 MS |
3718 | */ |
3719 | *xferp = &xfers[count - 1]; | |
3720 | ||
95c8222f | 3721 | /* Increment statistics counters */ |
6598b91b | 3722 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, |
d9f12122 | 3723 | transfers_split_maxsize); |
6598b91b | 3724 | SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics, |
d9f12122 MS |
3725 | transfers_split_maxsize); |
3726 | ||
3727 | return 0; | |
3728 | } | |
3729 | ||
3730 | /** | |
ce2424d7 MCC |
3731 | * spi_split_transfers_maxsize - split spi transfers into multiple transfers |
3732 | * when an individual transfer exceeds a | |
3733 | * certain size | |
8caab75f | 3734 | * @ctlr: the @spi_controller for this transfer |
3700ce95 MI |
3735 | * @msg: the @spi_message to transform |
3736 | * @maxsize: the maximum when to apply this | |
d9f12122 | 3737 | * |
fab53fea DL |
3738 | * This function allocates resources that are automatically freed during the |
3739 | * spi message unoptimize phase so this function should only be called from | |
3740 | * optimize_message callbacks. | |
3741 | * | |
d9f12122 MS |
3742 | * Return: status of transformation |
3743 | */ | |
8caab75f | 3744 | int spi_split_transfers_maxsize(struct spi_controller *ctlr, |
d9f12122 | 3745 | struct spi_message *msg, |
c0c0293c | 3746 | size_t maxsize) |
d9f12122 MS |
3747 | { |
3748 | struct spi_transfer *xfer; | |
3749 | int ret; | |
3750 | ||
350de7ce AS |
3751 | /* |
3752 | * Iterate over the transfer_list, | |
d9f12122 MS |
3753 | * but note that xfer is advanced to the last transfer inserted |
3754 | * to avoid checking sizes again unnecessarily (also xfer does | |
350de7ce AS |
3755 | * potentially belong to a different list by the time the |
3756 | * replacement has happened). | |
d9f12122 MS |
3757 | */ |
3758 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | |
3759 | if (xfer->len > maxsize) { | |
8caab75f | 3760 | ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, |
c0c0293c | 3761 | maxsize); |
d9f12122 MS |
3762 | if (ret) |
3763 | return ret; | |
3764 | } | |
3765 | } | |
3766 | ||
3767 | return 0; | |
3768 | } | |
3769 | EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize); | |
8ae12a0d | 3770 | |
027781f3 LG |
3771 | |
3772 | /** | |
702ca026 | 3773 | * spi_split_transfers_maxwords - split SPI transfers into multiple transfers |
027781f3 LG |
3774 | * when an individual transfer exceeds a |
3775 | * certain number of SPI words | |
3776 | * @ctlr: the @spi_controller for this transfer | |
3777 | * @msg: the @spi_message to transform | |
3778 | * @maxwords: the number of words to limit each transfer to | |
027781f3 | 3779 | * |
fab53fea DL |
3780 | * This function allocates resources that are automatically freed during the |
3781 | * spi message unoptimize phase so this function should only be called from | |
3782 | * optimize_message callbacks. | |
3783 | * | |
027781f3 LG |
3784 | * Return: status of transformation |
3785 | */ | |
3786 | int spi_split_transfers_maxwords(struct spi_controller *ctlr, | |
3787 | struct spi_message *msg, | |
c0c0293c | 3788 | size_t maxwords) |
027781f3 LG |
3789 | { |
3790 | struct spi_transfer *xfer; | |
3791 | ||
3792 | /* | |
3793 | * Iterate over the transfer_list, | |
3794 | * but note that xfer is advanced to the last transfer inserted | |
3795 | * to avoid checking sizes again unnecessarily (also xfer does | |
3796 | * potentially belong to a different list by the time the | |
3797 | * replacement has happened). | |
3798 | */ | |
3799 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | |
3800 | size_t maxsize; | |
3801 | int ret; | |
3802 | ||
163ddf1f | 3803 | maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word); |
027781f3 LG |
3804 | if (xfer->len > maxsize) { |
3805 | ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer, | |
c0c0293c | 3806 | maxsize); |
027781f3 LG |
3807 | if (ret) |
3808 | return ret; | |
3809 | } | |
3810 | } | |
3811 | ||
3812 | return 0; | |
3813 | } | |
3814 | EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords); | |
3815 | ||
8ae12a0d DB |
3816 | /*-------------------------------------------------------------------------*/ |
3817 | ||
702ca026 AS |
3818 | /* |
3819 | * Core methods for SPI controller protocol drivers. Some of the | |
7d077197 DB |
3820 | * other core methods are currently defined as inline functions. |
3821 | */ | |
3822 | ||
8caab75f GU |
3823 | static int __spi_validate_bits_per_word(struct spi_controller *ctlr, |
3824 | u8 bits_per_word) | |
63ab645f | 3825 | { |
8caab75f | 3826 | if (ctlr->bits_per_word_mask) { |
63ab645f SB |
3827 | /* Only 32 bits fit in the mask */ |
3828 | if (bits_per_word > 32) | |
3829 | return -EINVAL; | |
8caab75f | 3830 | if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word))) |
63ab645f SB |
3831 | return -EINVAL; |
3832 | } | |
3833 | ||
3834 | return 0; | |
3835 | } | |
3836 | ||
684a4784 TA |
3837 | /** |
3838 | * spi_set_cs_timing - configure CS setup, hold, and inactive delays | |
3839 | * @spi: the device that requires specific CS timing configuration | |
3840 | * | |
3841 | * Return: zero on success, else a negative error code. | |
3842 | */ | |
3843 | static int spi_set_cs_timing(struct spi_device *spi) | |
3844 | { | |
3845 | struct device *parent = spi->controller->dev.parent; | |
3846 | int status = 0; | |
3847 | ||
303feb3c | 3848 | if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) { |
684a4784 TA |
3849 | if (spi->controller->auto_runtime_pm) { |
3850 | status = pm_runtime_get_sync(parent); | |
3851 | if (status < 0) { | |
3852 | pm_runtime_put_noidle(parent); | |
3853 | dev_err(&spi->controller->dev, "Failed to power device: %d\n", | |
3854 | status); | |
3855 | return status; | |
3856 | } | |
3857 | ||
3858 | status = spi->controller->set_cs_timing(spi); | |
3859 | pm_runtime_mark_last_busy(parent); | |
3860 | pm_runtime_put_autosuspend(parent); | |
3861 | } else { | |
3862 | status = spi->controller->set_cs_timing(spi); | |
3863 | } | |
3864 | } | |
3865 | return status; | |
3866 | } | |
3867 | ||
7d077197 DB |
3868 | /** |
3869 | * spi_setup - setup SPI mode and clock rate | |
3870 | * @spi: the device whose settings are being modified | |
3871 | * Context: can sleep, and no requests are queued to the device | |
3872 | * | |
3873 | * SPI protocol drivers may need to update the transfer mode if the | |
3874 | * device doesn't work with its default. They may likewise need | |
3875 | * to update clock rates or word sizes from initial values. This function | |
3876 | * changes those settings, and must be called from a context that can sleep. | |
3877 | * Except for SPI_CS_HIGH, which takes effect immediately, the changes take | |
3878 | * effect the next time the device is selected and data is transferred to | |
702ca026 | 3879 | * or from it. When this function returns, the SPI device is deselected. |
7d077197 DB |
3880 | * |
3881 | * Note that this call will fail if the protocol driver specifies an option | |
3882 | * that the underlying controller or its driver does not support. For | |
3883 | * example, not all hardware supports wire transfers using nine bit words, | |
3884 | * LSB-first wire encoding, or active-high chipselects. | |
97d56dc6 JMC |
3885 | * |
3886 | * Return: zero on success, else a negative error code. | |
7d077197 DB |
3887 | */ |
3888 | int spi_setup(struct spi_device *spi) | |
3889 | { | |
83596fbe | 3890 | unsigned bad_bits, ugly_bits; |
3bca1a38 | 3891 | int status; |
7d077197 | 3892 | |
d962608c | 3893 | /* |
350de7ce AS |
3894 | * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO |
3895 | * are set at the same time. | |
f477b7fb | 3896 | */ |
d962608c DB |
3897 | if ((hweight_long(spi->mode & |
3898 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) || | |
3899 | (hweight_long(spi->mode & | |
3900 | (SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) { | |
f477b7fb | 3901 | dev_err(&spi->dev, |
d962608c | 3902 | "setup: can not select any two of dual, quad and no-rx/tx at the same time\n"); |
f477b7fb | 3903 | return -EINVAL; |
3904 | } | |
350de7ce | 3905 | /* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */ |
f477b7fb | 3906 | if ((spi->mode & SPI_3WIRE) && (spi->mode & |
6b03061f YNG |
3907 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | |
3908 | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) | |
f477b7fb | 3909 | return -EINVAL; |
f58872f4 MS |
3910 | /* Check against conflicting MOSI idle configuration */ |
3911 | if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) { | |
3912 | dev_err(&spi->dev, | |
3913 | "setup: MOSI configured to idle low and high at the same time.\n"); | |
3914 | return -EINVAL; | |
3915 | } | |
350de7ce AS |
3916 | /* |
3917 | * Help drivers fail *cleanly* when they need options | |
3918 | * that aren't supported with their current controller. | |
cbaa62e0 DL |
3919 | * SPI_CS_WORD has a fallback software implementation, |
3920 | * so it is ignored here. | |
e7db06b5 | 3921 | */ |
d962608c DB |
3922 | bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD | |
3923 | SPI_NO_TX | SPI_NO_RX); | |
83596fbe | 3924 | ugly_bits = bad_bits & |
6b03061f YNG |
3925 | (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL | |
3926 | SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL); | |
83596fbe GU |
3927 | if (ugly_bits) { |
3928 | dev_warn(&spi->dev, | |
3929 | "setup: ignoring unsupported mode bits %x\n", | |
3930 | ugly_bits); | |
3931 | spi->mode &= ~ugly_bits; | |
3932 | bad_bits &= ~ugly_bits; | |
3933 | } | |
e7db06b5 | 3934 | if (bad_bits) { |
eb288a1f | 3935 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n", |
e7db06b5 DB |
3936 | bad_bits); |
3937 | return -EINVAL; | |
3938 | } | |
3939 | ||
b3fe2e51 | 3940 | if (!spi->bits_per_word) { |
7d077197 | 3941 | spi->bits_per_word = 8; |
b3fe2e51 PK |
3942 | } else { |
3943 | /* | |
3944 | * Some controllers may not support the default 8 bits-per-word | |
3945 | * so only perform the check when this is explicitly provided. | |
3946 | */ | |
3947 | status = __spi_validate_bits_per_word(spi->controller, | |
3948 | spi->bits_per_word); | |
3949 | if (status) | |
3950 | return status; | |
3951 | } | |
63ab645f | 3952 | |
6820e812 TA |
3953 | if (spi->controller->max_speed_hz && |
3954 | (!spi->max_speed_hz || | |
3955 | spi->max_speed_hz > spi->controller->max_speed_hz)) | |
8caab75f | 3956 | spi->max_speed_hz = spi->controller->max_speed_hz; |
052eb2d4 | 3957 | |
4fae3a58 SS |
3958 | mutex_lock(&spi->controller->io_mutex); |
3959 | ||
c914dbf8 | 3960 | if (spi->controller->setup) { |
8caab75f | 3961 | status = spi->controller->setup(spi); |
c914dbf8 JB |
3962 | if (status) { |
3963 | mutex_unlock(&spi->controller->io_mutex); | |
3964 | dev_err(&spi->controller->dev, "Failed to setup device: %d\n", | |
3965 | status); | |
3966 | return status; | |
3967 | } | |
3968 | } | |
7d077197 | 3969 | |
684a4784 TA |
3970 | status = spi_set_cs_timing(spi); |
3971 | if (status) { | |
3972 | mutex_unlock(&spi->controller->io_mutex); | |
3973 | return status; | |
3974 | } | |
3975 | ||
d948e6ca | 3976 | if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { |
dd769f15 | 3977 | status = pm_runtime_resume_and_get(spi->controller->dev.parent); |
d948e6ca | 3978 | if (status < 0) { |
4fae3a58 | 3979 | mutex_unlock(&spi->controller->io_mutex); |
d948e6ca LX |
3980 | dev_err(&spi->controller->dev, "Failed to power device: %d\n", |
3981 | status); | |
3982 | return status; | |
3983 | } | |
57a94607 TL |
3984 | |
3985 | /* | |
3986 | * We do not want to return positive value from pm_runtime_get, | |
3987 | * there are many instances of devices calling spi_setup() and | |
3988 | * checking for a non-zero return value instead of a negative | |
3989 | * return value. | |
3990 | */ | |
3991 | status = 0; | |
3992 | ||
d347b4aa | 3993 | spi_set_cs(spi, false, true); |
d948e6ca LX |
3994 | pm_runtime_mark_last_busy(spi->controller->dev.parent); |
3995 | pm_runtime_put_autosuspend(spi->controller->dev.parent); | |
3996 | } else { | |
d347b4aa | 3997 | spi_set_cs(spi, false, true); |
d948e6ca | 3998 | } |
abeedb01 | 3999 | |
4fae3a58 SS |
4000 | mutex_unlock(&spi->controller->io_mutex); |
4001 | ||
924b5867 DA |
4002 | if (spi->rt && !spi->controller->rt) { |
4003 | spi->controller->rt = true; | |
4004 | spi_set_thread_rt(spi->controller); | |
4005 | } | |
4006 | ||
5cb4e1f3 AS |
4007 | trace_spi_setup(spi, status); |
4008 | ||
40b82c2d AS |
4009 | dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n", |
4010 | spi->mode & SPI_MODE_X_MASK, | |
7d077197 DB |
4011 | (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "", |
4012 | (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "", | |
4013 | (spi->mode & SPI_3WIRE) ? "3wire, " : "", | |
4014 | (spi->mode & SPI_LOOP) ? "loopback, " : "", | |
4015 | spi->bits_per_word, spi->max_speed_hz, | |
4016 | status); | |
4017 | ||
4018 | return status; | |
4019 | } | |
4020 | EXPORT_SYMBOL_GPL(spi_setup); | |
4021 | ||
6c613f68 AA |
4022 | static int _spi_xfer_word_delay_update(struct spi_transfer *xfer, |
4023 | struct spi_device *spi) | |
4024 | { | |
4025 | int delay1, delay2; | |
4026 | ||
3984d39b | 4027 | delay1 = spi_delay_to_ns(&xfer->word_delay, xfer); |
6c613f68 AA |
4028 | if (delay1 < 0) |
4029 | return delay1; | |
4030 | ||
3984d39b | 4031 | delay2 = spi_delay_to_ns(&spi->word_delay, xfer); |
6c613f68 AA |
4032 | if (delay2 < 0) |
4033 | return delay2; | |
4034 | ||
4035 | if (delay1 < delay2) | |
4036 | memcpy(&xfer->word_delay, &spi->word_delay, | |
4037 | sizeof(xfer->word_delay)); | |
4038 | ||
4039 | return 0; | |
4040 | } | |
4041 | ||
90808738 | 4042 | static int __spi_validate(struct spi_device *spi, struct spi_message *message) |
cf32b71e | 4043 | { |
8caab75f | 4044 | struct spi_controller *ctlr = spi->controller; |
e6811d1d | 4045 | struct spi_transfer *xfer; |
6ea31293 | 4046 | int w_size; |
cf32b71e | 4047 | |
24a0013a MB |
4048 | if (list_empty(&message->transfers)) |
4049 | return -EINVAL; | |
24a0013a | 4050 | |
b204aa0f DL |
4051 | message->spi = spi; |
4052 | ||
350de7ce AS |
4053 | /* |
4054 | * Half-duplex links include original MicroWire, and ones with | |
cf32b71e ES |
4055 | * only one data pin like SPI_3WIRE (switches direction) or where |
4056 | * either MOSI or MISO is missing. They can also be caused by | |
4057 | * software limitations. | |
4058 | */ | |
8caab75f GU |
4059 | if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) || |
4060 | (spi->mode & SPI_3WIRE)) { | |
4061 | unsigned flags = ctlr->flags; | |
cf32b71e ES |
4062 | |
4063 | list_for_each_entry(xfer, &message->transfers, transfer_list) { | |
4064 | if (xfer->rx_buf && xfer->tx_buf) | |
4065 | return -EINVAL; | |
8caab75f | 4066 | if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf) |
cf32b71e | 4067 | return -EINVAL; |
8caab75f | 4068 | if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf) |
cf32b71e ES |
4069 | return -EINVAL; |
4070 | } | |
4071 | } | |
4072 | ||
350de7ce | 4073 | /* |
059b8ffe LD |
4074 | * Set transfer bits_per_word and max speed as spi device default if |
4075 | * it is not set for this transfer. | |
f477b7fb | 4076 | * Set transfer tx_nbits and rx_nbits as single transfer default |
4077 | * (SPI_NBITS_SINGLE) if it is not set for this transfer. | |
b7bb367a JB |
4078 | * Ensure transfer word_delay is at least as long as that required by |
4079 | * device itself. | |
e6811d1d | 4080 | */ |
77e80588 | 4081 | message->frame_length = 0; |
e6811d1d | 4082 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
5d7e2b5e | 4083 | xfer->effective_speed_hz = 0; |
078726ce | 4084 | message->frame_length += xfer->len; |
e6811d1d LD |
4085 | if (!xfer->bits_per_word) |
4086 | xfer->bits_per_word = spi->bits_per_word; | |
a6f87fad AL |
4087 | |
4088 | if (!xfer->speed_hz) | |
059b8ffe | 4089 | xfer->speed_hz = spi->max_speed_hz; |
a6f87fad | 4090 | |
8caab75f GU |
4091 | if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz) |
4092 | xfer->speed_hz = ctlr->max_speed_hz; | |
56ede94a | 4093 | |
8caab75f | 4094 | if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word)) |
63ab645f | 4095 | return -EINVAL; |
a2fd4f9f | 4096 | |
88113e09 MKS |
4097 | /* DDR mode is supported only if controller has dtr_caps=true. |
4098 | * default considered as SDR mode for SPI and QSPI controller. | |
4099 | * Note: This is applicable only to QSPI controller. | |
4100 | */ | |
4101 | if (xfer->dtr_mode && !ctlr->dtr_caps) | |
4102 | return -EINVAL; | |
4103 | ||
4d94bd21 II |
4104 | /* |
4105 | * SPI transfer length should be multiple of SPI word size | |
350de7ce | 4106 | * where SPI word size should be power-of-two multiple. |
4d94bd21 II |
4107 | */ |
4108 | if (xfer->bits_per_word <= 8) | |
4109 | w_size = 1; | |
4110 | else if (xfer->bits_per_word <= 16) | |
4111 | w_size = 2; | |
4112 | else | |
4113 | w_size = 4; | |
4114 | ||
4d94bd21 | 4115 | /* No partial transfers accepted */ |
6ea31293 | 4116 | if (xfer->len % w_size) |
4d94bd21 II |
4117 | return -EINVAL; |
4118 | ||
8caab75f GU |
4119 | if (xfer->speed_hz && ctlr->min_speed_hz && |
4120 | xfer->speed_hz < ctlr->min_speed_hz) | |
a2fd4f9f | 4121 | return -EINVAL; |
f477b7fb | 4122 | |
4123 | if (xfer->tx_buf && !xfer->tx_nbits) | |
4124 | xfer->tx_nbits = SPI_NBITS_SINGLE; | |
4125 | if (xfer->rx_buf && !xfer->rx_nbits) | |
4126 | xfer->rx_nbits = SPI_NBITS_SINGLE; | |
350de7ce AS |
4127 | /* |
4128 | * Check transfer tx/rx_nbits: | |
1afd9989 GU |
4129 | * 1. check the value matches one of single, dual and quad |
4130 | * 2. check tx/rx_nbits match the mode in spi_device | |
f477b7fb | 4131 | */ |
db90a441 | 4132 | if (xfer->tx_buf) { |
d962608c DB |
4133 | if (spi->mode & SPI_NO_TX) |
4134 | return -EINVAL; | |
db90a441 SP |
4135 | if (xfer->tx_nbits != SPI_NBITS_SINGLE && |
4136 | xfer->tx_nbits != SPI_NBITS_DUAL && | |
d6a711a8 PC |
4137 | xfer->tx_nbits != SPI_NBITS_QUAD && |
4138 | xfer->tx_nbits != SPI_NBITS_OCTAL) | |
db90a441 SP |
4139 | return -EINVAL; |
4140 | if ((xfer->tx_nbits == SPI_NBITS_DUAL) && | |
4141 | !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) | |
4142 | return -EINVAL; | |
4143 | if ((xfer->tx_nbits == SPI_NBITS_QUAD) && | |
4144 | !(spi->mode & SPI_TX_QUAD)) | |
4145 | return -EINVAL; | |
db90a441 | 4146 | } |
95c8222f | 4147 | /* Check transfer rx_nbits */ |
db90a441 | 4148 | if (xfer->rx_buf) { |
d962608c DB |
4149 | if (spi->mode & SPI_NO_RX) |
4150 | return -EINVAL; | |
db90a441 SP |
4151 | if (xfer->rx_nbits != SPI_NBITS_SINGLE && |
4152 | xfer->rx_nbits != SPI_NBITS_DUAL && | |
d6a711a8 PC |
4153 | xfer->rx_nbits != SPI_NBITS_QUAD && |
4154 | xfer->rx_nbits != SPI_NBITS_OCTAL) | |
db90a441 SP |
4155 | return -EINVAL; |
4156 | if ((xfer->rx_nbits == SPI_NBITS_DUAL) && | |
4157 | !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) | |
4158 | return -EINVAL; | |
4159 | if ((xfer->rx_nbits == SPI_NBITS_QUAD) && | |
4160 | !(spi->mode & SPI_RX_QUAD)) | |
4161 | return -EINVAL; | |
db90a441 | 4162 | } |
b7bb367a | 4163 | |
6c613f68 AA |
4164 | if (_spi_xfer_word_delay_update(xfer, spi)) |
4165 | return -EINVAL; | |
700a2819 DL |
4166 | |
4167 | /* Make sure controller supports required offload features. */ | |
4168 | if (xfer->offload_flags) { | |
4169 | if (!message->offload) | |
4170 | return -EINVAL; | |
4171 | ||
4172 | if (xfer->offload_flags & ~message->offload->xfer_flags) | |
4173 | return -EINVAL; | |
4174 | } | |
e6811d1d LD |
4175 | } |
4176 | ||
cf32b71e | 4177 | message->status = -EINPROGRESS; |
90808738 MB |
4178 | |
4179 | return 0; | |
4180 | } | |
4181 | ||
fab53fea DL |
4182 | /* |
4183 | * spi_split_transfers - generic handling of transfer splitting | |
4184 | * @msg: the message to split | |
4185 | * | |
4186 | * Under certain conditions, a SPI controller may not support arbitrary | |
4187 | * transfer sizes or other features required by a peripheral. This function | |
4188 | * will split the transfers in the message into smaller transfers that are | |
4189 | * supported by the controller. | |
4190 | * | |
4191 | * Controllers with special requirements not covered here can also split | |
4192 | * transfers in the optimize_message() callback. | |
4193 | * | |
4194 | * Context: can sleep | |
4195 | * Return: zero on success, else a negative error code | |
4196 | */ | |
4197 | static int spi_split_transfers(struct spi_message *msg) | |
4198 | { | |
4199 | struct spi_controller *ctlr = msg->spi->controller; | |
4200 | struct spi_transfer *xfer; | |
4201 | int ret; | |
4202 | ||
4203 | /* | |
4204 | * If an SPI controller does not support toggling the CS line on each | |
4205 | * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO | |
4206 | * for the CS line, we can emulate the CS-per-word hardware function by | |
4207 | * splitting transfers into one-word transfers and ensuring that | |
4208 | * cs_change is set for each transfer. | |
4209 | */ | |
4210 | if ((msg->spi->mode & SPI_CS_WORD) && | |
4211 | (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) { | |
4212 | ret = spi_split_transfers_maxwords(ctlr, msg, 1); | |
4213 | if (ret) | |
4214 | return ret; | |
4215 | ||
4216 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | |
4217 | /* Don't change cs_change on the last entry in the list */ | |
4218 | if (list_is_last(&xfer->transfer_list, &msg->transfers)) | |
4219 | break; | |
4220 | ||
4221 | xfer->cs_change = 1; | |
4222 | } | |
4223 | } else { | |
4224 | ret = spi_split_transfers_maxsize(ctlr, msg, | |
4225 | spi_max_transfer_size(msg->spi)); | |
4226 | if (ret) | |
4227 | return ret; | |
4228 | } | |
4229 | ||
4230 | return 0; | |
4231 | } | |
4232 | ||
7b1d87af DL |
4233 | /* |
4234 | * __spi_optimize_message - shared implementation for spi_optimize_message() | |
4235 | * and spi_maybe_optimize_message() | |
4236 | * @spi: the device that will be used for the message | |
4237 | * @msg: the message to optimize | |
4238 | * | |
4239 | * Peripheral drivers will call spi_optimize_message() and the spi core will | |
4240 | * call spi_maybe_optimize_message() instead of calling this directly. | |
4241 | * | |
4242 | * It is not valid to call this on a message that has already been optimized. | |
4243 | * | |
4244 | * Return: zero on success, else a negative error code | |
4245 | */ | |
4246 | static int __spi_optimize_message(struct spi_device *spi, | |
4247 | struct spi_message *msg) | |
4248 | { | |
4249 | struct spi_controller *ctlr = spi->controller; | |
4250 | int ret; | |
4251 | ||
4252 | ret = __spi_validate(spi, msg); | |
4253 | if (ret) | |
4254 | return ret; | |
4255 | ||
fab53fea DL |
4256 | ret = spi_split_transfers(msg); |
4257 | if (ret) | |
4258 | return ret; | |
4259 | ||
7b1d87af DL |
4260 | if (ctlr->optimize_message) { |
4261 | ret = ctlr->optimize_message(msg); | |
fab53fea DL |
4262 | if (ret) { |
4263 | spi_res_release(ctlr, msg); | |
7b1d87af | 4264 | return ret; |
fab53fea | 4265 | } |
7b1d87af DL |
4266 | } |
4267 | ||
4268 | msg->optimized = true; | |
4269 | ||
4270 | return 0; | |
4271 | } | |
4272 | ||
4273 | /* | |
4274 | * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized | |
4275 | * @spi: the device that will be used for the message | |
4276 | * @msg: the message to optimize | |
4277 | * Return: zero on success, else a negative error code | |
4278 | */ | |
4279 | static int spi_maybe_optimize_message(struct spi_device *spi, | |
4280 | struct spi_message *msg) | |
4281 | { | |
ca52aa4c DL |
4282 | if (spi->controller->defer_optimize_message) { |
4283 | msg->spi = spi; | |
4284 | return 0; | |
4285 | } | |
4286 | ||
7b1d87af DL |
4287 | if (msg->pre_optimized) |
4288 | return 0; | |
4289 | ||
4290 | return __spi_optimize_message(spi, msg); | |
4291 | } | |
4292 | ||
4293 | /** | |
4294 | * spi_optimize_message - do any one-time validation and setup for a SPI message | |
4295 | * @spi: the device that will be used for the message | |
4296 | * @msg: the message to optimize | |
4297 | * | |
4298 | * Peripheral drivers that reuse the same message repeatedly may call this to | |
4299 | * perform as much message prep as possible once, rather than repeating it each | |
4300 | * time a message transfer is performed to improve throughput and reduce CPU | |
4301 | * usage. | |
4302 | * | |
4303 | * Once a message has been optimized, it cannot be modified with the exception | |
4304 | * of updating the contents of any xfer->tx_buf (the pointer can't be changed, | |
4305 | * only the data in the memory it points to). | |
4306 | * | |
4307 | * Calls to this function must be balanced with calls to spi_unoptimize_message() | |
4308 | * to avoid leaking resources. | |
4309 | * | |
4310 | * Context: can sleep | |
4311 | * Return: zero on success, else a negative error code | |
4312 | */ | |
4313 | int spi_optimize_message(struct spi_device *spi, struct spi_message *msg) | |
4314 | { | |
4315 | int ret; | |
4316 | ||
ca52aa4c DL |
4317 | /* |
4318 | * Pre-optimization is not supported and optimization is deferred e.g. | |
4319 | * when using spi-mux. | |
4320 | */ | |
4321 | if (spi->controller->defer_optimize_message) | |
4322 | return 0; | |
4323 | ||
7b1d87af DL |
4324 | ret = __spi_optimize_message(spi, msg); |
4325 | if (ret) | |
4326 | return ret; | |
4327 | ||
4328 | /* | |
4329 | * This flag indicates that the peripheral driver called spi_optimize_message() | |
4330 | * and therefore we shouldn't unoptimize message automatically when finalizing | |
4331 | * the message but rather wait until spi_unoptimize_message() is called | |
4332 | * by the peripheral driver. | |
4333 | */ | |
4334 | msg->pre_optimized = true; | |
4335 | ||
4336 | return 0; | |
4337 | } | |
4338 | EXPORT_SYMBOL_GPL(spi_optimize_message); | |
4339 | ||
4340 | /** | |
4341 | * spi_unoptimize_message - releases any resources allocated by spi_optimize_message() | |
4342 | * @msg: the message to unoptimize | |
4343 | * | |
4344 | * Calls to this function must be balanced with calls to spi_optimize_message(). | |
4345 | * | |
4346 | * Context: can sleep | |
4347 | */ | |
4348 | void spi_unoptimize_message(struct spi_message *msg) | |
4349 | { | |
ca52aa4c DL |
4350 | if (msg->spi->controller->defer_optimize_message) |
4351 | return; | |
4352 | ||
7b1d87af DL |
4353 | __spi_unoptimize_message(msg); |
4354 | msg->pre_optimized = false; | |
4355 | } | |
4356 | EXPORT_SYMBOL_GPL(spi_unoptimize_message); | |
4357 | ||
90808738 MB |
4358 | static int __spi_async(struct spi_device *spi, struct spi_message *message) |
4359 | { | |
8caab75f | 4360 | struct spi_controller *ctlr = spi->controller; |
b42faeee | 4361 | struct spi_transfer *xfer; |
90808738 | 4362 | |
b5932f5c BB |
4363 | /* |
4364 | * Some controllers do not support doing regular SPI transfers. Return | |
4365 | * ENOTSUPP when this is the case. | |
4366 | */ | |
4367 | if (!ctlr->transfer) | |
4368 | return -ENOTSUPP; | |
4369 | ||
6598b91b DJ |
4370 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async); |
4371 | SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async); | |
eca2ebc7 | 4372 | |
90808738 MB |
4373 | trace_spi_message_submit(message); |
4374 | ||
b42faeee VO |
4375 | if (!ctlr->ptp_sts_supported) { |
4376 | list_for_each_entry(xfer, &message->transfers, transfer_list) { | |
4377 | xfer->ptp_sts_word_pre = 0; | |
4378 | ptp_read_system_prets(xfer->ptp_sts); | |
4379 | } | |
4380 | } | |
4381 | ||
8caab75f | 4382 | return ctlr->transfer(spi, message); |
cf32b71e ES |
4383 | } |
4384 | ||
d4a0055f DL |
4385 | static void devm_spi_unoptimize_message(void *msg) |
4386 | { | |
4387 | spi_unoptimize_message(msg); | |
4388 | } | |
4389 | ||
4390 | /** | |
4391 | * devm_spi_optimize_message - managed version of spi_optimize_message() | |
4392 | * @dev: the device that manages @msg (usually @spi->dev) | |
4393 | * @spi: the device that will be used for the message | |
4394 | * @msg: the message to optimize | |
4395 | * Return: zero on success, else a negative error code | |
4396 | * | |
4397 | * spi_unoptimize_message() will automatically be called when the device is | |
4398 | * removed. | |
4399 | */ | |
4400 | int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, | |
4401 | struct spi_message *msg) | |
4402 | { | |
4403 | int ret; | |
4404 | ||
4405 | ret = spi_optimize_message(spi, msg); | |
4406 | if (ret) | |
4407 | return ret; | |
4408 | ||
4409 | return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg); | |
4410 | } | |
7e74a45c | 4411 | EXPORT_SYMBOL_GPL(devm_spi_optimize_message); |
d4a0055f | 4412 | |
568d0697 DB |
4413 | /** |
4414 | * spi_async - asynchronous SPI transfer | |
4415 | * @spi: device with which data will be exchanged | |
4416 | * @message: describes the data transfers, including completion callback | |
702ca026 | 4417 | * Context: any (IRQs may be blocked, etc) |
568d0697 DB |
4418 | * |
4419 | * This call may be used in_irq and other contexts which can't sleep, | |
4420 | * as well as from task contexts which can sleep. | |
4421 | * | |
4422 | * The completion callback is invoked in a context which can't sleep. | |
4423 | * Before that invocation, the value of message->status is undefined. | |
4424 | * When the callback is issued, message->status holds either zero (to | |
4425 | * indicate complete success) or a negative error code. After that | |
4426 | * callback returns, the driver which issued the transfer request may | |
4427 | * deallocate the associated memory; it's no longer in use by any SPI | |
4428 | * core or controller driver code. | |
4429 | * | |
4430 | * Note that although all messages to a spi_device are handled in | |
4431 | * FIFO order, messages may go to different devices in other orders. | |
4432 | * Some device might be higher priority, or have various "hard" access | |
4433 | * time requirements, for example. | |
4434 | * | |
4435 | * On detection of any fault during the transfer, processing of | |
4436 | * the entire message is aborted, and the device is deselected. | |
4437 | * Until returning from the associated message completion callback, | |
4438 | * no other spi_message queued to that device will be processed. | |
4439 | * (This rule applies equally to all the synchronous transfer calls, | |
4440 | * which are wrappers around this core asynchronous primitive.) | |
97d56dc6 JMC |
4441 | * |
4442 | * Return: zero on success, else a negative error code. | |
568d0697 DB |
4443 | */ |
4444 | int spi_async(struct spi_device *spi, struct spi_message *message) | |
4445 | { | |
8caab75f | 4446 | struct spi_controller *ctlr = spi->controller; |
cf32b71e ES |
4447 | int ret; |
4448 | unsigned long flags; | |
568d0697 | 4449 | |
7b1d87af DL |
4450 | ret = spi_maybe_optimize_message(spi, message); |
4451 | if (ret) | |
90808738 MB |
4452 | return ret; |
4453 | ||
8caab75f | 4454 | spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); |
568d0697 | 4455 | |
8caab75f | 4456 | if (ctlr->bus_lock_flag) |
cf32b71e ES |
4457 | ret = -EBUSY; |
4458 | else | |
4459 | ret = __spi_async(spi, message); | |
568d0697 | 4460 | |
8caab75f | 4461 | spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); |
cf32b71e ES |
4462 | |
4463 | return ret; | |
568d0697 DB |
4464 | } |
4465 | EXPORT_SYMBOL_GPL(spi_async); | |
4466 | ||
ae7d2346 DJ |
4467 | static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg) |
4468 | { | |
4469 | bool was_busy; | |
4470 | int ret; | |
4471 | ||
4472 | mutex_lock(&ctlr->io_mutex); | |
4473 | ||
1a9cafcb | 4474 | was_busy = ctlr->busy; |
ae7d2346 | 4475 | |
72c5c59b | 4476 | ctlr->cur_msg = msg; |
ae7d2346 DJ |
4477 | ret = __spi_pump_transfer_message(ctlr, msg, was_busy); |
4478 | if (ret) | |
bef4a48f | 4479 | dev_err(&ctlr->dev, "noqueue transfer failed\n"); |
69fa9590 DJ |
4480 | ctlr->cur_msg = NULL; |
4481 | ctlr->fallback = false; | |
4482 | ||
ae7d2346 DJ |
4483 | if (!was_busy) { |
4484 | kfree(ctlr->dummy_rx); | |
4485 | ctlr->dummy_rx = NULL; | |
4486 | kfree(ctlr->dummy_tx); | |
4487 | ctlr->dummy_tx = NULL; | |
4488 | if (ctlr->unprepare_transfer_hardware && | |
4489 | ctlr->unprepare_transfer_hardware(ctlr)) | |
4490 | dev_err(&ctlr->dev, | |
4491 | "failed to unprepare transfer hardware\n"); | |
4492 | spi_idle_runtime_pm(ctlr); | |
4493 | } | |
4494 | ||
ae7d2346 DJ |
4495 | mutex_unlock(&ctlr->io_mutex); |
4496 | } | |
4497 | ||
7d077197 DB |
4498 | /*-------------------------------------------------------------------------*/ |
4499 | ||
350de7ce AS |
4500 | /* |
4501 | * Utility methods for SPI protocol drivers, layered on | |
7d077197 DB |
4502 | * top of the core. Some other utility methods are defined as |
4503 | * inline functions. | |
4504 | */ | |
4505 | ||
5d870c8e AM |
4506 | static void spi_complete(void *arg) |
4507 | { | |
4508 | complete(arg); | |
4509 | } | |
4510 | ||
ef4d96ec | 4511 | static int __spi_sync(struct spi_device *spi, struct spi_message *message) |
cf32b71e ES |
4512 | { |
4513 | DECLARE_COMPLETION_ONSTACK(done); | |
0da9a579 | 4514 | unsigned long flags; |
cf32b71e | 4515 | int status; |
8caab75f | 4516 | struct spi_controller *ctlr = spi->controller; |
0461a414 | 4517 | |
bef4a48f MH |
4518 | if (__spi_check_suspended(ctlr)) { |
4519 | dev_warn_once(&spi->dev, "Attempted to sync while suspend\n"); | |
4520 | return -ESHUTDOWN; | |
4521 | } | |
4522 | ||
7b1d87af DL |
4523 | status = spi_maybe_optimize_message(spi, message); |
4524 | if (status) | |
0461a414 | 4525 | return status; |
cf32b71e | 4526 | |
6598b91b DJ |
4527 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync); |
4528 | SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync); | |
eca2ebc7 | 4529 | |
350de7ce | 4530 | /* |
ae7d2346 DJ |
4531 | * Checking queue_empty here only guarantees async/sync message |
4532 | * ordering when coming from the same context. It does not need to | |
4533 | * guard against reentrancy from a different context. The io_mutex | |
4534 | * will catch those cases. | |
0461a414 | 4535 | */ |
b30f7c8e | 4536 | if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) { |
ae7d2346 DJ |
4537 | message->actual_length = 0; |
4538 | message->status = -EINPROGRESS; | |
0461a414 MB |
4539 | |
4540 | trace_spi_message_submit(message); | |
4541 | ||
ae7d2346 DJ |
4542 | SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate); |
4543 | SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate); | |
0461a414 | 4544 | |
ae7d2346 DJ |
4545 | __spi_transfer_message_noqueue(ctlr, message); |
4546 | ||
4547 | return message->status; | |
0461a414 | 4548 | } |
cf32b71e | 4549 | |
ae7d2346 DJ |
4550 | /* |
4551 | * There are messages in the async queue that could have originated | |
4552 | * from the same context, so we need to preserve ordering. | |
4553 | * Therefor we send the message to the async queue and wait until they | |
4554 | * are completed. | |
4555 | */ | |
4556 | message->complete = spi_complete; | |
4557 | message->context = &done; | |
0da9a579 DL |
4558 | |
4559 | spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); | |
4560 | status = __spi_async(spi, message); | |
4561 | spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); | |
4562 | ||
cf32b71e ES |
4563 | if (status == 0) { |
4564 | wait_for_completion(&done); | |
4565 | status = message->status; | |
4566 | } | |
4756fa52 | 4567 | message->complete = NULL; |
cf32b71e | 4568 | message->context = NULL; |
ae7d2346 | 4569 | |
cf32b71e ES |
4570 | return status; |
4571 | } | |
4572 | ||
8ae12a0d DB |
4573 | /** |
4574 | * spi_sync - blocking/synchronous SPI data transfers | |
4575 | * @spi: device with which data will be exchanged | |
4576 | * @message: describes the data transfers | |
33e34dc6 | 4577 | * Context: can sleep |
8ae12a0d DB |
4578 | * |
4579 | * This call may only be used from a context that may sleep. The sleep | |
4580 | * is non-interruptible, and has no timeout. Low-overhead controller | |
4581 | * drivers may DMA directly into and out of the message buffers. | |
4582 | * | |
4583 | * Note that the SPI device's chip select is active during the message, | |
4584 | * and then is normally disabled between messages. Drivers for some | |
4585 | * frequently-used devices may want to minimize costs of selecting a chip, | |
4586 | * by leaving it selected in anticipation that the next message will go | |
4587 | * to the same chip. (That may increase power usage.) | |
4588 | * | |
0c868461 DB |
4589 | * Also, the caller is guaranteeing that the memory associated with the |
4590 | * message will not be freed before this call returns. | |
4591 | * | |
97d56dc6 | 4592 | * Return: zero on success, else a negative error code. |
8ae12a0d DB |
4593 | */ |
4594 | int spi_sync(struct spi_device *spi, struct spi_message *message) | |
4595 | { | |
ef4d96ec MB |
4596 | int ret; |
4597 | ||
8caab75f | 4598 | mutex_lock(&spi->controller->bus_lock_mutex); |
ef4d96ec | 4599 | ret = __spi_sync(spi, message); |
8caab75f | 4600 | mutex_unlock(&spi->controller->bus_lock_mutex); |
ef4d96ec MB |
4601 | |
4602 | return ret; | |
8ae12a0d DB |
4603 | } |
4604 | EXPORT_SYMBOL_GPL(spi_sync); | |
4605 | ||
cf32b71e ES |
4606 | /** |
4607 | * spi_sync_locked - version of spi_sync with exclusive bus usage | |
4608 | * @spi: device with which data will be exchanged | |
4609 | * @message: describes the data transfers | |
4610 | * Context: can sleep | |
4611 | * | |
4612 | * This call may only be used from a context that may sleep. The sleep | |
4613 | * is non-interruptible, and has no timeout. Low-overhead controller | |
4614 | * drivers may DMA directly into and out of the message buffers. | |
4615 | * | |
4616 | * This call should be used by drivers that require exclusive access to the | |
25985edc | 4617 | * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must |
cf32b71e ES |
4618 | * be released by a spi_bus_unlock call when the exclusive access is over. |
4619 | * | |
97d56dc6 | 4620 | * Return: zero on success, else a negative error code. |
cf32b71e ES |
4621 | */ |
4622 | int spi_sync_locked(struct spi_device *spi, struct spi_message *message) | |
4623 | { | |
ef4d96ec | 4624 | return __spi_sync(spi, message); |
cf32b71e ES |
4625 | } |
4626 | EXPORT_SYMBOL_GPL(spi_sync_locked); | |
4627 | ||
4628 | /** | |
4629 | * spi_bus_lock - obtain a lock for exclusive SPI bus usage | |
91ce208d | 4630 | * @ctlr: SPI bus controller that should be locked for exclusive bus access |
cf32b71e ES |
4631 | * Context: can sleep |
4632 | * | |
4633 | * This call may only be used from a context that may sleep. The sleep | |
4634 | * is non-interruptible, and has no timeout. | |
4635 | * | |
4636 | * This call should be used by drivers that require exclusive access to the | |
4637 | * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the | |
4638 | * exclusive access is over. Data transfer must be done by spi_sync_locked | |
4639 | * and spi_async_locked calls when the SPI bus lock is held. | |
4640 | * | |
97d56dc6 | 4641 | * Return: always zero. |
cf32b71e | 4642 | */ |
8caab75f | 4643 | int spi_bus_lock(struct spi_controller *ctlr) |
cf32b71e ES |
4644 | { |
4645 | unsigned long flags; | |
4646 | ||
8caab75f | 4647 | mutex_lock(&ctlr->bus_lock_mutex); |
cf32b71e | 4648 | |
8caab75f GU |
4649 | spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags); |
4650 | ctlr->bus_lock_flag = 1; | |
4651 | spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags); | |
cf32b71e | 4652 | |
95c8222f | 4653 | /* Mutex remains locked until spi_bus_unlock() is called */ |
cf32b71e ES |
4654 | |
4655 | return 0; | |
4656 | } | |
4657 | EXPORT_SYMBOL_GPL(spi_bus_lock); | |
4658 | ||
4659 | /** | |
4660 | * spi_bus_unlock - release the lock for exclusive SPI bus usage | |
91ce208d | 4661 | * @ctlr: SPI bus controller that was locked for exclusive bus access |
cf32b71e ES |
4662 | * Context: can sleep |
4663 | * | |
4664 | * This call may only be used from a context that may sleep. The sleep | |
4665 | * is non-interruptible, and has no timeout. | |
4666 | * | |
4667 | * This call releases an SPI bus lock previously obtained by an spi_bus_lock | |
4668 | * call. | |
4669 | * | |
97d56dc6 | 4670 | * Return: always zero. |
cf32b71e | 4671 | */ |
8caab75f | 4672 | int spi_bus_unlock(struct spi_controller *ctlr) |
cf32b71e | 4673 | { |
8caab75f | 4674 | ctlr->bus_lock_flag = 0; |
cf32b71e | 4675 | |
8caab75f | 4676 | mutex_unlock(&ctlr->bus_lock_mutex); |
cf32b71e ES |
4677 | |
4678 | return 0; | |
4679 | } | |
4680 | EXPORT_SYMBOL_GPL(spi_bus_unlock); | |
4681 | ||
95c8222f | 4682 | /* Portable code must never pass more than 32 bytes */ |
5fe5f05e | 4683 | #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES) |
8ae12a0d DB |
4684 | |
4685 | static u8 *buf; | |
4686 | ||
4687 | /** | |
4688 | * spi_write_then_read - SPI synchronous write followed by read | |
4689 | * @spi: device with which data will be exchanged | |
702ca026 | 4690 | * @txbuf: data to be written (need not be DMA-safe) |
8ae12a0d | 4691 | * @n_tx: size of txbuf, in bytes |
702ca026 | 4692 | * @rxbuf: buffer into which data will be read (need not be DMA-safe) |
27570497 | 4693 | * @n_rx: size of rxbuf, in bytes |
33e34dc6 | 4694 | * Context: can sleep |
8ae12a0d DB |
4695 | * |
4696 | * This performs a half duplex MicroWire style transaction with the | |
4697 | * device, sending txbuf and then reading rxbuf. The return value | |
4698 | * is zero for success, else a negative errno status code. | |
b885244e | 4699 | * This call may only be used from a context that may sleep. |
8ae12a0d | 4700 | * |
c373643b | 4701 | * Parameters to this routine are always copied using a small buffer. |
33e34dc6 | 4702 | * Performance-sensitive or bulk transfer code should instead use |
702ca026 | 4703 | * spi_{async,sync}() calls with DMA-safe buffers. |
97d56dc6 JMC |
4704 | * |
4705 | * Return: zero on success, else a negative error code. | |
8ae12a0d DB |
4706 | */ |
4707 | int spi_write_then_read(struct spi_device *spi, | |
0c4a1590 MB |
4708 | const void *txbuf, unsigned n_tx, |
4709 | void *rxbuf, unsigned n_rx) | |
8ae12a0d | 4710 | { |
068f4070 | 4711 | static DEFINE_MUTEX(lock); |
8ae12a0d DB |
4712 | |
4713 | int status; | |
4714 | struct spi_message message; | |
bdff549e | 4715 | struct spi_transfer x[2]; |
8ae12a0d DB |
4716 | u8 *local_buf; |
4717 | ||
350de7ce AS |
4718 | /* |
4719 | * Use preallocated DMA-safe buffer if we can. We can't avoid | |
b3a223ee MB |
4720 | * copying here, (as a pure convenience thing), but we can |
4721 | * keep heap costs out of the hot path unless someone else is | |
4722 | * using the pre-allocated buffer or the transfer is too large. | |
8ae12a0d | 4723 | */ |
b3a223ee | 4724 | if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) { |
2cd94c8a MB |
4725 | local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx), |
4726 | GFP_KERNEL | GFP_DMA); | |
b3a223ee MB |
4727 | if (!local_buf) |
4728 | return -ENOMEM; | |
4729 | } else { | |
4730 | local_buf = buf; | |
4731 | } | |
8ae12a0d | 4732 | |
8275c642 | 4733 | spi_message_init(&message); |
5fe5f05e | 4734 | memset(x, 0, sizeof(x)); |
bdff549e DB |
4735 | if (n_tx) { |
4736 | x[0].len = n_tx; | |
4737 | spi_message_add_tail(&x[0], &message); | |
4738 | } | |
4739 | if (n_rx) { | |
4740 | x[1].len = n_rx; | |
4741 | spi_message_add_tail(&x[1], &message); | |
4742 | } | |
8275c642 | 4743 | |
8ae12a0d | 4744 | memcpy(local_buf, txbuf, n_tx); |
bdff549e DB |
4745 | x[0].tx_buf = local_buf; |
4746 | x[1].rx_buf = local_buf + n_tx; | |
8ae12a0d | 4747 | |
702ca026 | 4748 | /* Do the I/O */ |
8ae12a0d | 4749 | status = spi_sync(spi, &message); |
9b938b74 | 4750 | if (status == 0) |
bdff549e | 4751 | memcpy(rxbuf, x[1].rx_buf, n_rx); |
8ae12a0d | 4752 | |
bdff549e | 4753 | if (x[0].tx_buf == buf) |
068f4070 | 4754 | mutex_unlock(&lock); |
8ae12a0d DB |
4755 | else |
4756 | kfree(local_buf); | |
4757 | ||
4758 | return status; | |
4759 | } | |
4760 | EXPORT_SYMBOL_GPL(spi_write_then_read); | |
4761 | ||
4762 | /*-------------------------------------------------------------------------*/ | |
4763 | ||
da21fde0 | 4764 | #if IS_ENABLED(CONFIG_OF_DYNAMIC) |
95c8222f | 4765 | /* Must call put_device() when done with returned spi_device device */ |
da21fde0 | 4766 | static struct spi_device *of_find_spi_device_by_node(struct device_node *node) |
ce79d54a | 4767 | { |
cfba5de9 SP |
4768 | struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); |
4769 | ||
ce79d54a PA |
4770 | return dev ? to_spi_device(dev) : NULL; |
4771 | } | |
4772 | ||
95c8222f | 4773 | /* The spi controllers are not using spi_bus, so we find it with another way */ |
8caab75f | 4774 | static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) |
ce79d54a PA |
4775 | { |
4776 | struct device *dev; | |
4777 | ||
91ce208d | 4778 | dev = class_find_device_by_of_node(&spi_controller_class, node); |
6c364062 | 4779 | if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) |
91ce208d | 4780 | dev = class_find_device_by_of_node(&spi_target_class, node); |
ce79d54a PA |
4781 | if (!dev) |
4782 | return NULL; | |
4783 | ||
95c8222f | 4784 | /* Reference got in class_find_device */ |
8caab75f | 4785 | return container_of(dev, struct spi_controller, dev); |
ce79d54a PA |
4786 | } |
4787 | ||
4788 | static int of_spi_notify(struct notifier_block *nb, unsigned long action, | |
4789 | void *arg) | |
4790 | { | |
4791 | struct of_reconfig_data *rd = arg; | |
8caab75f | 4792 | struct spi_controller *ctlr; |
ce79d54a PA |
4793 | struct spi_device *spi; |
4794 | ||
4795 | switch (of_reconfig_get_state_change(action, arg)) { | |
4796 | case OF_RECONFIG_CHANGE_ADD: | |
8caab75f GU |
4797 | ctlr = of_find_spi_controller_by_node(rd->dn->parent); |
4798 | if (ctlr == NULL) | |
95c8222f | 4799 | return NOTIFY_OK; /* Not for us */ |
ce79d54a | 4800 | |
bd6c1644 | 4801 | if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { |
8caab75f | 4802 | put_device(&ctlr->dev); |
bd6c1644 GU |
4803 | return NOTIFY_OK; |
4804 | } | |
4805 | ||
1a50d940 GU |
4806 | /* |
4807 | * Clear the flag before adding the device so that fw_devlink | |
4808 | * doesn't skip adding consumers to this device. | |
4809 | */ | |
4810 | rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; | |
8caab75f GU |
4811 | spi = of_register_spi_device(ctlr, rd->dn); |
4812 | put_device(&ctlr->dev); | |
ce79d54a PA |
4813 | |
4814 | if (IS_ERR(spi)) { | |
25c56c88 RH |
4815 | pr_err("%s: failed to create for '%pOF'\n", |
4816 | __func__, rd->dn); | |
e0af98a7 | 4817 | of_node_clear_flag(rd->dn, OF_POPULATED); |
ce79d54a PA |
4818 | return notifier_from_errno(PTR_ERR(spi)); |
4819 | } | |
4820 | break; | |
4821 | ||
4822 | case OF_RECONFIG_CHANGE_REMOVE: | |
95c8222f | 4823 | /* Already depopulated? */ |
bd6c1644 GU |
4824 | if (!of_node_check_flag(rd->dn, OF_POPULATED)) |
4825 | return NOTIFY_OK; | |
4826 | ||
95c8222f | 4827 | /* Find our device by node */ |
ce79d54a PA |
4828 | spi = of_find_spi_device_by_node(rd->dn); |
4829 | if (spi == NULL) | |
95c8222f | 4830 | return NOTIFY_OK; /* No? not meant for us */ |
ce79d54a | 4831 | |
95c8222f | 4832 | /* Unregister takes one ref away */ |
ce79d54a PA |
4833 | spi_unregister_device(spi); |
4834 | ||
95c8222f | 4835 | /* And put the reference of the find */ |
ce79d54a PA |
4836 | put_device(&spi->dev); |
4837 | break; | |
4838 | } | |
4839 | ||
4840 | return NOTIFY_OK; | |
4841 | } | |
4842 | ||
4843 | static struct notifier_block spi_of_notifier = { | |
4844 | .notifier_call = of_spi_notify, | |
4845 | }; | |
4846 | #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ | |
4847 | extern struct notifier_block spi_of_notifier; | |
4848 | #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */ | |
4849 | ||
7f24467f | 4850 | #if IS_ENABLED(CONFIG_ACPI) |
8caab75f | 4851 | static int spi_acpi_controller_match(struct device *dev, const void *data) |
7f24467f | 4852 | { |
b6ffe0e6 | 4853 | return device_match_acpi_dev(dev->parent, data); |
7f24467f OP |
4854 | } |
4855 | ||
a8ecbc54 | 4856 | struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev) |
7f24467f OP |
4857 | { |
4858 | struct device *dev; | |
4859 | ||
91ce208d | 4860 | dev = class_find_device(&spi_controller_class, NULL, adev, |
8caab75f | 4861 | spi_acpi_controller_match); |
6c364062 | 4862 | if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) |
91ce208d | 4863 | dev = class_find_device(&spi_target_class, NULL, adev, |
8caab75f | 4864 | spi_acpi_controller_match); |
7f24467f OP |
4865 | if (!dev) |
4866 | return NULL; | |
4867 | ||
8caab75f | 4868 | return container_of(dev, struct spi_controller, dev); |
7f24467f | 4869 | } |
a8ecbc54 | 4870 | EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev); |
7f24467f OP |
4871 | |
4872 | static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev) | |
4873 | { | |
4874 | struct device *dev; | |
4875 | ||
00500147 | 4876 | dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev); |
5b16668e | 4877 | return to_spi_device(dev); |
7f24467f OP |
4878 | } |
4879 | ||
4880 | static int acpi_spi_notify(struct notifier_block *nb, unsigned long value, | |
4881 | void *arg) | |
4882 | { | |
4883 | struct acpi_device *adev = arg; | |
8caab75f | 4884 | struct spi_controller *ctlr; |
7f24467f OP |
4885 | struct spi_device *spi; |
4886 | ||
4887 | switch (value) { | |
4888 | case ACPI_RECONFIG_DEVICE_ADD: | |
62fcb99b | 4889 | ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev)); |
8caab75f | 4890 | if (!ctlr) |
7f24467f OP |
4891 | break; |
4892 | ||
8caab75f GU |
4893 | acpi_register_spi_device(ctlr, adev); |
4894 | put_device(&ctlr->dev); | |
7f24467f OP |
4895 | break; |
4896 | case ACPI_RECONFIG_DEVICE_REMOVE: | |
4897 | if (!acpi_device_enumerated(adev)) | |
4898 | break; | |
4899 | ||
4900 | spi = acpi_spi_find_device_by_adev(adev); | |
4901 | if (!spi) | |
4902 | break; | |
4903 | ||
4904 | spi_unregister_device(spi); | |
4905 | put_device(&spi->dev); | |
4906 | break; | |
4907 | } | |
4908 | ||
4909 | return NOTIFY_OK; | |
4910 | } | |
4911 | ||
4912 | static struct notifier_block spi_acpi_notifier = { | |
4913 | .notifier_call = acpi_spi_notify, | |
4914 | }; | |
4915 | #else | |
4916 | extern struct notifier_block spi_acpi_notifier; | |
4917 | #endif | |
4918 | ||
8ae12a0d DB |
4919 | static int __init spi_init(void) |
4920 | { | |
b885244e DB |
4921 | int status; |
4922 | ||
e94b1766 | 4923 | buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL); |
b885244e DB |
4924 | if (!buf) { |
4925 | status = -ENOMEM; | |
4926 | goto err0; | |
4927 | } | |
4928 | ||
4929 | status = bus_register(&spi_bus_type); | |
4930 | if (status < 0) | |
4931 | goto err1; | |
8ae12a0d | 4932 | |
91ce208d | 4933 | status = class_register(&spi_controller_class); |
b885244e DB |
4934 | if (status < 0) |
4935 | goto err2; | |
ce79d54a | 4936 | |
6c364062 | 4937 | if (IS_ENABLED(CONFIG_SPI_SLAVE)) { |
91ce208d | 4938 | status = class_register(&spi_target_class); |
6c364062 GU |
4939 | if (status < 0) |
4940 | goto err3; | |
4941 | } | |
4942 | ||
5267720e | 4943 | if (IS_ENABLED(CONFIG_OF_DYNAMIC)) |
ce79d54a | 4944 | WARN_ON(of_reconfig_notifier_register(&spi_of_notifier)); |
7f24467f OP |
4945 | if (IS_ENABLED(CONFIG_ACPI)) |
4946 | WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier)); | |
ce79d54a | 4947 | |
8ae12a0d | 4948 | return 0; |
b885244e | 4949 | |
6c364062 | 4950 | err3: |
91ce208d | 4951 | class_unregister(&spi_controller_class); |
b885244e DB |
4952 | err2: |
4953 | bus_unregister(&spi_bus_type); | |
4954 | err1: | |
4955 | kfree(buf); | |
4956 | buf = NULL; | |
4957 | err0: | |
4958 | return status; | |
8ae12a0d | 4959 | } |
b885244e | 4960 | |
350de7ce AS |
4961 | /* |
4962 | * A board_info is normally registered in arch_initcall(), | |
4963 | * but even essential drivers wait till later. | |
b885244e | 4964 | * |
350de7ce AS |
4965 | * REVISIT only boardinfo really needs static linking. The rest (device and |
4966 | * driver registration) _could_ be dynamically linked (modular) ... Costs | |
b885244e | 4967 | * include needing to have boardinfo data structures be much more public. |
8ae12a0d | 4968 | */ |
673c0c00 | 4969 | postcore_initcall(spi_init); |