/* The industrial I/O simple minimally locked ring buffer.
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include "ring_sw.h"

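/* Design overview (a summary of the comments throughout this file): a single
 * writer, typically the bottom half of a data ready interrupt, advances
 * write_p, while readers follow read_p.  Consistency on the data path relies
 * on barriers and retry loops rather than locking; use_lock only guards the
 * use_count that stops the data area being freed and reallocated while a
 * reader is still using it.
 */
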
static inline int __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
					    int bytes_per_datum, int length)
{
	if ((length == 0) || (bytes_per_datum == 0))
		return -EINVAL;

	__iio_init_ring_buffer(&ring->buf, bytes_per_datum, length);
	spin_lock_init(&ring->use_lock);
	ring->data = kmalloc(length*ring->buf.bpd, GFP_KERNEL);
	ring->read_p = NULL;
	ring->write_p = NULL;
	ring->last_written_p = NULL;
	ring->half_p = NULL;
	return ring->data ? 0 : -ENOMEM;
}

static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
	kfree(ring->data);
}

void iio_mark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count++;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_mark_sw_rb_in_use);

void iio_unmark_sw_rb_in_use(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	spin_lock(&ring->use_lock);
	ring->use_count--;
	spin_unlock(&ring->use_lock);
}
EXPORT_SYMBOL(iio_unmark_sw_rb_in_use);

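/* The mark/unmark pair above exists so that iio_request_update_sw_rb() below
 * can refuse (-EAGAIN) to free and reallocate ring->data while any reader
 * still holds a pointer into it.
 */
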
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt
 * handler in the device driver */
/* Lock always held if there is a chance this may be called */
/* Only one of these per ring may run concurrently - enforced by drivers */
int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
			 unsigned char *data,
			 s64 timestamp)
{
	int ret = 0;
	int code;
	unsigned char *temp_ptr, *change_test_ptr;

	/* initial store */
	if (unlikely(ring->write_p == NULL)) {
		ring->write_p = ring->data;
		/* Doesn't actually matter if this is out of the set
		 * as long as the read pointer is valid before this
		 * passes it - guaranteed as it is set later in this function.
		 */
		ring->half_p = ring->data - ring->buf.length*ring->buf.bpd/2;
	}
	/* Copy data to wherever the current write pointer says */
	memcpy(ring->write_p, data, ring->buf.bpd);
	barrier();
	/* Update the pointer used to get the most recent value.
	 * Always valid as it points to either the latest or the second
	 * latest value.  Before this runs it is NULL and read attempts
	 * fail with -EAGAIN.
	 */
	ring->last_written_p = ring->write_p;
	barrier();
	/* temp_ptr is used to ensure we never have an invalid pointer;
	 * it may lag slightly, but it is never invalid.
	 */
	temp_ptr = ring->write_p + ring->buf.bpd;
	/* End of ring, back to the beginning */
	if (temp_ptr == ring->data + ring->buf.length*ring->buf.bpd)
		temp_ptr = ring->data;
	/* Update the write pointer.
	 * Always valid as long as this is the only function able to write.
	 * Care is needed on SMP systems to ensure more than one ring fill
	 * is never scheduled.
	 */
	ring->write_p = temp_ptr;

	if (ring->read_p == NULL)
		ring->read_p = ring->data;
	/* Buffer full - move the read pointer and create / escalate
	 * ring event */
	/* Tricky case - the read pointer may move before we adjust it.
	 * Handle by not pushing if it has moved - may result in occasional
	 * unnecessary buffer full events when it wasn't quite true.
	 */
	else if (ring->write_p == ring->read_p) {
		change_test_ptr = ring->read_p;
		temp_ptr = change_test_ptr + ring->buf.bpd;
		if (temp_ptr
		    == ring->data + ring->buf.length*ring->buf.bpd)
			temp_ptr = ring->data;
		/* We are moving the pointer on by one because the ring is
		 * full.  Any change to the read pointer will be this or
		 * greater.
		 */
		if (change_test_ptr == ring->read_p)
			ring->read_p = temp_ptr;

		spin_lock(&ring->buf.shared_ev_pointer.lock);

		ret = iio_push_or_escallate_ring_event(&ring->buf,
						       IIO_EVENT_CODE_RING_100_FULL,
						       timestamp);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
		if (ret)
			goto error_ret;
	}
	/* Investigate whether our event barrier has been passed */
	/* There are definite 'issues' with this and the chance of a
	 * simultaneous read */
	/* Also need to use a loop count to ensure this only happens once */
	ring->half_p += ring->buf.bpd;
	if (ring->half_p == ring->data + ring->buf.length*ring->buf.bpd)
		ring->half_p = ring->data;
	if (ring->half_p == ring->read_p) {
		spin_lock(&ring->buf.shared_ev_pointer.lock);
		code = IIO_EVENT_CODE_RING_50_FULL;
		ret = __iio_push_event(&ring->buf.ev_int,
				       code,
				       timestamp,
				       &ring->buf.shared_ev_pointer);
		spin_unlock(&ring->buf.shared_ev_pointer.lock);
	}
error_ret:
	return ret;
}
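
/* Illustrative example (hypothetical values): with bpd = 2 and length = 4 the
 * buffer spans ring->data .. ring->data + 8.  Successive stores move write_p
 * to data + 2, data + 4, data + 6, and the temp_ptr check above then wraps it
 * back to ring->data.  half_p trails half a buffer (4 bytes) behind and
 * pushes IIO_EVENT_CODE_RING_50_FULL whenever it catches up with read_p.
 */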

int iio_rip_sw_rb(struct iio_ring_buffer *r,
		  size_t count, u8 **data, int *dead_offset)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
	int ret, max_copied;
	int bytes_to_rip;

	/* A userspace program has probably made an error if it tries to
	 * read something that is not a whole number of bpds.
	 * Return an error.
	 */
	if (count % ring->buf.bpd) {
		ret = -EINVAL;
		printk(KERN_INFO "Ring buffer read request not whole number of"
		       " samples: Request bytes %zd, Current bpd %d\n",
		       count, ring->buf.bpd);
		goto error_ret;
	}
	/* Limit size to the whole of the ring buffer */
	bytes_to_rip = min((size_t)(ring->buf.bpd*ring->buf.length), count);

	*data = kmalloc(bytes_to_rip, GFP_KERNEL);
	if (*data == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* build local copy */
	initial_read_p = ring->read_p;
	if (unlikely(initial_read_p == NULL)) { /* No data here as yet */
		ret = 0;
		goto error_free_data_cpy;
	}

	initial_write_p = ring->write_p;

	/* Need a consistent pair */
	while ((initial_read_p != ring->read_p)
	       || (initial_write_p != ring->write_p)) {
		initial_read_p = ring->read_p;
		initial_write_p = ring->write_p;
	}
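	/* Note on the loop above: a concurrent store can move both pointers
	 * under us, so we re-read until the local copies match the live
	 * values, giving a consistent (if slightly stale) snapshot to copy
	 * from.
	 */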
	if (initial_write_p == initial_read_p) {
		/* No new data available. */
		ret = 0;
		goto error_free_data_cpy;
	}

	if (initial_write_p >= initial_read_p + bytes_to_rip) {
		/* write_p is greater than necessary, all is easy */
		max_copied = bytes_to_rip;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_read_p + max_copied;
	} else if (initial_write_p > initial_read_p) {
		/* not enough data to copy */
		max_copied = initial_write_p - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		end_read_p = initial_write_p;
	} else {
		/* going through the 'end' of the ring buffer */
		max_copied = ring->data
			+ ring->buf.length*ring->buf.bpd - initial_read_p;
		memcpy(*data, initial_read_p, max_copied);
		/* possible we are done if we align precisely with the end */
		if (max_copied == bytes_to_rip)
			end_read_p = ring->data;
		else if (initial_write_p
			 > ring->data + bytes_to_rip - max_copied) {
			/* enough data to finish; compute the end position
			 * before updating max_copied, otherwise the offset
			 * into the wrapped region is lost */
			memcpy(*data + max_copied, ring->data,
			       bytes_to_rip - max_copied);
			end_read_p = ring->data + (bytes_to_rip - max_copied);
			max_copied = bytes_to_rip;
		} else { /* not enough data */
			memcpy(*data + max_copied, ring->data,
			       initial_write_p - ring->data);
			max_copied += initial_write_p - ring->data;
			end_read_p = initial_write_p;
		}
	}
	/* Now verify which section was cleanly copied - i.e. how far
	 * the read pointer has been pushed */
	current_read_p = ring->read_p;

	if (initial_read_p <= current_read_p)
		*dead_offset = current_read_p - initial_read_p;
	else
		*dead_offset = ring->buf.length*ring->buf.bpd
			- (initial_read_p - current_read_p);
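
	/* Worked example (hypothetical numbers): in a 16 byte buffer, if we
	 * started at initial_read_p = data + 12 and concurrent stores have
	 * pushed read_p on to current_read_p = data + 4, the else branch
	 * above gives *dead_offset = 16 - (12 - 4) = 8: the first 8 bytes
	 * of our copy were overwritten while we were copying them.
	 */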

	/* possible issue if the initial write has been lapped or indeed
	 * the point we were reading to has been passed */
	/* No valid data read.
	 * In this case the read pointer is already correct, having been
	 * pushed further than we would look. */
	if (max_copied - *dead_offset < 0) {
		ret = 0;
		goto error_free_data_cpy;
	}

	/* set up the next read position */
	/* Beware, this may fail due to concurrency fun and games.
	 * It is possible that sufficient fill commands have run to push the
	 * read pointer past where we would be after the rip.  If this
	 * occurs, leave it be.
	 */
	/* Tricky - deal with loops */

	while (ring->read_p != end_read_p)
		ring->read_p = end_read_p;

	return max_copied - *dead_offset;

error_free_data_cpy:
	kfree(*data);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_rip_sw_rb);
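
/* Sketch of a consumer, e.g. a chrdev read() implementation (hypothetical
 * code, not part of this file).  iio_rip_sw_rb() allocates *data itself, so
 * on success the caller must skip the first dead_offset bytes, which may
 * have been overwritten mid-read, and then kfree() the buffer:
 *
 *	u8 *buf;
 *	int dead_offset;
 *	int copied = iio_rip_sw_rb(r, count, &buf, &dead_offset);
 *
 *	if (copied > 0) {
 *		if (copy_to_user(ubuf, buf + dead_offset, copied))
 *			ret = -EFAULT;
 *		kfree(buf);
 *	}
 *
 * On the zero and negative return paths the function has either already
 * freed the buffer (error_free_data_cpy) or never allocated it.
 */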

int iio_store_to_sw_rb(struct iio_ring_buffer *r, u8 *data, s64 timestamp)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return iio_store_to_sw_ring(ring, data, timestamp);
}
EXPORT_SYMBOL(iio_store_to_sw_rb);
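
/* Sketch of the producer side (hypothetical driver code; all names here are
 * illustrative only).  The bottom half of a data ready interrupt assembles
 * one scan of bpd bytes and pushes it with a timestamp:
 *
 *	static void example_poll_bh(struct work_struct *work)
 *	{
 *		u8 sample[EXAMPLE_SCAN_BYTES];
 *
 *		example_read_scan(sample);
 *		iio_store_to_sw_rb(example_ring, sample, example_timestamp);
 *	}
 *
 * As noted above iio_store_to_sw_ring(), only one such fill per ring may run
 * at a time; enforcing that is the driver's responsibility.
 */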

int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
			       unsigned char *data)
{
	unsigned char *last_written_p_copy;

	iio_mark_sw_rb_in_use(&ring->buf);
again:
	barrier();
	last_written_p_copy = ring->last_written_p;
	barrier(); /* unnecessary? */
	/* Check there is anything here */
	if (last_written_p_copy == NULL) {
		iio_unmark_sw_rb_in_use(&ring->buf);
		return -EAGAIN;
	}
	memcpy(data, last_written_p_copy, ring->buf.bpd);

	/* If a concurrent store moved last_written_p while we were copying,
	 * the copy may be torn - try again.
	 */
	if (unlikely(ring->last_written_p != last_written_p_copy))
		goto again;

	iio_unmark_sw_rb_in_use(&ring->buf);
	return 0;
}

int iio_read_last_from_sw_rb(struct iio_ring_buffer *r,
			     unsigned char *data)
{
	return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
}
EXPORT_SYMBOL(iio_read_last_from_sw_rb);

int iio_request_update_sw_rb(struct iio_ring_buffer *r)
{
	int ret = 0;
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);

	spin_lock(&ring->use_lock);
	if (!ring->update_needed)
		goto error_ret;
	if (ring->use_count) {
		ret = -EAGAIN;
		goto error_ret;
	}
	__iio_free_sw_ring_buffer(ring);
	ret = __iio_init_sw_ring_buffer(ring, ring->buf.bpd, ring->buf.length);
error_ret:
	spin_unlock(&ring->use_lock);
	return ret;
}
EXPORT_SYMBOL(iio_request_update_sw_rb);

int iio_get_bpd_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	return ring->buf.bpd;
}
EXPORT_SYMBOL(iio_get_bpd_sw_rb);

int iio_set_bpd_sw_rb(struct iio_ring_buffer *r, size_t bpd)
{
	if (r->bpd != bpd) {
		r->bpd = bpd;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_bpd_sw_rb);

int iio_get_length_sw_rb(struct iio_ring_buffer *r)
{
	return r->length;
}
EXPORT_SYMBOL(iio_get_length_sw_rb);

int iio_set_length_sw_rb(struct iio_ring_buffer *r, int length)
{
	if (r->length != length) {
		r->length = length;
		if (r->access.mark_param_change)
			r->access.mark_param_change(r);
	}
	return 0;
}
EXPORT_SYMBOL(iio_set_length_sw_rb);

int iio_mark_update_needed_sw_rb(struct iio_ring_buffer *r)
{
	struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
	ring->update_needed = true;
	return 0;
}
EXPORT_SYMBOL(iio_mark_update_needed_sw_rb);
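
/* The setters above do not resize anything directly: they record the new
 * value and, where mark_param_change is wired up (typically to
 * iio_mark_update_needed_sw_rb()), flag update_needed so that the next call
 * to iio_request_update_sw_rb() frees and reallocates the data area with the
 * new bpd and length.
 */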

static void iio_sw_rb_release(struct device *dev)
{
	struct iio_ring_buffer *r = to_iio_ring_buffer(dev);
	kfree(iio_to_sw_ring(r));
}

static IIO_RING_ENABLE_ATTR;
static IIO_RING_BPS_ATTR;
static IIO_RING_LENGTH_ATTR;

/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
	&dev_attr_length.attr,
	&dev_attr_bps.attr,
	&dev_attr_ring_enable.attr,
	NULL,
};

static struct attribute_group iio_ring_attribute_group = {
	.attrs = iio_ring_attributes,
};

static const struct attribute_group *iio_ring_attribute_groups[] = {
	&iio_ring_attribute_group,
	NULL
};

static struct device_type iio_sw_ring_type = {
	.release = iio_sw_rb_release,
	.groups = iio_ring_attribute_groups,
};

struct iio_ring_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *buf;
	struct iio_sw_ring_buffer *ring;

	ring = kzalloc(sizeof *ring, GFP_KERNEL);
	if (!ring)
		return NULL;
	buf = &ring->buf;

	iio_ring_buffer_init(buf, indio_dev);
	buf->dev.type = &iio_sw_ring_type;
	device_initialize(&buf->dev);
	buf->dev.parent = &indio_dev->dev;
	buf->dev.class = &iio_class;
	dev_set_drvdata(&buf->dev, (void *)buf);

	return buf;
}
EXPORT_SYMBOL(iio_sw_rb_allocate);

void iio_sw_rb_free(struct iio_ring_buffer *r)
{
	if (r)
		iio_put_ring_buffer(r);
}
EXPORT_SYMBOL(iio_sw_rb_free);
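
/* Sketch of the expected lifecycle in a driver (hypothetical code; note that
 * the data area itself is only allocated when iio_request_update_sw_rb()
 * runs with update_needed set):
 *
 *	indio_dev->ring = iio_sw_rb_allocate(indio_dev);
 *	if (!indio_dev->ring)
 *		return -ENOMEM;
 *	iio_set_bpd_sw_rb(indio_dev->ring, scan_bytes);
 *	iio_set_length_sw_rb(indio_dev->ring, 128);
 *	...
 *	iio_sw_rb_free(indio_dev->ring);
 *
 * Freeing goes through iio_put_ring_buffer(), so the memory is actually
 * released from iio_sw_rb_release() once the device reference count drops.
 */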

MODULE_DESCRIPTION("Industrial I/O software ring buffer");
MODULE_LICENSE("GPL");