]> git.ipfire.org Git - thirdparty/linux.git/blob - drivers/staging/gasket/gasket_core.c
Merge tag 'io_uring-5.7-2020-05-22' of git://git.kernel.dk/linux-block
[thirdparty/linux.git] / drivers / staging / gasket / gasket_core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Gasket generic driver framework. This file contains the implementation
4 * for the Gasket generic driver framework - the functionality that is common
5 * across Gasket devices.
6 *
7 * Copyright (C) 2018 Google, Inc.
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include "gasket_core.h"
13
14 #include "gasket_interrupt.h"
15 #include "gasket_ioctl.h"
16 #include "gasket_page_table.h"
17 #include "gasket_sysfs.h"
18
#include <linux/capability.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/sched.h>
29
30 #ifdef GASKET_KERNEL_TRACE_SUPPORT
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/gasket_mmap.h>
33 #else
34 #define trace_gasket_mmap_exit(x)
35 #define trace_gasket_mmap_entry(x, ...)
36 #endif
37
38 /*
39 * "Private" members of gasket_driver_desc.
40 *
41 * Contains internal per-device type tracking data, i.e., data not appropriate
42 * as part of the public interface for the generic framework.
43 */
struct gasket_internal_desc {
	/* Device-specific-driver-provided configuration information. */
	const struct gasket_driver_desc *driver_desc;

	/* Protects access to per-driver data (i.e. this structure). */
	struct mutex mutex;

	/* Kernel-internal device class, used when creating device nodes. */
	struct class *class;

	/* Instantiated / present devices of this type, indexed by slot. */
	struct gasket_dev *devs[GASKET_DEV_MAX];
};
57
/* do_map_region() needs be able to return more than just true/false. */
enum do_map_region_status {
	/* The region was successfully mapped. */
	DO_MAP_REGION_SUCCESS,

	/* Attempted to map region and failed; any partial map is unwound. */
	DO_MAP_REGION_FAILURE,

	/* The requested region to map was not part of a mappable region. */
	DO_MAP_REGION_INVALID,
};
69
/* Global data definitions. */
/* Mutex - only for framework-wide data. Other data should be protected by
 * finer-grained locks.
 */
static DEFINE_MUTEX(g_mutex);

/* List of all registered device descriptions & their supporting data. */
static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];

/* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
static const struct gasket_num_name gasket_status_name_table[] = {
	{ GASKET_STATUS_DEAD, "DEAD" },
	{ GASKET_STATUS_ALIVE, "ALIVE" },
	{ GASKET_STATUS_LAMED, "LAMED" },
	{ GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
	{ 0, NULL },
};
87
/*
 * Enumeration of the automatic Gasket framework sysfs nodes. Each value is
 * stored as an attribute's attr_type and decoded in gasket_sysfs_data_show().
 */
enum gasket_sysfs_attribute_type {
	ATTR_BAR_OFFSETS,
	ATTR_BAR_SIZES,
	ATTR_DRIVER_VERSION,
	ATTR_FRAMEWORK_VERSION,
	ATTR_DEVICE_TYPE,
	ATTR_HARDWARE_REVISION,
	ATTR_PCI_ADDRESS,
	ATTR_STATUS,
	ATTR_IS_DEVICE_OWNED,
	ATTR_DEVICE_OWNER,
	ATTR_WRITE_OPEN_COUNT,
	ATTR_RESET_COUNT,
	ATTR_USER_MEM_RANGES
};
104
105 /* Perform a standard Gasket callback. */
106 static inline int
107 check_and_invoke_callback(struct gasket_dev *gasket_dev,
108 int (*cb_function)(struct gasket_dev *))
109 {
110 int ret = 0;
111
112 if (cb_function) {
113 mutex_lock(&gasket_dev->mutex);
114 ret = cb_function(gasket_dev);
115 mutex_unlock(&gasket_dev->mutex);
116 }
117 return ret;
118 }
119
/*
 * Invoke an optional per-device callback without taking gasket_dev->mutex.
 * Returns the callback's result, or 0 when no callback is provided.
 */
static inline int
gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
					int (*cb_function)(struct gasket_dev *))
{
	if (!cb_function)
		return 0;

	return cb_function(gasket_dev);
}
131
132 /*
133 * Return nonzero if the gasket_cdev_info is owned by the current thread group
134 * ID.
135 */
136 static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
137 {
138 return (info->ownership.is_owned &&
139 (info->ownership.owner == current->tgid));
140 }
141
142 /*
143 * Find the next free gasket_internal_dev slot.
144 *
145 * Returns the located slot number on success or a negative number on failure.
146 */
147 static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
148 const char *kobj_name)
149 {
150 int i;
151
152 mutex_lock(&internal_desc->mutex);
153
154 /* Search for a previous instance of this device. */
155 for (i = 0; i < GASKET_DEV_MAX; i++) {
156 if (internal_desc->devs[i] &&
157 strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
158 pr_err("Duplicate device %s\n", kobj_name);
159 mutex_unlock(&internal_desc->mutex);
160 return -EBUSY;
161 }
162 }
163
164 /* Find a free device slot. */
165 for (i = 0; i < GASKET_DEV_MAX; i++) {
166 if (!internal_desc->devs[i])
167 break;
168 }
169
170 if (i == GASKET_DEV_MAX) {
171 pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
172 mutex_unlock(&internal_desc->mutex);
173 return -EBUSY;
174 }
175
176 mutex_unlock(&internal_desc->mutex);
177 return i;
178 }
179
180 /*
181 * Allocate and initialize a Gasket device structure, add the device to the
182 * device list.
183 *
184 * Returns 0 if successful, a negative error code otherwise.
185 */
186 static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
187 struct device *parent, struct gasket_dev **pdev)
188 {
189 int dev_idx;
190 const struct gasket_driver_desc *driver_desc =
191 internal_desc->driver_desc;
192 struct gasket_dev *gasket_dev;
193 struct gasket_cdev_info *dev_info;
194 const char *parent_name = dev_name(parent);
195
196 pr_debug("Allocating a Gasket device, parent %s.\n", parent_name);
197
198 *pdev = NULL;
199
200 dev_idx = gasket_find_dev_slot(internal_desc, parent_name);
201 if (dev_idx < 0)
202 return dev_idx;
203
204 gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
205 if (!gasket_dev) {
206 pr_err("no memory for device, parent %s\n", parent_name);
207 return -ENOMEM;
208 }
209 internal_desc->devs[dev_idx] = gasket_dev;
210
211 mutex_init(&gasket_dev->mutex);
212
213 gasket_dev->internal_desc = internal_desc;
214 gasket_dev->dev_idx = dev_idx;
215 snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", parent_name);
216 gasket_dev->dev = get_device(parent);
217 /* gasket_bar_data is uninitialized. */
218 gasket_dev->num_page_tables = driver_desc->num_page_tables;
219 /* max_page_table_size and *page table are uninit'ed */
220 /* interrupt_data is not initialized. */
221 /* status is 0, or GASKET_STATUS_DEAD */
222
223 dev_info = &gasket_dev->dev_info;
224 snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
225 gasket_dev->dev_idx);
226 dev_info->devt =
227 MKDEV(driver_desc->major, driver_desc->minor +
228 gasket_dev->dev_idx);
229 dev_info->device =
230 device_create(internal_desc->class, parent, dev_info->devt,
231 gasket_dev, dev_info->name);
232
233 /* cdev has not yet been added; cdev_added is 0 */
234 dev_info->gasket_dev_ptr = gasket_dev;
235 /* ownership is all 0, indicating no owner or opens. */
236
237 return 0;
238 }
239
240 /* Free a Gasket device. */
241 static void gasket_free_dev(struct gasket_dev *gasket_dev)
242 {
243 struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
244
245 mutex_lock(&internal_desc->mutex);
246 internal_desc->devs[gasket_dev->dev_idx] = NULL;
247 mutex_unlock(&internal_desc->mutex);
248 put_device(gasket_dev->dev);
249 kfree(gasket_dev);
250 }
251
252 /*
253 * Maps the specified bar into kernel space.
254 *
255 * Returns 0 on success, a negative error code otherwise.
256 * A zero-sized BAR will not be mapped, but is not an error.
257 */
258 static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
259 {
260 struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
261 const struct gasket_driver_desc *driver_desc =
262 internal_desc->driver_desc;
263 ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
264 int ret;
265
266 if (desc_bytes == 0)
267 return 0;
268
269 if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
270 /* not PCI: skip this entry */
271 return 0;
272 }
273 /*
274 * pci_resource_start and pci_resource_len return a "resource_size_t",
275 * which is safely castable to ulong (which itself is the arg to
276 * request_mem_region).
277 */
278 gasket_dev->bar_data[bar_num].phys_base =
279 (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
280 if (!gasket_dev->bar_data[bar_num].phys_base) {
281 dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
282 bar_num);
283 return -EINVAL;
284 }
285
286 gasket_dev->bar_data[bar_num].length_bytes =
287 (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
288 if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
289 dev_err(gasket_dev->dev,
290 "PCI BAR %u space is too small: %lu; expected >= %lu\n",
291 bar_num, gasket_dev->bar_data[bar_num].length_bytes,
292 desc_bytes);
293 return -ENOMEM;
294 }
295
296 if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
297 gasket_dev->bar_data[bar_num].length_bytes,
298 gasket_dev->dev_info.name)) {
299 dev_err(gasket_dev->dev,
300 "Cannot get BAR %d memory region %p\n",
301 bar_num, &gasket_dev->pci_dev->resource[bar_num]);
302 return -EINVAL;
303 }
304
305 gasket_dev->bar_data[bar_num].virt_base =
306 ioremap(gasket_dev->bar_data[bar_num].phys_base,
307 gasket_dev->bar_data[bar_num].length_bytes);
308 if (!gasket_dev->bar_data[bar_num].virt_base) {
309 dev_err(gasket_dev->dev,
310 "Cannot remap BAR %d memory region %p\n",
311 bar_num, &gasket_dev->pci_dev->resource[bar_num]);
312 ret = -ENOMEM;
313 goto fail;
314 }
315
316 dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
317 dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
318
319 return 0;
320
321 fail:
322 iounmap(gasket_dev->bar_data[bar_num].virt_base);
323 release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
324 gasket_dev->bar_data[bar_num].length_bytes);
325 return ret;
326 }
327
328 /*
329 * Releases PCI BAR mapping.
330 *
331 * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
332 */
333 static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
334 {
335 ulong base, bytes;
336 struct gasket_internal_desc *internal_desc = dev->internal_desc;
337 const struct gasket_driver_desc *driver_desc =
338 internal_desc->driver_desc;
339
340 if (driver_desc->bar_descriptions[bar_num].size == 0 ||
341 !dev->bar_data[bar_num].virt_base)
342 return;
343
344 if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
345 return;
346
347 iounmap(dev->bar_data[bar_num].virt_base);
348 dev->bar_data[bar_num].virt_base = NULL;
349
350 base = pci_resource_start(dev->pci_dev, bar_num);
351 if (!base) {
352 dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
353 bar_num);
354 return;
355 }
356
357 bytes = pci_resource_len(dev->pci_dev, bar_num);
358 release_mem_region(base, bytes);
359 }
360
361 /*
362 * Setup PCI memory mapping for the specified device.
363 *
364 * Reads the BAR registers and sets up pointers to the device's memory mapped
365 * IO space.
366 *
367 * Returns 0 on success and a negative value otherwise.
368 */
369 static int gasket_setup_pci(struct pci_dev *pci_dev,
370 struct gasket_dev *gasket_dev)
371 {
372 int i, mapped_bars, ret;
373
374 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
375 ret = gasket_map_pci_bar(gasket_dev, i);
376 if (ret) {
377 mapped_bars = i;
378 goto fail;
379 }
380 }
381
382 return 0;
383
384 fail:
385 for (i = 0; i < mapped_bars; i++)
386 gasket_unmap_pci_bar(gasket_dev, i);
387
388 return -ENOMEM;
389 }
390
391 /* Unmaps memory for the specified device. */
392 static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
393 {
394 int i;
395
396 for (i = 0; i < PCI_STD_NUM_BARS; i++)
397 gasket_unmap_pci_bar(gasket_dev, i);
398 }
399
400 /* Determine the health of the Gasket device. */
401 static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
402 {
403 int status;
404 int i;
405 const struct gasket_driver_desc *driver_desc =
406 gasket_dev->internal_desc->driver_desc;
407
408 status = gasket_check_and_invoke_callback_nolock(gasket_dev,
409 driver_desc->device_status_cb);
410 if (status != GASKET_STATUS_ALIVE) {
411 dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
412 status);
413 return status;
414 }
415
416 status = gasket_interrupt_system_status(gasket_dev);
417 if (status != GASKET_STATUS_ALIVE) {
418 dev_dbg(gasket_dev->dev,
419 "Interrupt system reported status %d.\n", status);
420 return status;
421 }
422
423 for (i = 0; i < driver_desc->num_page_tables; ++i) {
424 status = gasket_page_table_system_status(gasket_dev->page_table[i]);
425 if (status != GASKET_STATUS_ALIVE) {
426 dev_dbg(gasket_dev->dev,
427 "Page table %d reported status %d.\n",
428 i, status);
429 return status;
430 }
431 }
432
433 return GASKET_STATUS_ALIVE;
434 }
435
436 static ssize_t
437 gasket_write_mappable_regions(char *buf,
438 const struct gasket_driver_desc *driver_desc,
439 int bar_index)
440 {
441 int i;
442 ssize_t written;
443 ssize_t total_written = 0;
444 ulong min_addr, max_addr;
445 struct gasket_bar_desc bar_desc =
446 driver_desc->bar_descriptions[bar_index];
447
448 if (bar_desc.permissions == GASKET_NOMAP)
449 return 0;
450 for (i = 0;
451 i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
452 i++) {
453 min_addr = bar_desc.mappable_regions[i].start -
454 driver_desc->legacy_mmap_address_offset;
455 max_addr = bar_desc.mappable_regions[i].start -
456 driver_desc->legacy_mmap_address_offset +
457 bar_desc.mappable_regions[i].length_bytes;
458 written = scnprintf(buf, PAGE_SIZE - total_written,
459 "0x%08lx-0x%08lx\n", min_addr, max_addr);
460 total_written += written;
461 buf += written;
462 }
463 return total_written;
464 }
465
466 static ssize_t gasket_sysfs_data_show(struct device *device,
467 struct device_attribute *attr, char *buf)
468 {
469 int i, ret = 0;
470 ssize_t current_written = 0;
471 const struct gasket_driver_desc *driver_desc;
472 struct gasket_dev *gasket_dev;
473 struct gasket_sysfs_attribute *gasket_attr;
474 const struct gasket_bar_desc *bar_desc;
475 enum gasket_sysfs_attribute_type sysfs_type;
476
477 gasket_dev = gasket_sysfs_get_device_data(device);
478 if (!gasket_dev) {
479 dev_err(device, "No sysfs mapping found for device\n");
480 return 0;
481 }
482
483 gasket_attr = gasket_sysfs_get_attr(device, attr);
484 if (!gasket_attr) {
485 dev_err(device, "No sysfs attr found for device\n");
486 gasket_sysfs_put_device_data(device, gasket_dev);
487 return 0;
488 }
489
490 driver_desc = gasket_dev->internal_desc->driver_desc;
491
492 sysfs_type =
493 (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
494 switch (sysfs_type) {
495 case ATTR_BAR_OFFSETS:
496 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
497 bar_desc = &driver_desc->bar_descriptions[i];
498 if (bar_desc->size == 0)
499 continue;
500 current_written =
501 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
502 (ulong)bar_desc->base);
503 buf += current_written;
504 ret += current_written;
505 }
506 break;
507 case ATTR_BAR_SIZES:
508 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
509 bar_desc = &driver_desc->bar_descriptions[i];
510 if (bar_desc->size == 0)
511 continue;
512 current_written =
513 snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
514 (ulong)bar_desc->size);
515 buf += current_written;
516 ret += current_written;
517 }
518 break;
519 case ATTR_DRIVER_VERSION:
520 ret = snprintf(buf, PAGE_SIZE, "%s\n",
521 gasket_dev->internal_desc->driver_desc->driver_version);
522 break;
523 case ATTR_FRAMEWORK_VERSION:
524 ret = snprintf(buf, PAGE_SIZE, "%s\n",
525 GASKET_FRAMEWORK_VERSION);
526 break;
527 case ATTR_DEVICE_TYPE:
528 ret = snprintf(buf, PAGE_SIZE, "%s\n",
529 gasket_dev->internal_desc->driver_desc->name);
530 break;
531 case ATTR_HARDWARE_REVISION:
532 ret = snprintf(buf, PAGE_SIZE, "%d\n",
533 gasket_dev->hardware_revision);
534 break;
535 case ATTR_PCI_ADDRESS:
536 ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
537 break;
538 case ATTR_STATUS:
539 ret = snprintf(buf, PAGE_SIZE, "%s\n",
540 gasket_num_name_lookup(gasket_dev->status,
541 gasket_status_name_table));
542 break;
543 case ATTR_IS_DEVICE_OWNED:
544 ret = snprintf(buf, PAGE_SIZE, "%d\n",
545 gasket_dev->dev_info.ownership.is_owned);
546 break;
547 case ATTR_DEVICE_OWNER:
548 ret = snprintf(buf, PAGE_SIZE, "%d\n",
549 gasket_dev->dev_info.ownership.owner);
550 break;
551 case ATTR_WRITE_OPEN_COUNT:
552 ret = snprintf(buf, PAGE_SIZE, "%d\n",
553 gasket_dev->dev_info.ownership.write_open_count);
554 break;
555 case ATTR_RESET_COUNT:
556 ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
557 break;
558 case ATTR_USER_MEM_RANGES:
559 for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
560 current_written =
561 gasket_write_mappable_regions(buf, driver_desc,
562 i);
563 buf += current_written;
564 ret += current_written;
565 }
566 break;
567 default:
568 dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
569 attr->attr.name);
570 ret = 0;
571 break;
572 }
573
574 gasket_sysfs_put_attr(device, gasket_attr);
575 gasket_sysfs_put_device_data(device, gasket_dev);
576 return ret;
577 }
578
/*
 * These attributes apply to all Gasket driver instances. Each entry binds a
 * node name to gasket_sysfs_data_show() with the matching attribute type.
 */
static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
	GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
	GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
	GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
			ATTR_DRIVER_VERSION),
	GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
			ATTR_FRAMEWORK_VERSION),
	GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
	GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
			ATTR_HARDWARE_REVISION),
	GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
	GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
	GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
			ATTR_IS_DEVICE_OWNED),
	GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
			ATTR_DEVICE_OWNER),
	GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
			ATTR_WRITE_OPEN_COUNT),
	GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
	GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
			ATTR_USER_MEM_RANGES),
	GASKET_END_OF_ATTR_ARRAY
};
603
604 /* Add a char device and related info. */
605 static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
606 const struct file_operations *file_ops,
607 struct module *owner)
608 {
609 int ret;
610
611 cdev_init(&dev_info->cdev, file_ops);
612 dev_info->cdev.owner = owner;
613 ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
614 if (ret) {
615 dev_err(dev_info->gasket_dev_ptr->dev,
616 "cannot add char device [ret=%d]\n", ret);
617 return ret;
618 }
619 dev_info->cdev_added = 1;
620
621 return 0;
622 }
623
624 /* Disable device operations. */
625 void gasket_disable_device(struct gasket_dev *gasket_dev)
626 {
627 const struct gasket_driver_desc *driver_desc =
628 gasket_dev->internal_desc->driver_desc;
629 int i;
630
631 /* Only delete the device if it has been successfully added. */
632 if (gasket_dev->dev_info.cdev_added)
633 cdev_del(&gasket_dev->dev_info.cdev);
634
635 gasket_dev->status = GASKET_STATUS_DEAD;
636
637 gasket_interrupt_cleanup(gasket_dev);
638
639 for (i = 0; i < driver_desc->num_page_tables; ++i) {
640 if (gasket_dev->page_table[i]) {
641 gasket_page_table_reset(gasket_dev->page_table[i]);
642 gasket_page_table_cleanup(gasket_dev->page_table[i]);
643 }
644 }
645 }
646 EXPORT_SYMBOL(gasket_disable_device);
647
648 /*
649 * Registered driver descriptor lookup for PCI devices.
650 *
651 * Precondition: Called with g_mutex held (to avoid a race on return).
652 * Returns NULL if no matching device was found.
653 */
654 static struct gasket_internal_desc *
655 lookup_pci_internal_desc(struct pci_dev *pci_dev)
656 {
657 int i;
658
659 __must_hold(&g_mutex);
660 for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
661 if (g_descs[i].driver_desc &&
662 g_descs[i].driver_desc->pci_id_table &&
663 pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
664 return &g_descs[i];
665 }
666
667 return NULL;
668 }
669
670 /*
671 * Verifies that the user has permissions to perform the requested mapping and
672 * that the provided descriptor/range is of adequate size to hold the range to
673 * be mapped.
674 */
675 static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
676 struct vm_area_struct *vma,
677 int bar_permissions)
678 {
679 int requested_permissions;
680 /* Always allow sysadmin to access. */
681 if (capable(CAP_SYS_ADMIN))
682 return true;
683
684 /* Never allow non-sysadmins to access to a dead device. */
685 if (gasket_dev->status != GASKET_STATUS_ALIVE) {
686 dev_dbg(gasket_dev->dev, "Device is dead.\n");
687 return false;
688 }
689
690 /* Make sure that no wrong flags are set. */
691 requested_permissions =
692 (vma->vm_flags & VM_ACCESS_FLAGS);
693 if (requested_permissions & ~(bar_permissions)) {
694 dev_dbg(gasket_dev->dev,
695 "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
696 requested_permissions, bar_permissions);
697 return false;
698 }
699
700 /* Do not allow a non-owner to write. */
701 if ((vma->vm_flags & VM_WRITE) &&
702 !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
703 dev_dbg(gasket_dev->dev,
704 "Attempting to mmap a region for write without owning device.\n");
705 return false;
706 }
707
708 return true;
709 }
710
711 /*
712 * Verifies that the input address is within the region allocated to coherent
713 * buffer.
714 */
715 static bool
716 gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
717 ulong address)
718 {
719 struct gasket_coherent_buffer_desc coh_buff_desc =
720 driver_desc->coherent_buffer_description;
721
722 if (coh_buff_desc.permissions != GASKET_NOMAP) {
723 if ((address >= coh_buff_desc.base) &&
724 (address < coh_buff_desc.base + coh_buff_desc.size)) {
725 return true;
726 }
727 }
728 return false;
729 }
730
731 static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
732 ulong phys_addr)
733 {
734 int i;
735 const struct gasket_driver_desc *driver_desc;
736
737 driver_desc = gasket_dev->internal_desc->driver_desc;
738 for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
739 struct gasket_bar_desc bar_desc =
740 driver_desc->bar_descriptions[i];
741
742 if (bar_desc.permissions != GASKET_NOMAP) {
743 if (phys_addr >= bar_desc.base &&
744 phys_addr < (bar_desc.base + bar_desc.size)) {
745 return i;
746 }
747 }
748 }
749 /* If we haven't found the address by now, it is invalid. */
750 return -EINVAL;
751 }
752
/*
 * Sets the actual bounds to map, given the device's mappable region.
 *
 * Given the device's mappable region, along with the user-requested mapping
 * start offset and length of the user region, determine how much of this
 * mappable region can be mapped into the user's region (start/end offsets),
 * and the physical offset (phys_offset) into the BAR where the mapping should
 * begin (either the VMA's or region lower bound).
 *
 * In other words, this calculates the overlap between the VMA
 * (bar_offset, requested_length) and the given gasket_mappable_region.
 *
 * Returns true if there's anything to map, and false otherwise.
 *
 * NOTE(review): bar_offset + requested_length could wrap for very large
 * requests; confirm that callers bound these values before relying on the
 * comparisons below.
 */
static bool
gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
			    ulong bar_offset, ulong requested_length,
			    struct gasket_mappable_region *mappable_region,
			    ulong *virt_offset)
{
	ulong range_start = region->start;
	ulong range_length = region->length_bytes;
	ulong range_end = range_start + range_length;

	*virt_offset = 0;
	if (bar_offset + requested_length < range_start) {
		/*
		 * If the requested region is completely below the range,
		 * there is nothing to map.
		 */
		return false;
	} else if (bar_offset <= range_start) {
		/* If the bar offset is below this range's start
		 * but the requested length continues into it:
		 * 1) Only map starting from the beginning of this
		 *      range's phys. offset, so we don't map unmappable
		 *	memory.
		 * 2) The length of the virtual memory to not map is the
		 *	delta between the bar offset and the
		 *	mappable start (and since the mappable start is
		 *	bigger, start - req.)
		 * 3) The map length is the minimum of the mappable
		 *	requested length (requested_length - virt_offset)
		 *	and the actual mappable length of the range.
		 */
		mappable_region->start = range_start;
		*virt_offset = range_start - bar_offset;
		mappable_region->length_bytes =
			min(requested_length - *virt_offset, range_length);
		return true;
	} else if (bar_offset > range_start &&
		   bar_offset < range_end) {
		/*
		 * If the bar offset is within this range:
		 * 1) Map starting from the bar offset.
		 * 2) Because there is no forbidden memory between the
		 *	bar offset and the range start,
		 *	virt_offset is 0.
		 * 3) The map length is the minimum of the requested
		 *	length and the remaining length in the buffer
		 *	(range_end - bar_offset)
		 */
		mappable_region->start = bar_offset;
		*virt_offset = 0;
		mappable_region->length_bytes =
			min(requested_length, range_end - bar_offset);
		return true;
	}

	/*
	 * If the requested [start] offset is above range_end,
	 * there's nothing to map.
	 */
	return false;
}
828
829 /*
830 * Calculates the offset where the VMA range begins in its containing BAR.
831 * The offset is written into bar_offset on success.
832 * Returns zero on success, anything else on error.
833 */
834 static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
835 const struct vm_area_struct *vma,
836 ulong *bar_offset)
837 {
838 ulong raw_offset;
839 int bar_index;
840 const struct gasket_driver_desc *driver_desc =
841 gasket_dev->internal_desc->driver_desc;
842
843 raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
844 driver_desc->legacy_mmap_address_offset;
845 bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
846 if (bar_index < 0) {
847 dev_err(gasket_dev->dev,
848 "Unable to find matching bar for address 0x%lx\n",
849 raw_offset);
850 trace_gasket_mmap_exit(bar_index);
851 return bar_index;
852 }
853 *bar_offset =
854 raw_offset - driver_desc->bar_descriptions[bar_index].base;
855
856 return 0;
857 }
858
859 int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
860 struct vm_area_struct *vma,
861 const struct gasket_mappable_region *map_region)
862 {
863 ulong bar_offset;
864 ulong virt_offset;
865 struct gasket_mappable_region mappable_region;
866 int ret;
867
868 if (map_region->length_bytes == 0)
869 return 0;
870
871 ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
872 if (ret)
873 return ret;
874
875 if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
876 vma->vm_end - vma->vm_start,
877 &mappable_region, &virt_offset))
878 return 1;
879
880 /*
881 * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
882 * PAGE_SIZE! Trust me. I have the scars.
883 *
884 * Next multiple of y: ceil_div(x, y) * y
885 */
886 zap_vma_ptes(vma, vma->vm_start + virt_offset,
887 DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
888 PAGE_SIZE);
889 return 0;
890 }
891 EXPORT_SYMBOL(gasket_mm_unmap_region);
892
/* Maps a virtual address + range to a physical offset of a BAR. */
static enum do_map_region_status
do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
	      struct gasket_mappable_region *mappable_region)
{
	/* Maximum size of a single call to io_remap_pfn_range. */
	/* I pulled this number out of thin air. */
	const ulong max_chunk_size = 64 * 1024 * 1024;
	ulong chunk_size, mapped_bytes = 0;

	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	ulong bar_offset, virt_offset;
	struct gasket_mappable_region region_to_map;
	ulong phys_offset, map_length;
	ulong virt_base, phys_base;
	int bar_index, ret;

	/* Locate the VMA's offset within its containing BAR. */
	ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
	if (ret)
		return DO_MAP_REGION_INVALID;

	/* Clip the request to its overlap with this mappable region. */
	if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
					 vma->vm_end - vma->vm_start,
					 &region_to_map, &virt_offset))
		return DO_MAP_REGION_INVALID;
	phys_offset = region_to_map.start;
	map_length = region_to_map.length_bytes;

	virt_base = vma->vm_start + virt_offset;
	bar_index =
		gasket_get_bar_index(gasket_dev,
				     (vma->vm_pgoff << PAGE_SHIFT) +
				     driver_desc->legacy_mmap_address_offset);

	if (bar_index < 0)
		return DO_MAP_REGION_INVALID;

	phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
	while (mapped_bytes < map_length) {
		/*
		 * io_remap_pfn_range can take a while, so we chunk its
		 * calls and call cond_resched between each.
		 */
		chunk_size = min(max_chunk_size, map_length - mapped_bytes);

		cond_resched();
		ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
					 (phys_base + mapped_bytes) >>
					 PAGE_SHIFT, chunk_size,
					 vma->vm_page_prot);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Error remapping PFN range.\n");
			goto fail;
		}
		mapped_bytes += chunk_size;
	}

	return DO_MAP_REGION_SUCCESS;

fail:
	/* Unmap the partial chunk we mapped. */
	mappable_region->length_bytes = mapped_bytes;
	if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
		dev_err(gasket_dev->dev,
			"Error unmapping partial region 0x%lx (0x%lx bytes)\n",
			(ulong)virt_offset,
			(ulong)mapped_bytes);

	return DO_MAP_REGION_FAILURE;
}
966
967 /* Map a region of coherent memory. */
968 static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
969 struct vm_area_struct *vma)
970 {
971 const struct gasket_driver_desc *driver_desc =
972 gasket_dev->internal_desc->driver_desc;
973 const ulong requested_length = vma->vm_end - vma->vm_start;
974 int ret;
975 ulong permissions;
976
977 if (requested_length == 0 || requested_length >
978 gasket_dev->coherent_buffer.length_bytes) {
979 trace_gasket_mmap_exit(-EINVAL);
980 return -EINVAL;
981 }
982
983 permissions = driver_desc->coherent_buffer_description.permissions;
984 if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
985 dev_err(gasket_dev->dev, "Permission checking failed.\n");
986 trace_gasket_mmap_exit(-EPERM);
987 return -EPERM;
988 }
989
990 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
991
992 ret = remap_pfn_range(vma, vma->vm_start,
993 (gasket_dev->coherent_buffer.phys_base) >>
994 PAGE_SHIFT, requested_length, vma->vm_page_prot);
995 if (ret) {
996 dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
997 ret);
998 trace_gasket_mmap_exit(ret);
999 return ret;
1000 }
1001
1002 /* Record the user virtual to dma_address mapping that was
1003 * created by the kernel.
1004 */
1005 gasket_set_user_virt(gasket_dev, requested_length,
1006 gasket_dev->coherent_buffer.phys_base,
1007 vma->vm_start);
1008 return 0;
1009 }
1010
1011 /* Map a device's BARs into user space. */
1012 static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
1013 {
1014 int i, ret;
1015 int bar_index;
1016 int has_mapped_anything = 0;
1017 ulong permissions;
1018 ulong raw_offset, vma_size;
1019 bool is_coherent_region;
1020 const struct gasket_driver_desc *driver_desc;
1021 struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
1022 const struct gasket_bar_desc *bar_desc;
1023 struct gasket_mappable_region *map_regions = NULL;
1024 int num_map_regions = 0;
1025 enum do_map_region_status map_status;
1026
1027 driver_desc = gasket_dev->internal_desc->driver_desc;
1028
1029 if (vma->vm_start & ~PAGE_MASK) {
1030 dev_err(gasket_dev->dev,
1031 "Base address not page-aligned: 0x%lx\n",
1032 vma->vm_start);
1033 trace_gasket_mmap_exit(-EINVAL);
1034 return -EINVAL;
1035 }
1036
1037 /* Calculate the offset of this range into physical mem. */
1038 raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
1039 driver_desc->legacy_mmap_address_offset;
1040 vma_size = vma->vm_end - vma->vm_start;
1041 trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
1042 vma_size);
1043
1044 /*
1045 * Check if the raw offset is within a bar region. If not, check if it
1046 * is a coherent region.
1047 */
1048 bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
1049 is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
1050 if (bar_index < 0 && !is_coherent_region) {
1051 dev_err(gasket_dev->dev,
1052 "Unable to find matching bar for address 0x%lx\n",
1053 raw_offset);
1054 trace_gasket_mmap_exit(bar_index);
1055 return bar_index;
1056 }
1057 if (bar_index > 0 && is_coherent_region) {
1058 dev_err(gasket_dev->dev,
1059 "double matching bar and coherent buffers for address 0x%lx\n",
1060 raw_offset);
1061 trace_gasket_mmap_exit(bar_index);
1062 return -EINVAL;
1063 }
1064
1065 vma->vm_private_data = gasket_dev;
1066
1067 if (is_coherent_region)
1068 return gasket_mmap_coherent(gasket_dev, vma);
1069
1070 /* Everything in the rest of this function is for normal BAR mapping. */
1071
1072 /*
1073 * Subtract the base of the bar from the raw offset to get the
1074 * memory location within the bar to map.
1075 */
1076 bar_desc = &driver_desc->bar_descriptions[bar_index];
1077 permissions = bar_desc->permissions;
1078 if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
1079 dev_err(gasket_dev->dev, "Permission checking failed.\n");
1080 trace_gasket_mmap_exit(-EPERM);
1081 return -EPERM;
1082 }
1083
1084 if (driver_desc->get_mappable_regions_cb) {
1085 ret = driver_desc->get_mappable_regions_cb(gasket_dev,
1086 bar_index,
1087 &map_regions,
1088 &num_map_regions);
1089 if (ret)
1090 return ret;
1091 } else {
1092 if (!gasket_mmap_has_permissions(gasket_dev, vma,
1093 bar_desc->permissions)) {
1094 dev_err(gasket_dev->dev,
1095 "Permission checking failed.\n");
1096 trace_gasket_mmap_exit(-EPERM);
1097 return -EPERM;
1098 }
1099 num_map_regions = bar_desc->num_mappable_regions;
1100 map_regions = kcalloc(num_map_regions,
1101 sizeof(*bar_desc->mappable_regions),
1102 GFP_KERNEL);
1103 if (map_regions) {
1104 memcpy(map_regions, bar_desc->mappable_regions,
1105 num_map_regions *
1106 sizeof(*bar_desc->mappable_regions));
1107 }
1108 }
1109
1110 if (!map_regions || num_map_regions == 0) {
1111 dev_err(gasket_dev->dev, "No mappable regions returned!\n");
1112 return -EINVAL;
1113 }
1114
1115 /* Marks the VMA's pages as uncacheable. */
1116 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1117 for (i = 0; i < num_map_regions; i++) {
1118 map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
1119 /* Try the next region if this one was not mappable. */
1120 if (map_status == DO_MAP_REGION_INVALID)
1121 continue;
1122 if (map_status == DO_MAP_REGION_FAILURE) {
1123 ret = -ENOMEM;
1124 goto fail;
1125 }
1126
1127 has_mapped_anything = 1;
1128 }
1129
1130 kfree(map_regions);
1131
1132 /* If we could not map any memory, the request was invalid. */
1133 if (!has_mapped_anything) {
1134 dev_err(gasket_dev->dev,
1135 "Map request did not contain a valid region.\n");
1136 trace_gasket_mmap_exit(-EINVAL);
1137 return -EINVAL;
1138 }
1139
1140 trace_gasket_mmap_exit(0);
1141 return 0;
1142
1143 fail:
1144 /* Need to unmap any mapped ranges. */
1145 num_map_regions = i;
1146 for (i = 0; i < num_map_regions; i++)
1147 if (gasket_mm_unmap_region(gasket_dev, vma,
1148 &bar_desc->mappable_regions[i]))
1149 dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
1150 i);
1151 kfree(map_regions);
1152
1153 return ret;
1154 }
1155
/*
 * Open the char device file.
 *
 * If the open is for writing, and the device is not owned, this process becomes
 * the owner. If the open is for writing and the device is already owned by
 * some other process, it is an error. If this process is the owner, increment
 * the open count.
 *
 * Root (CAP_SYS_ADMIN in the caller's user namespace) may open for writing
 * even when another TGID owns the node.
 *
 * Returns 0 if successful, a negative error number otherwise.
 */
static int gasket_open(struct inode *inode, struct file *filp)
{
	int ret;
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc;
	struct gasket_ownership *ownership;
	char task_name[TASK_COMM_LEN];
	struct gasket_cdev_info *dev_info =
		container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);

	gasket_dev = dev_info->gasket_dev_ptr;
	driver_desc = gasket_dev->internal_desc->driver_desc;
	ownership = &dev_info->ownership;
	get_task_comm(task_name, current);
	filp->private_data = gasket_dev;
	inode->i_size = 0;

	dev_dbg(gasket_dev->dev,
		"Attempting to open with tgid %u (%s) (f_mode: 0%03o, fmode_write: %d is_root: %u)\n",
		current->tgid, task_name, filp->f_mode,
		(filp->f_mode & FMODE_WRITE), is_root);

	/* Always allow non-writing accesses. */
	if (!(filp->f_mode & FMODE_WRITE)) {
		dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
		return 0;
	}

	/* Ownership state is protected by the per-device mutex. */
	mutex_lock(&gasket_dev->mutex);

	dev_dbg(gasket_dev->dev,
		"Current owner open count (owning tgid %u): %d.\n",
		ownership->owner, ownership->write_open_count);

	/* Opening a node owned by another TGID is an error (unless root) */
	if (ownership->is_owned && ownership->owner != current->tgid &&
	    !is_root) {
		dev_err(gasket_dev->dev,
			"Process %u is opening a node held by %u.\n",
			current->tgid, ownership->owner);
		mutex_unlock(&gasket_dev->mutex);
		return -EPERM;
	}

	/* If the node is not owned, assign it to the current TGID. */
	if (!ownership->is_owned) {
		/*
		 * Give the driver a chance to refuse or prepare for the open
		 * before ownership is claimed.
		 */
		ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
							      driver_desc->device_open_cb);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Error in device open cb: %d\n", ret);
			mutex_unlock(&gasket_dev->mutex);
			return ret;
		}
		ownership->is_owned = 1;
		ownership->owner = current->tgid;
		dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
			ownership->owner);
	}

	/* Count write opens; gasket_release() decrements this. */
	ownership->write_open_count++;

	dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
		ownership->owner, ownership->write_open_count);

	mutex_unlock(&gasket_dev->mutex);
	return 0;
}
1236
1237 /*
1238 * Called on a close of the device file. If this process is the owner,
1239 * decrement the open count. On last close by the owner, free up buffers and
1240 * eventfd contexts, and release ownership.
1241 *
1242 * Returns 0 if successful, a negative error number otherwise.
1243 */
1244 static int gasket_release(struct inode *inode, struct file *file)
1245 {
1246 int i;
1247 struct gasket_dev *gasket_dev;
1248 struct gasket_ownership *ownership;
1249 const struct gasket_driver_desc *driver_desc;
1250 char task_name[TASK_COMM_LEN];
1251 struct gasket_cdev_info *dev_info =
1252 container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
1253 struct pid_namespace *pid_ns = task_active_pid_ns(current);
1254 bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
1255
1256 gasket_dev = dev_info->gasket_dev_ptr;
1257 driver_desc = gasket_dev->internal_desc->driver_desc;
1258 ownership = &dev_info->ownership;
1259 get_task_comm(task_name, current);
1260 mutex_lock(&gasket_dev->mutex);
1261
1262 dev_dbg(gasket_dev->dev,
1263 "Releasing device node. Call origin: tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
1264 current->tgid, task_name, file->f_mode,
1265 (file->f_mode & FMODE_WRITE), is_root);
1266 dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
1267 ownership->owner, ownership->write_open_count);
1268
1269 if (file->f_mode & FMODE_WRITE) {
1270 ownership->write_open_count--;
1271 if (ownership->write_open_count == 0) {
1272 dev_dbg(gasket_dev->dev, "Device is now free\n");
1273 ownership->is_owned = 0;
1274 ownership->owner = 0;
1275
1276 /* Forces chip reset before we unmap the page tables. */
1277 driver_desc->device_reset_cb(gasket_dev);
1278
1279 for (i = 0; i < driver_desc->num_page_tables; ++i) {
1280 gasket_page_table_unmap_all(gasket_dev->page_table[i]);
1281 gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
1282 gasket_free_coherent_memory_all(gasket_dev, i);
1283 }
1284
1285 /* Closes device, enters power save. */
1286 gasket_check_and_invoke_callback_nolock(gasket_dev,
1287 driver_desc->device_close_cb);
1288 }
1289 }
1290
1291 dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
1292 ownership->owner, ownership->write_open_count);
1293 mutex_unlock(&gasket_dev->mutex);
1294 return 0;
1295 }
1296
1297 /*
1298 * Gasket ioctl dispatch function.
1299 *
1300 * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
1301 * ioctl_handler_cb registered in the driver description.
1302 * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
1303 */
1304 static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
1305 {
1306 struct gasket_dev *gasket_dev;
1307 const struct gasket_driver_desc *driver_desc;
1308 void __user *argp = (void __user *)arg;
1309 char path[256];
1310
1311 gasket_dev = (struct gasket_dev *)filp->private_data;
1312 driver_desc = gasket_dev->internal_desc->driver_desc;
1313 if (!driver_desc) {
1314 dev_dbg(gasket_dev->dev,
1315 "Unable to find device descriptor for file %s\n",
1316 d_path(&filp->f_path, path, 256));
1317 return -ENODEV;
1318 }
1319
1320 if (!gasket_is_supported_ioctl(cmd)) {
1321 /*
1322 * The ioctl handler is not a standard Gasket callback, since
1323 * it requires different arguments. This means we can't use
1324 * check_and_invoke_callback.
1325 */
1326 if (driver_desc->ioctl_handler_cb)
1327 return driver_desc->ioctl_handler_cb(filp, cmd, argp);
1328
1329 dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
1330 return -EINVAL;
1331 }
1332
1333 return gasket_handle_ioctl(filp, cmd, argp);
1334 }
1335
/*
 * File operations for all Gasket devices. Installed on the char device by
 * gasket_enable_device() via gasket_add_cdev().
 */
static const struct file_operations gasket_file_ops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,	/* Gasket device nodes are not seekable. */
	.mmap = gasket_mmap,
	.open = gasket_open,
	.release = gasket_release,
	.unlocked_ioctl = gasket_ioctl,
};
1345
/*
 * Perform final init and marks the device as active.
 *
 * Sets up interrupts and the per-driver page tables, queries the hardware
 * revision and health status, and finally exposes the char device node.
 * Returns 0 on success or a negative error code.
 */
int gasket_enable_device(struct gasket_dev *gasket_dev)
{
	int tbl_idx;
	int ret;
	const struct gasket_driver_desc *driver_desc =
		gasket_dev->internal_desc->driver_desc;

	ret = gasket_interrupt_init(gasket_dev);
	if (ret) {
		dev_err(gasket_dev->dev,
			"Critical failure to allocate interrupts: %d\n", ret);
		gasket_interrupt_cleanup(gasket_dev);
		return ret;
	}

	/* Initialize one page table per table the driver declares. */
	for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
		dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
			tbl_idx);
		ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
					     &gasket_dev->bar_data[driver_desc->page_table_bar_index],
					     &driver_desc->page_table_configs[tbl_idx],
					     gasket_dev->dev,
					     gasket_dev->pci_dev);
		if (ret) {
			dev_err(gasket_dev->dev,
				"Couldn't init page table %d: %d\n",
				tbl_idx, ret);
			/*
			 * NOTE(review): interrupts (and any earlier page
			 * tables) are not torn down on this path —
			 * presumably the caller's disable/teardown handles
			 * it; verify.
			 */
			return ret;
		}
		/*
		 * Make sure that the page table is clear and set to simple
		 * addresses.
		 */
		gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
	}

	/*
	 * hardware_revision_cb returns a positive integer (the rev) if
	 * successful.)
	 */
	ret = check_and_invoke_callback(gasket_dev,
					driver_desc->hardware_revision_cb);
	if (ret < 0) {
		dev_err(gasket_dev->dev,
			"Error getting hardware revision: %d\n", ret);
		return ret;
	}
	gasket_dev->hardware_revision = ret;

	/* device_status_cb returns a device status, not an error code. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD)
		dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");

	/* Expose the char device node last, once the device is usable. */
	ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
			      driver_desc->module);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(gasket_enable_device);
1409
/*
 * Allocate and register one gasket device instance under @parent_dev.
 *
 * On success, stores the new device in *gasket_devp and returns 0; on
 * failure, unwinds whatever was set up and returns a negative error code.
 */
static int __gasket_add_device(struct device *parent_dev,
			       struct gasket_internal_desc *internal_desc,
			       struct gasket_dev **gasket_devp)
{
	int ret;
	struct gasket_dev *gasket_dev;
	const struct gasket_driver_desc *driver_desc =
		internal_desc->driver_desc;

	/*
	 * gasket_alloc_dev() apparently also creates dev_info.device, since
	 * that field is IS_ERR()-checked below. NOTE(review): confirm against
	 * gasket_alloc_dev's definition (not visible in this chunk).
	 */
	ret = gasket_alloc_dev(internal_desc, parent_dev, &gasket_dev);
	if (ret)
		return ret;
	if (IS_ERR(gasket_dev->dev_info.device)) {
		dev_err(parent_dev, "Cannot create %s device %s [ret = %ld]\n",
			driver_desc->name, gasket_dev->dev_info.name,
			PTR_ERR(gasket_dev->dev_info.device));
		ret = -ENODEV;
		goto free_gasket_dev;
	}

	ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
					  gasket_dev);
	if (ret)
		goto remove_device;

	ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
					  gasket_sysfs_generic_attrs);
	if (ret)
		goto remove_sysfs_mapping;

	*gasket_devp = gasket_dev;
	return 0;

	/* Error unwind: undo the steps above in reverse order. */
remove_sysfs_mapping:
	gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
remove_device:
	device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
free_gasket_dev:
	gasket_free_dev(gasket_dev);
	return ret;
}
1451
/*
 * Tear down a device created by __gasket_add_device(): remove its sysfs
 * mapping, destroy the device node, and free the gasket_dev. Mirrors the
 * error-unwind path of __gasket_add_device().
 */
static void __gasket_remove_device(struct gasket_internal_desc *internal_desc,
				   struct gasket_dev *gasket_dev)
{
	gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
	device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
	gasket_free_dev(gasket_dev);
}
1459
/*
 * Add PCI gasket device.
 *
 * Called by Gasket device probe function.
 * Allocates device metadata and maps device memory. The device driver must
 * call gasket_enable_device after driver init is complete to place the device
 * in active use.
 *
 * Returns 0 on success or a negative error code.
 */
int gasket_pci_add_device(struct pci_dev *pci_dev,
			  struct gasket_dev **gasket_devp)
{
	int ret;
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev;
	struct device *parent;

	dev_dbg(&pci_dev->dev, "add PCI gasket device\n");

	/* Find the registered driver type that owns this PCI device. */
	mutex_lock(&g_mutex);
	internal_desc = lookup_pci_internal_desc(pci_dev);
	mutex_unlock(&g_mutex);
	if (!internal_desc) {
		dev_err(&pci_dev->dev,
			"PCI add device called for unknown driver type\n");
		return -ENODEV;
	}

	parent = &pci_dev->dev;
	ret = __gasket_add_device(parent, internal_desc, &gasket_dev);
	if (ret)
		return ret;

	gasket_dev->pci_dev = pci_dev;
	ret = gasket_setup_pci(pci_dev, gasket_dev);
	if (ret)
		goto cleanup_pci;

	/*
	 * Once we've created the mapping structures successfully, attempt to
	 * create a symlink to the pci directory of this object.
	 */
	ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
				&pci_dev->dev.kobj, dev_name(&pci_dev->dev));
	if (ret) {
		dev_err(gasket_dev->dev,
			"Cannot create sysfs pci link: %d\n", ret);
		goto cleanup_pci;
	}

	*gasket_devp = gasket_dev;
	return 0;

cleanup_pci:
	/* Unwind both the PCI setup and the device registration. */
	gasket_cleanup_pci(gasket_dev);
	__gasket_remove_device(internal_desc, gasket_dev);
	return ret;
}
EXPORT_SYMBOL(gasket_pci_add_device);
1518
/*
 * Remove a PCI gasket device.
 *
 * Looks up the internal descriptor for the device's driver type, finds the
 * matching gasket_dev instance, and tears it down. Silently returns if the
 * driver type or device is unknown.
 */
void gasket_pci_remove_device(struct pci_dev *pci_dev)
{
	int i;
	struct gasket_internal_desc *internal_desc;
	struct gasket_dev *gasket_dev = NULL;

	/* Find the device desc. */
	mutex_lock(&g_mutex);
	internal_desc = lookup_pci_internal_desc(pci_dev);
	if (!internal_desc) {
		mutex_unlock(&g_mutex);
		return;
	}
	mutex_unlock(&g_mutex);

	/* Now find the specific device */
	mutex_lock(&internal_desc->mutex);
	for (i = 0; i < GASKET_DEV_MAX; i++) {
		if (internal_desc->devs[i] &&
		    internal_desc->devs[i]->pci_dev == pci_dev) {
			gasket_dev = internal_desc->devs[i];
			break;
		}
	}
	mutex_unlock(&internal_desc->mutex);

	if (!gasket_dev)
		return;

	dev_dbg(gasket_dev->dev, "remove %s PCI gasket device\n",
		internal_desc->driver_desc->name);

	/*
	 * NOTE(review): gasket_dev is used after internal_desc->mutex is
	 * dropped; this presumably relies on the PCI core serializing
	 * probe/remove for the device — confirm.
	 */
	gasket_cleanup_pci(gasket_dev);
	__gasket_remove_device(internal_desc, gasket_dev);
}
EXPORT_SYMBOL(gasket_pci_remove_device);
1555
1556 /**
1557 * Lookup a name by number in a num_name table.
1558 * @num: Number to lookup.
1559 * @table: Array of num_name structures, the table for the lookup.
1560 *
1561 * Description: Searches for num in the table. If found, the
1562 * corresponding name is returned; otherwise NULL
1563 * is returned.
1564 *
1565 * The table must have a NULL name pointer at the end.
1566 */
1567 const char *gasket_num_name_lookup(uint num,
1568 const struct gasket_num_name *table)
1569 {
1570 uint i = 0;
1571
1572 while (table[i].snn_name) {
1573 if (num == table[i].snn_num)
1574 break;
1575 ++i;
1576 }
1577
1578 return table[i].snn_name;
1579 }
1580 EXPORT_SYMBOL(gasket_num_name_lookup);
1581
/*
 * Reset the device while holding its mutex.
 *
 * Thin locking wrapper around gasket_reset_nolock(); see that function for
 * the actual reset sequence. Returns its result.
 */
int gasket_reset(struct gasket_dev *gasket_dev)
{
	int ret;

	mutex_lock(&gasket_dev->mutex);
	ret = gasket_reset_nolock(gasket_dev);
	mutex_unlock(&gasket_dev->mutex);
	return ret;
}
EXPORT_SYMBOL(gasket_reset);
1592
/*
 * Reset the device; caller must hold gasket_dev->mutex.
 *
 * Invokes the driver's optional device_reset_cb, then re-initializes the
 * page tables and interrupt framework and re-reads the device health.
 * Returns 0 on success (including when no reset callback is registered),
 * or a negative error code.
 */
int gasket_reset_nolock(struct gasket_dev *gasket_dev)
{
	int ret;
	int i;
	const struct gasket_driver_desc *driver_desc;

	driver_desc = gasket_dev->internal_desc->driver_desc;
	/* The reset callback is optional; nothing to do without one. */
	if (!driver_desc->device_reset_cb)
		return 0;

	ret = driver_desc->device_reset_cb(gasket_dev);
	if (ret) {
		dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
			ret);
		return ret;
	}

	/* Reinitialize the page tables and interrupt framework. */
	for (i = 0; i < driver_desc->num_page_tables; ++i)
		gasket_page_table_reset(gasket_dev->page_table[i]);

	ret = gasket_interrupt_reinit(gasket_dev);
	if (ret) {
		dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
			ret);
		return ret;
	}

	/* Get current device health. */
	gasket_dev->status = gasket_get_hw_status(gasket_dev);
	if (gasket_dev->status == GASKET_STATUS_DEAD) {
		dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(gasket_reset_nolock);
1631
/* Return the driver's ioctl permissions callback for this device. */
gasket_ioctl_permissions_cb_t
gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
{
	return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
}
EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
1638
/*
 * Get the driver structure for a given gasket_dev.
 * @dev: pointer to gasket_dev, implementing the requested driver.
 *
 * Simple accessor; never returns NULL for a properly constructed device.
 */
const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
{
	return dev->internal_desc->driver_desc;
}
1646
/*
 * Get the device structure for a given gasket_dev.
 * @dev: pointer to gasket_dev, implementing the requested driver.
 *
 * Simple accessor for the embedded struct device pointer.
 */
struct device *gasket_get_device(struct gasket_dev *dev)
{
	return dev->dev;
}
1654
/**
 * Poll a device register until an expected value appears.
 * @gasket_dev: Device struct.
 * @bar: BAR containing the register.
 * @offset: Register offset within the BAR.
 * @mask: Bits of the register value to compare.
 * @val: Expected value of the masked bits.
 * @max_retries: Maximum number of read/sleep iterations.
 * @delay_ms: Sleep between reads, in milliseconds (per retry, not total).
 *
 * Description: Repeatedly reads the register and sleeps via msleep() — so
 * this may reschedule rather than busy-wait — until (value & mask) == val
 * or @max_retries is exhausted. Total worst-case wait is roughly
 * max_retries * delay_ms.
 *
 * Returns 0 on match, -ETIMEDOUT if the value never appeared.
 **/
int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
				u64 offset, u64 mask, u64 val,
				uint max_retries, u64 delay_ms)
{
	uint retries = 0;
	u64 tmp;

	while (retries < max_retries) {
		tmp = gasket_dev_read_64(gasket_dev, bar, offset);
		if ((tmp & mask) == val)
			return 0;
		msleep(delay_ms);
		retries++;
	}
	dev_dbg(gasket_dev->dev, "%s timeout: reg %llx timeout (%llu ms)\n",
		__func__, offset, max_retries * delay_ms);
	return -ETIMEDOUT;
}
EXPORT_SYMBOL(gasket_wait_with_reschedule);
1687
/*
 * See gasket_core.h for description.
 *
 * Claims a free slot in g_descs for @driver_desc, creates the device class,
 * and reserves the driver's char device region. Returns 0 on success,
 * -EBUSY if already registered or no slot is free, or a negative error from
 * class/chrdev setup.
 */
int gasket_register_device(const struct gasket_driver_desc *driver_desc)
{
	int i, ret;
	int desc_idx = -1;
	struct gasket_internal_desc *internal;

	pr_debug("Loading %s driver version %s\n", driver_desc->name,
		 driver_desc->driver_version);
	/* Check for duplicates and find a free slot. */
	mutex_lock(&g_mutex);

	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			pr_err("%s driver already loaded/registered\n",
			       driver_desc->name);
			mutex_unlock(&g_mutex);
			return -EBUSY;
		}
	}

	/* This and the above loop could be combined, but this reads easier. */
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (!g_descs[i].driver_desc) {
			g_descs[i].driver_desc = driver_desc;
			desc_idx = i;
			break;
		}
	}
	mutex_unlock(&g_mutex);

	if (desc_idx == -1) {
		pr_err("too many drivers loaded, max %d\n",
		       GASKET_FRAMEWORK_DESC_MAX);
		return -EBUSY;
	}

	/* Slot is claimed above, so it can be initialized without g_mutex. */
	internal = &g_descs[desc_idx];
	mutex_init(&internal->mutex);
	memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
	internal->class =
		class_create(driver_desc->module, driver_desc->name);

	if (IS_ERR(internal->class)) {
		pr_err("Cannot register %s class [ret=%ld]\n",
		       driver_desc->name, PTR_ERR(internal->class));
		ret = PTR_ERR(internal->class);
		goto unregister_gasket_driver;
	}

	ret = register_chrdev_region(MKDEV(driver_desc->major,
					   driver_desc->minor), GASKET_DEV_MAX,
				     driver_desc->name);
	if (ret) {
		pr_err("cannot register %s char driver [ret=%d]\n",
		       driver_desc->name, ret);
		goto destroy_class;
	}

	return 0;

	/* Error unwind: release the class and the claimed descriptor slot. */
destroy_class:
	class_destroy(internal->class);

unregister_gasket_driver:
	mutex_lock(&g_mutex);
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);
	return ret;
}
EXPORT_SYMBOL(gasket_register_device);
1759
/*
 * See gasket_core.h for description.
 *
 * Reverses gasket_register_device(): releases the char device region,
 * destroys the class, and frees the descriptor slot. Logs and returns if
 * the descriptor was never registered.
 */
void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
{
	int i, desc_idx;
	struct gasket_internal_desc *internal_desc = NULL;

	mutex_lock(&g_mutex);
	/* Locate the slot this driver registered. */
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		if (g_descs[i].driver_desc == driver_desc) {
			internal_desc = &g_descs[i];
			desc_idx = i;
			break;
		}
	}

	if (!internal_desc) {
		mutex_unlock(&g_mutex);
		pr_err("request to unregister unknown desc: %s, %d:%d\n",
		       driver_desc->name, driver_desc->major,
		       driver_desc->minor);
		return;
	}

	unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
				 GASKET_DEV_MAX);

	class_destroy(internal_desc->class);

	/* Finally, effectively "remove" the driver. */
	g_descs[desc_idx].driver_desc = NULL;
	mutex_unlock(&g_mutex);

	pr_debug("removed %s driver\n", driver_desc->name);
}
EXPORT_SYMBOL(gasket_unregister_device);
1795
/*
 * Module init: mark every framework descriptor slot as free and initialize
 * the gasket sysfs subsystem. Always returns 0.
 */
static int __init gasket_init(void)
{
	int i;

	mutex_lock(&g_mutex);
	for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
		g_descs[i].driver_desc = NULL;
		mutex_init(&g_descs[i].mutex);
	}

	gasket_sysfs_init();

	mutex_unlock(&g_mutex);
	return 0;
}
1811
/*
 * Module metadata. NOTE(review): no module_exit() is registered, so there is
 * no module-scope teardown on unload; per-driver cleanup appears to go
 * through gasket_unregister_device() — confirm this is intentional.
 */
MODULE_DESCRIPTION("Google Gasket driver framework");
MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
module_init(gasket_init);