// SPDX-License-Identifier: GPL-2.0
/*
 * Gasket generic driver framework. This file contains the implementation
 * for the Gasket generic driver framework - the functionality that is common
 * across Gasket devices.
 *
 * Copyright (C) 2018 Google, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "gasket_core.h"

#include "gasket_interrupt.h"
#include "gasket_ioctl.h"
#include "gasket_page_table.h"
#include "gasket_sysfs.h"

#include <linux/capability.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/pid_namespace.h>
#include <linux/printk.h>
#include <linux/sched.h>

#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_mmap.h>
#else
#define trace_gasket_mmap_exit(x)
#define trace_gasket_mmap_entry(x, ...)
#endif
39 * "Private" members of gasket_driver_desc.
41 * Contains internal per-device type tracking data, i.e., data not appropriate
42 * as part of the public interface for the generic framework.
44 struct gasket_internal_desc
{
45 /* Device-specific-driver-provided configuration information. */
46 const struct gasket_driver_desc
*driver_desc
;
48 /* Protects access to per-driver data (i.e. this structure). */
51 /* Kernel-internal device class. */
54 /* Instantiated / present devices of this type. */
55 struct gasket_dev
*devs
[GASKET_DEV_MAX
];
/* do_map_region() needs be able to return more than just true/false. */
enum do_map_region_status {
	/* The region was successfully mapped. */
	DO_MAP_REGION_SUCCESS,

	/* Attempted to map region and failed. */
	DO_MAP_REGION_FAILURE,

	/* The requested region to map was not part of a mappable region. */
	DO_MAP_REGION_INVALID,
};
70 /* Global data definitions. */
71 /* Mutex - only for framework-wide data. Other data should be protected by
72 * finer-grained locks.
74 static DEFINE_MUTEX(g_mutex
);
76 /* List of all registered device descriptions & their supporting data. */
77 static struct gasket_internal_desc g_descs
[GASKET_FRAMEWORK_DESC_MAX
];
79 /* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
80 static const struct gasket_num_name gasket_status_name_table
[] = {
81 { GASKET_STATUS_DEAD
, "DEAD" },
82 { GASKET_STATUS_ALIVE
, "ALIVE" },
83 { GASKET_STATUS_LAMED
, "LAMED" },
84 { GASKET_STATUS_DRIVER_EXIT
, "DRIVER_EXITING" },
/* Enumeration of the automatic Gasket framework sysfs nodes. */
enum gasket_sysfs_attribute_type {
	ATTR_BAR_OFFSETS,
	ATTR_BAR_SIZES,
	ATTR_DRIVER_VERSION,
	ATTR_FRAMEWORK_VERSION,
	ATTR_DEVICE_TYPE,
	ATTR_HARDWARE_REVISION,
	ATTR_PCI_ADDRESS,
	ATTR_STATUS,
	ATTR_IS_DEVICE_OWNED,
	ATTR_DEVICE_OWNER,
	ATTR_WRITE_OPEN_COUNT,
	ATTR_RESET_COUNT,
	ATTR_USER_MEM_RANGES
};
105 /* Perform a standard Gasket callback. */
107 check_and_invoke_callback(struct gasket_dev
*gasket_dev
,
108 int (*cb_function
)(struct gasket_dev
*))
113 mutex_lock(&gasket_dev
->mutex
);
114 ret
= cb_function(gasket_dev
);
115 mutex_unlock(&gasket_dev
->mutex
);
/* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
static inline int
gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
					int (*cb_function)(struct gasket_dev *))
{
	int ret = 0;

	/* A missing callback is not an error; simply report 0. */
	if (cb_function)
		ret = cb_function(gasket_dev);
	return ret;
}
133 * Return nonzero if the gasket_cdev_info is owned by the current thread group
136 static int gasket_owned_by_current_tgid(struct gasket_cdev_info
*info
)
138 return (info
->ownership
.is_owned
&&
139 (info
->ownership
.owner
== current
->tgid
));
143 * Find the next free gasket_internal_dev slot.
145 * Returns the located slot number on success or a negative number on failure.
147 static int gasket_find_dev_slot(struct gasket_internal_desc
*internal_desc
,
148 const char *kobj_name
)
152 mutex_lock(&internal_desc
->mutex
);
154 /* Search for a previous instance of this device. */
155 for (i
= 0; i
< GASKET_DEV_MAX
; i
++) {
156 if (internal_desc
->devs
[i
] &&
157 strcmp(internal_desc
->devs
[i
]->kobj_name
, kobj_name
) == 0) {
158 pr_err("Duplicate device %s\n", kobj_name
);
159 mutex_unlock(&internal_desc
->mutex
);
164 /* Find a free device slot. */
165 for (i
= 0; i
< GASKET_DEV_MAX
; i
++) {
166 if (!internal_desc
->devs
[i
])
170 if (i
== GASKET_DEV_MAX
) {
171 pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX
);
172 mutex_unlock(&internal_desc
->mutex
);
176 mutex_unlock(&internal_desc
->mutex
);
181 * Allocate and initialize a Gasket device structure, add the device to the
184 * Returns 0 if successful, a negative error code otherwise.
186 static int gasket_alloc_dev(struct gasket_internal_desc
*internal_desc
,
187 struct device
*parent
, struct gasket_dev
**pdev
)
190 const struct gasket_driver_desc
*driver_desc
=
191 internal_desc
->driver_desc
;
192 struct gasket_dev
*gasket_dev
;
193 struct gasket_cdev_info
*dev_info
;
194 const char *parent_name
= dev_name(parent
);
196 pr_debug("Allocating a Gasket device, parent %s.\n", parent_name
);
200 dev_idx
= gasket_find_dev_slot(internal_desc
, parent_name
);
204 gasket_dev
= *pdev
= kzalloc(sizeof(*gasket_dev
), GFP_KERNEL
);
206 pr_err("no memory for device, parent %s\n", parent_name
);
209 internal_desc
->devs
[dev_idx
] = gasket_dev
;
211 mutex_init(&gasket_dev
->mutex
);
213 gasket_dev
->internal_desc
= internal_desc
;
214 gasket_dev
->dev_idx
= dev_idx
;
215 snprintf(gasket_dev
->kobj_name
, GASKET_NAME_MAX
, "%s", parent_name
);
216 gasket_dev
->dev
= get_device(parent
);
217 /* gasket_bar_data is uninitialized. */
218 gasket_dev
->num_page_tables
= driver_desc
->num_page_tables
;
219 /* max_page_table_size and *page table are uninit'ed */
220 /* interrupt_data is not initialized. */
221 /* status is 0, or GASKET_STATUS_DEAD */
223 dev_info
= &gasket_dev
->dev_info
;
224 snprintf(dev_info
->name
, GASKET_NAME_MAX
, "%s_%u", driver_desc
->name
,
225 gasket_dev
->dev_idx
);
227 MKDEV(driver_desc
->major
, driver_desc
->minor
+
228 gasket_dev
->dev_idx
);
230 device_create(internal_desc
->class, parent
, dev_info
->devt
,
231 gasket_dev
, dev_info
->name
);
233 /* cdev has not yet been added; cdev_added is 0 */
234 dev_info
->gasket_dev_ptr
= gasket_dev
;
235 /* ownership is all 0, indicating no owner or opens. */
240 /* Free a Gasket device. */
241 static void gasket_free_dev(struct gasket_dev
*gasket_dev
)
243 struct gasket_internal_desc
*internal_desc
= gasket_dev
->internal_desc
;
245 mutex_lock(&internal_desc
->mutex
);
246 internal_desc
->devs
[gasket_dev
->dev_idx
] = NULL
;
247 mutex_unlock(&internal_desc
->mutex
);
248 put_device(gasket_dev
->dev
);
253 * Maps the specified bar into kernel space.
255 * Returns 0 on success, a negative error code otherwise.
256 * A zero-sized BAR will not be mapped, but is not an error.
258 static int gasket_map_pci_bar(struct gasket_dev
*gasket_dev
, int bar_num
)
260 struct gasket_internal_desc
*internal_desc
= gasket_dev
->internal_desc
;
261 const struct gasket_driver_desc
*driver_desc
=
262 internal_desc
->driver_desc
;
263 ulong desc_bytes
= driver_desc
->bar_descriptions
[bar_num
].size
;
269 if (driver_desc
->bar_descriptions
[bar_num
].type
!= PCI_BAR
) {
270 /* not PCI: skip this entry */
274 * pci_resource_start and pci_resource_len return a "resource_size_t",
275 * which is safely castable to ulong (which itself is the arg to
276 * request_mem_region).
278 gasket_dev
->bar_data
[bar_num
].phys_base
=
279 (ulong
)pci_resource_start(gasket_dev
->pci_dev
, bar_num
);
280 if (!gasket_dev
->bar_data
[bar_num
].phys_base
) {
281 dev_err(gasket_dev
->dev
, "Cannot get BAR%u base address\n",
286 gasket_dev
->bar_data
[bar_num
].length_bytes
=
287 (ulong
)pci_resource_len(gasket_dev
->pci_dev
, bar_num
);
288 if (gasket_dev
->bar_data
[bar_num
].length_bytes
< desc_bytes
) {
289 dev_err(gasket_dev
->dev
,
290 "PCI BAR %u space is too small: %lu; expected >= %lu\n",
291 bar_num
, gasket_dev
->bar_data
[bar_num
].length_bytes
,
296 if (!request_mem_region(gasket_dev
->bar_data
[bar_num
].phys_base
,
297 gasket_dev
->bar_data
[bar_num
].length_bytes
,
298 gasket_dev
->dev_info
.name
)) {
299 dev_err(gasket_dev
->dev
,
300 "Cannot get BAR %d memory region %p\n",
301 bar_num
, &gasket_dev
->pci_dev
->resource
[bar_num
]);
305 gasket_dev
->bar_data
[bar_num
].virt_base
=
306 ioremap(gasket_dev
->bar_data
[bar_num
].phys_base
,
307 gasket_dev
->bar_data
[bar_num
].length_bytes
);
308 if (!gasket_dev
->bar_data
[bar_num
].virt_base
) {
309 dev_err(gasket_dev
->dev
,
310 "Cannot remap BAR %d memory region %p\n",
311 bar_num
, &gasket_dev
->pci_dev
->resource
[bar_num
]);
316 dma_set_mask(&gasket_dev
->pci_dev
->dev
, DMA_BIT_MASK(64));
317 dma_set_coherent_mask(&gasket_dev
->pci_dev
->dev
, DMA_BIT_MASK(64));
322 iounmap(gasket_dev
->bar_data
[bar_num
].virt_base
);
323 release_mem_region(gasket_dev
->bar_data
[bar_num
].phys_base
,
324 gasket_dev
->bar_data
[bar_num
].length_bytes
);
329 * Releases PCI BAR mapping.
331 * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
333 static void gasket_unmap_pci_bar(struct gasket_dev
*dev
, int bar_num
)
336 struct gasket_internal_desc
*internal_desc
= dev
->internal_desc
;
337 const struct gasket_driver_desc
*driver_desc
=
338 internal_desc
->driver_desc
;
340 if (driver_desc
->bar_descriptions
[bar_num
].size
== 0 ||
341 !dev
->bar_data
[bar_num
].virt_base
)
344 if (driver_desc
->bar_descriptions
[bar_num
].type
!= PCI_BAR
)
347 iounmap(dev
->bar_data
[bar_num
].virt_base
);
348 dev
->bar_data
[bar_num
].virt_base
= NULL
;
350 base
= pci_resource_start(dev
->pci_dev
, bar_num
);
352 dev_err(dev
->dev
, "cannot get PCI BAR%u base address\n",
357 bytes
= pci_resource_len(dev
->pci_dev
, bar_num
);
358 release_mem_region(base
, bytes
);
362 * Setup PCI memory mapping for the specified device.
364 * Reads the BAR registers and sets up pointers to the device's memory mapped
367 * Returns 0 on success and a negative value otherwise.
369 static int gasket_setup_pci(struct pci_dev
*pci_dev
,
370 struct gasket_dev
*gasket_dev
)
372 int i
, mapped_bars
, ret
;
374 for (i
= 0; i
< PCI_STD_NUM_BARS
; i
++) {
375 ret
= gasket_map_pci_bar(gasket_dev
, i
);
385 for (i
= 0; i
< mapped_bars
; i
++)
386 gasket_unmap_pci_bar(gasket_dev
, i
);
391 /* Unmaps memory for the specified device. */
392 static void gasket_cleanup_pci(struct gasket_dev
*gasket_dev
)
396 for (i
= 0; i
< PCI_STD_NUM_BARS
; i
++)
397 gasket_unmap_pci_bar(gasket_dev
, i
);
400 /* Determine the health of the Gasket device. */
401 static int gasket_get_hw_status(struct gasket_dev
*gasket_dev
)
405 const struct gasket_driver_desc
*driver_desc
=
406 gasket_dev
->internal_desc
->driver_desc
;
408 status
= gasket_check_and_invoke_callback_nolock(gasket_dev
,
409 driver_desc
->device_status_cb
);
410 if (status
!= GASKET_STATUS_ALIVE
) {
411 dev_dbg(gasket_dev
->dev
, "Hardware reported status %d.\n",
416 status
= gasket_interrupt_system_status(gasket_dev
);
417 if (status
!= GASKET_STATUS_ALIVE
) {
418 dev_dbg(gasket_dev
->dev
,
419 "Interrupt system reported status %d.\n", status
);
423 for (i
= 0; i
< driver_desc
->num_page_tables
; ++i
) {
424 status
= gasket_page_table_system_status(gasket_dev
->page_table
[i
]);
425 if (status
!= GASKET_STATUS_ALIVE
) {
426 dev_dbg(gasket_dev
->dev
,
427 "Page table %d reported status %d.\n",
433 return GASKET_STATUS_ALIVE
;
437 gasket_write_mappable_regions(char *buf
,
438 const struct gasket_driver_desc
*driver_desc
,
443 ssize_t total_written
= 0;
444 ulong min_addr
, max_addr
;
445 struct gasket_bar_desc bar_desc
=
446 driver_desc
->bar_descriptions
[bar_index
];
448 if (bar_desc
.permissions
== GASKET_NOMAP
)
451 i
< bar_desc
.num_mappable_regions
&& total_written
< PAGE_SIZE
;
453 min_addr
= bar_desc
.mappable_regions
[i
].start
-
454 driver_desc
->legacy_mmap_address_offset
;
455 max_addr
= bar_desc
.mappable_regions
[i
].start
-
456 driver_desc
->legacy_mmap_address_offset
+
457 bar_desc
.mappable_regions
[i
].length_bytes
;
458 written
= scnprintf(buf
, PAGE_SIZE
- total_written
,
459 "0x%08lx-0x%08lx\n", min_addr
, max_addr
);
460 total_written
+= written
;
463 return total_written
;
466 static ssize_t
gasket_sysfs_data_show(struct device
*device
,
467 struct device_attribute
*attr
, char *buf
)
470 ssize_t current_written
= 0;
471 const struct gasket_driver_desc
*driver_desc
;
472 struct gasket_dev
*gasket_dev
;
473 struct gasket_sysfs_attribute
*gasket_attr
;
474 const struct gasket_bar_desc
*bar_desc
;
475 enum gasket_sysfs_attribute_type sysfs_type
;
477 gasket_dev
= gasket_sysfs_get_device_data(device
);
479 dev_err(device
, "No sysfs mapping found for device\n");
483 gasket_attr
= gasket_sysfs_get_attr(device
, attr
);
485 dev_err(device
, "No sysfs attr found for device\n");
486 gasket_sysfs_put_device_data(device
, gasket_dev
);
490 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
493 (enum gasket_sysfs_attribute_type
)gasket_attr
->data
.attr_type
;
494 switch (sysfs_type
) {
495 case ATTR_BAR_OFFSETS
:
496 for (i
= 0; i
< PCI_STD_NUM_BARS
; i
++) {
497 bar_desc
= &driver_desc
->bar_descriptions
[i
];
498 if (bar_desc
->size
== 0)
501 snprintf(buf
, PAGE_SIZE
- ret
, "%d: 0x%lx\n", i
,
502 (ulong
)bar_desc
->base
);
503 buf
+= current_written
;
504 ret
+= current_written
;
508 for (i
= 0; i
< PCI_STD_NUM_BARS
; i
++) {
509 bar_desc
= &driver_desc
->bar_descriptions
[i
];
510 if (bar_desc
->size
== 0)
513 snprintf(buf
, PAGE_SIZE
- ret
, "%d: 0x%lx\n", i
,
514 (ulong
)bar_desc
->size
);
515 buf
+= current_written
;
516 ret
+= current_written
;
519 case ATTR_DRIVER_VERSION
:
520 ret
= snprintf(buf
, PAGE_SIZE
, "%s\n",
521 gasket_dev
->internal_desc
->driver_desc
->driver_version
);
523 case ATTR_FRAMEWORK_VERSION
:
524 ret
= snprintf(buf
, PAGE_SIZE
, "%s\n",
525 GASKET_FRAMEWORK_VERSION
);
527 case ATTR_DEVICE_TYPE
:
528 ret
= snprintf(buf
, PAGE_SIZE
, "%s\n",
529 gasket_dev
->internal_desc
->driver_desc
->name
);
531 case ATTR_HARDWARE_REVISION
:
532 ret
= snprintf(buf
, PAGE_SIZE
, "%d\n",
533 gasket_dev
->hardware_revision
);
535 case ATTR_PCI_ADDRESS
:
536 ret
= snprintf(buf
, PAGE_SIZE
, "%s\n", gasket_dev
->kobj_name
);
539 ret
= snprintf(buf
, PAGE_SIZE
, "%s\n",
540 gasket_num_name_lookup(gasket_dev
->status
,
541 gasket_status_name_table
));
543 case ATTR_IS_DEVICE_OWNED
:
544 ret
= snprintf(buf
, PAGE_SIZE
, "%d\n",
545 gasket_dev
->dev_info
.ownership
.is_owned
);
547 case ATTR_DEVICE_OWNER
:
548 ret
= snprintf(buf
, PAGE_SIZE
, "%d\n",
549 gasket_dev
->dev_info
.ownership
.owner
);
551 case ATTR_WRITE_OPEN_COUNT
:
552 ret
= snprintf(buf
, PAGE_SIZE
, "%d\n",
553 gasket_dev
->dev_info
.ownership
.write_open_count
);
555 case ATTR_RESET_COUNT
:
556 ret
= snprintf(buf
, PAGE_SIZE
, "%d\n", gasket_dev
->reset_count
);
558 case ATTR_USER_MEM_RANGES
:
559 for (i
= 0; i
< PCI_STD_NUM_BARS
; ++i
) {
561 gasket_write_mappable_regions(buf
, driver_desc
,
563 buf
+= current_written
;
564 ret
+= current_written
;
568 dev_dbg(gasket_dev
->dev
, "Unknown attribute: %s\n",
574 gasket_sysfs_put_attr(device
, gasket_attr
);
575 gasket_sysfs_put_device_data(device
, gasket_dev
);
579 /* These attributes apply to all Gasket driver instances. */
580 static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs
[] = {
581 GASKET_SYSFS_RO(bar_offsets
, gasket_sysfs_data_show
, ATTR_BAR_OFFSETS
),
582 GASKET_SYSFS_RO(bar_sizes
, gasket_sysfs_data_show
, ATTR_BAR_SIZES
),
583 GASKET_SYSFS_RO(driver_version
, gasket_sysfs_data_show
,
584 ATTR_DRIVER_VERSION
),
585 GASKET_SYSFS_RO(framework_version
, gasket_sysfs_data_show
,
586 ATTR_FRAMEWORK_VERSION
),
587 GASKET_SYSFS_RO(device_type
, gasket_sysfs_data_show
, ATTR_DEVICE_TYPE
),
588 GASKET_SYSFS_RO(revision
, gasket_sysfs_data_show
,
589 ATTR_HARDWARE_REVISION
),
590 GASKET_SYSFS_RO(pci_address
, gasket_sysfs_data_show
, ATTR_PCI_ADDRESS
),
591 GASKET_SYSFS_RO(status
, gasket_sysfs_data_show
, ATTR_STATUS
),
592 GASKET_SYSFS_RO(is_device_owned
, gasket_sysfs_data_show
,
593 ATTR_IS_DEVICE_OWNED
),
594 GASKET_SYSFS_RO(device_owner
, gasket_sysfs_data_show
,
596 GASKET_SYSFS_RO(write_open_count
, gasket_sysfs_data_show
,
597 ATTR_WRITE_OPEN_COUNT
),
598 GASKET_SYSFS_RO(reset_count
, gasket_sysfs_data_show
, ATTR_RESET_COUNT
),
599 GASKET_SYSFS_RO(user_mem_ranges
, gasket_sysfs_data_show
,
600 ATTR_USER_MEM_RANGES
),
601 GASKET_END_OF_ATTR_ARRAY
604 /* Add a char device and related info. */
605 static int gasket_add_cdev(struct gasket_cdev_info
*dev_info
,
606 const struct file_operations
*file_ops
,
607 struct module
*owner
)
611 cdev_init(&dev_info
->cdev
, file_ops
);
612 dev_info
->cdev
.owner
= owner
;
613 ret
= cdev_add(&dev_info
->cdev
, dev_info
->devt
, 1);
615 dev_err(dev_info
->gasket_dev_ptr
->dev
,
616 "cannot add char device [ret=%d]\n", ret
);
619 dev_info
->cdev_added
= 1;
624 /* Disable device operations. */
625 void gasket_disable_device(struct gasket_dev
*gasket_dev
)
627 const struct gasket_driver_desc
*driver_desc
=
628 gasket_dev
->internal_desc
->driver_desc
;
631 /* Only delete the device if it has been successfully added. */
632 if (gasket_dev
->dev_info
.cdev_added
)
633 cdev_del(&gasket_dev
->dev_info
.cdev
);
635 gasket_dev
->status
= GASKET_STATUS_DEAD
;
637 gasket_interrupt_cleanup(gasket_dev
);
639 for (i
= 0; i
< driver_desc
->num_page_tables
; ++i
) {
640 if (gasket_dev
->page_table
[i
]) {
641 gasket_page_table_reset(gasket_dev
->page_table
[i
]);
642 gasket_page_table_cleanup(gasket_dev
->page_table
[i
]);
646 EXPORT_SYMBOL(gasket_disable_device
);
649 * Registered driver descriptor lookup for PCI devices.
651 * Precondition: Called with g_mutex held (to avoid a race on return).
652 * Returns NULL if no matching device was found.
654 static struct gasket_internal_desc
*
655 lookup_pci_internal_desc(struct pci_dev
*pci_dev
)
659 __must_hold(&g_mutex
);
660 for (i
= 0; i
< GASKET_FRAMEWORK_DESC_MAX
; i
++) {
661 if (g_descs
[i
].driver_desc
&&
662 g_descs
[i
].driver_desc
->pci_id_table
&&
663 pci_match_id(g_descs
[i
].driver_desc
->pci_id_table
, pci_dev
))
671 * Verifies that the user has permissions to perform the requested mapping and
672 * that the provided descriptor/range is of adequate size to hold the range to
675 static bool gasket_mmap_has_permissions(struct gasket_dev
*gasket_dev
,
676 struct vm_area_struct
*vma
,
679 int requested_permissions
;
680 /* Always allow sysadmin to access. */
681 if (capable(CAP_SYS_ADMIN
))
684 /* Never allow non-sysadmins to access to a dead device. */
685 if (gasket_dev
->status
!= GASKET_STATUS_ALIVE
) {
686 dev_dbg(gasket_dev
->dev
, "Device is dead.\n");
690 /* Make sure that no wrong flags are set. */
691 requested_permissions
=
692 (vma
->vm_flags
& VM_ACCESS_FLAGS
);
693 if (requested_permissions
& ~(bar_permissions
)) {
694 dev_dbg(gasket_dev
->dev
,
695 "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
696 requested_permissions
, bar_permissions
);
700 /* Do not allow a non-owner to write. */
701 if ((vma
->vm_flags
& VM_WRITE
) &&
702 !gasket_owned_by_current_tgid(&gasket_dev
->dev_info
)) {
703 dev_dbg(gasket_dev
->dev
,
704 "Attempting to mmap a region for write without owning device.\n");
712 * Verifies that the input address is within the region allocated to coherent
716 gasket_is_coherent_region(const struct gasket_driver_desc
*driver_desc
,
719 struct gasket_coherent_buffer_desc coh_buff_desc
=
720 driver_desc
->coherent_buffer_description
;
722 if (coh_buff_desc
.permissions
!= GASKET_NOMAP
) {
723 if ((address
>= coh_buff_desc
.base
) &&
724 (address
< coh_buff_desc
.base
+ coh_buff_desc
.size
)) {
731 static int gasket_get_bar_index(const struct gasket_dev
*gasket_dev
,
735 const struct gasket_driver_desc
*driver_desc
;
737 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
738 for (i
= 0; i
< PCI_STD_NUM_BARS
; ++i
) {
739 struct gasket_bar_desc bar_desc
=
740 driver_desc
->bar_descriptions
[i
];
742 if (bar_desc
.permissions
!= GASKET_NOMAP
) {
743 if (phys_addr
>= bar_desc
.base
&&
744 phys_addr
< (bar_desc
.base
+ bar_desc
.size
)) {
749 /* If we haven't found the address by now, it is invalid. */
754 * Sets the actual bounds to map, given the device's mappable region.
756 * Given the device's mappable region, along with the user-requested mapping
757 * start offset and length of the user region, determine how much of this
758 * mappable region can be mapped into the user's region (start/end offsets),
759 * and the physical offset (phys_offset) into the BAR where the mapping should
760 * begin (either the VMA's or region lower bound).
762 * In other words, this calculates the overlap between the VMA
763 * (bar_offset, requested_length) and the given gasket_mappable_region.
765 * Returns true if there's anything to map, and false otherwise.
768 gasket_mm_get_mapping_addrs(const struct gasket_mappable_region
*region
,
769 ulong bar_offset
, ulong requested_length
,
770 struct gasket_mappable_region
*mappable_region
,
773 ulong range_start
= region
->start
;
774 ulong range_length
= region
->length_bytes
;
775 ulong range_end
= range_start
+ range_length
;
778 if (bar_offset
+ requested_length
< range_start
) {
780 * If the requested region is completely below the range,
781 * there is nothing to map.
784 } else if (bar_offset
<= range_start
) {
785 /* If the bar offset is below this range's start
786 * but the requested length continues into it:
787 * 1) Only map starting from the beginning of this
788 * range's phys. offset, so we don't map unmappable
790 * 2) The length of the virtual memory to not map is the
791 * delta between the bar offset and the
792 * mappable start (and since the mappable start is
793 * bigger, start - req.)
794 * 3) The map length is the minimum of the mappable
795 * requested length (requested_length - virt_offset)
796 * and the actual mappable length of the range.
798 mappable_region
->start
= range_start
;
799 *virt_offset
= range_start
- bar_offset
;
800 mappable_region
->length_bytes
=
801 min(requested_length
- *virt_offset
, range_length
);
803 } else if (bar_offset
> range_start
&&
804 bar_offset
< range_end
) {
806 * If the bar offset is within this range:
807 * 1) Map starting from the bar offset.
808 * 2) Because there is no forbidden memory between the
809 * bar offset and the range start,
811 * 3) The map length is the minimum of the requested
812 * length and the remaining length in the buffer
813 * (range_end - bar_offset)
815 mappable_region
->start
= bar_offset
;
817 mappable_region
->length_bytes
=
818 min(requested_length
, range_end
- bar_offset
);
823 * If the requested [start] offset is above range_end,
824 * there's nothing to map.
830 * Calculates the offset where the VMA range begins in its containing BAR.
831 * The offset is written into bar_offset on success.
832 * Returns zero on success, anything else on error.
834 static int gasket_mm_vma_bar_offset(const struct gasket_dev
*gasket_dev
,
835 const struct vm_area_struct
*vma
,
840 const struct gasket_driver_desc
*driver_desc
=
841 gasket_dev
->internal_desc
->driver_desc
;
843 raw_offset
= (vma
->vm_pgoff
<< PAGE_SHIFT
) +
844 driver_desc
->legacy_mmap_address_offset
;
845 bar_index
= gasket_get_bar_index(gasket_dev
, raw_offset
);
847 dev_err(gasket_dev
->dev
,
848 "Unable to find matching bar for address 0x%lx\n",
850 trace_gasket_mmap_exit(bar_index
);
854 raw_offset
- driver_desc
->bar_descriptions
[bar_index
].base
;
859 int gasket_mm_unmap_region(const struct gasket_dev
*gasket_dev
,
860 struct vm_area_struct
*vma
,
861 const struct gasket_mappable_region
*map_region
)
865 struct gasket_mappable_region mappable_region
;
868 if (map_region
->length_bytes
== 0)
871 ret
= gasket_mm_vma_bar_offset(gasket_dev
, vma
, &bar_offset
);
875 if (!gasket_mm_get_mapping_addrs(map_region
, bar_offset
,
876 vma
->vm_end
- vma
->vm_start
,
877 &mappable_region
, &virt_offset
))
881 * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
882 * PAGE_SIZE! Trust me. I have the scars.
884 * Next multiple of y: ceil_div(x, y) * y
886 zap_vma_ptes(vma
, vma
->vm_start
+ virt_offset
,
887 DIV_ROUND_UP(mappable_region
.length_bytes
, PAGE_SIZE
) *
891 EXPORT_SYMBOL(gasket_mm_unmap_region
);
893 /* Maps a virtual address + range to a physical offset of a BAR. */
894 static enum do_map_region_status
895 do_map_region(const struct gasket_dev
*gasket_dev
, struct vm_area_struct
*vma
,
896 struct gasket_mappable_region
*mappable_region
)
898 /* Maximum size of a single call to io_remap_pfn_range. */
899 /* I pulled this number out of thin air. */
900 const ulong max_chunk_size
= 64 * 1024 * 1024;
901 ulong chunk_size
, mapped_bytes
= 0;
903 const struct gasket_driver_desc
*driver_desc
=
904 gasket_dev
->internal_desc
->driver_desc
;
906 ulong bar_offset
, virt_offset
;
907 struct gasket_mappable_region region_to_map
;
908 ulong phys_offset
, map_length
;
909 ulong virt_base
, phys_base
;
912 ret
= gasket_mm_vma_bar_offset(gasket_dev
, vma
, &bar_offset
);
914 return DO_MAP_REGION_INVALID
;
916 if (!gasket_mm_get_mapping_addrs(mappable_region
, bar_offset
,
917 vma
->vm_end
- vma
->vm_start
,
918 ®ion_to_map
, &virt_offset
))
919 return DO_MAP_REGION_INVALID
;
920 phys_offset
= region_to_map
.start
;
921 map_length
= region_to_map
.length_bytes
;
923 virt_base
= vma
->vm_start
+ virt_offset
;
925 gasket_get_bar_index(gasket_dev
,
926 (vma
->vm_pgoff
<< PAGE_SHIFT
) +
927 driver_desc
->legacy_mmap_address_offset
);
930 return DO_MAP_REGION_INVALID
;
932 phys_base
= gasket_dev
->bar_data
[bar_index
].phys_base
+ phys_offset
;
933 while (mapped_bytes
< map_length
) {
935 * io_remap_pfn_range can take a while, so we chunk its
936 * calls and call cond_resched between each.
938 chunk_size
= min(max_chunk_size
, map_length
- mapped_bytes
);
941 ret
= io_remap_pfn_range(vma
, virt_base
+ mapped_bytes
,
942 (phys_base
+ mapped_bytes
) >>
943 PAGE_SHIFT
, chunk_size
,
946 dev_err(gasket_dev
->dev
,
947 "Error remapping PFN range.\n");
950 mapped_bytes
+= chunk_size
;
953 return DO_MAP_REGION_SUCCESS
;
956 /* Unmap the partial chunk we mapped. */
957 mappable_region
->length_bytes
= mapped_bytes
;
958 if (gasket_mm_unmap_region(gasket_dev
, vma
, mappable_region
))
959 dev_err(gasket_dev
->dev
,
960 "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
962 (ulong
)mapped_bytes
);
964 return DO_MAP_REGION_FAILURE
;
967 /* Map a region of coherent memory. */
968 static int gasket_mmap_coherent(struct gasket_dev
*gasket_dev
,
969 struct vm_area_struct
*vma
)
971 const struct gasket_driver_desc
*driver_desc
=
972 gasket_dev
->internal_desc
->driver_desc
;
973 const ulong requested_length
= vma
->vm_end
- vma
->vm_start
;
977 if (requested_length
== 0 || requested_length
>
978 gasket_dev
->coherent_buffer
.length_bytes
) {
979 trace_gasket_mmap_exit(-EINVAL
);
983 permissions
= driver_desc
->coherent_buffer_description
.permissions
;
984 if (!gasket_mmap_has_permissions(gasket_dev
, vma
, permissions
)) {
985 dev_err(gasket_dev
->dev
, "Permission checking failed.\n");
986 trace_gasket_mmap_exit(-EPERM
);
990 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
992 ret
= remap_pfn_range(vma
, vma
->vm_start
,
993 (gasket_dev
->coherent_buffer
.phys_base
) >>
994 PAGE_SHIFT
, requested_length
, vma
->vm_page_prot
);
996 dev_err(gasket_dev
->dev
, "Error remapping PFN range err=%d.\n",
998 trace_gasket_mmap_exit(ret
);
1002 /* Record the user virtual to dma_address mapping that was
1003 * created by the kernel.
1005 gasket_set_user_virt(gasket_dev
, requested_length
,
1006 gasket_dev
->coherent_buffer
.phys_base
,
1011 /* Map a device's BARs into user space. */
1012 static int gasket_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
1016 int has_mapped_anything
= 0;
1018 ulong raw_offset
, vma_size
;
1019 bool is_coherent_region
;
1020 const struct gasket_driver_desc
*driver_desc
;
1021 struct gasket_dev
*gasket_dev
= (struct gasket_dev
*)filp
->private_data
;
1022 const struct gasket_bar_desc
*bar_desc
;
1023 struct gasket_mappable_region
*map_regions
= NULL
;
1024 int num_map_regions
= 0;
1025 enum do_map_region_status map_status
;
1027 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
1029 if (vma
->vm_start
& ~PAGE_MASK
) {
1030 dev_err(gasket_dev
->dev
,
1031 "Base address not page-aligned: 0x%lx\n",
1033 trace_gasket_mmap_exit(-EINVAL
);
1037 /* Calculate the offset of this range into physical mem. */
1038 raw_offset
= (vma
->vm_pgoff
<< PAGE_SHIFT
) +
1039 driver_desc
->legacy_mmap_address_offset
;
1040 vma_size
= vma
->vm_end
- vma
->vm_start
;
1041 trace_gasket_mmap_entry(gasket_dev
->dev_info
.name
, raw_offset
,
1045 * Check if the raw offset is within a bar region. If not, check if it
1046 * is a coherent region.
1048 bar_index
= gasket_get_bar_index(gasket_dev
, raw_offset
);
1049 is_coherent_region
= gasket_is_coherent_region(driver_desc
, raw_offset
);
1050 if (bar_index
< 0 && !is_coherent_region
) {
1051 dev_err(gasket_dev
->dev
,
1052 "Unable to find matching bar for address 0x%lx\n",
1054 trace_gasket_mmap_exit(bar_index
);
1057 if (bar_index
> 0 && is_coherent_region
) {
1058 dev_err(gasket_dev
->dev
,
1059 "double matching bar and coherent buffers for address 0x%lx\n",
1061 trace_gasket_mmap_exit(bar_index
);
1065 vma
->vm_private_data
= gasket_dev
;
1067 if (is_coherent_region
)
1068 return gasket_mmap_coherent(gasket_dev
, vma
);
1070 /* Everything in the rest of this function is for normal BAR mapping. */
1073 * Subtract the base of the bar from the raw offset to get the
1074 * memory location within the bar to map.
1076 bar_desc
= &driver_desc
->bar_descriptions
[bar_index
];
1077 permissions
= bar_desc
->permissions
;
1078 if (!gasket_mmap_has_permissions(gasket_dev
, vma
, permissions
)) {
1079 dev_err(gasket_dev
->dev
, "Permission checking failed.\n");
1080 trace_gasket_mmap_exit(-EPERM
);
1084 if (driver_desc
->get_mappable_regions_cb
) {
1085 ret
= driver_desc
->get_mappable_regions_cb(gasket_dev
,
1092 if (!gasket_mmap_has_permissions(gasket_dev
, vma
,
1093 bar_desc
->permissions
)) {
1094 dev_err(gasket_dev
->dev
,
1095 "Permission checking failed.\n");
1096 trace_gasket_mmap_exit(-EPERM
);
1099 num_map_regions
= bar_desc
->num_mappable_regions
;
1100 map_regions
= kcalloc(num_map_regions
,
1101 sizeof(*bar_desc
->mappable_regions
),
1104 memcpy(map_regions
, bar_desc
->mappable_regions
,
1106 sizeof(*bar_desc
->mappable_regions
));
1110 if (!map_regions
|| num_map_regions
== 0) {
1111 dev_err(gasket_dev
->dev
, "No mappable regions returned!\n");
1115 /* Marks the VMA's pages as uncacheable. */
1116 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
1117 for (i
= 0; i
< num_map_regions
; i
++) {
1118 map_status
= do_map_region(gasket_dev
, vma
, &map_regions
[i
]);
1119 /* Try the next region if this one was not mappable. */
1120 if (map_status
== DO_MAP_REGION_INVALID
)
1122 if (map_status
== DO_MAP_REGION_FAILURE
) {
1127 has_mapped_anything
= 1;
1132 /* If we could not map any memory, the request was invalid. */
1133 if (!has_mapped_anything
) {
1134 dev_err(gasket_dev
->dev
,
1135 "Map request did not contain a valid region.\n");
1136 trace_gasket_mmap_exit(-EINVAL
);
1140 trace_gasket_mmap_exit(0);
1144 /* Need to unmap any mapped ranges. */
1145 num_map_regions
= i
;
1146 for (i
= 0; i
< num_map_regions
; i
++)
1147 if (gasket_mm_unmap_region(gasket_dev
, vma
,
1148 &bar_desc
->mappable_regions
[i
]))
1149 dev_err(gasket_dev
->dev
, "Error unmapping range %d.\n",
1157 * Open the char device file.
1159 * If the open is for writing, and the device is not owned, this process becomes
1160 * the owner. If the open is for writing and the device is already owned by
1161 * some other process, it is an error. If this process is the owner, increment
1164 * Returns 0 if successful, a negative error number otherwise.
1166 static int gasket_open(struct inode
*inode
, struct file
*filp
)
1169 struct gasket_dev
*gasket_dev
;
1170 const struct gasket_driver_desc
*driver_desc
;
1171 struct gasket_ownership
*ownership
;
1172 char task_name
[TASK_COMM_LEN
];
1173 struct gasket_cdev_info
*dev_info
=
1174 container_of(inode
->i_cdev
, struct gasket_cdev_info
, cdev
);
1175 struct pid_namespace
*pid_ns
= task_active_pid_ns(current
);
1176 bool is_root
= ns_capable(pid_ns
->user_ns
, CAP_SYS_ADMIN
);
1178 gasket_dev
= dev_info
->gasket_dev_ptr
;
1179 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
1180 ownership
= &dev_info
->ownership
;
1181 get_task_comm(task_name
, current
);
1182 filp
->private_data
= gasket_dev
;
1185 dev_dbg(gasket_dev
->dev
,
1186 "Attempting to open with tgid %u (%s) (f_mode: 0%03o, fmode_write: %d is_root: %u)\n",
1187 current
->tgid
, task_name
, filp
->f_mode
,
1188 (filp
->f_mode
& FMODE_WRITE
), is_root
);
1190 /* Always allow non-writing accesses. */
1191 if (!(filp
->f_mode
& FMODE_WRITE
)) {
1192 dev_dbg(gasket_dev
->dev
, "Allowing read-only opening.\n");
1196 mutex_lock(&gasket_dev
->mutex
);
1198 dev_dbg(gasket_dev
->dev
,
1199 "Current owner open count (owning tgid %u): %d.\n",
1200 ownership
->owner
, ownership
->write_open_count
);
1202 /* Opening a node owned by another TGID is an error (unless root) */
1203 if (ownership
->is_owned
&& ownership
->owner
!= current
->tgid
&&
1205 dev_err(gasket_dev
->dev
,
1206 "Process %u is opening a node held by %u.\n",
1207 current
->tgid
, ownership
->owner
);
1208 mutex_unlock(&gasket_dev
->mutex
);
1212 /* If the node is not owned, assign it to the current TGID. */
1213 if (!ownership
->is_owned
) {
1214 ret
= gasket_check_and_invoke_callback_nolock(gasket_dev
,
1215 driver_desc
->device_open_cb
);
1217 dev_err(gasket_dev
->dev
,
1218 "Error in device open cb: %d\n", ret
);
1219 mutex_unlock(&gasket_dev
->mutex
);
1222 ownership
->is_owned
= 1;
1223 ownership
->owner
= current
->tgid
;
1224 dev_dbg(gasket_dev
->dev
, "Device owner is now tgid %u\n",
1228 ownership
->write_open_count
++;
1230 dev_dbg(gasket_dev
->dev
, "New open count (owning tgid %u): %d\n",
1231 ownership
->owner
, ownership
->write_open_count
);
1233 mutex_unlock(&gasket_dev
->mutex
);
1238 * Called on a close of the device file. If this process is the owner,
1239 * decrement the open count. On last close by the owner, free up buffers and
1240 * eventfd contexts, and release ownership.
1242 * Returns 0 if successful, a negative error number otherwise.
1244 static int gasket_release(struct inode
*inode
, struct file
*file
)
1247 struct gasket_dev
*gasket_dev
;
1248 struct gasket_ownership
*ownership
;
1249 const struct gasket_driver_desc
*driver_desc
;
1250 char task_name
[TASK_COMM_LEN
];
1251 struct gasket_cdev_info
*dev_info
=
1252 container_of(inode
->i_cdev
, struct gasket_cdev_info
, cdev
);
1253 struct pid_namespace
*pid_ns
= task_active_pid_ns(current
);
1254 bool is_root
= ns_capable(pid_ns
->user_ns
, CAP_SYS_ADMIN
);
1256 gasket_dev
= dev_info
->gasket_dev_ptr
;
1257 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
1258 ownership
= &dev_info
->ownership
;
1259 get_task_comm(task_name
, current
);
1260 mutex_lock(&gasket_dev
->mutex
);
1262 dev_dbg(gasket_dev
->dev
,
1263 "Releasing device node. Call origin: tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
1264 current
->tgid
, task_name
, file
->f_mode
,
1265 (file
->f_mode
& FMODE_WRITE
), is_root
);
1266 dev_dbg(gasket_dev
->dev
, "Current open count (owning tgid %u): %d\n",
1267 ownership
->owner
, ownership
->write_open_count
);
1269 if (file
->f_mode
& FMODE_WRITE
) {
1270 ownership
->write_open_count
--;
1271 if (ownership
->write_open_count
== 0) {
1272 dev_dbg(gasket_dev
->dev
, "Device is now free\n");
1273 ownership
->is_owned
= 0;
1274 ownership
->owner
= 0;
1276 /* Forces chip reset before we unmap the page tables. */
1277 driver_desc
->device_reset_cb(gasket_dev
);
1279 for (i
= 0; i
< driver_desc
->num_page_tables
; ++i
) {
1280 gasket_page_table_unmap_all(gasket_dev
->page_table
[i
]);
1281 gasket_page_table_garbage_collect(gasket_dev
->page_table
[i
]);
1282 gasket_free_coherent_memory_all(gasket_dev
, i
);
1285 /* Closes device, enters power save. */
1286 gasket_check_and_invoke_callback_nolock(gasket_dev
,
1287 driver_desc
->device_close_cb
);
1291 dev_dbg(gasket_dev
->dev
, "New open count (owning tgid %u): %d\n",
1292 ownership
->owner
, ownership
->write_open_count
);
1293 mutex_unlock(&gasket_dev
->mutex
);
1298 * Gasket ioctl dispatch function.
1300 * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
1301 * ioctl_handler_cb registered in the driver description.
1302 * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
1304 static long gasket_ioctl(struct file
*filp
, uint cmd
, ulong arg
)
1306 struct gasket_dev
*gasket_dev
;
1307 const struct gasket_driver_desc
*driver_desc
;
1308 void __user
*argp
= (void __user
*)arg
;
1311 gasket_dev
= (struct gasket_dev
*)filp
->private_data
;
1312 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
1314 dev_dbg(gasket_dev
->dev
,
1315 "Unable to find device descriptor for file %s\n",
1316 d_path(&filp
->f_path
, path
, 256));
1320 if (!gasket_is_supported_ioctl(cmd
)) {
1322 * The ioctl handler is not a standard Gasket callback, since
1323 * it requires different arguments. This means we can't use
1324 * check_and_invoke_callback.
1326 if (driver_desc
->ioctl_handler_cb
)
1327 return driver_desc
->ioctl_handler_cb(filp
, cmd
, argp
);
1329 dev_dbg(gasket_dev
->dev
, "Received unknown ioctl 0x%x\n", cmd
);
1333 return gasket_handle_ioctl(filp
, cmd
, argp
);
1336 /* File operations for all Gasket devices. */
1337 static const struct file_operations gasket_file_ops
= {
1338 .owner
= THIS_MODULE
,
1339 .llseek
= no_llseek
,
1340 .mmap
= gasket_mmap
,
1341 .open
= gasket_open
,
1342 .release
= gasket_release
,
1343 .unlocked_ioctl
= gasket_ioctl
,
1346 /* Perform final init and marks the device as active. */
1347 int gasket_enable_device(struct gasket_dev
*gasket_dev
)
1351 const struct gasket_driver_desc
*driver_desc
=
1352 gasket_dev
->internal_desc
->driver_desc
;
1354 ret
= gasket_interrupt_init(gasket_dev
);
1356 dev_err(gasket_dev
->dev
,
1357 "Critical failure to allocate interrupts: %d\n", ret
);
1358 gasket_interrupt_cleanup(gasket_dev
);
1362 for (tbl_idx
= 0; tbl_idx
< driver_desc
->num_page_tables
; tbl_idx
++) {
1363 dev_dbg(gasket_dev
->dev
, "Initializing page table %d.\n",
1365 ret
= gasket_page_table_init(&gasket_dev
->page_table
[tbl_idx
],
1366 &gasket_dev
->bar_data
[driver_desc
->page_table_bar_index
],
1367 &driver_desc
->page_table_configs
[tbl_idx
],
1369 gasket_dev
->pci_dev
);
1371 dev_err(gasket_dev
->dev
,
1372 "Couldn't init page table %d: %d\n",
1377 * Make sure that the page table is clear and set to simple
1380 gasket_page_table_reset(gasket_dev
->page_table
[tbl_idx
]);
1384 * hardware_revision_cb returns a positive integer (the rev) if
1387 ret
= check_and_invoke_callback(gasket_dev
,
1388 driver_desc
->hardware_revision_cb
);
1390 dev_err(gasket_dev
->dev
,
1391 "Error getting hardware revision: %d\n", ret
);
1394 gasket_dev
->hardware_revision
= ret
;
1396 /* device_status_cb returns a device status, not an error code. */
1397 gasket_dev
->status
= gasket_get_hw_status(gasket_dev
);
1398 if (gasket_dev
->status
== GASKET_STATUS_DEAD
)
1399 dev_err(gasket_dev
->dev
, "Device reported as unhealthy.\n");
1401 ret
= gasket_add_cdev(&gasket_dev
->dev_info
, &gasket_file_ops
,
1402 driver_desc
->module
);
1408 EXPORT_SYMBOL(gasket_enable_device
);
1410 static int __gasket_add_device(struct device
*parent_dev
,
1411 struct gasket_internal_desc
*internal_desc
,
1412 struct gasket_dev
**gasket_devp
)
1415 struct gasket_dev
*gasket_dev
;
1416 const struct gasket_driver_desc
*driver_desc
=
1417 internal_desc
->driver_desc
;
1419 ret
= gasket_alloc_dev(internal_desc
, parent_dev
, &gasket_dev
);
1422 if (IS_ERR(gasket_dev
->dev_info
.device
)) {
1423 dev_err(parent_dev
, "Cannot create %s device %s [ret = %ld]\n",
1424 driver_desc
->name
, gasket_dev
->dev_info
.name
,
1425 PTR_ERR(gasket_dev
->dev_info
.device
));
1427 goto free_gasket_dev
;
1430 ret
= gasket_sysfs_create_mapping(gasket_dev
->dev_info
.device
,
1435 ret
= gasket_sysfs_create_entries(gasket_dev
->dev_info
.device
,
1436 gasket_sysfs_generic_attrs
);
1438 goto remove_sysfs_mapping
;
1440 *gasket_devp
= gasket_dev
;
1443 remove_sysfs_mapping
:
1444 gasket_sysfs_remove_mapping(gasket_dev
->dev_info
.device
);
1446 device_destroy(internal_desc
->class, gasket_dev
->dev_info
.devt
);
1448 gasket_free_dev(gasket_dev
);
1452 static void __gasket_remove_device(struct gasket_internal_desc
*internal_desc
,
1453 struct gasket_dev
*gasket_dev
)
1455 gasket_sysfs_remove_mapping(gasket_dev
->dev_info
.device
);
1456 device_destroy(internal_desc
->class, gasket_dev
->dev_info
.devt
);
1457 gasket_free_dev(gasket_dev
);
1461 * Add PCI gasket device.
1463 * Called by Gasket device probe function.
1464 * Allocates device metadata and maps device memory. The device driver must
1465 * call gasket_enable_device after driver init is complete to place the device
1468 int gasket_pci_add_device(struct pci_dev
*pci_dev
,
1469 struct gasket_dev
**gasket_devp
)
1472 struct gasket_internal_desc
*internal_desc
;
1473 struct gasket_dev
*gasket_dev
;
1474 struct device
*parent
;
1476 dev_dbg(&pci_dev
->dev
, "add PCI gasket device\n");
1478 mutex_lock(&g_mutex
);
1479 internal_desc
= lookup_pci_internal_desc(pci_dev
);
1480 mutex_unlock(&g_mutex
);
1481 if (!internal_desc
) {
1482 dev_err(&pci_dev
->dev
,
1483 "PCI add device called for unknown driver type\n");
1487 parent
= &pci_dev
->dev
;
1488 ret
= __gasket_add_device(parent
, internal_desc
, &gasket_dev
);
1492 gasket_dev
->pci_dev
= pci_dev
;
1493 ret
= gasket_setup_pci(pci_dev
, gasket_dev
);
1498 * Once we've created the mapping structures successfully, attempt to
1499 * create a symlink to the pci directory of this object.
1501 ret
= sysfs_create_link(&gasket_dev
->dev_info
.device
->kobj
,
1502 &pci_dev
->dev
.kobj
, dev_name(&pci_dev
->dev
));
1504 dev_err(gasket_dev
->dev
,
1505 "Cannot create sysfs pci link: %d\n", ret
);
1509 *gasket_devp
= gasket_dev
;
1513 gasket_cleanup_pci(gasket_dev
);
1514 __gasket_remove_device(internal_desc
, gasket_dev
);
1517 EXPORT_SYMBOL(gasket_pci_add_device
);
1519 /* Remove a PCI gasket device. */
1520 void gasket_pci_remove_device(struct pci_dev
*pci_dev
)
1523 struct gasket_internal_desc
*internal_desc
;
1524 struct gasket_dev
*gasket_dev
= NULL
;
1525 /* Find the device desc. */
1526 mutex_lock(&g_mutex
);
1527 internal_desc
= lookup_pci_internal_desc(pci_dev
);
1528 if (!internal_desc
) {
1529 mutex_unlock(&g_mutex
);
1532 mutex_unlock(&g_mutex
);
1534 /* Now find the specific device */
1535 mutex_lock(&internal_desc
->mutex
);
1536 for (i
= 0; i
< GASKET_DEV_MAX
; i
++) {
1537 if (internal_desc
->devs
[i
] &&
1538 internal_desc
->devs
[i
]->pci_dev
== pci_dev
) {
1539 gasket_dev
= internal_desc
->devs
[i
];
1543 mutex_unlock(&internal_desc
->mutex
);
1548 dev_dbg(gasket_dev
->dev
, "remove %s PCI gasket device\n",
1549 internal_desc
->driver_desc
->name
);
1551 gasket_cleanup_pci(gasket_dev
);
1552 __gasket_remove_device(internal_desc
, gasket_dev
);
1554 EXPORT_SYMBOL(gasket_pci_remove_device
);
1557 * Lookup a name by number in a num_name table.
1558 * @num: Number to lookup.
1559 * @table: Array of num_name structures, the table for the lookup.
1561 * Description: Searches for num in the table. If found, the
1562 * corresponding name is returned; otherwise NULL
1565 * The table must have a NULL name pointer at the end.
1567 const char *gasket_num_name_lookup(uint num
,
1568 const struct gasket_num_name
*table
)
1572 while (table
[i
].snn_name
) {
1573 if (num
== table
[i
].snn_num
)
1578 return table
[i
].snn_name
;
1580 EXPORT_SYMBOL(gasket_num_name_lookup
);
1582 int gasket_reset(struct gasket_dev
*gasket_dev
)
1586 mutex_lock(&gasket_dev
->mutex
);
1587 ret
= gasket_reset_nolock(gasket_dev
);
1588 mutex_unlock(&gasket_dev
->mutex
);
1591 EXPORT_SYMBOL(gasket_reset
);
1593 int gasket_reset_nolock(struct gasket_dev
*gasket_dev
)
1597 const struct gasket_driver_desc
*driver_desc
;
1599 driver_desc
= gasket_dev
->internal_desc
->driver_desc
;
1600 if (!driver_desc
->device_reset_cb
)
1603 ret
= driver_desc
->device_reset_cb(gasket_dev
);
1605 dev_dbg(gasket_dev
->dev
, "Device reset cb returned %d.\n",
1610 /* Reinitialize the page tables and interrupt framework. */
1611 for (i
= 0; i
< driver_desc
->num_page_tables
; ++i
)
1612 gasket_page_table_reset(gasket_dev
->page_table
[i
]);
1614 ret
= gasket_interrupt_reinit(gasket_dev
);
1616 dev_dbg(gasket_dev
->dev
, "Unable to reinit interrupts: %d.\n",
1621 /* Get current device health. */
1622 gasket_dev
->status
= gasket_get_hw_status(gasket_dev
);
1623 if (gasket_dev
->status
== GASKET_STATUS_DEAD
) {
1624 dev_dbg(gasket_dev
->dev
, "Device reported as dead.\n");
1630 EXPORT_SYMBOL(gasket_reset_nolock
);
1632 gasket_ioctl_permissions_cb_t
1633 gasket_get_ioctl_permissions_cb(struct gasket_dev
*gasket_dev
)
1635 return gasket_dev
->internal_desc
->driver_desc
->ioctl_permissions_cb
;
1637 EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb
);
1639 /* Get the driver structure for a given gasket_dev.
1640 * @dev: pointer to gasket_dev, implementing the requested driver.
1642 const struct gasket_driver_desc
*gasket_get_driver_desc(struct gasket_dev
*dev
)
1644 return dev
->internal_desc
->driver_desc
;
1647 /* Get the device structure for a given gasket_dev.
1648 * @dev: pointer to gasket_dev, implementing the requested driver.
1650 struct device
*gasket_get_device(struct gasket_dev
*dev
)
1656 * Asynchronously waits on device.
1657 * @gasket_dev: Device struct.
1659 * @offset: Register offset
1660 * @mask: Register mask
1661 * @val: Expected value
1662 * @max_retries: number of sleep periods
1663 * @delay_ms: Timeout in milliseconds
1665 * Description: Busy waits for a specific combination of bits to be set on a
1668 int gasket_wait_with_reschedule(struct gasket_dev
*gasket_dev
, int bar
,
1669 u64 offset
, u64 mask
, u64 val
,
1670 uint max_retries
, u64 delay_ms
)
1675 while (retries
< max_retries
) {
1676 tmp
= gasket_dev_read_64(gasket_dev
, bar
, offset
);
1677 if ((tmp
& mask
) == val
)
1682 dev_dbg(gasket_dev
->dev
, "%s timeout: reg %llx timeout (%llu ms)\n",
1683 __func__
, offset
, max_retries
* delay_ms
);
1686 EXPORT_SYMBOL(gasket_wait_with_reschedule
);
1688 /* See gasket_core.h for description. */
1689 int gasket_register_device(const struct gasket_driver_desc
*driver_desc
)
1693 struct gasket_internal_desc
*internal
;
1695 pr_debug("Loading %s driver version %s\n", driver_desc
->name
,
1696 driver_desc
->driver_version
);
1697 /* Check for duplicates and find a free slot. */
1698 mutex_lock(&g_mutex
);
1700 for (i
= 0; i
< GASKET_FRAMEWORK_DESC_MAX
; i
++) {
1701 if (g_descs
[i
].driver_desc
== driver_desc
) {
1702 pr_err("%s driver already loaded/registered\n",
1704 mutex_unlock(&g_mutex
);
1709 /* This and the above loop could be combined, but this reads easier. */
1710 for (i
= 0; i
< GASKET_FRAMEWORK_DESC_MAX
; i
++) {
1711 if (!g_descs
[i
].driver_desc
) {
1712 g_descs
[i
].driver_desc
= driver_desc
;
1717 mutex_unlock(&g_mutex
);
1719 if (desc_idx
== -1) {
1720 pr_err("too many drivers loaded, max %d\n",
1721 GASKET_FRAMEWORK_DESC_MAX
);
1725 internal
= &g_descs
[desc_idx
];
1726 mutex_init(&internal
->mutex
);
1727 memset(internal
->devs
, 0, sizeof(struct gasket_dev
*) * GASKET_DEV_MAX
);
1729 class_create(driver_desc
->module
, driver_desc
->name
);
1731 if (IS_ERR(internal
->class)) {
1732 pr_err("Cannot register %s class [ret=%ld]\n",
1733 driver_desc
->name
, PTR_ERR(internal
->class));
1734 ret
= PTR_ERR(internal
->class);
1735 goto unregister_gasket_driver
;
1738 ret
= register_chrdev_region(MKDEV(driver_desc
->major
,
1739 driver_desc
->minor
), GASKET_DEV_MAX
,
1742 pr_err("cannot register %s char driver [ret=%d]\n",
1743 driver_desc
->name
, ret
);
1750 class_destroy(internal
->class);
1752 unregister_gasket_driver
:
1753 mutex_lock(&g_mutex
);
1754 g_descs
[desc_idx
].driver_desc
= NULL
;
1755 mutex_unlock(&g_mutex
);
1758 EXPORT_SYMBOL(gasket_register_device
);
1760 /* See gasket_core.h for description. */
1761 void gasket_unregister_device(const struct gasket_driver_desc
*driver_desc
)
1764 struct gasket_internal_desc
*internal_desc
= NULL
;
1766 mutex_lock(&g_mutex
);
1767 for (i
= 0; i
< GASKET_FRAMEWORK_DESC_MAX
; i
++) {
1768 if (g_descs
[i
].driver_desc
== driver_desc
) {
1769 internal_desc
= &g_descs
[i
];
1775 if (!internal_desc
) {
1776 mutex_unlock(&g_mutex
);
1777 pr_err("request to unregister unknown desc: %s, %d:%d\n",
1778 driver_desc
->name
, driver_desc
->major
,
1779 driver_desc
->minor
);
1783 unregister_chrdev_region(MKDEV(driver_desc
->major
, driver_desc
->minor
),
1786 class_destroy(internal_desc
->class);
1788 /* Finally, effectively "remove" the driver. */
1789 g_descs
[desc_idx
].driver_desc
= NULL
;
1790 mutex_unlock(&g_mutex
);
1792 pr_debug("removed %s driver\n", driver_desc
->name
);
1794 EXPORT_SYMBOL(gasket_unregister_device
);
1796 static int __init
gasket_init(void)
1800 mutex_lock(&g_mutex
);
1801 for (i
= 0; i
< GASKET_FRAMEWORK_DESC_MAX
; i
++) {
1802 g_descs
[i
].driver_desc
= NULL
;
1803 mutex_init(&g_descs
[i
].mutex
);
1806 gasket_sysfs_init();
1808 mutex_unlock(&g_mutex
);
1812 MODULE_DESCRIPTION("Google Gasket driver framework");
1813 MODULE_VERSION(GASKET_FRAMEWORK_VERSION
);
1814 MODULE_LICENSE("GPL v2");
1815 MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
1816 module_init(gasket_init
);