1 From b01457f2cabf7e9b16f217ef7e4cb739655c407b Mon Sep 17 00:00:00 2001
2 From: Naushir Patuck <naush@raspberrypi.com>
3 Date: Tue, 21 May 2024 12:56:17 +0100
4 Subject: [PATCH 1104/1135] drivers: media: pci: Add Hailo accelerator device
7 Add version 4.17.0 of the Hailo PCIe device drivers.
8 Sourced from https://github.com/hailo-ai/hailort-drivers/
10 Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
12 drivers/media/pci/Kconfig | 1 +
13 drivers/media/pci/Makefile | 3 +-
14 drivers/media/pci/hailo/Kconfig | 6 +
15 drivers/media/pci/hailo/Makefile | 32 +
16 drivers/media/pci/hailo/common/fw_operation.c | 103 ++
17 drivers/media/pci/hailo/common/fw_operation.h | 25 +
18 .../media/pci/hailo/common/fw_validation.c | 112 ++
19 .../media/pci/hailo/common/fw_validation.h | 66 ++
20 .../pci/hailo/common/hailo_ioctl_common.h | 575 ++++++++++
21 .../pci/hailo/common/hailo_pcie_version.h | 13 +
22 .../media/pci/hailo/common/hailo_resource.c | 128 +++
23 .../media/pci/hailo/common/hailo_resource.h | 39 +
24 drivers/media/pci/hailo/common/pcie_common.c | 641 +++++++++++
25 drivers/media/pci/hailo/common/pcie_common.h | 128 +++
26 drivers/media/pci/hailo/common/utils.h | 39 +
27 drivers/media/pci/hailo/common/vdma_common.c | 684 +++++++++++
28 drivers/media/pci/hailo/common/vdma_common.h | 243 ++++
29 .../pci/hailo/include/hailo_pcie_version.h | 14 +
30 drivers/media/pci/hailo/src/fops.c | 736 ++++++++++++
31 drivers/media/pci/hailo/src/fops.h | 21 +
32 drivers/media/pci/hailo/src/pcie.c | 1012 +++++++++++++++++
33 drivers/media/pci/hailo/src/pcie.h | 82 ++
34 drivers/media/pci/hailo/src/sysfs.c | 36 +
35 drivers/media/pci/hailo/src/sysfs.h | 13 +
36 drivers/media/pci/hailo/src/utils.c | 27 +
37 drivers/media/pci/hailo/src/utils.h | 21 +
38 drivers/media/pci/hailo/utils/compact.h | 153 +++
39 drivers/media/pci/hailo/utils/fw_common.h | 19 +
40 drivers/media/pci/hailo/utils/logs.c | 8 +
41 drivers/media/pci/hailo/utils/logs.h | 45 +
42 drivers/media/pci/hailo/vdma/ioctl.c | 698 ++++++++++++
43 drivers/media/pci/hailo/vdma/ioctl.h | 37 +
44 drivers/media/pci/hailo/vdma/memory.c | 551 +++++++++
45 drivers/media/pci/hailo/vdma/memory.h | 54 +
46 drivers/media/pci/hailo/vdma/vdma.c | 336 ++++++
47 drivers/media/pci/hailo/vdma/vdma.h | 143 +++
48 39 files changed, 6849 insertions(+), 1 deletion(-)
49 create mode 100644 drivers/media/pci/hailo/Kconfig
50 create mode 100644 drivers/media/pci/hailo/Makefile
51 create mode 100644 drivers/media/pci/hailo/common/fw_operation.c
52 create mode 100644 drivers/media/pci/hailo/common/fw_operation.h
53 create mode 100644 drivers/media/pci/hailo/common/fw_validation.c
54 create mode 100644 drivers/media/pci/hailo/common/fw_validation.h
55 create mode 100644 drivers/media/pci/hailo/common/hailo_ioctl_common.h
56 create mode 100644 drivers/media/pci/hailo/common/hailo_pcie_version.h
57 create mode 100644 drivers/media/pci/hailo/common/hailo_resource.c
58 create mode 100644 drivers/media/pci/hailo/common/hailo_resource.h
59 create mode 100644 drivers/media/pci/hailo/common/pcie_common.c
60 create mode 100644 drivers/media/pci/hailo/common/pcie_common.h
61 create mode 100644 drivers/media/pci/hailo/common/utils.h
62 create mode 100644 drivers/media/pci/hailo/common/vdma_common.c
63 create mode 100644 drivers/media/pci/hailo/common/vdma_common.h
64 create mode 100755 drivers/media/pci/hailo/include/hailo_pcie_version.h
65 create mode 100644 drivers/media/pci/hailo/src/fops.c
66 create mode 100644 drivers/media/pci/hailo/src/fops.h
67 create mode 100644 drivers/media/pci/hailo/src/pcie.c
68 create mode 100644 drivers/media/pci/hailo/src/pcie.h
69 create mode 100644 drivers/media/pci/hailo/src/sysfs.c
70 create mode 100644 drivers/media/pci/hailo/src/sysfs.h
71 create mode 100644 drivers/media/pci/hailo/src/utils.c
72 create mode 100644 drivers/media/pci/hailo/src/utils.h
73 create mode 100644 drivers/media/pci/hailo/utils/compact.h
74 create mode 100644 drivers/media/pci/hailo/utils/fw_common.h
75 create mode 100644 drivers/media/pci/hailo/utils/logs.c
76 create mode 100644 drivers/media/pci/hailo/utils/logs.h
77 create mode 100644 drivers/media/pci/hailo/vdma/ioctl.c
78 create mode 100644 drivers/media/pci/hailo/vdma/ioctl.h
79 create mode 100644 drivers/media/pci/hailo/vdma/memory.c
80 create mode 100644 drivers/media/pci/hailo/vdma/memory.h
81 create mode 100644 drivers/media/pci/hailo/vdma/vdma.c
82 create mode 100644 drivers/media/pci/hailo/vdma/vdma.h
84 --- a/drivers/media/pci/Kconfig
85 +++ b/drivers/media/pci/Kconfig
86 @@ -74,6 +74,7 @@ config VIDEO_PCI_SKELETON
87 when developing new drivers.
89 source "drivers/media/pci/intel/Kconfig"
90 +source "drivers/media/pci/hailo/Kconfig"
92 endif #MEDIA_PCI_SUPPORT
94 --- a/drivers/media/pci/Makefile
95 +++ b/drivers/media/pci/Makefile
96 @@ -17,7 +17,8 @@ obj-y += ttpci/ \
104 # Please keep it alphabetically sorted by Kconfig name
105 # (e. g. LC_ALL=C sort Makefile)
107 +++ b/drivers/media/pci/hailo/Kconfig
110 +config MEDIA_PCI_HAILO
111 + tristate "Hailo AI accelerator PCIe driver"
114 + Enable build of the Hailo AI accelerator PCIe driver.
116 +++ b/drivers/media/pci/hailo/Makefile
118 +# SPDX-License-Identifier: GPL-2.0
120 +COMMON_SRC_DIRECTORY=common
121 +VDMA_SRC_DIRECTORY=vdma
122 +UTILS_SRC_DIRECTORY=utils
124 +obj-$(CONFIG_MEDIA_PCI_HAILO) := hailo_pci.o
126 +hailo_pci-objs += src/pcie.o
127 +hailo_pci-objs += src/fops.o
128 +hailo_pci-objs += src/utils.o
129 +hailo_pci-objs += src/sysfs.o
131 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_validation.o
132 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_operation.o
133 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/pcie_common.o
134 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/vdma_common.o
135 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/hailo_resource.o
137 +hailo_pci-objs += $(UTILS_SRC_DIRECTORY)/logs.o
139 +hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/vdma.o
140 +hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/memory.o
141 +hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/ioctl.o
143 +ccflags-y += -Werror
144 +ccflags-y += -DHAILO_RASBERRY_PIE
145 +ccflags-y += -I$(srctree)/$(src)
146 +ccflags-y += -I$(srctree)/$(src)/include
147 +ccflags-y += -I$(srctree)/$(src)/common
149 +clean-files := $(hailo_pci-objs)
151 +++ b/drivers/media/pci/hailo/common/fw_operation.c
153 +// SPDX-License-Identifier: GPL-2.0
155 + * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
158 +#include "fw_operation.h"
160 +#include <linux/errno.h>
161 +#include <linux/types.h>
162 +#include <linux/kernel.h>
163 +#include <linux/bug.h>
168 +} FW_DEBUG_BUFFER_HEADER_t;
170 +#define DEBUG_BUFFER_DATA_SIZE (DEBUG_BUFFER_TOTAL_SIZE - sizeof(FW_DEBUG_BUFFER_HEADER_t))
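+// The log area is a ring buffer: a FW_DEBUG_BUFFER_HEADER_t at offset 0 followed by
+// DEBUG_BUFFER_DATA_SIZE bytes of data. The chip advances chip_offset as it produces
+// log data; the host advances host_offset as it consumes it (see
+// calculate_log_ready_to_read() below).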
172 +int hailo_read_firmware_notification(struct hailo_resource *resource, struct hailo_d2h_notification *notification)
174 + hailo_d2h_buffer_details_t d2h_buffer_details = {0, 0};
175 + hailo_resource_read_buffer(resource, 0, sizeof(d2h_buffer_details),
176 + &d2h_buffer_details);
178 + if ((sizeof(notification->buffer) < d2h_buffer_details.buffer_len) || (0 == d2h_buffer_details.is_buffer_in_use)) {
182 + notification->buffer_len = d2h_buffer_details.buffer_len;
183 + hailo_resource_read_buffer(resource, sizeof(d2h_buffer_details), notification->buffer_len, notification->buffer);
185 + // Write is_buffer_in_use = false
186 + hailo_resource_write16(resource, 0, 0);
190 +static inline size_t calculate_log_ready_to_read(FW_DEBUG_BUFFER_HEADER_t *header)
192 + size_t ready_to_read = 0;
193 + size_t host_offset = header->host_offset;
194 + size_t chip_offset = header->chip_offset;
196 + if (chip_offset >= host_offset) {
197 + ready_to_read = chip_offset - host_offset;
199 + ready_to_read = DEBUG_BUFFER_DATA_SIZE - (host_offset - chip_offset);
202 + return ready_to_read;
205 +long hailo_read_firmware_log(struct hailo_resource *fw_logger_resource, struct hailo_read_log_params *params)
207 + FW_DEBUG_BUFFER_HEADER_t debug_buffer_header = {0};
208 + size_t read_offset = 0;
209 + size_t ready_to_read = 0;
210 + size_t size_to_read = 0;
211 + uintptr_t user_buffer = (uintptr_t)params->buffer;
213 + if (params->buffer_size > ARRAY_SIZE(params->buffer)) {
217 + hailo_resource_read_buffer(fw_logger_resource, 0, sizeof(debug_buffer_header),
218 + &debug_buffer_header);
220 + /* Point to the start of the data buffer. */
221 + ready_to_read = calculate_log_ready_to_read(&debug_buffer_header);
222 + if (0 == ready_to_read) {
223 + params->read_bytes = 0;
226 + /* If ready to read is bigger than the buffer size, read only buffer size bytes. */
227 + ready_to_read = min(ready_to_read, params->buffer_size);
229 + /* Point to the data that is ready to be read by the host. */
230 + read_offset = sizeof(debug_buffer_header) + debug_buffer_header.host_offset;
231 + /* Check if the offset should cycle back to beginning. */
232 + if (DEBUG_BUFFER_DATA_SIZE <= debug_buffer_header.host_offset + ready_to_read) {
233 + size_to_read = DEBUG_BUFFER_DATA_SIZE - debug_buffer_header.host_offset;
234 + hailo_resource_read_buffer(fw_logger_resource, read_offset, size_to_read, (void*)user_buffer);
236 + user_buffer += size_to_read;
237 + size_to_read = ready_to_read - size_to_read;
238 + /* Point back to the beginning of the data buffer. */
239 + read_offset -= debug_buffer_header.host_offset;
242 + size_to_read = ready_to_read;
245 + /* size_to_read may become 0 if the read reached DEBUG_BUFFER_DATA_SIZE exactly */
246 + hailo_resource_read_buffer(fw_logger_resource, read_offset, size_to_read, (void*)user_buffer);
248 + /* Advance read_offset and store the new host offset back into the buffer header. */
249 + read_offset += size_to_read;
250 + hailo_resource_write32(fw_logger_resource, offsetof(FW_DEBUG_BUFFER_HEADER_t, host_offset),
251 + (u32)(read_offset - sizeof(debug_buffer_header)));
253 + params->read_bytes = ready_to_read;
256 \ No newline at end of file
258 +++ b/drivers/media/pci/hailo/common/fw_operation.h
260 +// SPDX-License-Identifier: GPL-2.0
262 + * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
265 +#ifndef _HAILO_COMMON_FIRMWARE_OPERATION_H_
266 +#define _HAILO_COMMON_FIRMWARE_OPERATION_H_
268 +#include "hailo_resource.h"
270 +#define DEBUG_BUFFER_TOTAL_SIZE (4*1024)
276 +int hailo_read_firmware_notification(struct hailo_resource *resource, struct hailo_d2h_notification *notification);
278 +long hailo_read_firmware_log(struct hailo_resource *fw_logger_resource, struct hailo_read_log_params *params);
284 +#endif /* _HAILO_COMMON_FIRMWARE_OPERATION_H_ */
286 +++ b/drivers/media/pci/hailo/common/fw_validation.c
288 +// SPDX-License-Identifier: GPL-2.0
290 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
293 +#include "fw_validation.h"
294 +#include <linux/errno.h>
295 +#include <linux/types.h>
299 +/* when reading the firmware we don't want to read past the firmware_size,
300 + so we have a consumed_firmware_offset that is updated _before_ accessing data at that offset
301 + of firmware_base_address */
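+/* Checking both __size itself and the accumulated offset means a single oversized
+   section is rejected immediately, and a sequence of sections can never run past the
+   end of the firmware image. */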
302 +#define CONSUME_FIRMWARE(__size, __err) do { \
303 + consumed_firmware_offset += (u32) (__size); \
304 + if ((firmware_size < (__size)) || (firmware_size < consumed_firmware_offset)) { \
310 +int FW_VALIDATION__validate_fw_header(uintptr_t firmware_base_address,
311 + size_t firmware_size, u32 max_code_size, u32 *outer_consumed_firmware_offset,
312 + firmware_header_t **out_firmware_header, enum hailo_board_type board_type)
315 + firmware_header_t *firmware_header = NULL;
316 + u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
317 + u32 expected_firmware_magic = 0;
319 + firmware_header = (firmware_header_t *) (firmware_base_address + consumed_firmware_offset);
320 + CONSUME_FIRMWARE(sizeof(firmware_header_t), -EINVAL);
322 + switch (board_type) {
323 + case HAILO_BOARD_TYPE_HAILO8:
324 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO8;
326 + case HAILO_BOARD_TYPE_HAILO15:
327 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
329 + case HAILO_BOARD_TYPE_PLUTO:
330 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_PLUTO;
337 + if (expected_firmware_magic != firmware_header->magic) {
342 + /* Validate that the firmware header version is supported */
343 + switch(firmware_header->header_version) {
344 + case FIRMWARE_HEADER_VERSION_INITIAL:
352 + if (MINIMUM_FIRMWARE_CODE_SIZE > firmware_header->code_size) {
357 + if (max_code_size < firmware_header->code_size) {
362 + CONSUME_FIRMWARE(firmware_header->code_size, -EINVAL);
364 + *outer_consumed_firmware_offset = consumed_firmware_offset;
365 + *out_firmware_header = firmware_header;
372 +int FW_VALIDATION__validate_cert_header(uintptr_t firmware_base_address,
373 + size_t firmware_size, u32 *outer_consumed_firmware_offset, secure_boot_certificate_t **out_firmware_cert)
376 + secure_boot_certificate_t *firmware_cert = NULL;
378 + u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
380 + firmware_cert = (secure_boot_certificate_t *) (firmware_base_address + consumed_firmware_offset);
381 + CONSUME_FIRMWARE(sizeof(secure_boot_certificate_t), -EINVAL);
383 + if ((MAXIMUM_FIRMWARE_CERT_KEY_SIZE < firmware_cert->key_size) ||
384 + (MAXIMUM_FIRMWARE_CERT_CONTENT_SIZE < firmware_cert->content_size)) {
389 + CONSUME_FIRMWARE(firmware_cert->key_size, -EINVAL);
390 + CONSUME_FIRMWARE(firmware_cert->content_size, -EINVAL);
392 + *outer_consumed_firmware_offset = consumed_firmware_offset;
393 + *out_firmware_cert = firmware_cert;
401 +++ b/drivers/media/pci/hailo/common/fw_validation.h
403 +// SPDX-License-Identifier: GPL-2.0
405 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
408 +#ifndef PCIE_COMMON_FIRMWARE_HEADER_UTILS_H_
409 +#define PCIE_COMMON_FIRMWARE_HEADER_UTILS_H_
411 +#include "hailo_ioctl_common.h"
412 +#include <linux/types.h>
414 +#define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
415 +#define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
416 +// TODO - HRT-11344 : change fw magic to pluto specific
417 +#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
419 +#ifndef HAILO_EMULATOR
420 +#define FIRMWARE_WAIT_TIMEOUT_MS (5000)
421 +#else /* ifndef HAILO_EMULATOR */
422 +#define FIRMWARE_WAIT_TIMEOUT_MS (500000)
423 +#endif /* ifndef HAILO_EMULATOR */
426 + FIRMWARE_HEADER_VERSION_INITIAL = 0,
429 + FIRMWARE_HEADER_VERSION_COUNT
430 +} firmware_header_version_t;
434 + u32 header_version;
435 + u32 firmware_major;
436 + u32 firmware_minor;
437 + u32 firmware_revision;
439 +} firmware_header_t;
443 +#pragma warning(push)
444 +#pragma warning(disable:4200)
445 +#endif /* _MSC_VER */
450 + u8 certificates_data[0];
451 +} secure_boot_certificate_t;
454 +#pragma warning(pop)
455 +#endif /* _MSC_VER */
457 +#define MINIMUM_FIRMWARE_CODE_SIZE (20*4)
458 +#define MAXIMUM_FIRMWARE_CERT_KEY_SIZE (0x1000)
459 +#define MAXIMUM_FIRMWARE_CERT_CONTENT_SIZE (0x1000)
461 +int FW_VALIDATION__validate_fw_header(uintptr_t firmware_base_address,
462 + size_t firmware_size, u32 max_code_size, u32 *outer_consumed_firmware_offset,
463 + firmware_header_t **out_firmware_header, enum hailo_board_type board_type);
465 +int FW_VALIDATION__validate_cert_header(uintptr_t firmware_base_address,
466 + size_t firmware_size, u32 *outer_consumed_firmware_offset, secure_boot_certificate_t **out_firmware_cert);
469 \ No newline at end of file
471 +++ b/drivers/media/pci/hailo/common/hailo_ioctl_common.h
473 +// SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) AND MIT
475 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
478 +#ifndef _HAILO_IOCTL_COMMON_H_
479 +#define _HAILO_IOCTL_COMMON_H_
482 +// This value is not easily changeable.
483 +// For example: the channel interrupts ioctls assume we have up to 32 channels
484 +#define MAX_VDMA_CHANNELS_PER_ENGINE (32)
485 +#define MAX_VDMA_ENGINES (3)
486 +#define SIZE_OF_VDMA_DESCRIPTOR (16)
487 +#define VDMA_DEST_CHANNELS_START (16)
489 +#define HAILO_VDMA_MAX_ONGOING_TRANSFERS (128)
490 +#define HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK (HAILO_VDMA_MAX_ONGOING_TRANSFERS - 1)
492 +#define CHANNEL_IRQ_TIMESTAMPS_SIZE (HAILO_VDMA_MAX_ONGOING_TRANSFERS * 2)
493 +#define CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK (CHANNEL_IRQ_TIMESTAMPS_SIZE - 1)
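+// Both ring sizes are powers of two, so the _MASK values let ring indices wrap with a
+// cheap bitwise AND instead of a modulo.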
495 +#define INVALID_DRIVER_HANDLE_VALUE ((uintptr_t)-1)
497 +// Used by the Windows and Unix drivers to raise the right CPU control handle to the FW. The same as in the pcie_service FW
498 +#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
499 +#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
500 +#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
501 +#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
502 +#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
503 +#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
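+// Raise-ready register bit layout: bit 0 raises a control for the app CPU, bit 1 for
+// the core CPU, and bit 2 signals driver shutdown.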
505 +#define INVALID_VDMA_CHANNEL (0xff)
507 +#if !defined(__cplusplus) && defined(NTDDI_VERSION)
509 +typedef ULONG uint32_t;
510 +typedef UCHAR uint8_t;
511 +typedef USHORT uint16_t;
512 +typedef ULONGLONG uint64_t;
513 +#endif /* !defined(__cplusplus) && defined(NTDDI_VERSION) */
518 +#include <initguid.h>
520 +#if !defined(bool) && !defined(__cplusplus)
521 +typedef uint8_t bool;
522 +#endif // !defined(bool) && !defined(__cplusplus)
524 +#if !defined(INT_MAX)
525 +#define INT_MAX 0x7FFFFFFF
526 +#endif // !defined(INT_MAX)
529 +// {d88d31f1-fede-4e71-ac2a-6ce0018c1501}
530 +DEFINE_GUID (GUID_DEVINTERFACE_HailoKM,
531 + 0xd88d31f1,0xfede,0x4e71,0xac,0x2a,0x6c,0xe0,0x01,0x8c,0x15,0x01);
533 +#define HAILO_GENERAL_IOCTL_MAGIC 0
534 +#define HAILO_VDMA_IOCTL_MAGIC 1
535 +#define HAILO_NON_LINUX_IOCTL_MAGIC 2
537 +#define HAILO_IOCTL_COMPATIBLE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x802, METHOD_BUFFERED, FILE_ANY_ACCESS)
540 +typedef struct tCompatibleHailoIoctlParam
552 +} tCompatibleHailoIoctlParam;
554 +static ULONG FORCEINLINE _IOC_(ULONG nr, ULONG type, ULONG size, bool read, bool write)
556 + struct tCompatibleHailoIoctlParam param;
557 + param.u.bits.Code = nr;
558 + param.u.bits.Size = size;
559 + param.u.bits.Type = type;
560 + param.u.bits.Read = read ? 1 : 0;
561 + param.u.bits.Write = write ? 1 : 0;
562 + return param.u.value;
565 +#define _IOW_(type,nr,size) _IOC_(nr, type, sizeof(size), true, false)
566 +#define _IOR_(type,nr,size) _IOC_(nr, type, sizeof(size), false, true)
567 +#define _IOWR_(type,nr,size) _IOC_(nr, type, sizeof(size), true, true)
568 +#define _IO_(type,nr) _IOC_(nr, type, 0, false, false)
570 +#elif defined(__linux__) // #ifdef _MSC_VER
572 +// include the userspace headers only if this file is included by a user-space program
573 +// It is discouraged to include them when compiling the driver (https://lwn.net/Articles/113349/)
575 +#include <sys/types.h>
577 +#include <linux/types.h>
578 +#include <linux/limits.h>
579 +#include <linux/kernel.h>
580 +#endif // ifndef __KERNEL__
582 +#include <linux/ioctl.h>
586 +#define _IOWR_ _IOWR
589 +#define HAILO_GENERAL_IOCTL_MAGIC 'g'
590 +#define HAILO_VDMA_IOCTL_MAGIC 'v'
591 +#define HAILO_NON_LINUX_IOCTL_MAGIC 'w'
593 +#elif defined(__QNX__) // #ifdef _MSC_VER
596 +#include <sys/types.h>
597 +#include <sys/mman.h>
598 +#include <stdbool.h>
600 +// defines for devctl
601 +#define _IOW_ __DIOF
602 +#define _IOR_ __DIOT
603 +#define _IOWR_ __DIOTF
605 +#define HAILO_GENERAL_IOCTL_MAGIC _DCMD_ALL
606 +#define HAILO_VDMA_IOCTL_MAGIC _DCMD_MISC
607 +#define HAILO_NON_LINUX_IOCTL_MAGIC _DCMD_PROC
609 +#else // #ifdef _MSC_VER
610 +#error "unsupported platform!"
613 +#pragma pack(push, 1)
615 +struct hailo_channel_interrupt_timestamp {
616 + uint64_t timestamp_ns;
617 + uint16_t desc_num_processed;
621 + uint16_t is_buffer_in_use;
622 + uint16_t buffer_len;
623 +} hailo_d2h_buffer_details_t;
625 +// This enum is the same as `enum dma_data_direction` (defined in linux/dma-direction.h)
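+// (keep the numeric values identical so the two enums can be converted with a plain cast)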
626 +enum hailo_dma_data_direction {
627 + HAILO_DMA_BIDIRECTIONAL = 0,
628 + HAILO_DMA_TO_DEVICE = 1,
629 + HAILO_DMA_FROM_DEVICE = 2,
630 + HAILO_DMA_NONE = 3,
632 + /** Max enum value to maintain ABI Integrity */
633 + HAILO_DMA_MAX_ENUM = INT_MAX,
636 +// Enum that determines whether the buffer should be allocated from user space or by the driver
637 +enum hailo_allocation_mode {
638 + HAILO_ALLOCATION_MODE_USERSPACE = 0,
639 + HAILO_ALLOCATION_MODE_DRIVER = 1,
641 + /** Max enum value to maintain ABI Integrity */
642 + HAILO_ALLOCATION_MODE_MAX_ENUM = INT_MAX,
645 +/* structure used in ioctl HAILO_VDMA_BUFFER_MAP */
646 +struct hailo_vdma_buffer_map_params {
647 +#if defined(__linux__) || defined(_MSC_VER)
648 + void* user_address; // in
649 +#elif defined(__QNX__)
650 + shm_handle_t shared_memory_handle; // in
652 +#error "unsupported platform!"
655 + enum hailo_dma_data_direction data_direction; // in
656 + uintptr_t allocated_buffer_handle; // in
657 + size_t mapped_handle; // out
660 +/* structure used in ioctl HAILO_VDMA_BUFFER_UNMAP */
661 +struct hailo_vdma_buffer_unmap_params {
662 + size_t mapped_handle;
665 +/* structure used in ioctl HAILO_DESC_LIST_CREATE */
666 +struct hailo_desc_list_create_params {
667 + size_t desc_count; // in
668 + uint16_t desc_page_size; // in
669 + bool is_circular; // in
670 + uintptr_t desc_handle; // out
671 + uint64_t dma_address; // out
674 +/* structure used in ioctl HAILO_DESC_LIST_RELEASE */
675 +struct hailo_desc_list_release_params {
676 + uintptr_t desc_handle; // in
679 +/* structure used in ioctl HAILO_NON_LINUX_DESC_LIST_MMAP */
680 +struct hailo_non_linux_desc_list_mmap_params {
681 + uintptr_t desc_handle; // in
683 + void* user_address; // out
686 +/* structure used in ioctl HAILO_DESC_LIST_BIND_VDMA_BUFFER */
687 +struct hailo_desc_list_bind_vdma_buffer_params {
688 + size_t buffer_handle; // in
689 + size_t buffer_size; // in
690 + size_t buffer_offset; // in
691 + uintptr_t desc_handle; // in
692 + uint8_t channel_index; // in
693 + uint32_t starting_desc; // in
696 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_ENABLE */
697 +struct hailo_vdma_interrupts_enable_params {
698 + uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
699 + bool enable_timestamps_measure; // in
702 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_DISABLE */
703 +struct hailo_vdma_interrupts_disable_params {
704 + uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
707 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_WAIT */
708 +struct hailo_vdma_interrupts_channel_data {
709 + uint8_t engine_index;
710 + uint8_t channel_index;
711 + bool is_active; // If not active, num_processed is ignored.
712 + uint16_t host_num_processed;
713 + uint8_t host_error; // Channel error bits on the source side
714 + uint8_t device_error; // Channel error bits on the destination side
715 + bool validation_success; // If the validation of the channel was successful
718 +struct hailo_vdma_interrupts_wait_params {
719 + uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
720 + uint8_t channels_count; // out
721 + struct hailo_vdma_interrupts_channel_data
722 + irq_data[MAX_VDMA_CHANNELS_PER_ENGINE * MAX_VDMA_ENGINES]; // out
725 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS */
726 +struct hailo_vdma_interrupts_read_timestamp_params {
727 + uint8_t engine_index; // in
728 + uint8_t channel_index; // in
729 + uint32_t timestamps_count; // out
730 + struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE]; // out
733 +/* structure used in ioctl HAILO_FW_CONTROL */
734 +#define MAX_CONTROL_LENGTH (1500)
735 +#define PCIE_EXPECTED_MD5_LENGTH (16)
738 +/* structure used in ioctl HAILO_FW_CONTROL and HAILO_READ_LOG */
740 + HAILO_CPU_ID_CPU0 = 0,
744 + /** Max enum value to maintain ABI Integrity */
745 + HAILO_CPU_MAX_ENUM = INT_MAX,
748 +struct hailo_fw_control {
749 + // expected_md5+buffer_len+buffer must be in this order at the start of the struct
750 + uint8_t expected_md5[PCIE_EXPECTED_MD5_LENGTH];
751 + uint32_t buffer_len;
752 + uint8_t buffer[MAX_CONTROL_LENGTH];
753 + uint32_t timeout_ms;
754 + enum hailo_cpu_id cpu_id;
757 +/* structure used in ioctl HAILO_MEMORY_TRANSFER */
758 +// Max BAR transfer size, taken from ATR0_TABLE_SIZE
759 +#define MAX_MEMORY_TRANSFER_LENGTH (4096)
761 +enum hailo_transfer_direction {
765 + /** Max enum value to maintain ABI Integrity */
766 + TRANSFER_MAX_ENUM = INT_MAX,
769 +enum hailo_transfer_memory_type {
770 + HAILO_TRANSFER_DEVICE_DIRECT_MEMORY,
773 + HAILO_TRANSFER_MEMORY_VDMA0 = 0x100,
774 + HAILO_TRANSFER_MEMORY_VDMA1,
775 + HAILO_TRANSFER_MEMORY_VDMA2,
777 + // PCIe driver memories
778 + HAILO_TRANSFER_MEMORY_PCIE_BAR0 = 0x200,
779 + HAILO_TRANSFER_MEMORY_PCIE_BAR2 = 0x202,
780 + HAILO_TRANSFER_MEMORY_PCIE_BAR4 = 0x204,
782 + // DRAM DMA driver memories
783 + HAILO_TRANSFER_MEMORY_DMA_ENGINE0 = 0x300,
784 + HAILO_TRANSFER_MEMORY_DMA_ENGINE1,
785 + HAILO_TRANSFER_MEMORY_DMA_ENGINE2,
787 + /** Max enum value to maintain ABI Integrity */
788 + HAILO_TRANSFER_MEMORY_MAX_ENUM = INT_MAX,
791 +struct hailo_memory_transfer_params {
792 + enum hailo_transfer_direction transfer_direction; // in
793 + enum hailo_transfer_memory_type memory_type; // in
794 + uint64_t address; // in
795 + size_t count; // in
796 + uint8_t buffer[MAX_MEMORY_TRANSFER_LENGTH]; // in/out
799 +/* structure used in ioctl HAILO_VDMA_BUFFER_SYNC */
800 +enum hailo_vdma_buffer_sync_type {
801 + HAILO_SYNC_FOR_CPU,
802 + HAILO_SYNC_FOR_DEVICE,
804 + /** Max enum value to maintain ABI Integrity */
805 + HAILO_SYNC_MAX_ENUM = INT_MAX,
808 +struct hailo_vdma_buffer_sync_params {
809 + size_t handle; // in
810 + enum hailo_vdma_buffer_sync_type sync_type; // in
811 + size_t offset; // in
812 + size_t count; // in
815 +/* structure used in ioctl HAILO_READ_NOTIFICATION */
816 +#define MAX_NOTIFICATION_LENGTH (1500)
818 +struct hailo_d2h_notification {
819 + size_t buffer_len; // out
820 + uint8_t buffer[MAX_NOTIFICATION_LENGTH]; // out
823 +enum hailo_board_type {
824 + HAILO_BOARD_TYPE_HAILO8 = 0,
825 + HAILO_BOARD_TYPE_HAILO15,
826 + HAILO_BOARD_TYPE_PLUTO,
827 + HAILO_BOARD_TYPE_COUNT,
829 + /** Max enum value to maintain ABI Integrity */
830 + HAILO_BOARD_TYPE_MAX_ENUM = INT_MAX
833 +enum hailo_dma_type {
834 + HAILO_DMA_TYPE_PCIE,
835 + HAILO_DMA_TYPE_DRAM,
837 + /** Max enum value to maintain ABI Integrity */
838 + HAILO_DMA_TYPE_MAX_ENUM = INT_MAX,
841 +struct hailo_device_properties {
842 + uint16_t desc_max_page_size;
843 + enum hailo_board_type board_type;
844 + enum hailo_allocation_mode allocation_mode;
845 + enum hailo_dma_type dma_type;
846 + size_t dma_engines_count;
849 + pid_t resource_manager_pid;
853 +struct hailo_driver_info {
854 + uint32_t major_version;
855 + uint32_t minor_version;
856 + uint32_t revision_version;
859 +/* structure used in ioctl HAILO_READ_LOG */
860 +#define MAX_FW_LOG_BUFFER_LENGTH (512)
862 +struct hailo_read_log_params {
863 + enum hailo_cpu_id cpu_id; // in
864 + uint8_t buffer[MAX_FW_LOG_BUFFER_LENGTH]; // out
865 + size_t buffer_size; // in
866 + size_t read_bytes; // out
869 +/* structure used in ioctl HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC */
870 +struct hailo_allocate_low_memory_buffer_params {
871 + size_t buffer_size; // in
872 + uintptr_t buffer_handle; // out
875 +/* structure used in ioctl HAILO_VDMA_LOW_MEMORY_BUFFER_FREE */
876 +struct hailo_free_low_memory_buffer_params {
877 + uintptr_t buffer_handle; // in
880 +struct hailo_mark_as_in_use_params {
881 + bool in_use; // out
884 +/* structure used in ioctl HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC */
885 +struct hailo_allocate_continuous_buffer_params {
886 + size_t buffer_size; // in
887 + uintptr_t buffer_handle; // out
888 + uint64_t dma_address; // out
891 +/* structure used in ioctl HAILO_VDMA_CONTINUOUS_BUFFER_FREE */
892 +struct hailo_free_continuous_buffer_params {
893 + uintptr_t buffer_handle; // in
896 +/* structures used in ioctl HAILO_VDMA_LAUNCH_TRANSFER */
897 +struct hailo_vdma_transfer_buffer {
898 + size_t mapped_buffer_handle; // in
899 + uint32_t offset; // in
900 + uint32_t size; // in
903 +enum hailo_vdma_interrupts_domain {
904 + HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
905 + HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
906 + HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
908 + /** Max enum value to maintain ABI Integrity */
909 + HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
912 +// We allow a maximum of 2 buffers per transfer, since we may need an extra buffer
913 +// to make sure each buffer is aligned to the page size.
914 +#define HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER (2)
916 +struct hailo_vdma_launch_transfer_params {
917 + uint8_t engine_index; // in
918 + uint8_t channel_index; // in
920 + uintptr_t desc_handle; // in
921 + uint32_t starting_desc; // in
923 + bool should_bind; // in, if false, assumes buffer already bound.
924 + uint8_t buffers_count; // in
925 + struct hailo_vdma_transfer_buffer
926 + buffers[HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER]; // in
928 + enum hailo_vdma_interrupts_domain first_interrupts_domain; // in
929 + enum hailo_vdma_interrupts_domain last_interrupts_domain; // in
931 + bool is_debug; // in, if set, program the hw to send
932 + // more info (e.g. desc complete status)
934 + uint32_t descs_programed; // out, number of descriptors programmed.
938 +struct tCompatibleHailoIoctlData
940 + tCompatibleHailoIoctlParam Parameters;
943 + struct hailo_memory_transfer_params MemoryTransfer;
944 + struct hailo_vdma_interrupts_enable_params VdmaInterruptsEnable;
945 + struct hailo_vdma_interrupts_disable_params VdmaInterruptsDisable;
946 + struct hailo_vdma_interrupts_read_timestamp_params VdmaInterruptsReadTimestamps;
947 + struct hailo_vdma_interrupts_wait_params VdmaInterruptsWait;
948 + struct hailo_vdma_buffer_sync_params VdmaBufferSync;
949 + struct hailo_fw_control FirmwareControl;
950 + struct hailo_vdma_buffer_map_params VdmaBufferMap;
951 + struct hailo_vdma_buffer_unmap_params VdmaBufferUnmap;
952 + struct hailo_desc_list_create_params DescListCreate;
953 + struct hailo_desc_list_release_params DescListReleaseParam;
954 + struct hailo_desc_list_bind_vdma_buffer_params DescListBind;
955 + struct hailo_d2h_notification D2HNotification;
956 + struct hailo_device_properties DeviceProperties;
957 + struct hailo_driver_info DriverInfo;
958 + struct hailo_non_linux_desc_list_mmap_params DescListMmap;
959 + struct hailo_read_log_params ReadLog;
960 + struct hailo_mark_as_in_use_params MarkAsInUse;
961 + struct hailo_vdma_launch_transfer_params LaunchTransfer;
968 +enum hailo_general_ioctl_code {
969 + HAILO_MEMORY_TRANSFER_CODE,
970 + HAILO_FW_CONTROL_CODE,
971 + HAILO_READ_NOTIFICATION_CODE,
972 + HAILO_DISABLE_NOTIFICATION_CODE,
973 + HAILO_QUERY_DEVICE_PROPERTIES_CODE,
974 + HAILO_QUERY_DRIVER_INFO_CODE,
975 + HAILO_READ_LOG_CODE,
976 + HAILO_RESET_NN_CORE_CODE,
979 + HAILO_GENERAL_IOCTL_MAX_NR,
982 +#define HAILO_MEMORY_TRANSFER _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_MEMORY_TRANSFER_CODE, struct hailo_memory_transfer_params)
983 +#define HAILO_FW_CONTROL _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
984 +#define HAILO_READ_NOTIFICATION _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
985 +#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
986 +#define HAILO_QUERY_DEVICE_PROPERTIES _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DEVICE_PROPERTIES_CODE, struct hailo_device_properties)
987 +#define HAILO_QUERY_DRIVER_INFO _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DRIVER_INFO_CODE, struct hailo_driver_info)
988 +#define HAILO_READ_LOG _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
989 +#define HAILO_RESET_NN_CORE _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
991 +enum hailo_vdma_ioctl_code {
992 + HAILO_VDMA_INTERRUPTS_ENABLE_CODE,
993 + HAILO_VDMA_INTERRUPTS_DISABLE_CODE,
994 + HAILO_VDMA_INTERRUPTS_WAIT_CODE,
995 + HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE,
996 + HAILO_VDMA_BUFFER_MAP_CODE,
997 + HAILO_VDMA_BUFFER_UNMAP_CODE,
998 + HAILO_VDMA_BUFFER_SYNC_CODE,
999 + HAILO_DESC_LIST_CREATE_CODE,
1000 + HAILO_DESC_LIST_RELEASE_CODE,
1001 + HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE,
1002 + HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE,
1003 + HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE,
1004 + HAILO_MARK_AS_IN_USE_CODE,
1005 + HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE,
1006 + HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE,
1007 + HAILO_VDMA_LAUNCH_TRANSFER_CODE,
1010 + HAILO_VDMA_IOCTL_MAX_NR,
1013 +#define HAILO_VDMA_INTERRUPTS_ENABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_ENABLE_CODE, struct hailo_vdma_interrupts_enable_params)
1014 +#define HAILO_VDMA_INTERRUPTS_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_DISABLE_CODE, struct hailo_vdma_interrupts_disable_params)
1015 +#define HAILO_VDMA_INTERRUPTS_WAIT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_WAIT_CODE, struct hailo_vdma_interrupts_wait_params)
1016 +#define HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE, struct hailo_vdma_interrupts_read_timestamp_params)
1018 +#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
1019 +#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
1020 +#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
1022 +#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
1023 +#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
1024 +#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
1026 +#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
1027 +#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
1029 +#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
1031 +#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
1032 +#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
1034 +#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
1037 +enum hailo_non_linux_ioctl_code {
1038 + HAILO_NON_LINUX_DESC_LIST_MMAP_CODE,
1041 + HAILO_NON_LINUX_IOCTL_MAX_NR,
1044 +#define HAILO_NON_LINUX_DESC_LIST_MMAP _IOWR_(HAILO_NON_LINUX_IOCTL_MAGIC, HAILO_NON_LINUX_DESC_LIST_MMAP_CODE, struct hailo_non_linux_desc_list_mmap_params)
1047 +#endif /* _HAILO_IOCTL_COMMON_H_ */
1049 +++ b/drivers/media/pci/hailo/common/hailo_pcie_version.h
1051 +// SPDX-License-Identifier: GPL-2.0
1053 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1056 +#ifndef _HAILO_COMMON_PCIE_VERSION_H_
1057 +#define _HAILO_COMMON_PCIE_VERSION_H_
1059 +#define HAILO_DRV_VER_MAJOR 4
1060 +#define HAILO_DRV_VER_MINOR 17
1061 +#define HAILO_DRV_VER_REVISION 0
1063 +#endif /* _HAILO_COMMON_PCIE_VERSION_H_ */
1064 \ No newline at end of file
1066 +++ b/drivers/media/pci/hailo/common/hailo_resource.c
1068 +// SPDX-License-Identifier: GPL-2.0
1070 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1073 +#include "hailo_resource.h"
1075 +#include <linux/io.h>
1076 +#include <linux/errno.h>
1077 +#include <linux/types.h>
1078 +#include <linux/kernel.h>
1081 +u8 hailo_resource_read8(struct hailo_resource *resource, size_t offset)
1083 + return ioread8((u8*)resource->address + offset);
1086 +u16 hailo_resource_read16(struct hailo_resource *resource, size_t offset)
1088 + return ioread16((u8*)resource->address + offset);
1091 +u32 hailo_resource_read32(struct hailo_resource *resource, size_t offset)
1093 + return ioread32((u8*)resource->address + offset);
1096 +void hailo_resource_write8(struct hailo_resource *resource, size_t offset, u8 value)
1098 + iowrite8(value, (u8*)resource->address + offset);
1101 +void hailo_resource_write16(struct hailo_resource *resource, size_t offset, u16 value)
1103 + iowrite16(value, (u8*)resource->address + offset);
1106 +void hailo_resource_write32(struct hailo_resource *resource, size_t offset, u32 value)
1108 + iowrite32(value, (u8*)resource->address + offset);
1111 +void hailo_resource_read_buffer(struct hailo_resource *resource, size_t offset, size_t count, void *to)
1113 + // Copied and modified from the Linux aarch64 implementation (using ioread32 instead of readq, which does not always work here)
1114 + uintptr_t to_ptr = (uintptr_t)to;
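+ // Three phases: byte reads until both pointers are 32-bit aligned, 32-bit reads for
+ // the aligned bulk, then byte reads for any remaining tail.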
1115 + while ((count > 0) && (!IS_ALIGNED(to_ptr, 4) || !IS_ALIGNED((uintptr_t)resource->address + offset, 4))) {
1116 + *(u8*)to_ptr = hailo_resource_read8(resource, offset);
1122 + while (count >= 4) {
1123 + *(u32*)to_ptr = hailo_resource_read32(resource, offset);
1129 + while (count > 0) {
1130 + *(u8*)to_ptr = hailo_resource_read8(resource, offset);
1137 +int hailo_resource_write_buffer(struct hailo_resource *resource, size_t offset, size_t count, const void *from)
1139 + // Read back the bytes after writing them to flush the data; a readback mismatch also detects a broken PCIe link.
1141 + uintptr_t from_ptr = (uintptr_t)from;
1142 + while (count && (!IS_ALIGNED(resource->address + offset, 4) || !IS_ALIGNED(from_ptr, 4))) {
1143 + hailo_resource_write8(resource, offset, *(u8*)from_ptr);
1144 + if (hailo_resource_read8(resource, offset) != *(u8*)from_ptr) {
1152 + while (count >= 4) {
1153 + hailo_resource_write32(resource, offset, *(u32*)from_ptr);
1154 + if (hailo_resource_read32(resource, offset) != *(u32*)from_ptr) {
1163 + hailo_resource_write8(resource, offset, *(u8*)from_ptr);
1164 + if (hailo_resource_read8(resource, offset) != *(u8*)from_ptr) {
1175 +int hailo_resource_transfer(struct hailo_resource *resource, struct hailo_memory_transfer_params *transfer)
1177 + // Check for transfer size (address is in the resource's address space)
1178 + if ((transfer->address + transfer->count) > (u64)resource->size) {
1182 + if (transfer->count > ARRAY_SIZE(transfer->buffer)) {
1186 + switch (transfer->transfer_direction) {
1187 + case TRANSFER_READ:
1188 + hailo_resource_read_buffer(resource, (u32)transfer->address, transfer->count, transfer->buffer);
1190 + case TRANSFER_WRITE:
1191 + return hailo_resource_write_buffer(resource, (u32)transfer->address, transfer->count, transfer->buffer);
1197 +++ b/drivers/media/pci/hailo/common/hailo_resource.h
1199 +// SPDX-License-Identifier: GPL-2.0
1201 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1204 +#ifndef _HAILO_COMMON_HAILO_RESOURCE_H_
1205 +#define _HAILO_COMMON_HAILO_RESOURCE_H_
1207 +#include "hailo_ioctl_common.h"
1208 +#include <linux/types.h>
1210 +struct hailo_resource {
1211 + uintptr_t address;
1219 +// Implemented by the specific platform
1220 +u32 hailo_resource_read32(struct hailo_resource *resource, size_t offset);
1221 +u16 hailo_resource_read16(struct hailo_resource *resource, size_t offset);
1222 +u8 hailo_resource_read8(struct hailo_resource *resource, size_t offset);
1223 +void hailo_resource_write32(struct hailo_resource *resource, size_t offset, u32 value);
1224 +void hailo_resource_write16(struct hailo_resource *resource, size_t offset, u16 value);
1225 +void hailo_resource_write8(struct hailo_resource *resource, size_t offset, u8 value);
1227 +void hailo_resource_read_buffer(struct hailo_resource *resource, size_t offset, size_t count, void *to);
1228 +int hailo_resource_write_buffer(struct hailo_resource *resource, size_t offset, size_t count, const void *from);
1230 +// Transfer (read/write) the given resource into/from transfer params.
1231 +int hailo_resource_transfer(struct hailo_resource *resource, struct hailo_memory_transfer_params *transfer);
1237 +#endif /* _HAILO_COMMON_HAILO_RESOURCE_H_ */
1238 \ No newline at end of file
1240 +++ b/drivers/media/pci/hailo/common/pcie_common.c
1242 +// SPDX-License-Identifier: GPL-2.0
1244 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1247 +#include "pcie_common.h"
1248 +#include "fw_operation.h"
1250 +#include <linux/errno.h>
1251 +#include <linux/bug.h>
1252 +#include <linux/delay.h>
1253 +#include <linux/kernel.h>
1256 +#define BSC_IMASK_HOST (0x0188)
1257 +#define BCS_ISTATUS_HOST (0x018C)
1258 +#define BCS_SOURCE_INTERRUPT_PER_CHANNEL (0x400)
1259 +#define BCS_DESTINATION_INTERRUPT_PER_CHANNEL (0x500)
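+// Reading these registers returns the per-channel interrupt bitmap; writing the value
+// back acknowledges the interrupts (see hailo_pcie_read_interrupt() below).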
1261 +#define PO2_ROUND_UP(size, alignment) (((size) + (alignment) - 1) & ~((alignment) - 1))
1263 +#define ATR0_PARAM (0x17)
1264 +#define ATR0_SRC_ADDR (0x0)
1265 +#define ATR0_TRSL_ADDR2 (0x0)
1266 +#define ATR0_TRSL_PARAM (6)
1268 +#define ATR0_PCIE_BRIDGE_OFFSET (0x700)
1269 +#define ATR0_TABLE_SIZE (0x1000u)
1270 +#define ATR0_TABLE_SIZE_MASK (0x1000u - 1)
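+// The ATR0 window exposes 4 KiB of device memory at a time; read_memory()/write_memory()
+// below slide this window across the target range in 4 KiB-aligned chunks.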
1272 +#define MAXIMUM_APP_FIRMWARE_CODE_SIZE (0x40000)
1273 +#define MAXIMUM_CORE_FIRMWARE_CODE_SIZE (0x20000)
1275 +#define FIRMWARE_LOAD_WAIT_MAX_RETRIES (100)
1276 +#define FIRMWARE_LOAD_SLEEP_MS (50)
1278 +#define PCIE_APP_CPU_DEBUG_OFFSET (8*1024)
1279 +#define PCIE_CORE_CPU_DEBUG_OFFSET (PCIE_APP_CPU_DEBUG_OFFSET + DEBUG_BUFFER_TOTAL_SIZE)
1281 +#define PCIE_D2H_NOTIFICATION_SRAM_OFFSET (0x640 + 0x640)
1282 +#define PCIE_REQUEST_SIZE_OFFSET (0x640)
1284 +#define PCIE_CONFIG_VENDOR_OFFSET (0x0098)
1286 +#define HAILO_PCIE_HOST_DMA_DATA_ID (0)
1287 +#define HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 4)
1288 +#define HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK (1 << 5)
1290 +typedef u32 hailo_ptr_t;
1292 +struct hailo_fw_addresses {
1293 + u32 boot_fw_header;
1294 + u32 app_fw_code_ram_base;
1295 + u32 boot_key_cert;
1296 + u32 boot_cont_cert;
1297 + u32 boot_fw_trigger;
1298 + u32 core_code_ram_base;
1299 + u32 core_fw_header;
1300 + u32 atr0_trsl_addr1;
1301 + u32 raise_ready_offset;
1304 +struct hailo_atr_config {
1307 + u32 atr_trsl_addr_1;
1308 + u32 atr_trsl_addr_2;
1309 + u32 atr_trsl_param;
1312 +struct hailo_board_compatibility {
1313 + struct hailo_fw_addresses fw_addresses;
1314 + const char *fw_filename;
1315 + const struct hailo_config_constants board_cfg;
1316 + const struct hailo_config_constants fw_cfg;
1319 +static const struct hailo_board_compatibility compat[HAILO_BOARD_TYPE_COUNT] = {
1320 + [HAILO_BOARD_TYPE_HAILO8] = {
1322 + .boot_fw_header = 0xE0030,
1323 + .boot_fw_trigger = 0xE0980,
1324 + .boot_key_cert = 0xE0048,
1325 + .boot_cont_cert = 0xE0390,
1326 + .app_fw_code_ram_base = 0x60000,
1327 + .core_code_ram_base = 0xC0000,
1328 + .core_fw_header = 0xA0000,
1329 + .atr0_trsl_addr1 = 0x60000000,
1330 + .raise_ready_offset = 0x1684,
1332 + .fw_filename = "hailo/hailo8_fw.bin",
1334 + .filename = "hailo/hailo8_board_cfg.bin",
1335 + .address = 0x60001000,
1336 + .max_size = PCIE_HAILO8_BOARD_CFG_MAX_SIZE,
1339 + .filename = "hailo/hailo8_fw_cfg.bin",
1340 + .address = 0x60001500,
1341 + .max_size = PCIE_HAILO8_FW_CFG_MAX_SIZE,
1344 + [HAILO_BOARD_TYPE_HAILO15] = {
1346 + .boot_fw_header = 0x88000,
1347 + .boot_fw_trigger = 0x88c98,
1348 + .boot_key_cert = 0x88018,
1349 + .boot_cont_cert = 0x886a8,
1350 + .app_fw_code_ram_base = 0x20000,
1351 + .core_code_ram_base = 0x60000,
1352 + .core_fw_header = 0xC0000,
1353 + .atr0_trsl_addr1 = 0x000BE000,
1354 + .raise_ready_offset = 0x1754,
1356 + .fw_filename = "hailo/hailo15_fw.bin",
1368 + // HRT-11344 : none of these matter except raise_ready_offset, seeing as we load the fw separately - not through the driver
1369 + // After implementing the bootloader, put the correct values here
1370 + [HAILO_BOARD_TYPE_PLUTO] = {
1372 + .boot_fw_header = 0x88000,
1373 + .boot_fw_trigger = 0x88c98,
1374 + .boot_key_cert = 0x88018,
1375 + .boot_cont_cert = 0x886a8,
1376 + .app_fw_code_ram_base = 0x20000,
1377 + .core_code_ram_base = 0x60000,
1378 + .core_fw_header = 0xC0000,
1379 + .atr0_trsl_addr1 = 0x000BE000,
1380 + // NOTE: After they update hw consts - check register fw_access_interrupt_w1s of pcie_config
1381 + .raise_ready_offset = 0x174c,
1383 + .fw_filename = "hailo/pluto_fw.bin",
1398 +bool hailo_pcie_read_interrupt(struct hailo_pcie_resources *resources, struct hailo_pcie_interrupt_source *source)
1400 + u32 channel_data_source = 0;
1401 + u32 channel_data_dest = 0;
1402 + memset(source, 0, sizeof(*source));
1404 + source->interrupt_bitmask = hailo_resource_read32(&resources->config, BCS_ISTATUS_HOST);
1405 + if (0 == source->interrupt_bitmask) {
1410 + hailo_resource_write32(&resources->config, BCS_ISTATUS_HOST, source->interrupt_bitmask);
1412 + if (source->interrupt_bitmask & BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK) {
1413 + channel_data_source = hailo_resource_read32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL);
1414 + hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, channel_data_source);
1416 + if (source->interrupt_bitmask & BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK) {
1417 + channel_data_dest = hailo_resource_read32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL);
1418 + hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, channel_data_dest);
1420 + source->vdma_channels_bitmap = channel_data_source | channel_data_dest;
1425 +int hailo_pcie_write_firmware_control(struct hailo_pcie_resources *resources, const struct hailo_fw_control *command)
1428 + u32 request_size = 0;
1429 + u8 fw_access_value = FW_ACCESS_APP_CPU_CONTROL_MASK;
1430 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1432 + if (!hailo_pcie_is_firmware_loaded(resources)) {
1436 + // Copy md5 + buffer_len + buffer
1437 + request_size = sizeof(command->expected_md5) + sizeof(command->buffer_len) + command->buffer_len;
1438 + err = hailo_resource_write_buffer(&resources->fw_access, 0, PO2_ROUND_UP(request_size, FW_CODE_SECTION_ALIGNMENT),
1444 + // Raise the bit for the CPU that will handle the control
1445 + fw_access_value = (command->cpu_id == HAILO_CPU_ID_CPU1) ? FW_ACCESS_CORE_CPU_CONTROL_MASK :
1446 + FW_ACCESS_APP_CPU_CONTROL_MASK;
1448 + // Raise ready flag to FW
1449 + hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, (u32)fw_access_value);
1453 +int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command)
1455 + u32 response_header_size = 0;
1457 + // Copy response md5 + buffer_len
1458 + response_header_size = sizeof(command->expected_md5) + sizeof(command->buffer_len);
1460 + hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET, response_header_size, command);
1462 + if (sizeof(command->buffer) < command->buffer_len) {
1466 + // Copy response buffer
1467 + hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET + (size_t)response_header_size,
1468 + command->buffer_len, &command->buffer);
1473 +void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources)
1475 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1476 + const u32 fw_access_value = FW_ACCESS_DRIVER_SHUTDOWN_MASK;
1478 + // Write shutdown flag to FW
1479 + hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, fw_access_value);
1482 +int hailo_pcie_read_firmware_notification(struct hailo_pcie_resources *resources,
1483 + struct hailo_d2h_notification *notification)
1485 + struct hailo_resource notification_resource;
1487 + if (PCIE_D2H_NOTIFICATION_SRAM_OFFSET > resources->fw_access.size) {
1491 + notification_resource.address = resources->fw_access.address + PCIE_D2H_NOTIFICATION_SRAM_OFFSET,
1492 + notification_resource.size = sizeof(struct hailo_d2h_notification);
1494 + return hailo_read_firmware_notification(&notification_resource, notification);
1497 +static void write_atr_table(struct hailo_pcie_resources *resources,
1498 + struct hailo_atr_config *atr)
1500 + hailo_resource_write_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
1501 + sizeof(*atr), (void*)atr);
1504 +static void read_atr_table(struct hailo_pcie_resources *resources,
1505 + struct hailo_atr_config *atr)
1507 + hailo_resource_read_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
1508 + sizeof(*atr), (void*)atr);
1511 +static void configure_atr_table(struct hailo_pcie_resources *resources,
1512 + hailo_ptr_t base_address)
1514 + struct hailo_atr_config atr = {
1515 + .atr_param = ATR0_PARAM,
1516 + .atr_src = ATR0_SRC_ADDR,
1517 + .atr_trsl_addr_1 = (u32)base_address,
1518 + .atr_trsl_addr_2 = ATR0_TRSL_ADDR2,
1519 + .atr_trsl_param = ATR0_TRSL_PARAM
1521 + write_atr_table(resources, &atr);
1524 +static void write_memory_chunk(struct hailo_pcie_resources *resources,
1525 + hailo_ptr_t dest, u32 dest_offset, const void *src, u32 len)
1527 + BUG_ON(dest_offset + len > (u32)resources->fw_access.size);
1529 + configure_atr_table(resources, dest);
1530 + (void)hailo_resource_write_buffer(&resources->fw_access, dest_offset, len, src);
1533 +static void read_memory_chunk(
1534 + struct hailo_pcie_resources *resources, hailo_ptr_t src, u32 src_offset, void *dest, u32 len)
1536 + BUG_ON(src_offset + len > (u32)resources->fw_access.size);
1538 + configure_atr_table(resources, src);
1539 + (void)hailo_resource_read_buffer(&resources->fw_access, src_offset, len, dest);
1542 +// Note: this function modifies the device ATR table (which is also used by the firmware for control and vdma).
1543 +// Use with caution, and restore the original ATR if needed.
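+// Example: writing 0x1800 bytes to dest 0x60000800 first maps the window at 0x60000000
+// and writes 0x800 bytes at window offset 0x800, then remaps the window to 0x60001000
+// for the remaining 0x1000 bytes.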
1544 +static void write_memory(struct hailo_pcie_resources *resources, hailo_ptr_t dest, const void *src, u32 len)
1546 + hailo_ptr_t base_address = dest & ~ATR0_TABLE_SIZE_MASK;
1547 + u32 chunk_len = 0;
1550 + if (base_address != dest) {
1551 + // Data is not aligned, write the first chunk
1552 + chunk_len = min(base_address + ATR0_TABLE_SIZE - dest, len);
1553 + write_memory_chunk(resources, base_address, dest - base_address, src, chunk_len);
1554 + offset += chunk_len;
1557 + while (offset < len) {
1558 + chunk_len = min(len - offset, ATR0_TABLE_SIZE);
1559 + write_memory_chunk(resources, dest + offset, 0, (const u8*)src + offset, chunk_len);
1560 + offset += chunk_len;
1564 +// Note: this function modifies the device ATR table (which is also used by the firmware for control and vdma).
1565 +// Use with caution, and restore the original ATR if needed.
1566 +static void read_memory(struct hailo_pcie_resources *resources, hailo_ptr_t src, void *dest, u32 len)
1568 + hailo_ptr_t base_address = src & ~ATR0_TABLE_SIZE_MASK;
1569 + u32 chunk_len = 0;
1572 + if (base_address != src) {
1573 + // Data is not aligned, read the first chunk
1574 + chunk_len = min(base_address + ATR0_TABLE_SIZE - src, len);
1575 + read_memory_chunk(resources, base_address, src - base_address, dest, chunk_len);
1576 + offset += chunk_len;
1579 + while (offset < len) {
1580 + chunk_len = min(len - offset, ATR0_TABLE_SIZE);
1581 + read_memory_chunk(resources, src + offset, 0, (u8*)dest + offset, chunk_len);
1582 + offset += chunk_len;
1586 +static void hailo_write_app_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header,
1587 + secure_boot_certificate_t *fw_cert)
1589 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1590 + void *fw_code = (void*)((u8*)fw_header + sizeof(firmware_header_t));
1591 + void *key_data = &fw_cert->certificates_data[0];
1592 + void *content_data = &fw_cert->certificates_data[fw_cert->key_size];
1594 + write_memory(resources, fw_addresses->boot_fw_header, fw_header, sizeof(firmware_header_t));
1596 + write_memory(resources, fw_addresses->app_fw_code_ram_base, fw_code, fw_header->code_size);
1598 + write_memory(resources, fw_addresses->boot_key_cert, key_data, fw_cert->key_size);
1599 + write_memory(resources, fw_addresses->boot_cont_cert, content_data, fw_cert->content_size);
1602 +static void hailo_write_core_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header)
1604 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1605 + void *fw_code = (void*)((u8*)fw_header + sizeof(firmware_header_t));
1607 + write_memory(resources, fw_addresses->core_code_ram_base, fw_code, fw_header->code_size);
1608 + write_memory(resources, fw_addresses->core_fw_header, fw_header, sizeof(firmware_header_t));
1611 +static void hailo_trigger_firmware_boot(struct hailo_pcie_resources *resources)
1613 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1614 + u32 pcie_finished = 1;
1616 + write_memory(resources, fw_addresses->boot_fw_trigger,
1617 + (void*)&pcie_finished, sizeof(pcie_finished));
1621 +* Validates the FW headers.
1622 +* @param[in] address Address of the firmware.
1623 +* @param[in] firmware_size Size of the firmware.
1624 +* @param[out] out_app_firmware_header (optional) App firmware header
1625 +* @param[out] out_core_firmware_header (optional) Core firmware header
1626 +* @param[out] out_firmware_cert (optional) Firmware certificate header
1628 +static int FW_VALIDATION__validate_fw_headers(uintptr_t firmware_base_address, size_t firmware_size,
1629 + firmware_header_t **out_app_firmware_header, firmware_header_t **out_core_firmware_header,
1630 + secure_boot_certificate_t **out_firmware_cert, enum hailo_board_type board_type)
1632 + firmware_header_t *app_firmware_header = NULL;
1633 + firmware_header_t *core_firmware_header = NULL;
1634 + secure_boot_certificate_t *firmware_cert = NULL;
1635 + int err = -EINVAL;
1636 + u32 consumed_firmware_offset = 0;
1638 + err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_APP_FIRMWARE_CODE_SIZE,
1639 + &consumed_firmware_offset, &app_firmware_header, board_type);
1645 + err = FW_VALIDATION__validate_cert_header(firmware_base_address, firmware_size,
1646 + &consumed_firmware_offset, &firmware_cert);
1652 + err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_CORE_FIRMWARE_CODE_SIZE,
1653 + &consumed_firmware_offset, &core_firmware_header, board_type);
1659 + if (consumed_firmware_offset != firmware_size) {
1660 + /* it is an error if there is leftover data after the last firmware header */
1665 + /* the out params are all optional */
1666 + if (NULL != out_app_firmware_header) {
1667 + *out_app_firmware_header = app_firmware_header;
1669 + if (NULL != out_firmware_cert) {
1670 + *out_firmware_cert = firmware_cert;
1672 + if (NULL != out_core_firmware_header) {
1673 + *out_core_firmware_header = core_firmware_header;
1681 +int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size)
1683 + firmware_header_t *app_firmware_header = NULL;
1684 + secure_boot_certificate_t *firmware_cert = NULL;
1685 + firmware_header_t *core_firmware_header = NULL;
1687 + int err = FW_VALIDATION__validate_fw_headers((uintptr_t)fw_data, fw_size,
1688 + &app_firmware_header, &core_firmware_header, &firmware_cert, resources->board_type);
1693 + hailo_write_app_firmware(resources, app_firmware_header, firmware_cert);
1694 + hailo_write_core_firmware(resources, core_firmware_header);
1696 + hailo_trigger_firmware_boot(resources);
1701 +bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources)
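+ // Heuristic: once the firmware has booted, ATR0 is expected to translate to the
+ // board-specific address, so a matching value is treated as firmware-loaded.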
1703 + u32 offset = ATR0_PCIE_BRIDGE_OFFSET + offsetof(struct hailo_atr_config, atr_trsl_addr_1);
1704 + u32 atr_value = hailo_resource_read32(&resources->config, offset);
1705 + return atr_value == compat[resources->board_type].fw_addresses.atr0_trsl_addr1;
1708 +bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources)
1711 + for (retries = 0; retries < FIRMWARE_LOAD_WAIT_MAX_RETRIES; retries++) {
1712 + if (hailo_pcie_is_firmware_loaded(resources)) {
1716 + msleep(FIRMWARE_LOAD_SLEEP_MS);
1722 +int hailo_pcie_write_config_common(struct hailo_pcie_resources *resources, const void* config_data,
1723 + const size_t config_size, const struct hailo_config_constants *config_consts)
1725 + if (config_size > config_consts->max_size) {
1729 + write_memory(resources, config_consts->address, config_data, (u32)config_size);
1733 +const struct hailo_config_constants* hailo_pcie_get_board_config_constants(const enum hailo_board_type board_type) {
1734 + BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
1735 + return &compat[board_type].board_cfg;
1738 +const struct hailo_config_constants* hailo_pcie_get_user_config_constants(const enum hailo_board_type board_type) {
1739 + BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
1740 + return &compat[board_type].fw_cfg;
1743 +const char* hailo_pcie_get_fw_filename(const enum hailo_board_type board_type) {
1744 + BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
1745 + return compat[board_type].fw_filename;
1748 +void hailo_pcie_update_channel_interrupts_mask(struct hailo_pcie_resources* resources, u32 channels_bitmap)
1751 + u32 mask = hailo_resource_read32(&resources->config, BSC_IMASK_HOST);
1753 + // Clear old channel interrupts
1754 + mask &= ~BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK;
1755 + mask &= ~BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK;
1756 +    // Set interrupt bits according to the bitmap
1757 + for (i = 0; i < MAX_VDMA_CHANNELS_PER_ENGINE; ++i) {
1758 + if (hailo_test_bit(i, &channels_bitmap)) {
1759 + // based on 18.5.2 "vDMA Interrupt Registers" in PLDA documentation
1760 + u32 offset = (i < VDMA_DEST_CHANNELS_START) ? 0 : 8;
1761 + hailo_set_bit((((int)i*8) / MAX_VDMA_CHANNELS_PER_ENGINE) + offset, &mask);
1764 + hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
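+/*
+ * Worked example (illustrative; assumes MAX_VDMA_CHANNELS_PER_ENGINE == 32 and
+ * VDMA_DEST_CHANNELS_START == 16 as defined elsewhere in this patch): four
+ * channels share each interrupt bit, so channel 0 maps to (0 * 8) / 32 + 0 =
+ * bit 0 (inside the source mask 0x000000FF) and channel 17 maps to
+ * (17 * 8) / 32 + 8 = bit 12 (inside the destination mask 0x0000FF00).
+ */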
1767 +void hailo_pcie_enable_interrupts(struct hailo_pcie_resources *resources)
1769 + u32 mask = hailo_resource_read32(&resources->config, BSC_IMASK_HOST);
1771 + hailo_resource_write32(&resources->config, BCS_ISTATUS_HOST, 0xFFFFFFFF);
1772 + hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
1773 + hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
1775 + mask |= BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK | BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION | BCS_ISTATUS_HOST_DRIVER_DOWN;
1776 + hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
1779 +void hailo_pcie_disable_interrupts(struct hailo_pcie_resources* resources)
1781 + hailo_resource_write32(&resources->config, BSC_IMASK_HOST, 0);
1784 +long hailo_pcie_read_firmware_log(struct hailo_pcie_resources *resources, struct hailo_read_log_params *params)
1787 + struct hailo_resource log_resource = {resources->fw_access.address, DEBUG_BUFFER_TOTAL_SIZE};
1789 + if (HAILO_CPU_ID_CPU0 == params->cpu_id) {
1790 + log_resource.address += PCIE_APP_CPU_DEBUG_OFFSET;
1791 + } else if (HAILO_CPU_ID_CPU1 == params->cpu_id) {
1792 + log_resource.address += PCIE_CORE_CPU_DEBUG_OFFSET;
1797 + if (0 == params->buffer_size) {
1798 + params->read_bytes = 0;
1802 + err = hailo_read_firmware_log(&log_resource, params);
1810 +static int direct_memory_transfer(struct hailo_pcie_resources *resources,
1811 + struct hailo_memory_transfer_params *params)
1813 + int err = -EINVAL;
1814 + struct hailo_atr_config previous_atr = {0};
1816 + if (params->address > U32_MAX) {
1820 +    // Store the previous ATR (read/modify/write of the ATR).
1821 + read_atr_table(resources, &previous_atr);
1823 + switch (params->transfer_direction) {
1824 + case TRANSFER_READ:
1825 + read_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
1827 + case TRANSFER_WRITE:
1828 + write_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
1837 + write_atr_table(resources, &previous_atr);
1841 +int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params)
1843 + if (params->count > ARRAY_SIZE(params->buffer)) {
1847 + switch (params->memory_type) {
1848 + case HAILO_TRANSFER_DEVICE_DIRECT_MEMORY:
1849 + return direct_memory_transfer(resources, params);
1850 + case HAILO_TRANSFER_MEMORY_PCIE_BAR0:
1851 + return hailo_resource_transfer(&resources->config, params);
1852 + case HAILO_TRANSFER_MEMORY_PCIE_BAR2:
1853 + case HAILO_TRANSFER_MEMORY_VDMA0:
1854 + return hailo_resource_transfer(&resources->vdma_registers, params);
1855 + case HAILO_TRANSFER_MEMORY_PCIE_BAR4:
1856 + return hailo_resource_transfer(&resources->fw_access, params);
1862 +bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources)
1864 + return PCI_VENDOR_ID_HAILO == hailo_resource_read16(&resources->config, PCIE_CONFIG_VENDOR_OFFSET);
1867 +// On PCIe, just return the address
1868 +static u64 encode_dma_address(dma_addr_t dma_address, u8 channel_id)
1871 + return (u64)dma_address;
1874 +struct hailo_vdma_hw hailo_pcie_vdma_hw = {
1876 + .encode_desc_dma_address = encode_dma_address
1878 + .ddr_data_id = HAILO_PCIE_HOST_DMA_DATA_ID,
1879 + .device_interrupts_bitmask = HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK,
1880 + .host_interrupts_bitmask = HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK,
1883 \ No newline at end of file
1885 +++ b/drivers/media/pci/hailo/common/pcie_common.h
1887 +// SPDX-License-Identifier: GPL-2.0
1889 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1892 +#ifndef _HAILO_COMMON_PCIE_COMMON_H_
1893 +#define _HAILO_COMMON_PCIE_COMMON_H_
1895 +#include "hailo_resource.h"
1896 +#include "hailo_ioctl_common.h"
1897 +#include "fw_validation.h"
1898 +#include "fw_operation.h"
1900 +#include "vdma_common.h"
1902 +#include <linux/types.h>
1905 +#define BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK (0x04000000)
1906 +#define BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION (0x02000000)
1907 +#define BCS_ISTATUS_HOST_DRIVER_DOWN (0x08000000)
1908 +#define BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK (0x000000FF)
1909 +#define BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK (0x0000FF00)
1911 +#define PCIE_HAILO8_BOARD_CFG_MAX_SIZE (0x500)
1912 +#define PCIE_HAILO8_FW_CFG_MAX_SIZE (0x500)
1914 +#define FW_CODE_SECTION_ALIGNMENT (4)
1916 +#define HAILO_PCIE_CONFIG_BAR (0)
1917 +#define HAILO_PCIE_VDMA_REGS_BAR (2)
1918 +#define HAILO_PCIE_FW_ACCESS_BAR (4)
1920 +#define HAILO_PCIE_DMA_ENGINES_COUNT (1)
1922 +#define DRIVER_NAME "hailo"
1924 +#define PCI_VENDOR_ID_HAILO 0x1e60
1925 +#define PCI_DEVICE_ID_HAILO_HAILO8 0x2864
1926 +#define PCI_DEVICE_ID_HAILO_HAILO15 0x45C4
1927 +#define PCI_DEVICE_ID_HAILO_PLUTO 0x43a2
1929 +struct hailo_pcie_resources {
1930 + struct hailo_resource config; // BAR0
1931 + struct hailo_resource vdma_registers; // BAR2
1932 + struct hailo_resource fw_access; // BAR4
1933 + enum hailo_board_type board_type;
1936 +enum hailo_pcie_interrupt_masks {
1937 + FW_CONTROL = BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK,
1938 + FW_NOTIFICATION = BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION,
1939 + DRIVER_DOWN = BCS_ISTATUS_HOST_DRIVER_DOWN,
1940 + VDMA_SRC_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK,
1941 + VDMA_DEST_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK
1944 +struct hailo_pcie_interrupt_source {
1945 + u32 interrupt_bitmask;
1946 + u32 vdma_channels_bitmap;
1949 +struct hailo_config_constants {
1950 + const char *filename;
1955 +// TODO: HRT-6144 - Align Windows/Linux to QNX
1957 +enum hailo_bar_index {
1964 +enum hailo_bar_index {
1973 +#endif // ifdef (__QNX__)
1979 +extern struct hailo_vdma_hw hailo_pcie_vdma_hw;
1981 +// Reads the interrupt source from the BARs; returns false if there is no interrupt.
1982 +// Note: this function clears the interrupt signals.
1983 +bool hailo_pcie_read_interrupt(struct hailo_pcie_resources *resources, struct hailo_pcie_interrupt_source *source);
1984 +void hailo_pcie_update_channel_interrupts_mask(struct hailo_pcie_resources *resources, u32 channels_bitmap);
1985 +void hailo_pcie_enable_interrupts(struct hailo_pcie_resources *resources);
1986 +void hailo_pcie_disable_interrupts(struct hailo_pcie_resources *resources);
1988 +int hailo_pcie_write_firmware_control(struct hailo_pcie_resources *resources, const struct hailo_fw_control *command);
1989 +int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command);
1991 +int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size);
1992 +bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources);
1993 +bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources);
1995 +int hailo_pcie_read_firmware_notification(struct hailo_pcie_resources *resources,
1996 + struct hailo_d2h_notification *notification);
1998 +int hailo_pcie_write_config_common(struct hailo_pcie_resources *resources, const void* config_data,
1999 + const size_t config_size, const struct hailo_config_constants *config_consts);
2000 +const struct hailo_config_constants* hailo_pcie_get_board_config_constants(const enum hailo_board_type board_type);
2001 +const struct hailo_config_constants* hailo_pcie_get_user_config_constants(const enum hailo_board_type board_type);
2002 +const char* hailo_pcie_get_fw_filename(const enum hailo_board_type board_type);
2004 +long hailo_pcie_read_firmware_log(struct hailo_pcie_resources *resources, struct hailo_read_log_params *params);
2005 +int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params);
2007 +bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources);
2008 +void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources);
2014 +#endif /* _HAILO_COMMON_PCIE_COMMON_H_ */
2015 \ No newline at end of file
2017 +++ b/drivers/media/pci/hailo/common/utils.h
2019 +// SPDX-License-Identifier: GPL-2.0
2021 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2024 +#ifndef _HAILO_DRIVER_UTILS_H_
2025 +#define _HAILO_DRIVER_UTILS_H_
2027 +#include <linux/bitops.h>
2029 +#define hailo_clear_bit(bit, pval) { *(pval) &= ~(1 << (bit)); }
2030 +#define hailo_test_bit(pos, var_addr) ((*(var_addr)) & (1 << (pos)))
2037 +static inline bool is_powerof2(size_t v) {
2039 + return (v & (v - 1)) == 0;
2042 +static inline void hailo_set_bit(int nr, u32* addr) {
2043 + u32 mask = BIT_MASK(nr);
2044 + u32 *p = addr + BIT_WORD(nr);
2049 +#ifndef DIV_ROUND_UP
2050 +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
2057 +#endif // _HAILO_DRIVER_UTILS_H_
2058 \ No newline at end of file
2060 +++ b/drivers/media/pci/hailo/common/vdma_common.c
2062 +// SPDX-License-Identifier: GPL-2.0
2064 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2067 +#include "vdma_common.h"
2069 +#include <linux/types.h>
2070 +#include <linux/errno.h>
2071 +#include <linux/bug.h>
2072 +#include <linux/circ_buf.h>
2073 +#include <linux/ktime.h>
2074 +#include <linux/timekeeping.h>
2075 +#include <linux/kernel.h>
2076 +#include <linux/kconfig.h>
2077 +#include <linux/printk.h>
2080 +#define CHANNEL_BASE_OFFSET(channel_index) ((channel_index) << 5)
2081 +#define CHANNEL_HOST_OFFSET(channel_index) (CHANNEL_BASE_OFFSET(channel_index) + \
2082 +    ((channel_index) < VDMA_DEST_CHANNELS_START ? 0 : 0x10))
2083 +#define CHANNEL_DEVICE_OFFSET(channel_index) (CHANNEL_BASE_OFFSET(channel_index) + \
2084 +    ((channel_index) < VDMA_DEST_CHANNELS_START ? 0x10 : 0))
2086 +#define CHANNEL_CONTROL_OFFSET (0x0)
2087 +#define CHANNEL_NUM_AVAIL_OFFSET (0x2)
2088 +#define CHANNEL_NUM_PROC_OFFSET (0x4)
2089 +#define CHANNEL_ERROR_OFFSET (0x8)
2091 +#define VDMA_CHANNEL_CONTROL_START (0x1)
2092 +#define VDMA_CHANNEL_CONTROL_ABORT (0b00)
2093 +#define VDMA_CHANNEL_CONTROL_ABORT_PAUSE (0b10)
2094 +#define VDMA_CHANNEL_CONTROL_START_ABORT_PAUSE_RESUME_BITMASK (0x3)
2095 +#define VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK (0x1)
2097 +#define DESCRIPTOR_PAGE_SIZE_SHIFT (8)
2098 +#define DESCRIPTOR_DESC_CONTROL (0x2)
2099 +#define DESCRIPTOR_ADDR_L_MASK (0xFFFFFFC0)
2101 +#define DESCRIPTOR_DESC_STATUS_DONE_BIT (0x0)
2102 +#define DESCRIPTOR_DESC_STATUS_ERROR_BIT (0x1)
2103 +#define DESCRIPTOR_DESC_STATUS_MASK (0xFF)
2105 +#define DESC_STATUS_REQ (1 << 0)
2106 +#define DESC_STATUS_REQ_ERR (1 << 1)
2107 +#define DESC_REQUEST_IRQ_PROCESSED (1 << 2)
2108 +#define DESC_REQUEST_IRQ_ERR (1 << 3)
2111 +#define DWORD_SIZE (4)
2112 +#define WORD_SIZE (2)
2113 +#define BYTE_SIZE (1)
2115 +#define TIMESTAMPS_CIRC_SPACE(timestamp_list) \
2116 + CIRC_SPACE((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
2117 +#define TIMESTAMPS_CIRC_CNT(timestamp_list) \
2118 + CIRC_CNT((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
2120 +#define ONGOING_TRANSFERS_CIRC_SPACE(transfers_list) \
2121 + CIRC_SPACE((transfers_list).head, (transfers_list).tail, HAILO_VDMA_MAX_ONGOING_TRANSFERS)
2122 +#define ONGOING_TRANSFERS_CIRC_CNT(transfers_list) \
2123 + CIRC_CNT((transfers_list).head, (transfers_list).tail, HAILO_VDMA_MAX_ONGOING_TRANSFERS)
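+/*
+ * Worked example (illustrative): with head == 5, tail == 2 and a power-of-2
+ * size of 8, CIRC_CNT(5, 2, 8) == (5 - 2) & 7 == 3 entries are pending and
+ * CIRC_SPACE(5, 2, 8) == (2 - 5 - 1) & 7 == 4 slots are free; one slot is
+ * always left empty so that head == tail unambiguously means "empty".
+ */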
2125 +#ifndef for_each_sgtable_dma_sg
2126 +#define for_each_sgtable_dma_sg(sgt, sg, i) \
2127 + for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
2128 +#endif /* for_each_sgtable_dma_sg */
2131 +static int ongoing_transfer_push(struct hailo_vdma_channel *channel,
2132 + struct hailo_ongoing_transfer *ongoing_transfer)
2134 + struct hailo_ongoing_transfers_list *transfers = &channel->ongoing_transfers;
2135 + if (!ONGOING_TRANSFERS_CIRC_SPACE(*transfers)) {
2139 + if (ongoing_transfer->dirty_descs_count > ARRAY_SIZE(ongoing_transfer->dirty_descs)) {
2143 + transfers->transfers[transfers->head] = *ongoing_transfer;
2144 + transfers->head = (transfers->head + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK;
2148 +static int ongoing_transfer_pop(struct hailo_vdma_channel *channel,
2149 + struct hailo_ongoing_transfer *ongoing_transfer)
2151 + struct hailo_ongoing_transfers_list *transfers = &channel->ongoing_transfers;
2152 + if (!ONGOING_TRANSFERS_CIRC_CNT(*transfers)) {
2156 + if (ongoing_transfer) {
2157 + *ongoing_transfer = transfers->transfers[transfers->tail];
2159 + transfers->tail = (transfers->tail + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK;
2163 +static void clear_dirty_desc(struct hailo_vdma_descriptors_list *desc_list, u16 desc)
2165 + desc_list->desc_list[desc].PageSize_DescControl =
2166 + (u32)((desc_list->desc_page_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
2169 +static void clear_dirty_descs(struct hailo_vdma_channel *channel,
2170 + struct hailo_ongoing_transfer *ongoing_transfer)
2173 + struct hailo_vdma_descriptors_list *desc_list = channel->last_desc_list;
2174 + BUG_ON(ongoing_transfer->dirty_descs_count > ARRAY_SIZE(ongoing_transfer->dirty_descs));
2175 + for (i = 0; i < ongoing_transfer->dirty_descs_count; i++) {
2176 + clear_dirty_desc(desc_list, ongoing_transfer->dirty_descs[i]);
2180 +static bool validate_last_desc_status(struct hailo_vdma_channel *channel,
2181 + struct hailo_ongoing_transfer *ongoing_transfer)
2183 + u16 last_desc = ongoing_transfer->last_desc;
2184 + u32 last_desc_control = channel->last_desc_list->desc_list[last_desc].RemainingPageSize_Status &
2185 + DESCRIPTOR_DESC_STATUS_MASK;
2186 + if (!hailo_test_bit(DESCRIPTOR_DESC_STATUS_DONE_BIT, &last_desc_control)) {
2187 + pr_err("Expecting desc %d to be done\n", last_desc);
2190 + if (hailo_test_bit(DESCRIPTOR_DESC_STATUS_ERROR_BIT, &last_desc_control)) {
2191 + pr_err("Got unexpected error on desc %d\n", last_desc);
2198 +void hailo_vdma_program_descriptor(struct hailo_vdma_descriptor *descriptor, u64 dma_address, size_t page_size,
2201 + descriptor->PageSize_DescControl = (u32)((page_size << DESCRIPTOR_PAGE_SIZE_SHIFT) +
2202 + DESCRIPTOR_DESC_CONTROL);
2203 + descriptor->AddrL_rsvd_DataID = (u32)(((dma_address & DESCRIPTOR_ADDR_L_MASK)) | data_id);
2204 + descriptor->AddrH = (u32)(dma_address >> 32);
2205 +    descriptor->RemainingPageSize_Status = 0;
2208 +static u8 get_channel_id(u8 channel_index)
2210 + if (channel_index < VDMA_DEST_CHANNELS_START) {
2212 + return channel_index;
2214 + else if ((channel_index >= VDMA_DEST_CHANNELS_START) &&
2215 + (channel_index < MAX_VDMA_CHANNELS_PER_ENGINE)) {
2217 + return channel_index - VDMA_DEST_CHANNELS_START;
2220 + return INVALID_VDMA_CHANNEL;
2224 +static int program_descriptors_in_chunk(
2225 + struct hailo_vdma_hw *vdma_hw,
2226 + dma_addr_t chunk_addr,
2227 + unsigned int chunk_size,
2228 + struct hailo_vdma_descriptors_list *desc_list,
2230 + u32 max_desc_index,
2233 + const u32 desc_per_chunk = DIV_ROUND_UP(chunk_size, desc_list->desc_page_size);
2234 + struct hailo_vdma_descriptor *dma_desc = NULL;
2235 + u16 size_to_program = 0;
2237 + u64 encoded_addr = 0;
2239 + for (index = 0; index < desc_per_chunk; index++) {
2240 + if (desc_index > max_desc_index) {
2244 + encoded_addr = vdma_hw->hw_ops.encode_desc_dma_address(chunk_addr, channel_id);
2245 + if (INVALID_VDMA_ADDRESS == encoded_addr) {
2249 + dma_desc = &desc_list->desc_list[desc_index % desc_list->desc_count];
2250 + size_to_program = chunk_size > desc_list->desc_page_size ?
2251 + desc_list->desc_page_size : (u16)chunk_size;
2252 + hailo_vdma_program_descriptor(dma_desc, encoded_addr, size_to_program, vdma_hw->ddr_data_id);
2254 + chunk_addr += size_to_program;
2255 + chunk_size -= size_to_program;
2259 + return (int)desc_per_chunk;
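+/*
+ * Worked example (illustrative): a chunk of 10000 bytes with
+ * desc_page_size == 4096 needs DIV_ROUND_UP(10000, 4096) == 3 descriptors,
+ * programmed with sizes 4096, 4096 and 1808. On circular lists desc_index
+ * wraps through the "desc_index % desc_list->desc_count" above.
+ */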
2262 +int hailo_vdma_program_descriptors_list(
2263 + struct hailo_vdma_hw *vdma_hw,
2264 + struct hailo_vdma_descriptors_list *desc_list,
2265 + u32 starting_desc,
2266 + struct hailo_vdma_mapped_transfer_buffer *buffer,
2269 + const u8 channel_id = get_channel_id(channel_index);
2270 + int desc_programmed = 0;
2271 + u32 max_desc_index = 0;
2272 + u32 chunk_size = 0;
2273 + struct scatterlist *sg_entry = NULL;
2274 + unsigned int i = 0;
2276 + size_t buffer_current_offset = 0;
2277 + dma_addr_t chunk_start_addr = 0;
2278 + u32 program_size = buffer->size;
2280 + if (starting_desc >= desc_list->desc_count) {
2284 + if (buffer->offset % desc_list->desc_page_size != 0) {
2288 +    // On a circular buffer, allow programming desc_count descriptors (starting
2289 +    // from starting_desc). On a non-circular one, don't allow it to pass desc_count.
2290 + max_desc_index = desc_list->is_circular ?
2291 + starting_desc + desc_list->desc_count - 1 :
2292 + desc_list->desc_count - 1;
2293 + for_each_sgtable_dma_sg(buffer->sg_table, sg_entry, i) {
2294 + // Skip sg entries until we reach the right buffer offset. offset can be in the middle of an sg entry.
2295 + if (buffer_current_offset + sg_dma_len(sg_entry) < buffer->offset) {
2296 + buffer_current_offset += sg_dma_len(sg_entry);
2299 + chunk_start_addr = (buffer_current_offset < buffer->offset) ?
2300 + sg_dma_address(sg_entry) + (buffer->offset - buffer_current_offset) :
2301 + sg_dma_address(sg_entry);
2302 + chunk_size = (buffer_current_offset < buffer->offset) ?
2303 + (u32)(sg_dma_len(sg_entry) - (buffer->offset - buffer_current_offset)) :
2304 + (u32)(sg_dma_len(sg_entry));
2305 + chunk_size = min((u32)program_size, chunk_size);
2307 + ret = program_descriptors_in_chunk(vdma_hw, chunk_start_addr, chunk_size, desc_list,
2308 + starting_desc, max_desc_index, channel_id);
2313 + desc_programmed += ret;
2314 + starting_desc = starting_desc + ret;
2315 + program_size -= chunk_size;
2316 + buffer_current_offset += sg_dma_len(sg_entry);
2319 + if (program_size != 0) {
2320 +        // We didn't program the whole buffer.
2324 + return desc_programmed;
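+/*
+ * Worked example (illustrative): on a circular list with desc_count == 1024
+ * and starting_desc == 1000, max_desc_index == 2023, so a full window of 1024
+ * descriptors may be programmed and indices past 1023 wrap modulo desc_count.
+ * On a non-circular list, programming stops at index 1023.
+ */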
2327 +static bool channel_control_reg_is_active(u8 control)
2329 + return (control & VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK) == VDMA_CHANNEL_CONTROL_START;
2332 +static int validate_channel_state(struct hailo_vdma_channel *channel)
2334 + const u8 control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
2335 + const u16 hw_num_avail = ioread16(channel->host_regs + CHANNEL_NUM_AVAIL_OFFSET);
2337 + if (!channel_control_reg_is_active(control)) {
2338 + pr_err("Channel %d is not active\n", channel->index);
2342 + if (hw_num_avail != channel->state.num_avail) {
2343 + pr_err("Channel %d hw state out of sync. num available is %d, expected %d\n",
2344 + channel->index, hw_num_avail, channel->state.num_avail);
2351 +static unsigned long get_interrupts_bitmask(struct hailo_vdma_hw *vdma_hw,
2352 + enum hailo_vdma_interrupts_domain interrupts_domain, bool is_debug)
2354 + unsigned long bitmask = 0;
2356 + if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE & interrupts_domain)) {
2357 + bitmask |= vdma_hw->device_interrupts_bitmask;
2359 + if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_HOST & interrupts_domain)) {
2360 + bitmask |= vdma_hw->host_interrupts_bitmask;
2363 + if (bitmask != 0) {
2364 + bitmask |= DESC_REQUEST_IRQ_PROCESSED | DESC_REQUEST_IRQ_ERR;
2366 + bitmask |= DESC_STATUS_REQ | DESC_STATUS_REQ_ERR;
2373 +static void set_num_avail(u8 __iomem *host_regs, u16 num_avail)
2375 + iowrite16(num_avail, host_regs + CHANNEL_NUM_AVAIL_OFFSET);
2378 +static u16 get_num_proc(u8 __iomem *host_regs)
2380 + return ioread16(host_regs + CHANNEL_NUM_PROC_OFFSET);
2383 +static int program_last_desc(
2384 + struct hailo_vdma_descriptors_list *desc_list,
2385 + u32 starting_desc,
2386 + struct hailo_vdma_mapped_transfer_buffer *transfer_buffer)
2388 + u32 total_descs = DIV_ROUND_UP(transfer_buffer->size, desc_list->desc_page_size);
2389 + u32 last_desc = (starting_desc + total_descs - 1) % desc_list->desc_count;
2390 + u32 last_desc_size = transfer_buffer->size - (total_descs - 1) * desc_list->desc_page_size;
2392 + // Configure only last descriptor with residue size
2393 + desc_list->desc_list[last_desc].PageSize_DescControl = (u32)
2394 + ((last_desc_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
2395 + return (int)total_descs;
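+/*
+ * Worked example (illustrative): a 10000 byte transfer with
+ * desc_page_size == 4096 spans total_descs == 3, and only the third
+ * descriptor is rewritten with the residue 10000 - 2 * 4096 == 1808. This is
+ * the should_bind == false fast path: the buffer is assumed to be bound
+ * already, so the rest of the list is left untouched.
+ */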
2398 +int hailo_vdma_launch_transfer(
2399 + struct hailo_vdma_hw *vdma_hw,
2400 + struct hailo_vdma_channel *channel,
2401 + struct hailo_vdma_descriptors_list *desc_list,
2402 + u32 starting_desc,
2404 + struct hailo_vdma_mapped_transfer_buffer *buffers,
2406 + enum hailo_vdma_interrupts_domain first_interrupts_domain,
2407 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
2410 + int ret = -EFAULT;
2411 + u32 total_descs = 0;
2412 + u32 first_desc = starting_desc;
2413 + u32 last_desc = U32_MAX;
2414 + u16 new_num_avail = 0;
2415 + struct hailo_ongoing_transfer ongoing_transfer = {0};
2418 + channel->state.desc_count_mask = (desc_list->desc_count - 1);
2420 + if (NULL == channel->last_desc_list) {
2421 + // First transfer on this active channel, store desc list.
2422 + channel->last_desc_list = desc_list;
2423 + } else if (desc_list != channel->last_desc_list) {
2424 + // Shouldn't happen, desc list may change only after channel deactivation.
2425 + pr_err("Inconsistent desc list given to channel %d\n", channel->index);
2429 + if (channel->state.num_avail != (u16)starting_desc) {
2430 + pr_err("Channel %d state out of sync. num available is %d, expected %d\n",
2431 + channel->index, channel->state.num_avail, (u16)starting_desc);
2435 + if (buffers_count > HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER) {
2436 + pr_err("Too many buffers %u for single transfer\n", buffers_count);
2441 + ret = validate_channel_state(channel);
2447 + BUILD_BUG_ON_MSG((HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1) != ARRAY_SIZE(ongoing_transfer.dirty_descs),
2448 + "Unexpected amount of dirty descriptors");
2449 + ongoing_transfer.dirty_descs_count = buffers_count + 1;
2450 + ongoing_transfer.dirty_descs[0] = (u16)starting_desc;
2452 + for (i = 0; i < buffers_count; i++) {
2453 + ret = should_bind ?
2454 + hailo_vdma_program_descriptors_list(vdma_hw, desc_list, starting_desc, &buffers[i], channel->index) :
2455 + program_last_desc(desc_list, starting_desc, &buffers[i]);
2459 + total_descs += ret;
2460 + last_desc = (starting_desc + ret - 1) % desc_list->desc_count;
2461 + starting_desc = (starting_desc + ret) % desc_list->desc_count;
2463 + ongoing_transfer.dirty_descs[i+1] = (u16)last_desc;
2464 + ongoing_transfer.buffers[i] = buffers[i];
2466 + ongoing_transfer.buffers_count = buffers_count;
2468 + desc_list->desc_list[first_desc].PageSize_DescControl |=
2469 + get_interrupts_bitmask(vdma_hw, first_interrupts_domain, is_debug);
2470 + desc_list->desc_list[last_desc].PageSize_DescControl |=
2471 + get_interrupts_bitmask(vdma_hw, last_desc_interrupts, is_debug);
2473 + ongoing_transfer.last_desc = (u16)last_desc;
2474 + ongoing_transfer.is_debug = is_debug;
2475 + ret = ongoing_transfer_push(channel, &ongoing_transfer);
2477 + pr_err("Failed push ongoing transfer to channel %d\n", channel->index);
2481 + new_num_avail = (u16)((last_desc + 1) % desc_list->desc_count);
2482 + channel->state.num_avail = new_num_avail;
2483 + set_num_avail(channel->host_regs, new_num_avail);
2485 + return (int)total_descs;
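+/*
+ * Call-flow sketch (illustrative, not part of the original driver; assumes
+ * the HAILO_VDMA_INTERRUPTS_DOMAIN_* enumerators from hailo_ioctl_common.h).
+ * A caller with an already-bound buffer might launch a single-buffer
+ * transfer roughly like:
+ *
+ *   int descs = hailo_vdma_launch_transfer(&hailo_pcie_vdma_hw, channel,
+ *       desc_list, channel->state.num_avail, 1, &buffer, false,
+ *       HAILO_VDMA_INTERRUPTS_DOMAIN_NONE, HAILO_VDMA_INTERRUPTS_DOMAIN_HOST,
+ *       false);
+ *
+ * A negative return means the transfer was not queued.
+ */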
2488 +static void hailo_vdma_push_timestamp(struct hailo_vdma_channel *channel)
2490 + struct hailo_channel_interrupt_timestamp_list *timestamp_list = &channel->timestamp_list;
2491 + const u16 num_proc = get_num_proc(channel->host_regs);
2492 + if (TIMESTAMPS_CIRC_SPACE(*timestamp_list) != 0) {
2493 + timestamp_list->timestamps[timestamp_list->head].timestamp_ns = ktime_get_ns();
2494 + timestamp_list->timestamps[timestamp_list->head].desc_num_processed = num_proc;
2495 + timestamp_list->head = (timestamp_list->head + 1) & CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK;
2499 +// Returns false if there are no items
2500 +static bool hailo_vdma_pop_timestamp(struct hailo_channel_interrupt_timestamp_list *timestamp_list,
2501 + struct hailo_channel_interrupt_timestamp *out_timestamp)
2503 + if (0 == TIMESTAMPS_CIRC_CNT(*timestamp_list)) {
2507 + *out_timestamp = timestamp_list->timestamps[timestamp_list->tail];
2508 + timestamp_list->tail = (timestamp_list->tail+1) & CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK;
2512 +static void hailo_vdma_pop_timestamps_to_response(struct hailo_vdma_channel *channel,
2513 + struct hailo_vdma_interrupts_read_timestamp_params *result)
2515 + const u32 max_timestamps = ARRAY_SIZE(result->timestamps);
2518 +    while ((i < max_timestamps) &&
2519 +        hailo_vdma_pop_timestamp(&channel->timestamp_list, &result->timestamps[i])) {
2520 +        // Although hw_num_processed should be a number between 0 and
2521 +        // desc_count-1, when desc_count < 0x10000 (the maximum descriptor count),
2522 +        // the hardware actually reports a number between 1 and desc_count.
2523 +        // The value can therefore equal desc_count; masking folds it back to zero.
2525 + result->timestamps[i].desc_num_processed = result->timestamps[i].desc_num_processed &
2526 + channel->state.desc_count_mask;
2530 + result->timestamps_count = i;
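+/*
+ * Worked example (illustrative): with desc_count == 512 the mask is 511; if
+ * the hardware reports num_processed == 512 (desc_count itself), masking
+ * gives 512 & 511 == 0, folding the value back into [0, desc_count - 1].
+ */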
2533 +static void channel_state_init(struct hailo_vdma_channel_state *state)
2535 + state->num_avail = state->num_proc = 0;
2537 +    // Special value used when the channel is not active.
2538 + state->desc_count_mask = U32_MAX;
2541 +void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
2542 + const struct hailo_resource *channel_registers)
2544 + u8 channel_index = 0;
2545 + struct hailo_vdma_channel *channel;
2547 + engine->index = engine_index;
2548 + engine->enabled_channels = 0x0;
2549 + engine->interrupted_channels = 0x0;
2551 + for_each_vdma_channel(engine, channel, channel_index) {
2552 + u8 __iomem *regs_base = (u8 __iomem *)channel_registers->address;
2553 + channel->host_regs = regs_base + CHANNEL_HOST_OFFSET(channel_index);
2554 + channel->device_regs = regs_base + CHANNEL_DEVICE_OFFSET(channel_index);
2555 + channel->index = channel_index;
2556 + channel->timestamp_measure_enabled = false;
2558 + channel_state_init(&channel->state);
2559 + channel->last_desc_list = NULL;
2561 + channel->ongoing_transfers.head = 0;
2562 + channel->ongoing_transfers.tail = 0;
2566 +void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
2567 + bool measure_timestamp)
2569 + struct hailo_vdma_channel *channel = NULL;
2570 + u8 channel_index = 0;
2572 + for_each_vdma_channel(engine, channel, channel_index) {
2573 + if (hailo_test_bit(channel_index, &bitmap)) {
2574 + channel->timestamp_measure_enabled = measure_timestamp;
2575 + channel->timestamp_list.head = channel->timestamp_list.tail = 0;
2579 + engine->enabled_channels |= bitmap;
2582 +void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
2584 + struct hailo_vdma_channel *channel = NULL;
2585 + u8 channel_index = 0;
2587 + engine->enabled_channels &= ~bitmap;
2589 + for_each_vdma_channel(engine, channel, channel_index) {
2590 + channel_state_init(&channel->state);
2592 + while (ONGOING_TRANSFERS_CIRC_CNT(channel->ongoing_transfers) > 0) {
2593 + struct hailo_ongoing_transfer transfer;
2594 + ongoing_transfer_pop(channel, &transfer);
2596 + if (channel->last_desc_list == NULL) {
2597 + pr_err("Channel %d has ongoing transfers but no desc list\n", channel->index);
2601 + clear_dirty_descs(channel, &transfer);
2604 + channel->last_desc_list = NULL;
2608 +void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap)
2610 + struct hailo_vdma_channel *channel = NULL;
2611 + u8 channel_index = 0;
2613 + for_each_vdma_channel(engine, channel, channel_index) {
2614 + if (unlikely(hailo_test_bit(channel_index, &bitmap) &&
2615 + channel->timestamp_measure_enabled)) {
2616 + hailo_vdma_push_timestamp(channel);
2621 +int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
2622 + struct hailo_vdma_interrupts_read_timestamp_params *params)
2624 + struct hailo_vdma_channel *channel = NULL;
2626 + if (params->channel_index >= MAX_VDMA_CHANNELS_PER_ENGINE) {
2630 + channel = &engine->channels[params->channel_index];
2631 + hailo_vdma_pop_timestamps_to_response(channel, params);
2635 +void hailo_vdma_engine_clear_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
2637 + engine->interrupted_channels &= ~bitmap;
2640 +void hailo_vdma_engine_set_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
2642 + engine->interrupted_channels |= bitmap;
2645 +static void fill_channel_irq_data(struct hailo_vdma_interrupts_channel_data *irq_data,
2646 + struct hailo_vdma_engine *engine, struct hailo_vdma_channel *channel, u16 num_proc,
2647 + bool validation_success)
2649 + u8 host_control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
2650 + u8 device_control = ioread8(channel->device_regs + CHANNEL_CONTROL_OFFSET);
2652 + irq_data->engine_index = engine->index;
2653 + irq_data->channel_index = channel->index;
2655 + irq_data->is_active = channel_control_reg_is_active(host_control) &&
2656 + channel_control_reg_is_active(device_control);
2658 + irq_data->host_num_processed = num_proc;
2659 + irq_data->host_error = ioread8(channel->host_regs + CHANNEL_ERROR_OFFSET);
2660 + irq_data->device_error = ioread8(channel->device_regs + CHANNEL_ERROR_OFFSET);
2661 + irq_data->validation_success = validation_success;
2664 +static bool is_desc_between(u16 begin, u16 end, u16 desc)
2666 + if (begin == end) {
2667 + // There is nothing between
2670 + if (begin < end) {
2671 + // desc needs to be in [begin, end)
2672 + return (begin <= desc) && (desc < end);
2675 +    // desc needs to be in [0, end) or [begin, desc_count - 1]
2676 + return (desc < end) || (begin <= desc);
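+/*
+ * Worked example (illustrative): with begin == 500 and end == 10 the range
+ * wraps around zero, so desc == 2 (in [0, 10)) and desc == 510 (in
+ * [begin, desc_count - 1]) are both "between", while desc == 100 is not.
+ */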
2680 +static bool is_transfer_complete(struct hailo_vdma_channel *channel,
2681 + struct hailo_ongoing_transfer *transfer, u16 hw_num_proc)
2683 + if (channel->state.num_avail == hw_num_proc) {
2687 + return is_desc_between(channel->state.num_proc, hw_num_proc, transfer->last_desc);
2690 +int hailo_vdma_engine_fill_irq_data(struct hailo_vdma_interrupts_wait_params *irq_data,
2691 + struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
2692 + transfer_done_cb_t transfer_done, void *transfer_done_opaque)
2694 + struct hailo_vdma_channel *channel = NULL;
2695 + u8 channel_index = 0;
2696 + bool validation_success = true;
2698 + for_each_vdma_channel(engine, channel, channel_index) {
2699 + u16 hw_num_proc = U16_MAX;
2700 + if (!hailo_test_bit(channel->index, &irq_channels_bitmap)) {
2704 + if (channel->last_desc_list == NULL) {
2705 + // Channel not active or no transfer, skipping.
2709 + if (irq_data->channels_count >= ARRAY_SIZE(irq_data->irq_data)) {
2713 +        // Although hw_num_processed should be a number between 0 and
2714 +        // desc_count-1, when desc_count < 0x10000 (the maximum descriptor count),
2715 +        // the hardware actually reports a number between 1 and desc_count.
2716 +        // The value can therefore equal desc_count; masking folds it back to zero.
2718 + hw_num_proc = get_num_proc(channel->host_regs) & channel->state.desc_count_mask;
2720 + while (ONGOING_TRANSFERS_CIRC_CNT(channel->ongoing_transfers) > 0) {
2721 + struct hailo_ongoing_transfer *cur_transfer =
2722 + &channel->ongoing_transfers.transfers[channel->ongoing_transfers.tail];
2723 + if (!is_transfer_complete(channel, cur_transfer, hw_num_proc)) {
2727 + if (cur_transfer->is_debug &&
2728 + !validate_last_desc_status(channel, cur_transfer)) {
2729 + validation_success = false;
2732 + clear_dirty_descs(channel, cur_transfer);
2733 + transfer_done(cur_transfer, transfer_done_opaque);
2734 + channel->state.num_proc = (u16)((cur_transfer->last_desc + 1) & channel->state.desc_count_mask);
2736 + ongoing_transfer_pop(channel, NULL);
2739 + fill_channel_irq_data(&irq_data->irq_data[irq_data->channels_count],
2740 + engine, channel, hw_num_proc, validation_success);
2741 + irq_data->channels_count++;
2746 \ No newline at end of file
2748 +++ b/drivers/media/pci/hailo/common/vdma_common.h
2750 +// SPDX-License-Identifier: GPL-2.0
2752 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2755 +#ifndef _HAILO_COMMON_VDMA_COMMON_H_
2756 +#define _HAILO_COMMON_VDMA_COMMON_H_
2758 +#include "hailo_resource.h"
2761 +#include <linux/types.h>
2762 +#include <linux/scatterlist.h>
2763 +#include <linux/io.h>
2765 +#define VDMA_DESCRIPTOR_LIST_ALIGN (1 << 16)
2766 +#define INVALID_VDMA_ADDRESS (0)
2773 +struct hailo_vdma_descriptor {
2774 + u32 PageSize_DescControl;
2775 + u32 AddrL_rsvd_DataID;
2777 + u32 RemainingPageSize_Status;
2780 +struct hailo_vdma_descriptors_list {
2781 + struct hailo_vdma_descriptor *desc_list;
2782 + u32 desc_count; // Must be power of 2 if is_circular is set.
2783 + u16 desc_page_size;
2787 +struct hailo_channel_interrupt_timestamp_list {
2790 + struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE];
2794 +// For each buffer in a transfer, the last descriptor will be programmed with
2795 +// the residue size. In addition, if configured, the first descriptor (of the
2796 +// whole transfer) may be programmed with interrupts.
2797 +#define MAX_DIRTY_DESCRIPTORS_PER_TRANSFER \
2798 + (HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1)
2800 +struct hailo_vdma_mapped_transfer_buffer {
2801 + struct sg_table *sg_table;
2804 + void *opaque; // Drivers can set any opaque data here.
2807 +struct hailo_ongoing_transfer {
2808 + uint16_t last_desc;
2811 + struct hailo_vdma_mapped_transfer_buffer buffers[HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER];
2813 + // Contains all descriptors that were programmed with non-default values
2814 + // for the transfer (by non-default we mean - different size or different
2815 + // interrupts domain).
2816 + uint8_t dirty_descs_count;
2817 + uint16_t dirty_descs[MAX_DIRTY_DESCRIPTORS_PER_TRANSFER];
2819 + // If set, validate descriptors status on transfer completion.
2823 +struct hailo_ongoing_transfers_list {
2824 + unsigned long head;
2825 + unsigned long tail;
2826 + struct hailo_ongoing_transfer transfers[HAILO_VDMA_MAX_ONGOING_TRANSFERS];
2829 +struct hailo_vdma_channel_state {
2830 + // vdma channel counters. num_avail should be synchronized with the hw
2831 + // num_avail value. num_proc is the last num proc updated when the user
2832 + // reads interrupts.
2836 + // Mask of the num-avail/num-proc counters.
2837 + u32 desc_count_mask;
2840 +struct hailo_vdma_channel {
2843 + u8 __iomem *host_regs;
2844 + u8 __iomem *device_regs;
2846 + // Last descriptors list attached to the channel. When it changes,
2847 + // assumes that the channel got reset.
2848 + struct hailo_vdma_descriptors_list *last_desc_list;
2850 + struct hailo_vdma_channel_state state;
2851 + struct hailo_ongoing_transfers_list ongoing_transfers;
2853 + bool timestamp_measure_enabled;
2854 + struct hailo_channel_interrupt_timestamp_list timestamp_list;
2857 +struct hailo_vdma_engine {
2859 + u32 enabled_channels;
2860 + u32 interrupted_channels;
2861 + struct hailo_vdma_channel channels[MAX_VDMA_CHANNELS_PER_ENGINE];
2864 +struct hailo_vdma_hw_ops {
2865 +    // Accepts some dma_addr_t mapped to the device and encodes it using a
2866 +    // hw-specific encoding. Returns INVALID_VDMA_ADDRESS on failure.
2867 + u64 (*encode_desc_dma_address)(dma_addr_t dma_address, u8 channel_id);
2870 +struct hailo_vdma_hw {
2871 + struct hailo_vdma_hw_ops hw_ops;
2873 + // The data_id code of ddr addresses.
2876 +    // Bitmask to set on each descriptor to enable interrupts (host or device).
2877 + unsigned long host_interrupts_bitmask;
2878 + unsigned long device_interrupts_bitmask;
2881 +#define _for_each_element_array(array, size, element, index) \
2882 + for (index = 0, element = &array[index]; index < size; index++, element = &array[index])
2884 +#define for_each_vdma_channel(engine, channel, channel_index) \
2885 + _for_each_element_array(engine->channels, MAX_VDMA_CHANNELS_PER_ENGINE, \
2886 + channel, channel_index)
2888 +void hailo_vdma_program_descriptor(struct hailo_vdma_descriptor *descriptor, u64 dma_address, size_t page_size,
2892 + * Program the given descriptors list to map the given buffer.
2894 + * @param vdma_hw vdma hw object
2895 + * @param desc_list descriptors list object to program
2896 + * @param starting_desc index of the first descriptor to program. If the list
2897 + * is circular, this function may wrap around the list.
2898 + * @param buffer buffer to program to the descriptors list.
2899 + * @param channel_index channel index of the channel attached.
2901 + * @return On success, the number of descriptors programmed; a negative value on error.
2903 +int hailo_vdma_program_descriptors_list(
2904 + struct hailo_vdma_hw *vdma_hw,
2905 + struct hailo_vdma_descriptors_list *desc_list,
2906 + u32 starting_desc,
2907 + struct hailo_vdma_mapped_transfer_buffer *buffer,
2908 + u8 channel_index);
2911 + * Launch a transfer on some vdma channel. This includes:
2912 + * 1. Binding the transfer buffers to the descriptors list.
2913 + * 2. Programming the descriptors list.
2914 + * 3. Increasing num_avail.
2916 + * @param vdma_hw vdma hw object
2917 + * @param channel vdma channel object.
2918 + * @param desc_list descriptors list object to program.
2919 + * @param starting_desc index of the first descriptor to program.
2920 + * @param buffers_count number of mapped transfer buffers to program.
2921 + * @param buffers array of buffers to program to the descriptors list.
2922 + * @param should_bind whether to bind the buffer to the descriptors list.
2923 + * @param first_interrupts_domain - interrupts settings on first descriptor.
2924 + * @param last_desc_interrupts - interrupts settings on last descriptor.
2925 + * @param is_debug program descriptors for debug run, adds some overhead (for
2926 + * example, hw will write desc complete status).
2928 + * @return On success, the number of descriptors programmed; a negative value on error.
2930 +int hailo_vdma_launch_transfer(
2931 + struct hailo_vdma_hw *vdma_hw,
2932 + struct hailo_vdma_channel *channel,
2933 + struct hailo_vdma_descriptors_list *desc_list,
2934 + u32 starting_desc,
2936 + struct hailo_vdma_mapped_transfer_buffer *buffers,
2938 + enum hailo_vdma_interrupts_domain first_interrupts_domain,
2939 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
2942 +void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
2943 + const struct hailo_resource *channel_registers);
2945 +// Enable/disable channel interrupts (does not update the interrupt mask because
2946 +// the implementation differs between PCIe and DRAM DMA. To support it we
2947 +// could add some ops struct to the engine).
2948 +void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
2949 + bool measure_timestamp);
2950 +void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
2952 +void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap);
2953 +int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
2954 + struct hailo_vdma_interrupts_read_timestamp_params *params);
2956 +static inline bool hailo_vdma_engine_got_interrupt(struct hailo_vdma_engine *engine,
2957 + u32 channels_bitmap)
2959 +    // Reading interrupts without a lock is OK (the lock is needed only for writes).
2960 + const bool any_interrupt = (0 != (channels_bitmap & engine->interrupted_channels));
2961 + const bool any_disabled = (channels_bitmap != (channels_bitmap & engine->enabled_channels));
2962 + return (any_disabled || any_interrupt);
2965 +// Set/clear/read channel interrupts; must be called under some lock (driver-specific).
2966 +void hailo_vdma_engine_clear_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
2967 +void hailo_vdma_engine_set_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
2969 +static inline u32 hailo_vdma_engine_read_interrupts(struct hailo_vdma_engine *engine,
2970 + u32 requested_bitmap)
2972 + // Interrupts only for channels that are requested and enabled.
2973 + u32 irq_channels_bitmap = requested_bitmap &
2974 + engine->enabled_channels &
2975 + engine->interrupted_channels;
2976 + engine->interrupted_channels &= ~irq_channels_bitmap;
2978 + return irq_channels_bitmap;
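+/*
+ * Worked example (illustrative): with requested_bitmap == 0b1111,
+ * enabled_channels == 0b1010 and interrupted_channels == 0b0110, the
+ * function returns 0b1111 & 0b1010 & 0b0110 == 0b0010 and clears that bit,
+ * leaving interrupted_channels == 0b0100.
+ */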
2981 +typedef void(*transfer_done_cb_t)(struct hailo_ongoing_transfer *transfer, void *opaque);
2983 +// Assumes irq_data->channels_count contains the number of channels already
2984 +// written (used for multiple engines).
2985 +int hailo_vdma_engine_fill_irq_data(struct hailo_vdma_interrupts_wait_params *irq_data,
2986 + struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
2987 + transfer_done_cb_t transfer_done, void *transfer_done_opaque);
2992 +#endif /* _HAILO_COMMON_VDMA_COMMON_H_ */
2994 +++ b/drivers/media/pci/hailo/include/hailo_pcie_version.h
2996 +// SPDX-License-Identifier: GPL-2.0
2998 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3001 +#ifndef _HAILO_PCIE_VERSION_H_
3002 +#define _HAILO_PCIE_VERSION_H_
3004 +#include <linux/stringify.h>
3005 +#include "../common/hailo_pcie_version.h"
3007 +#define HAILO_DRV_VER __stringify(HAILO_DRV_VER_MAJOR) "." __stringify(HAILO_DRV_VER_MINOR) "." __stringify(HAILO_DRV_VER_REVISION)
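+// Illustrative expansion (assuming, for example, major 4, minor 17 and
+// revision 1 in ../common/hailo_pcie_version.h): __stringify() turns each
+// macro into a string literal, so HAILO_DRV_VER becomes "4" "." "17" "." "1",
+// which the compiler concatenates to "4.17.1".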
3009 +#endif /* _HAILO_PCIE_VERSION_H_ */
3011 +++ b/drivers/media/pci/hailo/src/fops.c
3013 +// SPDX-License-Identifier: GPL-2.0
3015 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3018 +#include <linux/version.h>
3019 +#include <linux/pci.h>
3020 +#include <linux/interrupt.h>
3021 +#include <linux/sched.h>
3022 +#include <linux/pagemap.h>
3023 +#include <linux/uaccess.h>
3024 +#include <linux/scatterlist.h>
3025 +#include <linux/slab.h>
3026 +#include <linux/delay.h>
3028 +#include <asm/thread_info.h>
3030 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
3031 +#include <linux/sched/signal.h>
3034 +#include "hailo_pcie_version.h"
3037 +#include "vdma_common.h"
3038 +#include "utils/logs.h"
3039 +#include "vdma/memory.h"
3040 +#include "vdma/ioctl.h"
3041 +#include "utils/compact.h"
3044 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 13, 0 )
3045 +#define wait_queue_t wait_queue_entry_t
3048 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 15, 0 )
3049 +#define ACCESS_ONCE READ_ONCE
3052 +#ifndef VM_RESERVED
3053 + #define VMEM_FLAGS (VM_IO | VM_DONTEXPAND | VM_DONTDUMP)
3055 + #define VMEM_FLAGS (VM_IO | VM_RESERVED)
3058 +#define IS_PO2_ALIGNED(size, alignment) (!((size) & ((alignment) - 1)))
3060 +// On the PCIe driver there is only one DMA engine
3061 +#define DEFAULT_VDMA_ENGINE_INDEX (0)
3063 +#if !defined(HAILO_EMULATOR)
3064 +#define DEFAULT_SHUTDOWN_TIMEOUT_MS (5)
3065 +#else /* !defined(HAILO_EMULATOR) */
3066 +#define DEFAULT_SHUTDOWN_TIMEOUT_MS (1000)
3067 +#endif /* !defined(HAILO_EMULATOR) */
3069 +static long hailo_add_notification_wait(struct hailo_pcie_board *board, struct file *filp);
3071 +static struct hailo_file_context *create_file_context(struct hailo_pcie_board *board, struct file *filp)
3073 + struct hailo_file_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
3075 + hailo_err(board, "Failed to alloc file context (required size %zu)\n", sizeof(*context));
3076 + return ERR_PTR(-ENOMEM);
3079 + context->filp = filp;
3080 + hailo_vdma_file_context_init(&context->vdma_context);
3081 + list_add(&context->open_files_list, &board->open_files_list);
3082 + context->is_valid = true;
3086 +static void release_file_context(struct hailo_file_context *context)
3088 + context->is_valid = false;
3089 + list_del(&context->open_files_list);
3093 +static struct hailo_file_context *find_file_context(struct hailo_pcie_board *board, struct file *filp)
3095 + struct hailo_file_context *cur = NULL;
3096 + list_for_each_entry(cur, &board->open_files_list, open_files_list) {
3097 + if (cur->filp == filp) {
3104 +int hailo_pcie_fops_open(struct inode *inode, struct file *filp)
3106 + u32 major = MAJOR(inode->i_rdev);
3107 + u32 minor = MINOR(inode->i_rdev);
3108 + struct hailo_pcie_board *pBoard;
3110 + pci_power_t previous_power_state = PCI_UNKNOWN;
3111 + bool interrupts_enabled_by_filp = false;
3112 + struct hailo_file_context *context = NULL;
3114 + pr_debug(DRIVER_NAME ": (%d: %d-%d): fops_open\n", current->tgid, major, minor);
3116 +    // Allow multiple processes to open the device; references are counted in hailo_pcie_get_board_index.
3117 + if (!(pBoard = hailo_pcie_get_board_index(minor))) {
3118 + pr_err(DRIVER_NAME ": fops_open: PCIe board not found for /dev/hailo%d node.\n", minor);
3123 + filp->private_data = pBoard;
3125 + if (down_interruptible(&pBoard->mutex)) {
3126 + hailo_err(pBoard, "fops_open down_interruptible fail tgid:%d\n", current->tgid);
3127 + err = -ERESTARTSYS;
3128 + goto l_decrease_ref_count;
3131 + context = create_file_context(pBoard, filp);
3132 + if (IS_ERR(context)) {
3133 + err = PTR_ERR(context);
3134 + goto l_release_mutex;
3137 + previous_power_state = pBoard->pDev->current_state;
3138 + if (PCI_D0 != previous_power_state) {
3139 + hailo_info(pBoard, "Waking up board");
3140 + err = pci_set_power_state(pBoard->pDev, PCI_D0);
3142 + hailo_err(pBoard, "Failed waking up board %d", err);
3143 + goto l_free_context;
3147 + if (!hailo_pcie_is_device_connected(&pBoard->pcie_resources)) {
3148 + hailo_err(pBoard, "Device disconnected while opening device\n");
3150 + goto l_revert_power_state;
3153 + // enable interrupts
3154 + if (!pBoard->interrupts_enabled) {
3155 + err = hailo_enable_interrupts(pBoard);
3157 + hailo_err(pBoard, "Failed Enabling interrupts %d\n", err);
3158 + goto l_revert_power_state;
3160 + interrupts_enabled_by_filp = true;
3163 + err = hailo_add_notification_wait(pBoard, filp);
3165 + goto l_release_irq;
3168 + hailo_dbg(pBoard, "(%d: %d-%d): fops_open: SUCCESS on /dev/hailo%d\n", current->tgid,
3169 + major, minor, minor);
3171 + up(&pBoard->mutex);
3175 + if (interrupts_enabled_by_filp) {
3176 + hailo_disable_interrupts(pBoard);
3179 +l_revert_power_state:
3180 + if (pBoard->pDev->current_state != previous_power_state) {
3181 + if (pci_set_power_state(pBoard->pDev, previous_power_state) < 0) {
3182 + hailo_err(pBoard, "Failed setting power state back to %d\n", (int)previous_power_state);
3186 + release_file_context(context);
3188 + up(&pBoard->mutex);
3189 +l_decrease_ref_count:
3190 + atomic_dec(&pBoard->ref_count);
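+/*
+ * Note on the unwind order above (summary, not part of the original driver):
+ * each error label releases exactly what was acquired after the previous
+ * label, in reverse order: interrupts, power state, file context, board
+ * mutex, and finally the reference taken by hailo_pcie_get_board_index().
+ */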
3195 +int hailo_pcie_driver_down(struct hailo_pcie_board *board)
3197 + long completion_result = 0;
3200 + reinit_completion(&board->driver_down.reset_completed);
3202 + hailo_pcie_write_firmware_driver_shutdown(&board->pcie_resources);
3204 + // Wait for response
3205 + completion_result =
3206 + wait_for_completion_timeout(&board->driver_down.reset_completed, msecs_to_jiffies(DEFAULT_SHUTDOWN_TIMEOUT_MS));
3207 + if (completion_result <= 0) {
3208 + if (0 == completion_result) {
3209 + hailo_err(board, "hailo_pcie_driver_down, timeout waiting for shutdown response (timeout_ms=%d)\n", DEFAULT_SHUTDOWN_TIMEOUT_MS);
3212 + hailo_info(board, "hailo_pcie_driver_down, wait for completion failed with err=%ld (process was interrupted or killed)\n",
3213 + completion_result);
3214 + err = completion_result;
3223 +int hailo_pcie_fops_release(struct inode *inode, struct file *filp)
3225 + struct hailo_pcie_board *pBoard = (struct hailo_pcie_board *)filp->private_data;
3226 + struct hailo_file_context *context = NULL;
3228 + u32 major = MAJOR(inode->i_rdev);
3229 + u32 minor = MINOR(inode->i_rdev);
3232 + hailo_info(pBoard, "(%d: %d-%d): fops_release\n", current->tgid, major, minor);
3234 + if (down_interruptible(&pBoard->mutex)) {
3235 + hailo_err(pBoard, "fops_release down_interruptible failed");
3236 + return -ERESTARTSYS;
3239 + context = find_file_context(pBoard, filp);
3240 + if (NULL == context) {
3241 + hailo_err(pBoard, "Invalid driver state, file context does not exist\n");
3242 + up(&pBoard->mutex);
3246 + if (false == context->is_valid) {
3247 +        // The file context is invalid but still open. It's OK to continue, finalize and release it.
3248 + hailo_err(pBoard, "Invalid file context\n");
3251 + hailo_pcie_clear_notification_wait_list(pBoard, filp);
3253 + if (filp == pBoard->vdma.used_by_filp) {
3254 + if (hailo_pcie_driver_down(pBoard)) {
3255 + hailo_err(pBoard, "Failed sending FW shutdown event");
3259 + hailo_vdma_file_context_finalize(&context->vdma_context, &pBoard->vdma, filp);
3260 + release_file_context(context);
3262 + if (atomic_dec_and_test(&pBoard->ref_count)) {
3263 + // Disable interrupts
3264 + hailo_disable_interrupts(pBoard);
3266 + if (power_mode_enabled()) {
3267 + if (pBoard->pDev && pci_set_power_state(pBoard->pDev, PCI_D3hot) < 0) {
3268 + hailo_err(pBoard, "Failed setting power state to D3hot");
3272 + // deallocate board if already removed
3273 + if (!pBoard->pDev) {
3274 + hailo_dbg(pBoard, "fops_close, freed board\n");
3275 + up(&pBoard->mutex);
3280 + hailo_dbg(pBoard, "fops_close, released resources for board\n");
3281 + up(&pBoard->mutex);
3284 + up(&pBoard->mutex);
3287 + hailo_dbg(pBoard, "(%d: %d-%d): fops_close: SUCCESS on /dev/hailo%d\n", current->tgid,
3288 + major, minor, minor);
3294 +static long hailo_memory_transfer_ioctl(struct hailo_pcie_board *board, unsigned long arg)
3297 + struct hailo_memory_transfer_params* transfer = &board->memory_transfer_params;
3299 + hailo_dbg(board, "Start memory transfer ioctl\n");
3301 + if (copy_from_user(transfer, (void __user*)arg, sizeof(*transfer))) {
3302 + hailo_err(board, "copy_from_user fail\n");
3306 + err = hailo_pcie_memory_transfer(&board->pcie_resources, transfer);
3308 + hailo_err(board, "memory transfer failed %ld", err);
3311 + if (copy_to_user((void __user*)arg, transfer, sizeof(*transfer))) {
3312 + hailo_err(board, "copy_to_user fail\n");
3319 +static long hailo_read_log_ioctl(struct hailo_pcie_board *pBoard, unsigned long arg)
3322 + struct hailo_read_log_params params;
3324 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
3325 +        hailo_err(pBoard, "HAILO_READ_LOG, copy_from_user fail\n");
3329 +    if (0 > (err = hailo_pcie_read_firmware_log(&pBoard->pcie_resources, &params))) {
3330 +        hailo_err(pBoard, "HAILO_READ_LOG, reading from log failed with error: %ld\n", err);
3334 +    if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
3341 +static void firmware_notification_irq_handler(struct hailo_pcie_board *board)
3343 + struct hailo_notification_wait *notif_wait_cursor = NULL;
3345 + unsigned long irq_saved_flags = 0;
3347 + spin_lock_irqsave(&board->notification_read_spinlock, irq_saved_flags);
3348 + err = hailo_pcie_read_firmware_notification(&board->pcie_resources, &board->notification_cache);
3349 + spin_unlock_irqrestore(&board->notification_read_spinlock, irq_saved_flags);
3352 + hailo_err(board, "Failed reading firmware notification");
3356 + list_for_each_entry_rcu(notif_wait_cursor, &board->notification_wait_list, notification_wait_list)
3358 +        complete(&notif_wait_cursor->notification_completion);
3360 + rcu_read_unlock();
3364 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
3365 +irqreturn_t hailo_irqhandler(int irq, void *dev_id, struct pt_regs *regs)
3367 +irqreturn_t hailo_irqhandler(int irq, void *dev_id)
3370 + irqreturn_t return_value = IRQ_NONE;
3371 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_id;
3372 + bool got_interrupt = false;
3373 + struct hailo_pcie_interrupt_source irq_source = {0};
3375 + hailo_dbg(board, "hailo_irqhandler\n");
3378 + if (!hailo_pcie_is_device_connected(&board->pcie_resources)) {
3379 + hailo_err(board, "Device disconnected while handling irq\n");
3383 + got_interrupt = hailo_pcie_read_interrupt(&board->pcie_resources, &irq_source);
3384 + if (!got_interrupt) {
3388 + return_value = IRQ_HANDLED;
3390 + // wake fw_control if needed
3391 + if (irq_source.interrupt_bitmask & FW_CONTROL) {
3392 + complete(&board->fw_control.completion);
3395 + // wake driver_down if needed
3396 + if (irq_source.interrupt_bitmask & DRIVER_DOWN) {
3397 + complete(&board->driver_down.reset_completed);
3400 + if (irq_source.interrupt_bitmask & FW_NOTIFICATION) {
3401 + if (!completion_done(&board->fw_loaded_completion)) {
3402 + // Complete firmware loaded completion
3403 + complete_all(&board->fw_loaded_completion);
3405 + firmware_notification_irq_handler(board);
3409 + if (0 != irq_source.vdma_channels_bitmap) {
3410 + hailo_vdma_irq_handler(&board->vdma, DEFAULT_VDMA_ENGINE_INDEX,
3411 + irq_source.vdma_channels_bitmap);
3415 + return return_value;
3418 +static long hailo_get_notification_wait_thread(struct hailo_pcie_board *pBoard, struct file *filp,
3419 + struct hailo_notification_wait **current_waiting_thread)
3421 + struct hailo_notification_wait *cursor = NULL;
3422 +    // Note: safe to access without RCU because the notification_wait_list is closed only on file release.
3423 + list_for_each_entry(cursor, &pBoard->notification_wait_list, notification_wait_list)
3425 + if ((current->tgid == cursor->tgid) && (filp == cursor->filp)) {
3426 + *current_waiting_thread = cursor;
3434 +static long hailo_add_notification_wait(struct hailo_pcie_board *board, struct file *filp)
3436 + struct hailo_notification_wait *new_notification_wait = NULL;
3437 + if (!(new_notification_wait = kmalloc(sizeof(*new_notification_wait), GFP_KERNEL))) {
3438 + hailo_err(board, "Failed to allocate notification wait structure.\n");
3441 + new_notification_wait->tgid = current->tgid;
3442 + new_notification_wait->filp = filp;
3443 + new_notification_wait->is_disabled = false;
3444 + init_completion(&new_notification_wait->notification_completion);
3445 + list_add_rcu(&new_notification_wait->notification_wait_list, &board->notification_wait_list);
3449 +static long hailo_read_notification_ioctl(struct hailo_pcie_board *pBoard, unsigned long arg, struct file *filp,
3450 + bool* should_up_board_mutex)
3453 + struct hailo_notification_wait *current_waiting_thread = NULL;
3454 + struct hailo_d2h_notification *notification = &pBoard->notification_to_user;
3455 + unsigned long irq_saved_flags;
3457 +    err = hailo_get_notification_wait_thread(pBoard, filp, &current_waiting_thread);
3461 + up(&pBoard->mutex);
3463 +    if (0 > (err = wait_for_completion_interruptible(&current_waiting_thread->notification_completion))) {
3464 + hailo_info(pBoard,
3465 + "HAILO_READ_NOTIFICATION - wait_for_completion_interruptible error. err=%ld. tgid=%d (process was interrupted or killed)\n",
3466 + err, current_waiting_thread->tgid);
3467 + *should_up_board_mutex = false;
3471 + if (down_interruptible(&pBoard->mutex)) {
3472 + hailo_info(pBoard, "HAILO_READ_NOTIFICATION - down_interruptible error (process was interrupted or killed)\n");
3473 + *should_up_board_mutex = false;
3474 + err = -ERESTARTSYS;
3478 +    // Check if the wait was disabled
3479 + if (current_waiting_thread->is_disabled) {
3480 + hailo_info(pBoard, "HAILO_READ_NOTIFICATION, can't find notification wait for tgid=%d\n", current->tgid);
3485 +    reinit_completion(&current_waiting_thread->notification_completion);
3487 + spin_lock_irqsave(&pBoard->notification_read_spinlock, irq_saved_flags);
3488 + notification->buffer_len = pBoard->notification_cache.buffer_len;
3489 + memcpy(notification->buffer, pBoard->notification_cache.buffer, notification->buffer_len);
3490 + spin_unlock_irqrestore(&pBoard->notification_read_spinlock, irq_saved_flags);
3492 + if (copy_to_user((void __user*)arg, notification, sizeof(*notification))) {
3493 + hailo_err(pBoard, "HAILO_READ_NOTIFICATION copy_to_user fail\n");
3502 +static long hailo_disable_notification(struct hailo_pcie_board *pBoard, struct file *filp)
3504 + struct hailo_notification_wait *cursor = NULL;
3506 + hailo_info(pBoard, "HAILO_DISABLE_NOTIFICATION: disable notification");
3508 + list_for_each_entry_rcu(cursor, &pBoard->notification_wait_list, notification_wait_list) {
3509 + if ((current->tgid == cursor->tgid) && (filp == cursor->filp)) {
3510 + cursor->is_disabled = true;
3511 + complete(&cursor->notification_completion);
3515 + rcu_read_unlock();
3520 +static int hailo_fw_control(struct hailo_pcie_board *pBoard, unsigned long arg, bool* should_up_board_mutex)
3522 + struct hailo_fw_control *command = &pBoard->fw_control.command;
3523 + long completion_result = 0;
3526 + up(&pBoard->mutex);
3527 + *should_up_board_mutex = false;
3529 + if (down_interruptible(&pBoard->fw_control.mutex)) {
3530 + hailo_info(pBoard, "hailo_fw_control down_interruptible fail tgid:%d (process was interrupted or killed)\n", current->tgid);
3531 + return -ERESTARTSYS;
3534 + if (copy_from_user(command, (void __user*)arg, sizeof(*command))) {
3535 + hailo_err(pBoard, "hailo_fw_control, copy_from_user fail\n");
3540 + reinit_completion(&pBoard->fw_control.completion);
3542 + err = hailo_pcie_write_firmware_control(&pBoard->pcie_resources, command);
3544 + hailo_err(pBoard, "Failed writing fw control to pcie\n");
3548 + // Wait for response
3549 + completion_result = wait_for_completion_interruptible_timeout(&pBoard->fw_control.completion, msecs_to_jiffies(command->timeout_ms));
3550 + if (completion_result <= 0) {
3551 + if (0 == completion_result) {
3552 + hailo_err(pBoard, "hailo_fw_control, timeout waiting for control (timeout_ms=%d)\n", command->timeout_ms);
3555 + hailo_info(pBoard, "hailo_fw_control, wait for completion failed with err=%ld (process was interrupted or killed)\n", completion_result);
3561 + err = hailo_pcie_read_firmware_control(&pBoard->pcie_resources, command);
3563 + hailo_err(pBoard, "Failed reading fw control from pcie\n");
3567 + if (copy_to_user((void __user*)arg, command, sizeof(*command))) {
3568 + hailo_err(pBoard, "hailo_fw_control, copy_to_user fail\n");
3574 + up(&pBoard->fw_control.mutex);
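+ // This completes the firmware control round-trip: the command is copied from
+ // userspace, written to the device over PCIe, the interrupt handler completes
+ // fw_control.completion when the response arrives, and the response is read
+ // back and copied out to userspace.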
3578 +static long hailo_query_device_properties(struct hailo_pcie_board *board, unsigned long arg)
3580 + struct hailo_device_properties props = {
3581 + .desc_max_page_size = board->desc_max_page_size,
3582 + .allocation_mode = board->allocation_mode,
3583 + .dma_type = HAILO_DMA_TYPE_PCIE,
3584 + .dma_engines_count = board->vdma.vdma_engines_count,
3585 + .is_fw_loaded = hailo_pcie_is_firmware_loaded(&board->pcie_resources),
3588 + hailo_info(board, "HAILO_QUERY_DEVICE_PROPERTIES: desc_max_page_size=%u\n", props.desc_max_page_size);
3590 + if (copy_to_user((void __user*)arg, &props, sizeof(props))) {
3591 + hailo_err(board, "HAILO_QUERY_DEVICE_PROPERTIES, copy_to_user failed\n");
3598 +static long hailo_query_driver_info(struct hailo_pcie_board *board, unsigned long arg)
3600 + struct hailo_driver_info info = {
3601 + .major_version = HAILO_DRV_VER_MAJOR,
3602 + .minor_version = HAILO_DRV_VER_MINOR,
3603 + .revision_version = HAILO_DRV_VER_REVISION
3606 + hailo_info(board, "HAILO_QUERY_DRIVER_INFO: major=%u, minor=%u, revision=%u\n",
3607 + info.major_version, info.minor_version, info.revision_version);
3609 + if (copy_to_user((void __user*)arg, &info, sizeof(info))) {
3610 + hailo_err(board, "HAILO_QUERY_DRIVER_INFO, copy_to_user failed\n");
3617 +static long hailo_general_ioctl(struct hailo_file_context *context, struct hailo_pcie_board *board,
3618 + unsigned int cmd, unsigned long arg, struct file *filp, bool *should_up_board_mutex)
3621 + case HAILO_MEMORY_TRANSFER:
3622 + return hailo_memory_transfer_ioctl(board, arg);
3623 + case HAILO_FW_CONTROL:
3624 + return hailo_fw_control(board, arg, should_up_board_mutex);
3625 + case HAILO_READ_NOTIFICATION:
3626 + return hailo_read_notification_ioctl(board, arg, filp, should_up_board_mutex);
3627 + case HAILO_DISABLE_NOTIFICATION:
3628 + return hailo_disable_notification(board, filp);
3629 + case HAILO_QUERY_DEVICE_PROPERTIES:
3630 + return hailo_query_device_properties(board, arg);
3631 + case HAILO_QUERY_DRIVER_INFO:
3632 + return hailo_query_driver_info(board, arg);
3633 + case HAILO_READ_LOG:
3634 + return hailo_read_log_ioctl(board, arg);
3636 + hailo_err(board, "Invalid general ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
3641 +long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg)
3644 + struct hailo_pcie_board* board = (struct hailo_pcie_board*) filp->private_data;
3645 + struct hailo_file_context *context = NULL;
3646 + bool should_up_board_mutex = true;
3649 + if (!board || !board->pDev) return -ENODEV;
3651 + hailo_dbg(board, "(%d): fops_unlockedioctl. cmd:%d\n", current->tgid, _IOC_NR(cmd));
3653 + if (_IOC_DIR(cmd) & _IOC_READ)
3655 + err = !compatible_access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
3657 + else if (_IOC_DIR(cmd) & _IOC_WRITE)
3659 + err = !compatible_access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
3663 + hailo_err(board, "Invalid ioctl parameter access 0x%x", cmd);
3667 + if (down_interruptible(&board->mutex)) {
3668 + hailo_err(board, "unlockedioctl down_interruptible failed");
3669 + return -ERESTARTSYS;
3671 + BUG_ON(board->mutex.count != 0);
3673 + context = find_file_context(board, filp);
3674 + if (NULL == context) {
3675 + hailo_err(board, "Invalid driver state, file context does not exist\n");
3676 + up(&board->mutex);
3680 + if (false == context->is_valid) {
3681 + hailo_err(board, "Invalid file context\n");
3682 + up(&board->mutex);
3686 + switch (_IOC_TYPE(cmd)) {
3687 + case HAILO_GENERAL_IOCTL_MAGIC:
3688 + err = hailo_general_ioctl(context, board, cmd, arg, filp, &should_up_board_mutex);
3690 + case HAILO_VDMA_IOCTL_MAGIC:
3691 + err = hailo_vdma_ioctl(&context->vdma_context, &board->vdma, cmd, arg, filp, &board->mutex,
3692 + &should_up_board_mutex);
3695 + hailo_err(board, "Invalid ioctl type %d\n", _IOC_TYPE(cmd));
3699 + if (should_up_board_mutex) {
3700 + up(&board->mutex);
3703 + hailo_dbg(board, "(%d): fops_unlockedioctl: SUCCESS\n", current->tgid);
3708 +int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma)
3712 + uintptr_t vdma_handle = vma->vm_pgoff << PAGE_SHIFT;
3714 + struct hailo_pcie_board* board = (struct hailo_pcie_board*)filp->private_data;
3715 + struct hailo_file_context *context = NULL;
3717 + BUILD_BUG_ON_MSG(sizeof(vma->vm_pgoff) < sizeof(vdma_handle),
3718 + "If this expression fails to compile it means the target HW is not compatible with our approach to use "
3719 + "the page offset parameter of 'mmap' to pass the driver the 'handle' of the desired descriptor");
3721 + vma->vm_pgoff = 0; // vm_pgoff contains vdma_handle page offset, the actual offset from the phys addr is 0
3723 + if (!board || !board->pDev) return -ENODEV;
3725 + hailo_info(board, "%d fops_mmap\n", current->tgid);
3727 + if (down_interruptible(&board->mutex)) {
3728 + hailo_err(board, "hailo_pcie_fops_mmap down_interruptible fail tgid:%d\n", current->tgid);
3729 + return -ERESTARTSYS;
3732 + context = find_file_context(board, filp);
3733 + if (NULL == context) {
3734 + up(&board->mutex);
3735 + hailo_err(board, "Invalid driver state, file context does not exist\n");
3739 + if (false == context->is_valid) {
3740 + up(&board->mutex);
3741 + hailo_err(board, "Invalid file context\n");
3745 + err = hailo_vdma_mmap(&context->vdma_context, &board->vdma, vma, vdma_handle);
3746 + up(&board->mutex);
3750 +++ b/drivers/media/pci/hailo/src/fops.h
3752 +// SPDX-License-Identifier: GPL-2.0
3754 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3757 +#ifndef _HAILO_PCI_FOPS_H_
3758 +#define _HAILO_PCI_FOPS_H_
3760 +int hailo_pcie_fops_open(struct inode* inode, struct file* filp);
3761 +int hailo_pcie_fops_release(struct inode* inode, struct file* filp);
3762 +long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg);
3763 +int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma);
3764 +int hailo_pcie_driver_down(struct hailo_pcie_board *board);
3766 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
3767 +irqreturn_t hailo_irqhandler(int irq, void* dev_id, struct pt_regs *regs);
3769 +irqreturn_t hailo_irqhandler(int irq, void* dev_id);
3772 +#endif /* _HAILO_PCI_FOPS_H_ */
3774 +++ b/drivers/media/pci/hailo/src/pcie.c
3776 +// SPDX-License-Identifier: GPL-2.0
3778 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3781 +#include <linux/version.h>
3782 +#include <linux/init.h>
3783 +#include <linux/module.h>
3784 +#include <linux/pci.h>
3785 +#include <linux/pci_regs.h>
3786 +#include <linux/interrupt.h>
3787 +#include <linux/sched.h>
3788 +#include <linux/pagemap.h>
3789 +#include <linux/firmware.h>
3790 +#include <linux/kthread.h>
3792 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
3793 +#include <linux/dma-direct.h>
3796 +#define KERNEL_CODE 1
3798 +#include "hailo_pcie_version.h"
3799 +#include "hailo_ioctl_common.h"
3803 +#include "utils/logs.h"
3804 +#include "utils/compact.h"
3805 +#include "vdma/vdma.h"
3807 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 5, 4, 0 )
3808 +#include <linux/pci-aspm.h>
3811 +// Values for the driver parameter that force buffer allocation from the driver or
3812 +// from userspace, or let the driver decide
3813 +enum hailo_allocate_driver_buffer_driver_param {
3814 + HAILO_NO_FORCE_BUFFER = 0,
3815 + HAILO_FORCE_BUFFER_FROM_USERSPACE = 1,
3816 + HAILO_FORCE_BUFFER_FROM_DRIVER = 2,
3820 +static int force_desc_page_size = 0;
3821 +static bool g_is_power_mode_enabled = true;
3822 +static int force_allocation_from_driver = HAILO_NO_FORCE_BUFFER;
3824 +#define DEVICE_NODE_NAME "hailo"
3825 +static int char_major = 0;
3826 +static struct class *chardev_class;
3828 +static LIST_HEAD(g_hailo_board_list);
3829 +static struct semaphore g_hailo_add_board_mutex = __SEMAPHORE_INITIALIZER(g_hailo_add_board_mutex, 1);
3831 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
3832 +#define HAILO_IRQ_FLAGS (SA_SHIRQ | SA_INTERRUPT)
3833 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
3834 +#define HAILO_IRQ_FLAGS (IRQF_SHARED | IRQF_DISABLED)
3836 +#define HAILO_IRQ_FLAGS (IRQF_SHARED)
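+// SA_SHIRQ/SA_INTERRUPT are the pre-2.6.22 flag names and IRQF_DISABLED was
+// removed in kernel 4.1, hence the three-way version split above.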
3839 + /* ****************************
3840 + ******************************* */
3841 +bool power_mode_enabled(void)
3843 +#if !defined(HAILO_EMULATOR)
3844 + return g_is_power_mode_enabled;
3845 +#else /* !defined(HAILO_EMULATOR) */
3847 +#endif /* !defined(HAILO_EMULATOR) */
3852 + * Due to an HW bug, on systems with a low MaxReadReq (< 512) we need to use a different descriptor size.
3853 + * Returns the max descriptor size or 0 on failure.
3855 +static int hailo_get_desc_page_size(struct pci_dev *pdev, u32 *out_page_size)
3857 + u16 pcie_device_control = 0;
3859 + // The default page size must be smaller than or equal to 32K (due to a PLDA register limit).
3860 + const u32 max_page_size = 32u * 1024u;
3861 + const u32 default_page_size = min((u32)PAGE_SIZE, max_page_size);
3863 + if (force_desc_page_size != 0) {
3864 + // The user gave desc_page_size as a module parameter
3865 + if ((force_desc_page_size & (force_desc_page_size - 1)) != 0) {
3866 + pci_err(pdev, "force_desc_page_size must be a power of 2\n");
3870 + if (force_desc_page_size > max_page_size) {
3871 + pci_err(pdev, "force_desc_page_size %d mustn't be larger than %u\n", force_desc_page_size, max_page_size);
3875 + pci_notice(pdev, "Probing: Force setting max_desc_page_size to %d (recommended value is %lu)\n",
3876 + force_desc_page_size, PAGE_SIZE);
3877 + *out_page_size = force_desc_page_size;
3881 + err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_device_control);
3883 + pci_err(pdev, "Couldn't read DEVCTL capability\n");
3887 + switch (pcie_device_control & PCI_EXP_DEVCTL_READRQ) {
3888 + case PCI_EXP_DEVCTL_READRQ_128B:
3889 + pci_notice(pdev, "Probing: Setting max_desc_page_size to 128 (recommended value is %u)\n", default_page_size);
3890 + *out_page_size = 128;
3892 + case PCI_EXP_DEVCTL_READRQ_256B:
3893 + pci_notice(pdev, "Probing: Setting max_desc_page_size to 256 (recommended value is %u)\n", default_page_size);
3894 + *out_page_size = 256;
3897 + pci_notice(pdev, "Probing: Setting max_desc_page_size to %u, (page_size=%lu)\n", default_page_size, PAGE_SIZE);
3898 + *out_page_size = default_page_size;
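+ // e.g. a link configured with MaxReadReq=128 forces 128-byte descriptor
+ // pages, while MaxReadReq >= 512 keeps the regular PAGE_SIZE (up to 32K).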
3903 +// should be called only from fops_open (once)
3904 +struct hailo_pcie_board* hailo_pcie_get_board_index(u32 index)
3906 + struct hailo_pcie_board *pBoard, *pRet = NULL;
3908 + down(&g_hailo_add_board_mutex);
3909 + list_for_each_entry(pBoard, &g_hailo_board_list, board_list)
3911 + if ( index == pBoard->board_index )
3913 + atomic_inc(&pBoard->ref_count);
3918 + up(&g_hailo_add_board_mutex);
3924 + * hailo_pcie_disable_aspm - Disable ASPM states
3925 + * @board: pointer to PCI board struct
3926 + * @state: bit-mask of ASPM states to disable
3927 + * @locked: indication if this context holds pci_bus_sem locked.
3929 + * Some devices *must* have certain ASPM states disabled per hardware errata.
3931 +static int hailo_pcie_disable_aspm(struct hailo_pcie_board *board, u16 state, bool locked)
3933 + struct pci_dev *pdev = board->pDev;
3934 + struct pci_dev *parent = pdev->bus->self;
3935 + u16 aspm_dis_mask = 0;
3936 + u16 pdev_aspmc = 0;
3937 + u16 parent_aspmc = 0;
3941 + case PCIE_LINK_STATE_L0S:
3942 + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
3944 + case PCIE_LINK_STATE_L1:
3945 + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
3951 + err = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
3953 + hailo_err(board, "Couldn't read LNKCTL capability\n");
3957 + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
3960 + err = pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_aspmc);
3962 + hailo_err(board, "Couldn't read slot LNKCTL capability\n");
3965 + parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
3968 + hailo_notice(board, "Disabling ASPM %s %s\n",
3969 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
3970 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
3972 + // Disable L0s even if it is currently disabled as ASPM states can be enabled by the kernel when changing power modes
3973 +#ifdef CONFIG_PCIEASPM
3975 + // Older kernel versions (<5.2.21) don't return a value from these functions, so we try manual disabling anyway
3976 + (void)pci_disable_link_state_locked(pdev, state);
3978 + (void)pci_disable_link_state(pdev, state);
3981 + /* Double-check ASPM control. If not disabled by the above, the
3982 + * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
3983 + * not enabled); override by writing PCI config space directly.
3985 + err = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
3987 + hailo_err(board, "Couldn't read LNKCTL capability\n");
3990 + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
3992 + if (!(aspm_dis_mask & pdev_aspmc)) {
3993 + hailo_notice(board, "Successfully disabled ASPM %s %s\n",
3994 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
3995 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
4000 + /* Both device and parent should have the same ASPM setting.
4001 + * Disable ASPM in downstream component first and then upstream.
4003 + err = pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
4005 + hailo_err(board, "Couldn't clear LNKCTL capability\n");
4009 + err = pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, aspm_dis_mask);
4011 + hailo_err(board, "Couldn't clear slot LNKCTL capability\n");
4015 + hailo_notice(board, "Manually disabled ASPM %s %s\n",
4016 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
4017 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
4022 +static void hailo_pcie_insert_board(struct hailo_pcie_board* pBoard)
4025 + struct hailo_pcie_board *pCurrent, *pNext;
4028 + down(&g_hailo_add_board_mutex);
4029 + if ( list_empty(&g_hailo_board_list) ||
4030 + list_first_entry(&g_hailo_board_list, struct hailo_pcie_board, board_list)->board_index > 0)
4032 + pBoard->board_index = 0;
4033 + list_add(&pBoard->board_list, &g_hailo_board_list);
4035 + up(&g_hailo_add_board_mutex);
4039 + list_for_each_entry_safe(pCurrent, pNext, &g_hailo_board_list, board_list)
4041 + index = pCurrent->board_index+1;
4042 + if( list_is_last(&pCurrent->board_list, &g_hailo_board_list) || (index != pNext->board_index))
4048 + pBoard->board_index = index;
4049 + list_add(&pBoard->board_list, &pCurrent->board_list);
4051 + up(&g_hailo_add_board_mutex);
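+ // Boards take the lowest free index, so the /dev/hailo<N> numbering stays
+ // dense even after boards are removed and re-probed.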
4056 +static void hailo_pcie_remove_board(struct hailo_pcie_board* pBoard)
4058 + down(&g_hailo_add_board_mutex);
4061 + list_del(&pBoard->board_list);
4063 + up(&g_hailo_add_board_mutex);
4066 +static int hailo_write_config(struct hailo_pcie_resources *resources, struct device *dev,
4067 + const struct hailo_config_constants *config_consts)
4069 + const struct firmware *config = NULL;
4072 + if (NULL == config_consts->filename) {
4073 + // Config not supported for platform
4077 + err = request_firmware_direct(&config, config_consts->filename, dev);
4079 + hailo_dev_info(dev, "Config %s not found\n", config_consts->filename);
4083 + hailo_dev_notice(dev, "Writing config %s\n", config_consts->filename);
4085 + err = hailo_pcie_write_config_common(resources, config->data, config->size, config_consts);
4087 + if (-EINVAL == err) {
4088 + hailo_dev_warn(dev, "Config size %zu is bigger than max %zu\n", config->size, config_consts->max_size);
4090 + release_firmware(config);
4094 + release_firmware(config);
4098 +static bool wait_for_firmware_completion(struct completion *fw_load_completion)
4100 + return (0 != wait_for_completion_timeout(fw_load_completion, FIRMWARE_WAIT_TIMEOUT_MS));
4103 +static int hailo_load_firmware(struct hailo_pcie_resources *resources,
4104 + struct device *dev, struct completion *fw_load_completion)
4106 + const struct firmware *firmware = NULL;
4109 + if (hailo_pcie_is_firmware_loaded(resources)) {
4110 + hailo_dev_warn(dev, "Firmware was already loaded\n");
4114 + reinit_completion(fw_load_completion);
4116 + err = hailo_write_config(resources, dev, hailo_pcie_get_board_config_constants(resources->board_type));
4118 + hailo_dev_err(dev, "Failed writing board config");
4122 + err = hailo_write_config(resources, dev, hailo_pcie_get_user_config_constants(resources->board_type));
4124 + hailo_dev_err(dev, "Failed writing fw config");
4128 + // read firmware file
4129 + err = request_firmware_direct(&firmware, hailo_pcie_get_fw_filename(resources->board_type), dev);
4131 + hailo_dev_warn(dev, "Firmware file not found (/lib/firmware/%s), please upload the firmware manually\n",
4132 + hailo_pcie_get_fw_filename(resources->board_type));
4136 + err = hailo_pcie_write_firmware(resources, firmware->data, firmware->size);
4138 + hailo_dev_err(dev, "Failed writing firmware. err %d\n", err);
4139 + release_firmware(firmware);
4143 + release_firmware(firmware);
4145 + if (!wait_for_firmware_completion(fw_load_completion)) {
4146 + hailo_dev_err(dev, "Timeout waiting for firmware...\n");
4147 + return -ETIMEDOUT;
4150 + hailo_dev_notice(dev, "Firmware was loaded successfully\n");
4154 +static int hailo_activate_board(struct hailo_pcie_board *board)
4158 + (void)hailo_pcie_disable_aspm(board, PCIE_LINK_STATE_L0S, false);
4160 + err = hailo_enable_interrupts(board);
4162 + hailo_err(board, "Failed Enabling interrupts %d\n", err);
4166 + err = hailo_load_firmware(&board->pcie_resources, &board->pDev->dev,
4167 + &board->fw_loaded_completion);
4169 + hailo_err(board, "Firmware load failed\n");
4170 + hailo_disable_interrupts(board);
4174 + hailo_disable_interrupts(board);
4176 + if (power_mode_enabled()) {
4177 + // Setting the device to low power state, until the user opens the device
4178 + err = pci_set_power_state(board->pDev, PCI_D3hot);
4180 + hailo_err(board, "Set power state failed %d\n", err);
4188 +int hailo_enable_interrupts(struct hailo_pcie_board *board)
4192 + if (board->interrupts_enabled) {
4193 + hailo_crit(board, "Failed enabling interrupts (already enabled)\n");
4197 + // TODO HRT-2253: use new api for enabling msi: (pci_alloc_irq_vectors)
4198 + if ((err = pci_enable_msi(board->pDev))) {
4199 + hailo_err(board, "Failed to enable MSI %d\n", err);
4202 + hailo_info(board, "Enabled MSI interrupt\n");
4204 + err = request_irq(board->pDev->irq, hailo_irqhandler, HAILO_IRQ_FLAGS, DRIVER_NAME, board);
4206 + hailo_err(board, "request_irq failed %d\n", err);
4207 + pci_disable_msi(board->pDev);
4210 + hailo_info(board, "irq enabled %u\n", board->pDev->irq);
4212 + hailo_pcie_enable_interrupts(&board->pcie_resources);
4214 + board->interrupts_enabled = true;
4218 +void hailo_disable_interrupts(struct hailo_pcie_board *board)
4221 + if ((NULL == board) || (NULL == board->pDev)) {
4222 + pr_err("Failed to access board or device\n");
4226 + if (!board->interrupts_enabled) {
4230 + board->interrupts_enabled = false;
4231 + hailo_pcie_disable_interrupts(&board->pcie_resources);
4232 + free_irq(board->pDev->irq, board);
4233 + pci_disable_msi(board->pDev);
4236 +static int hailo_bar_iomap(struct pci_dev *pdev, int bar, struct hailo_resource *resource)
4238 + resource->size = pci_resource_len(pdev, bar);
4239 + resource->address = (uintptr_t)(pci_iomap(pdev, bar, resource->size));
4241 + if (!resource->size || !resource->address) {
4242 + pci_err(pdev, "Probing: Invalid PCIe BAR %d", bar);
4246 + pci_notice(pdev, "Probing: mapped bar %d - %p %zu\n", bar,
4247 + (void*)resource->address, resource->size);
4251 +static void hailo_bar_iounmap(struct pci_dev *pdev, struct hailo_resource *resource)
4253 + if (resource->address) {
4254 + pci_iounmap(pdev, (void*)resource->address);
4255 + resource->address = 0;
4256 + resource->size = 0;
4260 +static int pcie_resources_init(struct pci_dev *pdev, struct hailo_pcie_resources *resources,
4261 + enum hailo_board_type board_type)
4263 + int err = -EINVAL;
4264 + if (board_type >= HAILO_BOARD_TYPE_COUNT) {
4265 + pci_err(pdev, "Probing: Invalid board type %d\n", (int)board_type);
4267 + goto failure_exit;
4270 + err = pci_request_regions(pdev, DRIVER_NAME);
4272 + pci_err(pdev, "Probing: Error allocating bars %d\n", err);
4273 + goto failure_exit;
4276 + err = hailo_bar_iomap(pdev, HAILO_PCIE_CONFIG_BAR, &resources->config);
4278 + goto failure_release_regions;
4281 + err = hailo_bar_iomap(pdev, HAILO_PCIE_VDMA_REGS_BAR, &resources->vdma_registers);
4283 + goto failure_release_config;
4286 + err = hailo_bar_iomap(pdev, HAILO_PCIE_FW_ACCESS_BAR, &resources->fw_access);
4288 + goto failure_release_vdma_regs;
4291 + resources->board_type = board_type;
4293 + if (!hailo_pcie_is_device_connected(resources)) {
4294 + pci_err(pdev, "Probing: Failed reading device BARs, device may be disconnected\n");
4296 + goto failure_release_fw_access;
4301 +failure_release_fw_access:
4302 + hailo_bar_iounmap(pdev, &resources->fw_access);
4303 +failure_release_vdma_regs:
4304 + hailo_bar_iounmap(pdev, &resources->vdma_registers);
4305 +failure_release_config:
4306 + hailo_bar_iounmap(pdev, &resources->config);
4307 +failure_release_regions:
4308 + pci_release_regions(pdev);
4313 +static void pcie_resources_release(struct pci_dev *pdev, struct hailo_pcie_resources *resources)
4315 + hailo_bar_iounmap(pdev, &resources->config);
4316 + hailo_bar_iounmap(pdev, &resources->vdma_registers);
4317 + hailo_bar_iounmap(pdev, &resources->fw_access);
4318 + pci_release_regions(pdev);
4321 +static void update_channel_interrupts(struct hailo_vdma_controller *controller,
4322 + size_t engine_index, u32 channels_bitmap)
4324 + struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(controller->dev);
4325 + if (engine_index >= board->vdma.vdma_engines_count) {
4326 + hailo_err(board, "Invalid engine index %zu\n", engine_index);
4330 + hailo_pcie_update_channel_interrupts_mask(&board->pcie_resources, channels_bitmap);
4333 +static struct hailo_vdma_controller_ops pcie_vdma_controller_ops = {
4334 + .update_channel_interrupts = update_channel_interrupts,
4338 +static int hailo_pcie_vdma_controller_init(struct hailo_vdma_controller *controller,
4339 + struct device *dev, struct hailo_resource *vdma_registers)
4341 + const size_t engines_count = 1;
4342 + return hailo_vdma_controller_init(controller, dev, &hailo_pcie_vdma_hw,
4343 + &pcie_vdma_controller_ops, vdma_registers, engines_count);
4346 +// Tries to check whether an address allocated with kmalloc is DMA-capable.
4347 +// If the kmalloc address is not DMA-capable, we assume other addresses
4348 +// won't be DMA-capable either.
4349 +static bool is_kmalloc_dma_capable(struct device *dev)
4351 + void *check_addr = NULL;
4352 + dma_addr_t dma_addr = 0;
4353 + phys_addr_t phys_addr = 0;
4354 + bool capable = false;
4356 + if (!dev->dma_mask) {
4360 + check_addr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4361 + if (NULL == check_addr) {
4362 + dev_err(dev, "failed allocating page!\n");
4366 + phys_addr = virt_to_phys(check_addr);
4367 + dma_addr = phys_to_dma(dev, phys_addr);
4369 + capable = is_dma_capable(dev, dma_addr, PAGE_SIZE);
4370 + kfree(check_addr);
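+ // A single kmalloc'ed page acts as the probe: if its DMA address fits the
+ // device's mask, other allocations are assumed DMA-capable as well (used by
+ // hailo_get_allocation_mode below to prefer userspace buffers).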
4374 +static int hailo_get_allocation_mode(struct pci_dev *pdev, enum hailo_allocation_mode *allocation_mode)
4376 + // Check if a module parameter was given to override the driver choice
4377 + if (HAILO_NO_FORCE_BUFFER != force_allocation_from_driver) {
4378 + if (HAILO_FORCE_BUFFER_FROM_USERSPACE == force_allocation_from_driver) {
4379 + *allocation_mode = HAILO_ALLOCATION_MODE_USERSPACE;
4380 + pci_notice(pdev, "Probing: Using userspace allocated vdma buffers\n");
4382 + else if (HAILO_FORCE_BUFFER_FROM_DRIVER == force_allocation_from_driver) {
4383 + *allocation_mode = HAILO_ALLOCATION_MODE_DRIVER;
4384 + pci_notice(pdev, "Probing: Using driver allocated vdma buffers\n");
4387 + pci_err(pdev, "Invalid value for force allocation driver parameter - value given: %d!\n",
4388 + force_allocation_from_driver);
4395 + if (is_kmalloc_dma_capable(&pdev->dev)) {
4396 + *allocation_mode = HAILO_ALLOCATION_MODE_USERSPACE;
4397 + pci_notice(pdev, "Probing: Using userspace allocated vdma buffers\n");
4399 + *allocation_mode = HAILO_ALLOCATION_MODE_DRIVER;
4400 + pci_notice(pdev, "Probing: Using driver allocated vdma buffers\n");
4406 +static int hailo_pcie_probe(struct pci_dev* pDev, const struct pci_device_id* id)
4408 + struct hailo_pcie_board * pBoard;
4409 + struct device *char_device = NULL;
4410 + int err = -EINVAL;
4412 + pci_notice(pDev, "Probing on: %04x:%04x...\n", pDev->vendor, pDev->device);
4413 +#ifdef HAILO_EMULATOR
4414 + pci_notice(pDev, "PCIe driver was compiled in emulator mode\n");
4415 +#endif /* HAILO_EMULATOR */
4416 + if (!g_is_power_mode_enabled) {
4417 + pci_notice(pDev, "PCIe driver was compiled with power modes disabled\n");
4420 + /* Initialize device extension for the board*/
4421 + pci_notice(pDev, "Probing: Allocate memory for device extension, %zu\n", sizeof(struct hailo_pcie_board));
4422 + pBoard = (struct hailo_pcie_board*) kzalloc( sizeof(struct hailo_pcie_board), GFP_KERNEL);
4423 + if (pBoard == NULL)
4425 + pci_err(pDev, "Probing: Failed to allocate memory for device extension structure\n");
4430 + pBoard->pDev = pDev;
4432 + if ( (err = pci_enable_device(pDev)) )
4434 + pci_err(pDev, "Probing: Failed calling pci_enable_device %d\n", err);
4435 + goto probe_free_board;
4437 + pci_notice(pDev, "Probing: Device enabled\n");
4439 + pci_set_master(pDev);
4441 + err = pcie_resources_init(pDev, &pBoard->pcie_resources, id->driver_data);
4443 + pci_err(pDev, "Probing: Failed init pcie resources\n");
4444 + goto probe_disable_device;
4447 + err = hailo_get_desc_page_size(pDev, &pBoard->desc_max_page_size);
4449 + goto probe_release_pcie_resources;
4452 + pBoard->interrupts_enabled = false;
4453 + init_completion(&pBoard->fw_loaded_completion);
4455 + sema_init(&pBoard->mutex, 1);
4456 + atomic_set(&pBoard->ref_count, 0);
4457 + INIT_LIST_HEAD(&pBoard->open_files_list);
4459 + sema_init(&pBoard->fw_control.mutex, 1);
4460 + spin_lock_init(&pBoard->notification_read_spinlock);
4461 + init_completion(&pBoard->fw_control.completion);
4463 + init_completion(&pBoard->driver_down.reset_completed);
4465 + INIT_LIST_HEAD(&pBoard->notification_wait_list);
4467 + memset(&pBoard->notification_cache, 0, sizeof(pBoard->notification_cache));
4468 + memset(&pBoard->memory_transfer_params, 0, sizeof(pBoard->memory_transfer_params));
4470 + err = hailo_pcie_vdma_controller_init(&pBoard->vdma, &pBoard->pDev->dev,
4471 + &pBoard->pcie_resources.vdma_registers);
4473 + hailo_err(pBoard, "Failed init vdma controller %d\n", err);
4474 + goto probe_release_pcie_resources;
4477 + // Checks the dma mask => it must be called after the device's dma_mask is set by hailo_pcie_vdma_controller_init
4478 + err = hailo_get_allocation_mode(pDev, &pBoard->allocation_mode);
4480 + pci_err(pDev, "Failed determining allocation of buffers from driver. error type: %d\n", err);
4481 + goto probe_release_pcie_resources;
4484 + err = hailo_activate_board(pBoard);
4486 + hailo_err(pBoard, "Failed activating board %d\n", err);
4487 + goto probe_release_pcie_resources;
4490 + /* Keep track of the device so we can remove it later */
4491 + pci_set_drvdata(pDev, pBoard);
4492 + hailo_pcie_insert_board(pBoard);
4494 + /* Dynamically create the device node */
4495 + char_device = device_create_with_groups(chardev_class, NULL,
4496 + MKDEV(char_major, pBoard->board_index),
4498 + g_hailo_dev_groups,
4499 + DEVICE_NODE_NAME"%d", pBoard->board_index);
4500 + if (IS_ERR(char_device)) {
4501 + hailo_err(pBoard, "Failed creating dynamic device %d\n", pBoard->board_index);
4502 + err = PTR_ERR(char_device);
4503 + goto probe_remove_board;
4506 + hailo_notice(pBoard, "Probing: Added board %0x-%0x, /dev/hailo%d\n", pDev->vendor, pDev->device, pBoard->board_index);
4510 +probe_remove_board:
4511 + hailo_pcie_remove_board(pBoard);
4513 +probe_release_pcie_resources:
4514 + pcie_resources_release(pBoard->pDev, &pBoard->pcie_resources);
4516 +probe_disable_device:
4517 + pci_disable_device(pDev);
4527 +static void hailo_pcie_remove(struct pci_dev* pDev)
4529 + struct hailo_pcie_board* pBoard = (struct hailo_pcie_board*) pci_get_drvdata(pDev);
4530 + struct hailo_notification_wait *cursor = NULL;
4532 + pci_notice(pDev, "Remove: Releasing board\n");
4537 + // lock board to wait for any pending operations and for synchronization with open
4538 + down(&pBoard->mutex);
4541 + // remove board from active boards list
4542 + hailo_pcie_remove_board(pBoard);
4545 + /* Delete the device node */
4546 + device_destroy(chardev_class, MKDEV(char_major, pBoard->board_index));
4548 + // disable interrupts - will only disable if they have not been disabled in release already
4549 + hailo_disable_interrupts(pBoard);
4551 + pcie_resources_release(pBoard->pDev, &pBoard->pcie_resources);
4553 + // Dissociate the device from the board; remaining cleanup is picked up by the char device on last close
4554 + pBoard->pDev = NULL;
4556 + pBoard->vdma.dev = NULL;
4558 + pci_disable_device(pDev);
4560 + pci_set_drvdata(pDev, NULL);
4562 + // Take rcu_read_lock and complete notification_completion to wake anyone waiting on the notification_wait_list during removal
4564 + list_for_each_entry_rcu(cursor, &pBoard->notification_wait_list, notification_wait_list) {
4565 + cursor->is_disabled = true;
4566 + complete(&cursor->notification_completion);
4568 + rcu_read_unlock();
4570 + up(&pBoard->mutex);
4572 + if ( 0 == atomic_read(&pBoard->ref_count) )
4574 + // nobody has the board open - free
4575 + pci_notice(pDev, "Remove: Freed board, /dev/hailo%d\n", pBoard->board_index);
4580 + // board resources are freed on last close
4581 + pci_notice(pDev, "Remove: Scheduled for board removal, /dev/hailo%d\n", pBoard->board_index);
4587 +#ifdef CONFIG_PM_SLEEP
4588 +static int hailo_pcie_suspend(struct device *dev)
4590 + struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(dev);
4591 + struct hailo_file_context *cur = NULL;
4594 + // lock board to wait for any pending operations
4595 + down(&board->mutex);
4597 + // Disable all interrupts. All interrupts from the Hailo chip will be masked.
4598 + hailo_disable_interrupts(board);
4600 + // Close all vDMA channels
4601 + if (board->vdma.used_by_filp != NULL) {
4602 + err = hailo_pcie_driver_down(board);
4604 + dev_notice(dev, "Error while trying to call FW to close vdma channels\n");
4608 + // Invalidate all active file contexts so every new action returns an error to the user.
4609 + list_for_each_entry(cur, &board->open_files_list, open_files_list) {
4610 + cur->is_valid = false;
4614 + up(&board->mutex);
4616 + dev_notice(dev, "PM's suspend\n");
4617 + // Continue system suspend
4621 +static int hailo_pcie_resume(struct device *dev)
4623 + struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(dev);
4626 + if ((err = hailo_activate_board(board)) < 0) {
4627 + dev_err(dev, "Failed activating board %d\n", err);
4631 + dev_notice(dev, "PM's resume\n");
4634 +#endif /* CONFIG_PM_SLEEP */
4636 +static SIMPLE_DEV_PM_OPS(hailo_pcie_pm_ops, hailo_pcie_suspend, hailo_pcie_resume);
4638 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 )
4639 +static void hailo_pci_reset_prepare(struct pci_dev *pdev)
4641 + struct hailo_pcie_board* board = (struct hailo_pcie_board*) pci_get_drvdata(pdev);
4643 + /* Reset preparation logic goes here */
4644 + pci_err(pdev, "Reset preparation for PCI device\n");
4648 + // lock board to wait for any pending operations and for synchronization with open
4649 + down(&board->mutex);
4650 + if (board->vdma.used_by_filp != NULL) {
4651 + // Try to close all vDMA channels before reset
4652 + err = hailo_pcie_driver_down(board);
4654 + pci_err(pdev, "Error while trying to call FW to close vdma channels (errno %d)\n", err);
4657 + up(&board->mutex);
4660 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 ) */
4662 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 4, 13, 0 ) && LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 )
4663 +static void hailo_pci_reset_notify(struct pci_dev *pdev, bool prepare)
4666 + hailo_pci_reset_prepare(pdev);
4671 +static const struct pci_error_handlers hailo_pcie_err_handlers = {
4672 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 3, 16, 0 )
4673 +/* No FLR callback */
4674 +#elif LINUX_VERSION_CODE < KERNEL_VERSION( 4, 13, 0 )
4675 +/* FLR Callback is reset_notify */
4676 + .reset_notify = hailo_pci_reset_notify,
4678 +/* FLR Callback is reset_prepare */
4679 + .reset_prepare = hailo_pci_reset_prepare,
4683 +static struct pci_device_id hailo_pcie_id_table[] =
4685 + {PCI_DEVICE_DATA(HAILO, HAILO8, HAILO_BOARD_TYPE_HAILO8)},
4686 + {PCI_DEVICE_DATA(HAILO, HAILO15, HAILO_BOARD_TYPE_HAILO15)},
4687 + {PCI_DEVICE_DATA(HAILO, PLUTO, HAILO_BOARD_TYPE_PLUTO)},
4691 +static struct file_operations hailo_pcie_fops =
4693 + .owner = THIS_MODULE,
4694 + .unlocked_ioctl = hailo_pcie_fops_unlockedioctl,
4695 + .mmap = hailo_pcie_fops_mmap,
4696 + .open = hailo_pcie_fops_open,
4697 + .release = hailo_pcie_fops_release
4701 +static struct pci_driver hailo_pci_driver =
4703 + .name = DRIVER_NAME,
4704 + .id_table = hailo_pcie_id_table,
4705 + .probe = hailo_pcie_probe,
4706 + .remove = hailo_pcie_remove,
4708 + .pm = &hailo_pcie_pm_ops,
4710 + .err_handler = &hailo_pcie_err_handlers,
4713 +MODULE_DEVICE_TABLE (pci, hailo_pcie_id_table);
4715 +static int hailo_pcie_register_chrdev(unsigned int major, const char *name)
4719 + char_major = register_chrdev(major, name, &hailo_pcie_fops);
4721 + chardev_class = class_create_compat("hailo_chardev");
4723 + return char_major;
4726 +static void hailo_pcie_unregister_chrdev(unsigned int major, const char *name)
4728 + class_destroy(chardev_class);
4729 + unregister_chrdev(major, name);
4732 +static int __init hailo_pcie_module_init(void)
4736 + pr_notice(DRIVER_NAME ": Init module. driver version %s\n", HAILO_DRV_VER);
4738 + if ( 0 > (char_major = hailo_pcie_register_chrdev(0, DRIVER_NAME)) )
4740 + pr_err(DRIVER_NAME ": Init Error, failed to call register_chrdev.\n");
4742 + return char_major;
4745 + if ( 0 != (err = pci_register_driver(&hailo_pci_driver)))
4747 + pr_err(DRIVER_NAME ": Init Error, failed to call pci_register_driver.\n");
4748 + // hailo_pcie_unregister_chrdev() already calls class_destroy(), so don't call it again here
4749 + hailo_pcie_unregister_chrdev(char_major, DRIVER_NAME);
4756 +static void __exit hailo_pcie_module_exit(void)
4759 + pr_notice(DRIVER_NAME ": Exit module.\n");
4761 + // Unregister the driver from pci bus
4762 + pci_unregister_driver(&hailo_pci_driver);
4763 + hailo_pcie_unregister_chrdev(char_major, DRIVER_NAME);
4765 + pr_notice(DRIVER_NAME ": Hailo PCIe driver unloaded.\n");
4769 +module_init(hailo_pcie_module_init);
4770 +module_exit(hailo_pcie_module_exit);
4772 +module_param(o_dbg, int, S_IRUGO | S_IWUSR);
4774 +module_param_named(no_power_mode, g_is_power_mode_enabled, invbool, S_IRUGO);
4775 +MODULE_PARM_DESC(no_power_mode, "Disables automatic D0->D3 PCIe transactions");
4777 +module_param(force_allocation_from_driver, int, S_IRUGO);
4778 +MODULE_PARM_DESC(force_allocation_from_driver, "Determines whether to force buffer allocation from driver or userspace");
4780 +module_param(force_desc_page_size, int, S_IRUGO);
4781 +MODULE_PARM_DESC(force_desc_page_size, "Determines the maximum DMA descriptor page size (must be a power of 2)");
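+// Illustrative usage (module name depends on how the driver is built):
+//   insmod hailo_pci.ko force_desc_page_size=4096 force_allocation_from_driver=2
+// where 2 corresponds to HAILO_FORCE_BUFFER_FROM_DRIVER above.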
4783 +MODULE_AUTHOR("Hailo Technologies Ltd.");
4784 +MODULE_DESCRIPTION("Hailo PCIe driver");
4785 +MODULE_LICENSE("GPL v2");
4786 +MODULE_VERSION(HAILO_DRV_VER);
4789 +++ b/drivers/media/pci/hailo/src/pcie.h
4791 +// SPDX-License-Identifier: GPL-2.0
4793 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4796 +#ifndef _HAILO_PCI_PCIE_H_
4797 +#define _HAILO_PCI_PCIE_H_
4799 +#include "vdma/vdma.h"
4800 +#include "hailo_ioctl_common.h"
4801 +#include "pcie_common.h"
4802 +#include "utils/fw_common.h"
4804 +#include <linux/pci.h>
4805 +#include <linux/fs.h>
4806 +#include <linux/interrupt.h>
4807 +#include <linux/circ_buf.h>
4808 +#include <linux/device.h>
4810 +#include <linux/ioctl.h>
4812 +struct hailo_fw_control_info {
4813 + // protects that only one fw control will be send at a time
4814 + struct semaphore mutex;
4815 + // called from the interrupt handler to notify that a response is ready
4816 + struct completion completion;
4817 + // the command we are currently handling
4818 + struct hailo_fw_control command;
4821 +struct hailo_pcie_driver_down_info {
4822 + // called from the interrupt handler to notify that FW completed reset
4823 + struct completion reset_completed;
4826 +struct hailo_fw_boot {
4827 + // the filp that enabled interrupts for fw boot. the interrupt is enabled if this is not null
4828 + struct file *filp;
4829 + // called from the interrupt handler to notify that an interrupt was raised
4830 + struct completion completion;
4834 +// Context for each open file handle
4835 +// TODO: store board and use as actual context
4836 +struct hailo_file_context {
4837 + struct list_head open_files_list;
4838 + struct file *filp;
4839 + struct hailo_vdma_file_context vdma_context;
4843 +struct hailo_pcie_board {
4844 + struct list_head board_list;
4845 + struct pci_dev *pDev;
4847 + atomic_t ref_count;
4848 + struct list_head open_files_list;
4849 + struct hailo_pcie_resources pcie_resources;
4850 + struct hailo_fw_control_info fw_control;
4851 + struct hailo_pcie_driver_down_info driver_down;
4852 + struct semaphore mutex;
4853 + struct hailo_vdma_controller vdma;
4854 + spinlock_t notification_read_spinlock;
4855 + struct list_head notification_wait_list;
4856 + struct hailo_d2h_notification notification_cache;
4857 + struct hailo_d2h_notification notification_to_user;
4858 + struct hailo_memory_transfer_params memory_transfer_params;
4859 + u32 desc_max_page_size;
4860 + enum hailo_allocation_mode allocation_mode;
4861 + struct completion fw_loaded_completion;
4862 + bool interrupts_enabled;
4865 +bool power_mode_enabled(void);
4867 +struct hailo_pcie_board* hailo_pcie_get_board_index(u32 index);
4868 +void hailo_disable_interrupts(struct hailo_pcie_board *board);
4869 +int hailo_enable_interrupts(struct hailo_pcie_board *board);
4871 +#endif /* _HAILO_PCI_PCIE_H_ */
4874 +++ b/drivers/media/pci/hailo/src/sysfs.c
4876 +// SPDX-License-Identifier: GPL-2.0
4878 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4884 +#include <linux/device.h>
4885 +#include <linux/sysfs.h>
4887 +static ssize_t board_location_show(struct device *dev, struct device_attribute *_attr,
4890 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
4891 + const char *dev_info = pci_name(board->pDev);
4892 + return sprintf(buf, "%s", dev_info);
4894 +static DEVICE_ATTR_RO(board_location);
4896 +static ssize_t device_id_show(struct device *dev, struct device_attribute *_attr,
4899 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
4900 + return sprintf(buf, "%x:%x", board->pDev->vendor, board->pDev->device);
4902 +static DEVICE_ATTR_RO(device_id);
4904 +static struct attribute *hailo_dev_attrs[] = {
4905 + &dev_attr_board_location.attr,
4906 + &dev_attr_device_id.attr,
4910 +ATTRIBUTE_GROUPS(hailo_dev);
4911 +const struct attribute_group **g_hailo_dev_groups = hailo_dev_groups;
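+// These attributes are exposed on the char device created with
+// device_create_with_groups(), e.g. /sys/class/hailo_chardev/hailo0/board_location
+// reports the PCI address of board 0.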
4913 +++ b/drivers/media/pci/hailo/src/sysfs.h
4915 +// SPDX-License-Identifier: GPL-2.0
4917 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4920 +#ifndef _HAILO_PCI_SYSFS_H_
4921 +#define _HAILO_PCI_SYSFS_H_
4923 +#include <linux/sysfs.h>
4925 +extern const struct attribute_group **g_hailo_dev_groups;
4927 +#endif /* _HAILO_PCI_SYSFS_H_ */
4929 +++ b/drivers/media/pci/hailo/src/utils.c
4931 +// SPDX-License-Identifier: GPL-2.0
4933 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4936 +#include <linux/version.h>
4937 +#include <linux/init.h>
4938 +#include <linux/module.h>
4939 +#include <linux/pci.h>
4941 +#include "hailo_pcie_version.h"
4944 +#include "utils/logs.h"
4947 +void hailo_pcie_clear_notification_wait_list(struct hailo_pcie_board *pBoard, struct file *filp)
4949 + struct hailo_notification_wait *cur = NULL, *next = NULL;
4950 + list_for_each_entry_safe(cur, next, &pBoard->notification_wait_list, notification_wait_list) {
4951 + if (cur->filp == filp) {
4952 + list_del_rcu(&cur->notification_wait_list);
4953 + synchronize_rcu();
4959 +++ b/drivers/media/pci/hailo/src/utils.h
4961 +// SPDX-License-Identifier: GPL-2.0
4963 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4966 +#ifndef _HAILO_PCI_UTILS_H_
4967 +#define _HAILO_PCI_UTILS_H_
4969 +#include <linux/version.h>
4970 +#include <linux/init.h>
4971 +#include <linux/module.h>
4972 +#include <linux/pci.h>
4973 +#include <linux/interrupt.h>
4974 +#include <linux/sched.h>
4975 +#include <linux/pagemap.h>
4979 +void hailo_pcie_clear_notification_wait_list(struct hailo_pcie_board *pBoard, struct file *filp);
4981 +#endif /* _HAILO_PCI_UTILS_H_ */
4983 +++ b/drivers/media/pci/hailo/utils/compact.h
4985 +// SPDX-License-Identifier: GPL-2.0
4987 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4990 +#ifndef _HAILO_PCI_COMPACT_H_
4991 +#define _HAILO_PCI_COMPACT_H_
4993 +#include <linux/version.h>
4994 +#include <linux/scatterlist.h>
4995 +#include <linux/vmalloc.h>
4997 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
4998 +#define class_create_compat class_create
5000 +#define class_create_compat(name) class_create(THIS_MODULE, name)
5003 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
5004 +#define pci_printk(level, pdev, fmt, arg...) \
5005 + dev_printk(level, &(pdev)->dev, fmt, ##arg)
5006 +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
5007 +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
5008 +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
5009 +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
5010 +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
5011 +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
5012 +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
5013 +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
5016 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
5017 +#define get_user_pages_compact get_user_pages
5018 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
5019 +#define get_user_pages_compact(start, nr_pages, gup_flags, pages) \
5020 + get_user_pages(start, nr_pages, gup_flags, pages, NULL)
5021 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
5022 +#define get_user_pages_compact(start, nr_pages, gup_flags, pages) \
5023 + get_user_pages(current, current->mm, start, nr_pages, gup_flags, pages, NULL)
5025 +static inline long get_user_pages_compact(unsigned long start, unsigned long nr_pages,
5026 + unsigned int gup_flags, struct page **pages)
5028 + int write = !!((gup_flags & FOLL_WRITE) == FOLL_WRITE);
5029 + int force = !!((gup_flags & FOLL_FORCE) == FOLL_FORCE);
5030 + return get_user_pages(current, current->mm, start, nr_pages, write, force,
5035 +#ifndef _LINUX_MMAP_LOCK_H
5036 +static inline void mmap_read_lock(struct mm_struct *mm)
5038 + down_read(&mm->mmap_sem);
5041 +static inline void mmap_read_unlock(struct mm_struct *mm)
5043 + up_read(&mm->mmap_sem);
5045 +#endif /* _LINUX_MMAP_LOCK_H */
5047 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
5048 +#define sg_alloc_table_from_pages_segment_compat __sg_alloc_table_from_pages
5050 +static inline struct scatterlist *sg_alloc_table_from_pages_segment_compat(struct sg_table *sgt,
5051 + struct page **pages, unsigned int n_pages, unsigned int offset,
5052 + unsigned long size, unsigned int max_segment,
5053 + struct scatterlist *prv, unsigned int left_pages,
5058 + if (NULL != prv) {
5059 + // prv not supported
5060 + return ERR_PTR(-EINVAL);
5063 + if (0 != left_pages) {
5064 + // Left pages not supported
5065 + return ERR_PTR(-EINVAL);
5068 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
5069 + res = sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, size, max_segment, gfp_mask);
5070 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
5071 + res = __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, max_segment, gfp_mask);
5073 + res = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, gfp_mask);
5076 + return ERR_PTR(res);
5083 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 5, 0, 0 )
5084 +#define compatible_access_ok(a,b,c) access_ok(b, c)
5086 +#define compatible_access_ok(a,b,c) access_ok(a, b, c)
5089 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
5090 +#define PCI_DEVICE_DATA(vend, dev, data) \
5091 + .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
5092 + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
5093 + .driver_data = (kernel_ulong_t)(data)
5096 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
5097 +// On kernels < 4.12.0, kvmalloc and kvfree are not implemented. For simplicity, instead of implementing our own
5098 +// kvmalloc/kvfree, we just use vmalloc and vfree (it may reduce allocation/access performance, but it's worth it).
5099 +static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
5101 + (void)flags; //ignore
5102 + return vmalloc(n * size);
5105 +#define kvfree vfree
5108 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
5109 +static inline bool is_dma_capable(struct device *dev, dma_addr_t dma_addr, size_t size)
5111 +// Case for Raspberry Pi kernel versions 5.4.83 <=> 5.5.0 - already changed bus_dma_mask -> bus_dma_limit
5112 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (defined(HAILO_RASBERRY_PIE) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 83))
5113 + const u64 bus_dma_limit = dev->bus_dma_limit;
5115 + const u64 bus_dma_limit = dev->bus_dma_mask;
5118 + return (dma_addr <= min_not_zero(*dev->dma_mask, bus_dma_limit));
5121 +static inline bool is_dma_capable(struct device *dev, dma_addr_t dma_addr, size_t size)
5123 + // Implementation of dma_capable from linux kernel
5124 + const u64 bus_dma_limit = (*dev->dma_mask + 1) & ~(*dev->dma_mask);
5125 + if (bus_dma_limit && size > bus_dma_limit) {
5129 + if ((dma_addr | (dma_addr + size - 1)) & ~(*dev->dma_mask)) {
5135 +#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
5137 +#endif /* _HAILO_PCI_COMPACT_H_ */
5138 \ No newline at end of file
5140 +++ b/drivers/media/pci/hailo/utils/fw_common.h
5142 +// SPDX-License-Identifier: GPL-2.0
5144 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5147 +#ifndef _HAILO_LINUX_COMMON_H_
5148 +#define _HAILO_LINUX_COMMON_H_
5150 +#include "hailo_ioctl_common.h"
5152 +struct hailo_notification_wait {
5153 + struct list_head notification_wait_list;
5155 + struct file* filp;
5156 + struct completion notification_completion;
5160 +#endif /* _HAILO_LINUX_COMMON_H_ */
5161 \ No newline at end of file
5163 +++ b/drivers/media/pci/hailo/utils/logs.c
5165 +// SPDX-License-Identifier: GPL-2.0
5167 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5172 +int o_dbg = LOGLEVEL_NOTICE;
5174 +++ b/drivers/media/pci/hailo/utils/logs.h
5176 +// SPDX-License-Identifier: GPL-2.0
5178 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5181 +#ifndef _COMMON_LOGS_H_
5182 +#define _COMMON_LOGS_H_
5184 +#include <linux/kern_levels.h>
5186 +// Should be used only by "module_param".
5187 +// Specify the current debug level for the logs
5191 +// Logging, same interface as dev_*, uses o_dbg to filter
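+// KERN_* prefixes expand to "\001" followed by the level digit (e.g. KERN_ERR
+// is "\0013"), so level[1] - '0' recovers the numeric loglevel to compare
+// against o_dbg (default LOGLEVEL_NOTICE, see logs.c).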
5193 +#define hailo_printk(level, dev, fmt, ...) \
5195 + int __level = (level[1] - '0'); \
5196 + if (__level <= o_dbg) { \
5197 + dev_printk((level), dev, fmt, ##__VA_ARGS__); \
5201 +#define hailo_emerg(board, fmt, ...) hailo_printk(KERN_EMERG, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5202 +#define hailo_alert(board, fmt, ...) hailo_printk(KERN_ALERT, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5203 +#define hailo_crit(board, fmt, ...) hailo_printk(KERN_CRIT, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5204 +#define hailo_err(board, fmt, ...) hailo_printk(KERN_ERR, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5205 +#define hailo_warn(board, fmt, ...) hailo_printk(KERN_WARNING, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5206 +#define hailo_notice(board, fmt, ...) hailo_printk(KERN_NOTICE, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5207 +#define hailo_info(board, fmt, ...) hailo_printk(KERN_INFO, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5208 +#define hailo_dbg(board, fmt, ...) hailo_printk(KERN_DEBUG, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5210 +#define hailo_dev_emerg(dev, fmt, ...) hailo_printk(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
5211 +#define hailo_dev_alert(dev, fmt, ...) hailo_printk(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
5212 +#define hailo_dev_crit(dev, fmt, ...) hailo_printk(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
5213 +#define hailo_dev_err(dev, fmt, ...) hailo_printk(KERN_ERR, dev, fmt, ##__VA_ARGS__)
5214 +#define hailo_dev_warn(dev, fmt, ...) hailo_printk(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
5215 +#define hailo_dev_notice(dev, fmt, ...) hailo_printk(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
5216 +#define hailo_dev_info(dev, fmt, ...) hailo_printk(KERN_INFO, dev, fmt, ##__VA_ARGS__)
5217 +#define hailo_dev_dbg(dev, fmt, ...) hailo_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
5220 +#endif //_COMMON_LOGS_H_
5221 \ No newline at end of file
5223 +++ b/drivers/media/pci/hailo/vdma/ioctl.c
5225 +// SPDX-License-Identifier: GPL-2.0
5227 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5231 +#include "memory.h"
5232 +#include "utils/logs.h"
5235 +#include <linux/slab.h>
5236 +#include <linux/uaccess.h>
5239 +long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
5241 + struct hailo_vdma_interrupts_enable_params input;
5242 + struct hailo_vdma_engine *engine = NULL;
5243 + u8 engine_index = 0;
5244 + u32 channels_bitmap = 0;
5246 + if (copy_from_user(&input, (void *)arg, sizeof(input))) {
5247 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5251 + // Validate params (ignoring engine_index >= controller->vdma_engines_count).
5252 + for_each_vdma_engine(controller, engine, engine_index) {
5253 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5254 + if (0 != (channels_bitmap & engine->enabled_channels)) {
5255 + hailo_dev_err(controller->dev, "Trying to enable channels that are already enabled\n");
5260 + for_each_vdma_engine(controller, engine, engine_index) {
5261 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5262 + hailo_vdma_engine_enable_channel_interrupts(engine, channels_bitmap,
5263 + input.enable_timestamps_measure);
5264 + hailo_vdma_update_interrupts_mask(controller, engine_index);
5265 + hailo_dev_info(controller->dev, "Enabled interrupts for engine %u, channels bitmap 0x%x\n",
5266 + engine_index, channels_bitmap);
5272 +long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
5274 + struct hailo_vdma_interrupts_disable_params input;
5275 + struct hailo_vdma_engine *engine = NULL;
5276 + u8 engine_index = 0;
5277 + u32 channels_bitmap = 0;
5279 + if (copy_from_user(&input, (void*)arg, sizeof(input))) {
5280 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5284 + // Validate params (ignoring engine_index >= controller->vdma_engines_count).
5285 + for_each_vdma_engine(controller, engine, engine_index) {
5286 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5287 + if (channels_bitmap != (channels_bitmap & engine->enabled_channels)) {
5288 + hailo_dev_err(controller->dev, "Trying to disable channels that were not enabled\n");
5293 + for_each_vdma_engine(controller, engine, engine_index) {
5294 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5295 + hailo_vdma_engine_interrupts_disable(controller, engine, engine_index,
5299 + // Wake up threads waiting
5300 + wake_up_interruptible_all(&controller->interrupts_wq);
5305 +static bool got_interrupt(struct hailo_vdma_controller *controller,
5306 + u32 channels_bitmap_per_engine[MAX_VDMA_ENGINES])
5308 + struct hailo_vdma_engine *engine = NULL;
5309 + u8 engine_index = 0;
5310 + for_each_vdma_engine(controller, engine, engine_index) {
5311 + if (hailo_vdma_engine_got_interrupt(engine,
5312 + channels_bitmap_per_engine[engine_index])) {
5319 +static void transfer_done(struct hailo_ongoing_transfer *transfer, void *opaque)
5322 + struct hailo_vdma_controller *controller = (struct hailo_vdma_controller *)opaque;
5323 + for (i = 0; i < transfer->buffers_count; i++) {
5324 + struct hailo_vdma_buffer *mapped_buffer = (struct hailo_vdma_buffer *)transfer->buffers[i].opaque;
5325 + hailo_vdma_buffer_sync_cyclic(controller, mapped_buffer, HAILO_SYNC_FOR_CPU,
5326 + transfer->buffers[i].offset, transfer->buffers[i].size);
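+ // Each completed buffer is synced back for CPU access before the interrupt
+ // data is reported, so userspace reads coherent memory.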
5330 +long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
5331 + struct semaphore *mutex, bool *should_up_board_mutex)
5334 + struct hailo_vdma_interrupts_wait_params params = {0};
5335 + struct hailo_vdma_engine *engine = NULL;
5336 + bool bitmap_not_empty = false;
5337 + u8 engine_index = 0;
5338 + u32 irq_bitmap = 0;
5339 + unsigned long irq_saved_flags = 0;
5341 + if (copy_from_user(&params, (void*)arg, sizeof(params))) {
5342 + hailo_dev_err(controller->dev, "HAILO_VDMA_INTERRUPTS_WAIT, copy_from_user fail\n");
5346 + // We don't need to validate that channels_bitmap_per_engine are enabled -
5347 + // If the channel is not enabled we just return an empty interrupts list.
5349 + // Validate params (ignoring engine_index >= controller->vdma_engines_count).
5350 + // It is ok to wait on a disabled channel - the wait will just exit.
5351 + for_each_vdma_engine(controller, engine, engine_index) {
5352 + if (0 != params.channels_bitmap_per_engine[engine_index]) {
5353 + bitmap_not_empty = true;
5356 + if (!bitmap_not_empty) {
5357 + hailo_dev_err(controller->dev, "Got an empty bitmap for wait interrupts\n");
5362 + err = wait_event_interruptible(controller->interrupts_wq,
5363 + got_interrupt(controller, params.channels_bitmap_per_engine));
5365 + hailo_dev_info(controller->dev,
5366 + "wait channel interrupts failed with err=%ld (process was interrupted or killed)\n", err);
5367 + *should_up_board_mutex = false;
5371 + if (down_interruptible(mutex)) {
5372 + hailo_dev_info(controller->dev, "down_interruptible error (process was interrupted or killed)\n");
5373 + *should_up_board_mutex = false;
5374 + return -ERESTARTSYS;
5377 + params.channels_count = 0;
5378 + for_each_vdma_engine(controller, engine, engine_index) {
5380 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
5381 + irq_bitmap = hailo_vdma_engine_read_interrupts(engine,
5382 + params.channels_bitmap_per_engine[engine->index]);
5383 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
5385 + err = hailo_vdma_engine_fill_irq_data(&params, engine, irq_bitmap,
5386 + transfer_done, controller);
5388 + hailo_dev_err(controller->dev, "Failed filling irq data %ld\n", err);
5393 + if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5394 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5401 +static uintptr_t hailo_get_next_vdma_handle(struct hailo_vdma_file_context *context)
5403 + // Note: The kernel code left-shifts the 'offset' param from the user-space call to mmap by PAGE_SHIFT bits and
5404 + // stores the result in 'vm_area_struct.vm_pgoff'. We pass the desc_handle to mmap in the offset param. To
5405 + // counter this, we right-shift the desc_handle. See also 'mmap function'.
5406 + uintptr_t next_handle = 0;
5407 + next_handle = atomic_inc_return(&context->last_vdma_handle);
5408 + return (next_handle << PAGE_SHIFT);
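A hedged user-space sketch of how such a handle round-trips through mmap: because of the left shift above the handle is already a multiple of PAGE_SIZE, so the kernel's offset-to-vm_pgoff conversion loses nothing (fd, protection flags, and size are assumptions here):

    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/mman.h>

    /* Sketch only: map the driver object identified by a page-aligned handle
     * by passing the handle as the mmap offset. */
    static void *map_vdma_object(int fd, uintptr_t handle, size_t size)
    {
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
                    (off_t)handle);
    }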
5411 +long hailo_vdma_buffer_map_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5412 + unsigned long arg)
5414 + struct hailo_vdma_buffer_map_params buf_info;
5415 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5416 + enum dma_data_direction direction = DMA_NONE;
5417 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
5419 + if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
5420 + hailo_dev_err(controller->dev, "copy from user fail\n");
5424 + hailo_dev_info(controller->dev, "address %px tgid %d size: %zu\n",
5425 + buf_info.user_address, current->tgid, buf_info.size);
5427 + direction = get_dma_direction(buf_info.data_direction);
5428 + if (DMA_NONE == direction) {
5429 + hailo_dev_err(controller->dev, "invalid data direction %d\n", buf_info.data_direction);
5433 + low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, buf_info.allocated_buffer_handle);
5435 + mapped_buffer = hailo_vdma_buffer_map(controller->dev,
5436 + buf_info.user_address, buf_info.size, direction, low_memory_buffer);
5437 + if (IS_ERR(mapped_buffer)) {
5438 + hailo_dev_err(controller->dev, "failed map buffer %px\n",
5439 + buf_info.user_address);
5440 + return PTR_ERR(mapped_buffer);
5443 + mapped_buffer->handle = atomic_inc_return(&context->last_vdma_user_buffer_handle);
5444 + buf_info.mapped_handle = mapped_buffer->handle;
5445 + if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
5446 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5447 + hailo_vdma_buffer_put(mapped_buffer);
5451 + list_add(&mapped_buffer->mapped_user_buffer_list, &context->mapped_user_buffer_list);
5452 + hailo_dev_info(controller->dev, "buffer %px (handle %zu) is mapped\n",
5453 + buf_info.user_address, buf_info.mapped_handle);
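Read together, the map ioctl takes a user pointer plus a DMA direction and hands back a handle for later sync/bind/unmap calls. A minimal user-space sketch (struct fields as used above; HAILO_VDMA_BUFFER_MAP and HAILO_DMA_BIDIRECTIONAL come from common/hailo_ioctl_common.h; the zero allocated_buffer_handle for plain user memory is an assumption):

    #include <sys/ioctl.h>

    /* Sketch only: DMA-map 'len' bytes at 'buf' and return the driver handle,
     * or 0 on failure. */
    static size_t map_user_buffer(int fd, void *buf, size_t len)
    {
        struct hailo_vdma_buffer_map_params params = {0};

        params.user_address = buf;
        params.size = len;
        params.data_direction = HAILO_DMA_BIDIRECTIONAL;
        /* allocated_buffer_handle left 0: assumed invalid, meaning plain user
         * memory rather than a driver-allocated low-memory buffer. */
        if (ioctl(fd, HAILO_VDMA_BUFFER_MAP, &params) < 0)
            return 0;
        return params.mapped_handle;
    }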
5457 +long hailo_vdma_buffer_unmap_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5458 + unsigned long arg)
5460 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5461 + struct hailo_vdma_buffer_unmap_params buffer_unmap_params;
5463 + if (copy_from_user(&buffer_unmap_params, (void __user*)arg, sizeof(buffer_unmap_params))) {
5464 + hailo_dev_err(controller->dev, "copy from user fail\n");
5468 + hailo_dev_info(controller->dev, "unmap user buffer handle %zu\n", buffer_unmap_params.mapped_handle);
5470 + mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, buffer_unmap_params.mapped_handle);
5471 + if (mapped_buffer == NULL) {
5472 + hailo_dev_warn(controller->dev, "buffer handle %zu not found\n", buffer_unmap_params.mapped_handle);
5476 + list_del(&mapped_buffer->mapped_user_buffer_list);
5477 + hailo_vdma_buffer_put(mapped_buffer);
5481 +long hailo_vdma_buffer_sync_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
5483 + struct hailo_vdma_buffer_sync_params sync_info = {};
5484 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5486 + if (copy_from_user(&sync_info, (void __user*)arg, sizeof(sync_info))) {
5487 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5491 + if (!(mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, sync_info.handle))) {
5492 + hailo_dev_err(controller->dev, "buffer handle %zu doesn't exist\n", sync_info.handle);
5496 + if ((sync_info.sync_type != HAILO_SYNC_FOR_CPU) && (sync_info.sync_type != HAILO_SYNC_FOR_DEVICE)) {
5497 + hailo_dev_err(controller->dev, "Invalid sync_type given for vdma buffer sync.\n");
5501 + if (sync_info.offset + sync_info.count > mapped_buffer->size) {
5502 + hailo_dev_err(controller->dev, "Invalid offset/count given for vdma buffer sync. offset %zu count %zu buffer size %u\n",
5503 + sync_info.offset, sync_info.count, mapped_buffer->size);
5507 + hailo_vdma_buffer_sync(controller, mapped_buffer, sync_info.sync_type,
5508 + sync_info.offset, sync_info.count);
5512 +long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5513 + unsigned long arg)
5515 + struct hailo_desc_list_create_params params;
5516 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5517 + uintptr_t next_handle = 0;
5518 + long err = -EINVAL;
5520 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5521 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5525 + if (params.is_circular && !is_powerof2(params.desc_count)) {
5526 + hailo_dev_err(controller->dev, "Invalid desc count given : %zu , circular descriptors count must be power of 2\n",
5527 + params.desc_count);
5531 + if (!is_powerof2(params.desc_page_size)) {
5532 + hailo_dev_err(controller->dev, "Invalid desc page size given : %u\n",
5533 + params.desc_page_size);
5537 + hailo_dev_info(controller->dev,
5538 + "Create desc list desc_count: %zu desc_page_size: %u\n",
5539 + params.desc_count, params.desc_page_size);
5541 + descriptors_buffer = kzalloc(sizeof(*descriptors_buffer), GFP_KERNEL);
5542 + if (NULL == descriptors_buffer) {
5543 + hailo_dev_err(controller->dev, "Failed to allocate buffer for descriptors list struct\n");
5547 + next_handle = hailo_get_next_vdma_handle(context);
5549 + err = hailo_desc_list_create(controller->dev, params.desc_count,
5550 + params.desc_page_size, next_handle, params.is_circular,
5551 + descriptors_buffer);
5553 + hailo_dev_err(controller->dev, "failed to allocate descriptors buffer\n");
5554 + kfree(descriptors_buffer);
5558 + list_add(&descriptors_buffer->descriptors_buffer_list, &context->descriptors_buffer_list);
5560 + // Note: The physical address is required for CONTEXT_SWITCH firmware controls
5561 + BUILD_BUG_ON(sizeof(params.dma_address) < sizeof(descriptors_buffer->dma_address));
5562 + params.dma_address = descriptors_buffer->dma_address;
5563 + params.desc_handle = descriptors_buffer->handle;
5565 +    if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5566 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5567 + list_del(&descriptors_buffer->descriptors_buffer_list);
5568 + hailo_desc_list_release(controller->dev, descriptors_buffer);
5569 + kfree(descriptors_buffer);
5573 + hailo_dev_info(controller->dev, "Created desc list, handle 0x%llu\n",
5574 + (u64)params.desc_handle);
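The two power-of-two checks above rely on is_powerof2() from common/utils.h elsewhere in this patch; presumably it reduces to the usual single-set-bit test, sketched here for reference:

    #include <stdbool.h>
    #include <stddef.h>

    /* Sketch of the assumed semantics: a value is a power of 2 iff it is
     * non-zero and has exactly one bit set. */
    static inline bool is_powerof2_sketch(size_t v)
    {
        return (v != 0) && ((v & (v - 1)) == 0);
    }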
5578 +long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5579 + unsigned long arg)
5581 + struct hailo_desc_list_release_params params;
5582 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5584 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5585 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5589 + descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.desc_handle);
5590 + if (descriptors_buffer == NULL) {
5591 + hailo_dev_warn(controller->dev, "not found desc handle %llu\n", (unsigned long long)params.desc_handle);
5595 + list_del(&descriptors_buffer->descriptors_buffer_list);
5596 + hailo_desc_list_release(controller->dev, descriptors_buffer);
5597 + kfree(descriptors_buffer);
5601 +long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5602 + unsigned long arg)
5604 + struct hailo_desc_list_bind_vdma_buffer_params configure_info;
5605 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5606 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5607 + struct hailo_vdma_mapped_transfer_buffer transfer_buffer = {0};
5609 + if (copy_from_user(&configure_info, (void __user*)arg, sizeof(configure_info))) {
5610 + hailo_dev_err(controller->dev, "copy from user fail\n");
5613 + hailo_dev_info(controller->dev, "config buffer_handle=%zu desc_handle=%llu starting_desc=%u\n",
5614 + configure_info.buffer_handle, (u64)configure_info.desc_handle, configure_info.starting_desc);
5616 + mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, configure_info.buffer_handle);
5617 + descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, configure_info.desc_handle);
5618 + if (mapped_buffer == NULL || descriptors_buffer == NULL) {
5619 + hailo_dev_err(controller->dev, "invalid user/descriptors buffer\n");
5623 + if (configure_info.buffer_size > mapped_buffer->size) {
5624 + hailo_dev_err(controller->dev, "invalid buffer size. \n");
5628 + transfer_buffer.sg_table = &mapped_buffer->sg_table;
5629 + transfer_buffer.size = configure_info.buffer_size;
5630 + transfer_buffer.offset = configure_info.buffer_offset;
5632 + return hailo_vdma_program_descriptors_list(
5634 + &descriptors_buffer->desc_list,
5635 + configure_info.starting_desc,
5637 + configure_info.channel_index
5641 +long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5642 + unsigned long arg)
5644 + struct hailo_allocate_low_memory_buffer_params buf_info = {0};
5645 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
5646 + long err = -EINVAL;
5648 + if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
5649 + hailo_dev_err(controller->dev, "copy from user fail\n");
5653 + low_memory_buffer = kzalloc(sizeof(*low_memory_buffer), GFP_KERNEL);
5654 + if (NULL == low_memory_buffer) {
5655 + hailo_dev_err(controller->dev, "memory alloc failed\n");
5659 + err = hailo_vdma_low_memory_buffer_alloc(buf_info.buffer_size, low_memory_buffer);
5661 + kfree(low_memory_buffer);
5662 + hailo_dev_err(controller->dev, "failed allocating buffer from driver\n");
5666 + // Get handle for allocated buffer
5667 + low_memory_buffer->handle = hailo_get_next_vdma_handle(context);
5669 + list_add(&low_memory_buffer->vdma_low_memory_buffer_list, &context->vdma_low_memory_buffer_list);
5671 + buf_info.buffer_handle = low_memory_buffer->handle;
5672 + if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
5673 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5674 + list_del(&low_memory_buffer->vdma_low_memory_buffer_list);
5675 + hailo_vdma_low_memory_buffer_free(low_memory_buffer);
5676 + kfree(low_memory_buffer);
5683 +long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5684 + unsigned long arg)
5686 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
5687 + struct hailo_free_low_memory_buffer_params params = {0};
5689 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5690 + hailo_dev_err(controller->dev, "copy from user fail\n");
5694 + low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, params.buffer_handle);
5695 + if (NULL == low_memory_buffer) {
5696 + hailo_dev_warn(controller->dev, "vdma buffer handle %lx not found\n", params.buffer_handle);
5700 + list_del(&low_memory_buffer->vdma_low_memory_buffer_list);
5701 + hailo_vdma_low_memory_buffer_free(low_memory_buffer);
5702 + kfree(low_memory_buffer);
5706 +long hailo_mark_as_in_use(struct hailo_vdma_controller *controller, unsigned long arg, struct file *filp)
5708 + struct hailo_mark_as_in_use_params params = {0};
5710 +    // If the device is already used by this FD, return false to indicate it's free for use
5711 + if (filp == controller->used_by_filp) {
5712 + params.in_use = false;
5713 + } else if (NULL != controller->used_by_filp) {
5714 + params.in_use = true;
5716 + controller->used_by_filp = filp;
5717 + params.in_use = false;
5720 +    if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5721 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5728 +long hailo_vdma_continuous_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
5730 + struct hailo_allocate_continuous_buffer_params buf_info = {0};
5731 + struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
5732 + long err = -EINVAL;
5733 + size_t aligned_buffer_size = 0;
5735 + if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
5736 + hailo_dev_err(controller->dev, "copy from user fail\n");
5740 + continuous_buffer = kzalloc(sizeof(*continuous_buffer), GFP_KERNEL);
5741 + if (NULL == continuous_buffer) {
5742 + hailo_dev_err(controller->dev, "memory alloc failed\n");
5746 + // We use PAGE_ALIGN to support mmap
5747 + aligned_buffer_size = PAGE_ALIGN(buf_info.buffer_size);
5748 + err = hailo_vdma_continuous_buffer_alloc(controller->dev, aligned_buffer_size, continuous_buffer);
5750 + kfree(continuous_buffer);
5754 + continuous_buffer->handle = hailo_get_next_vdma_handle(context);
5755 + list_add(&continuous_buffer->continuous_buffer_list, &context->continuous_buffer_list);
5757 + buf_info.buffer_handle = continuous_buffer->handle;
5758 + buf_info.dma_address = continuous_buffer->dma_address;
5759 + if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
5760 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5761 + list_del(&continuous_buffer->continuous_buffer_list);
5762 + hailo_vdma_continuous_buffer_free(controller->dev, continuous_buffer);
5763 + kfree(continuous_buffer);
5770 +long hailo_vdma_continuous_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
5772 + struct hailo_free_continuous_buffer_params params;
5773 + struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
5775 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5776 + hailo_dev_err(controller->dev, "copy from user fail\n");
5780 + continuous_buffer = hailo_vdma_find_continuous_buffer(context, params.buffer_handle);
5781 + if (NULL == continuous_buffer) {
5782 + hailo_dev_warn(controller->dev, "vdma buffer handle %lx not found\n", params.buffer_handle);
5786 + list_del(&continuous_buffer->continuous_buffer_list);
5787 + hailo_vdma_continuous_buffer_free(controller->dev, continuous_buffer);
5788 + kfree(continuous_buffer);
5792 +long hailo_vdma_interrupts_read_timestamps_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
5794 + struct hailo_vdma_interrupts_read_timestamp_params *params = &controller->read_interrupt_timestamps_params;
5795 + struct hailo_vdma_engine *engine = NULL;
5796 + int err = -EINVAL;
5798 + hailo_dev_dbg(controller->dev, "Start read interrupt timestamps ioctl\n");
5800 + if (copy_from_user(params, (void __user*)arg, sizeof(*params))) {
5801 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5805 + if (params->engine_index >= controller->vdma_engines_count) {
5806 + hailo_dev_err(controller->dev, "Invalid engine %u", params->engine_index);
5809 + engine = &controller->vdma_engines[params->engine_index];
5811 + err = hailo_vdma_engine_read_timestamps(engine, params);
5813 + hailo_dev_err(controller->dev, "Failed read engine interrupts for %u:%u",
5814 + params->engine_index, params->channel_index);
5818 + if (copy_to_user((void __user*)arg, params, sizeof(*params))) {
5819 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5826 +long hailo_vdma_launch_transfer_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5827 + unsigned long arg)
5829 + struct hailo_vdma_launch_transfer_params params;
5830 + struct hailo_vdma_engine *engine = NULL;
5831 + struct hailo_vdma_channel *channel = NULL;
5832 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5833 + struct hailo_vdma_mapped_transfer_buffer mapped_transfer_buffers[ARRAY_SIZE(params.buffers)] = {0};
5834 + int ret = -EINVAL;
5837 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5838 + hailo_dev_err(controller->dev, "copy from user fail\n");
5842 + if (params.engine_index >= controller->vdma_engines_count) {
5843 + hailo_dev_err(controller->dev, "Invalid engine %u", params.engine_index);
5846 + engine = &controller->vdma_engines[params.engine_index];
5848 + if (params.channel_index >= ARRAY_SIZE(engine->channels)) {
5849 + hailo_dev_err(controller->dev, "Invalid channel %u", params.channel_index);
5852 + channel = &engine->channels[params.channel_index];
5854 + if (params.buffers_count > ARRAY_SIZE(params.buffers)) {
5855 + hailo_dev_err(controller->dev, "too many buffers %u\n", params.buffers_count);
5859 + descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.desc_handle);
5860 + if (descriptors_buffer == NULL) {
5861 + hailo_dev_err(controller->dev, "invalid descriptors list handle\n");
5865 + for (i = 0; i < params.buffers_count; i++) {
5866 + struct hailo_vdma_buffer *mapped_buffer =
5867 + hailo_vdma_find_mapped_user_buffer(context, params.buffers[i].mapped_buffer_handle);
5868 + if (mapped_buffer == NULL) {
5869 + hailo_dev_err(controller->dev, "invalid user buffer\n");
5873 + if (params.buffers[i].size > mapped_buffer->size) {
5874 + hailo_dev_err(controller->dev, "Syncing size %u while buffer size is %u\n",
5875 + params.buffers[i].size, mapped_buffer->size);
5879 + if (params.buffers[i].offset > mapped_buffer->size) {
5880 + hailo_dev_err(controller->dev, "Syncing offset %u while buffer size is %u\n",
5881 + params.buffers[i].offset, mapped_buffer->size);
5885 +        // Syncing the buffer to the device changes its ownership from host to device.
5886 +        // We sync on D2H as well when the user owns the buffer, since the host might have changed the
5887 +        // buffer between the time it was mapped and the current async transfer.
5888 + hailo_vdma_buffer_sync_cyclic(controller, mapped_buffer, HAILO_SYNC_FOR_DEVICE,
5889 + params.buffers[i].offset, params.buffers[i].size);
5891 + mapped_transfer_buffers[i].sg_table = &mapped_buffer->sg_table;
5892 + mapped_transfer_buffers[i].size = params.buffers[i].size;
5893 + mapped_transfer_buffers[i].offset = params.buffers[i].offset;
5894 + mapped_transfer_buffers[i].opaque = mapped_buffer;
5897 + ret = hailo_vdma_launch_transfer(
5900 + &descriptors_buffer->desc_list,
5901 + params.starting_desc,
5902 + params.buffers_count,
5903 + mapped_transfer_buffers,
5904 + params.should_bind,
5905 + params.first_interrupts_domain,
5906 + params.last_interrupts_domain,
5910 + hailo_dev_err(controller->dev, "Failed launch transfer %d\n", ret);
5914 + params.descs_programed = ret;
5916 +    if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5917 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5923 \ No newline at end of file
5925 +++ b/drivers/media/pci/hailo/vdma/ioctl.h
5927 +// SPDX-License-Identifier: GPL-2.0
5929 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5932 +#ifndef _HAILO_VDMA_IOCTL_H_
5933 +#define _HAILO_VDMA_IOCTL_H_
5935 +#include "vdma/vdma.h"
5937 +long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
5938 +long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
5939 +long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
5940 + struct semaphore *mutex, bool *should_up_board_mutex);
5942 +long hailo_vdma_buffer_map_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5943 +long hailo_vdma_buffer_unmap_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long handle);
5944 +long hailo_vdma_buffer_sync_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5946 +long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5947 +long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5948 +long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5950 +long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5951 +long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5953 +long hailo_mark_as_in_use(struct hailo_vdma_controller *controller, unsigned long arg, struct file *filp);
5955 +long hailo_vdma_continuous_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5956 +long hailo_vdma_continuous_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5958 +long hailo_vdma_interrupts_read_timestamps_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
5960 +long hailo_vdma_launch_transfer_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5961 + unsigned long arg);
5963 +#endif /* _HAILO_VDMA_IOCTL_H_ */
5964 \ No newline at end of file
5966 +++ b/drivers/media/pci/hailo/vdma/memory.c
5968 +// SPDX-License-Identifier: GPL-2.0
5970 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5973 +#define pr_fmt(fmt) "hailo: " fmt
5975 +#include "memory.h"
5976 +#include "utils/compact.h"
5978 +#include <linux/slab.h>
5979 +#include <linux/scatterlist.h>
5980 +#include <linux/sched.h>
5983 +#define SGL_MAX_SEGMENT_SIZE (0x10000)
5985 +#define MMIO_AND_NO_PAGES_VMA_MASK (VM_IO | VM_PFNMAP)
5987 +static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
5988 + struct sg_table *sgt);
5989 +static int prepare_sg_table(struct sg_table *sg_table, void __user* user_address, u32 size,
5990 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
5991 +static void clear_sg_table(struct sg_table *sgt);
5993 +struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
5994 + void __user *user_address, size_t size, enum dma_data_direction direction,
5995 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
5997 + int ret = -EINVAL;
5998 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5999 + struct sg_table sgt = {0};
6000 + struct vm_area_struct *vma = NULL;
6001 + bool is_mmio = false;
6003 + mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
6004 + if (NULL == mapped_buffer) {
6005 + dev_err(dev, "memory alloc failed\n");
6010 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING)) {
6011 + vma = find_vma(current->mm, (uintptr_t)user_address);
6012 + if (NULL == vma) {
6013 + dev_err(dev, "no vma for virt_addr/size = 0x%08lx/0x%08zx\n", (uintptr_t)user_address, size);
6019 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) &&
6020 + (MMIO_AND_NO_PAGES_VMA_MASK == (vma->vm_flags & MMIO_AND_NO_PAGES_VMA_MASK))) {
6021 + // user_address represents memory mapped I/O and isn't backed by 'struct page' (only by pure pfn)
6022 + if (NULL != low_mem_driver_allocated_buffer) {
6023 +            // low_mem_driver_allocated_buffer is backed by regular 'struct page' addresses, just in low memory
6024 + dev_err(dev, "low_mem_driver_allocated_buffer shouldn't be provided with an mmio address\n");
6026 + goto free_buffer_struct;
6029 + ret = map_mmio_address(user_address, size, vma, &sgt);
6031 + dev_err(dev, "failed to map mmio address %d\n", ret);
6032 + goto free_buffer_struct;
6037 + // user_address is a standard 'struct page' backed memory address
6038 + ret = prepare_sg_table(&sgt, user_address, size, low_mem_driver_allocated_buffer);
6040 + dev_err(dev, "failed to set sg list for user buffer %d\n", ret);
6041 + goto free_buffer_struct;
6043 + sgt.nents = dma_map_sg(dev, sgt.sgl, sgt.orig_nents, direction);
6044 + if (0 == sgt.nents) {
6045 + dev_err(dev, "failed to map sg list for user buffer\n");
6047 + goto clear_sg_table;
6051 + kref_init(&mapped_buffer->kref);
6052 + mapped_buffer->device = dev;
6053 + mapped_buffer->user_address = user_address;
6054 + mapped_buffer->size = size;
6055 + mapped_buffer->data_direction = direction;
6056 + mapped_buffer->sg_table = sgt;
6057 + mapped_buffer->is_mmio = is_mmio;
6059 + return mapped_buffer;
6062 + clear_sg_table(&sgt);
6063 +free_buffer_struct:
6064 + kfree(mapped_buffer);
6066 + return ERR_PTR(ret);
6069 +static void unmap_buffer(struct kref *kref)
6071 + struct hailo_vdma_buffer *buf = container_of(kref, struct hailo_vdma_buffer, kref);
6073 + if (!buf->is_mmio) {
6074 + dma_unmap_sg(buf->device, buf->sg_table.sgl, buf->sg_table.orig_nents, buf->data_direction);
6077 + clear_sg_table(&buf->sg_table);
6081 +void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf)
6083 + kref_get(&buf->kref);
6086 +void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf)
6088 + kref_put(&buf->kref, unmap_buffer);
6091 +static void vdma_sync_entire_buffer(struct hailo_vdma_controller *controller,
6092 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type)
6094 + if (sync_type == HAILO_SYNC_FOR_CPU) {
6095 + dma_sync_sg_for_cpu(controller->dev, mapped_buffer->sg_table.sgl, mapped_buffer->sg_table.nents,
6096 + mapped_buffer->data_direction);
6098 + dma_sync_sg_for_device(controller->dev, mapped_buffer->sg_table.sgl, mapped_buffer->sg_table.nents,
6099 + mapped_buffer->data_direction);
6103 +typedef void (*dma_sync_single_callback)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
6104 +// Sync 'size' bytes starting at 'offset'
6105 +static void vdma_sync_buffer_interval(struct hailo_vdma_controller *controller,
6106 + struct hailo_vdma_buffer *mapped_buffer,
6107 + size_t offset, size_t size, enum hailo_vdma_buffer_sync_type sync_type)
6109 + size_t sync_start_offset = offset;
6110 + size_t sync_end_offset = offset + size;
6111 + dma_sync_single_callback dma_sync_single = (sync_type == HAILO_SYNC_FOR_CPU) ?
6112 + dma_sync_single_for_cpu :
6113 + dma_sync_single_for_device;
6114 + struct scatterlist* sg_entry = NULL;
6115 + size_t current_iter_offset = 0;
6118 + for_each_sg(mapped_buffer->sg_table.sgl, sg_entry, mapped_buffer->sg_table.nents, i) {
6119 +        // Check if the intervals [current_iter_offset, current_iter_offset + sg_dma_len(sg_entry)] and
6120 +        // [sync_start_offset, sync_end_offset] have any intersection. If offset isn't at the start of an sg_entry, we still want to sync it.
6121 + if (max(sync_start_offset, current_iter_offset) <= min(sync_end_offset, current_iter_offset + sg_dma_len(sg_entry))) {
6122 + dma_sync_single(controller->dev, sg_dma_address(sg_entry), sg_dma_len(sg_entry),
6123 + mapped_buffer->data_direction);
6126 + current_iter_offset += sg_dma_len(sg_entry);
6130 +void hailo_vdma_buffer_sync(struct hailo_vdma_controller *controller,
6131 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6132 + size_t offset, size_t size)
6134 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) && mapped_buffer->is_mmio) {
6135 + // MMIO buffers don't need to be sync'd
6139 + if ((offset == 0) && (size == mapped_buffer->size)) {
6140 + vdma_sync_entire_buffer(controller, mapped_buffer, sync_type);
6142 + vdma_sync_buffer_interval(controller, mapped_buffer, offset, size, sync_type);
6146 +// Similar to hailo_vdma_buffer_sync, but allows a circular (wrap-around) sync of the buffer.
6147 +void hailo_vdma_buffer_sync_cyclic(struct hailo_vdma_controller *controller,
6148 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6149 + size_t offset, size_t size)
6151 + size_t size_to_end = min(size, mapped_buffer->size - offset);
6153 + hailo_vdma_buffer_sync(controller, mapped_buffer, sync_type, offset, size_to_end);
6155 + if (size_to_end < size) {
6156 + hailo_vdma_buffer_sync(controller, mapped_buffer, sync_type, 0, size - size_to_end);
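A short worked example of the wrap-around split above, with illustrative values: for mapped_buffer->size = 16384, offset = 12288 and size = 8192, size_to_end = min(8192, 16384 - 12288) = 4096, so the first call syncs [12288, 16384) and the second syncs [0, 4096). A self-contained sketch of the same arithmetic:

    #include <assert.h>
    #include <stddef.h>

    /* Sketch: compute the two ranges hailo_vdma_buffer_sync_cyclic would sync. */
    static void cyclic_split(size_t buf_size, size_t offset, size_t size,
                             size_t *first_len, size_t *wrap_len)
    {
        *first_len = (size < buf_size - offset) ? size : (buf_size - offset);
        *wrap_len = size - *first_len; /* 0 when no wrap-around occurs */
    }

    int main(void)
    {
        size_t first, wrap;

        cyclic_split(16384, 12288, 8192, &first, &wrap);
        assert(first == 4096 && wrap == 4096);
        return 0;
    }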
6160 +struct hailo_vdma_buffer* hailo_vdma_find_mapped_user_buffer(struct hailo_vdma_file_context *context,
6161 + size_t buffer_handle)
6163 + struct hailo_vdma_buffer *cur = NULL;
6164 + list_for_each_entry(cur, &context->mapped_user_buffer_list, mapped_user_buffer_list) {
6165 + if (cur->handle == buffer_handle) {
6172 +void hailo_vdma_clear_mapped_user_buffer_list(struct hailo_vdma_file_context *context,
6173 + struct hailo_vdma_controller *controller)
6175 + struct hailo_vdma_buffer *cur = NULL, *next = NULL;
6176 + list_for_each_entry_safe(cur, next, &context->mapped_user_buffer_list, mapped_user_buffer_list) {
6177 + list_del(&cur->mapped_user_buffer_list);
6178 + hailo_vdma_buffer_put(cur);
6183 +int hailo_desc_list_create(struct device *dev, u32 descriptors_count, u16 desc_page_size,
6184 + uintptr_t desc_handle, bool is_circular, struct hailo_descriptors_list_buffer *descriptors)
6186 + size_t buffer_size = 0;
6187 +    const u64 align = VDMA_DESCRIPTOR_LIST_ALIGN; // First address must be aligned to 64 KB (per the VDMA registers documentation)
6189 + buffer_size = descriptors_count * sizeof(struct hailo_vdma_descriptor);
6190 + buffer_size = ALIGN(buffer_size, align);
6192 + descriptors->kernel_address = dma_alloc_coherent(dev, buffer_size,
6193 + &descriptors->dma_address, GFP_KERNEL | __GFP_ZERO);
6194 + if (descriptors->kernel_address == NULL) {
6195 + dev_err(dev, "Failed to allocate descriptors list, desc_count 0x%x, buffer_size 0x%zx, This failure means there is not a sufficient amount of CMA memory "
6196 + "(contiguous physical memory), This usually is caused by lack of general system memory. Please check you have sufficent memory.\n",
6197 + descriptors_count, buffer_size);
6201 + descriptors->buffer_size = buffer_size;
6202 + descriptors->handle = desc_handle;
6204 + descriptors->desc_list.desc_list = descriptors->kernel_address;
6205 + descriptors->desc_list.desc_count = descriptors_count;
6206 + descriptors->desc_list.desc_page_size = desc_page_size;
6207 + descriptors->desc_list.is_circular = is_circular;
6212 +void hailo_desc_list_release(struct device *dev, struct hailo_descriptors_list_buffer *descriptors)
6214 + dma_free_coherent(dev, descriptors->buffer_size, descriptors->kernel_address, descriptors->dma_address);
6217 +struct hailo_descriptors_list_buffer* hailo_vdma_find_descriptors_buffer(struct hailo_vdma_file_context *context,
6218 + uintptr_t desc_handle)
6220 + struct hailo_descriptors_list_buffer *cur = NULL;
6221 + list_for_each_entry(cur, &context->descriptors_buffer_list, descriptors_buffer_list) {
6222 + if (cur->handle == desc_handle) {
6229 +void hailo_vdma_clear_descriptors_buffer_list(struct hailo_vdma_file_context *context,
6230 + struct hailo_vdma_controller *controller)
6232 + struct hailo_descriptors_list_buffer *cur = NULL, *next = NULL;
6233 + list_for_each_entry_safe(cur, next, &context->descriptors_buffer_list, descriptors_buffer_list) {
6234 + list_del(&cur->descriptors_buffer_list);
6235 + hailo_desc_list_release(controller->dev, cur);
6240 +int hailo_vdma_low_memory_buffer_alloc(size_t size, struct hailo_vdma_low_memory_buffer *low_memory_buffer)
6242 + int ret = -EINVAL;
6243 + void *kernel_address = NULL;
6244 + size_t pages_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
6245 + size_t num_allocated = 0, i = 0;
6246 + void **pages = NULL;
6248 + pages = kcalloc(pages_count, sizeof(*pages), GFP_KERNEL);
6249 + if (NULL == pages) {
6250 + pr_err("Failed to allocate pages for buffer (size %zu)\n", size);
6255 + for (num_allocated = 0; num_allocated < pages_count; num_allocated++) {
6256 +        // The __GFP_DMA32 flag limits system memory allocations to the lowest 4 GB of physical memory in order to guarantee that DMA
6257 +        // operations will not have to use bounce buffers on certain architectures (e.g. 32-bit DMA enabled architectures)
6258 + kernel_address = (void*)__get_free_page(__GFP_DMA32);
6259 + if (NULL == kernel_address) {
6260 + pr_err("Failed to allocate %zu coherent bytes\n", (size_t)PAGE_SIZE);
6265 + pages[num_allocated] = kernel_address;
6268 + low_memory_buffer->pages_count = pages_count;
6269 + low_memory_buffer->pages_address = pages;
6274 + if (NULL != pages) {
6275 + for (i = 0; i < num_allocated; i++) {
6276 + free_page((long unsigned)pages[i]);
6285 +void hailo_vdma_low_memory_buffer_free(struct hailo_vdma_low_memory_buffer *low_memory_buffer)
6288 + if (NULL == low_memory_buffer) {
6292 + for (i = 0; i < low_memory_buffer->pages_count; i++) {
6293 + free_page((long unsigned)low_memory_buffer->pages_address[i]);
6296 + kfree(low_memory_buffer->pages_address);
6299 +struct hailo_vdma_low_memory_buffer* hailo_vdma_find_low_memory_buffer(struct hailo_vdma_file_context *context,
6300 + uintptr_t buf_handle)
6302 + struct hailo_vdma_low_memory_buffer *cur = NULL;
6303 + list_for_each_entry(cur, &context->vdma_low_memory_buffer_list, vdma_low_memory_buffer_list) {
6304 + if (cur->handle == buf_handle) {
6312 +void hailo_vdma_clear_low_memory_buffer_list(struct hailo_vdma_file_context *context)
6314 + struct hailo_vdma_low_memory_buffer *cur = NULL, *next = NULL;
6315 + list_for_each_entry_safe(cur, next, &context->vdma_low_memory_buffer_list, vdma_low_memory_buffer_list) {
6316 + list_del(&cur->vdma_low_memory_buffer_list);
6317 + hailo_vdma_low_memory_buffer_free(cur);
6322 +int hailo_vdma_continuous_buffer_alloc(struct device *dev, size_t size,
6323 + struct hailo_vdma_continuous_buffer *continuous_buffer)
6325 + dma_addr_t dma_address = 0;
6326 + void *kernel_address = NULL;
6328 + kernel_address = dma_alloc_coherent(dev, size, &dma_address, GFP_KERNEL);
6329 + if (NULL == kernel_address) {
6330 + dev_warn(dev, "Failed to allocate continuous buffer, size 0x%zx. This failure means there is not a sufficient amount of CMA memory "
6331 + "(contiguous physical memory), This usually is caused by lack of general system memory. Please check you have sufficent memory.\n", size);
6335 + continuous_buffer->kernel_address = kernel_address;
6336 + continuous_buffer->dma_address = dma_address;
6337 + continuous_buffer->size = size;
6341 +void hailo_vdma_continuous_buffer_free(struct device *dev,
6342 + struct hailo_vdma_continuous_buffer *continuous_buffer)
6344 + dma_free_coherent(dev, continuous_buffer->size, continuous_buffer->kernel_address,
6345 + continuous_buffer->dma_address);
6348 +struct hailo_vdma_continuous_buffer* hailo_vdma_find_continuous_buffer(struct hailo_vdma_file_context *context,
6349 + uintptr_t buf_handle)
6351 + struct hailo_vdma_continuous_buffer *cur = NULL;
6352 + list_for_each_entry(cur, &context->continuous_buffer_list, continuous_buffer_list) {
6353 + if (cur->handle == buf_handle) {
6361 +void hailo_vdma_clear_continuous_buffer_list(struct hailo_vdma_file_context *context,
6362 + struct hailo_vdma_controller *controller)
6364 + struct hailo_vdma_continuous_buffer *cur = NULL, *next = NULL;
6365 + list_for_each_entry_safe(cur, next, &context->continuous_buffer_list, continuous_buffer_list) {
6366 + list_del(&cur->continuous_buffer_list);
6367 + hailo_vdma_continuous_buffer_free(controller->dev, cur);
6372 +// Assumes the provided user_address belongs to the vma and that MMIO_AND_NO_PAGES_VMA_MASK bits are set under
6373 +// vma->vm_flags. This is validated in hailo_vdma_buffer_map, and won't be checked here
6374 +static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
6375 + struct sg_table *sgt)
6377 + int ret = -EINVAL;
6378 + unsigned long i = 0;
6379 + unsigned long pfn = 0;
6380 + unsigned long next_pfn = 0;
6381 + phys_addr_t phys_addr = 0;
6382 + dma_addr_t mmio_dma_address = 0;
6383 + const uintptr_t virt_addr = (uintptr_t)user_address;
6384 +    const u32 vma_size = vma->vm_end - vma->vm_start; // vm_end is exclusive
6385 + const uintptr_t num_pages = PFN_UP(virt_addr + size) - PFN_DOWN(virt_addr);
6387 + // Check that the vma that was marked as MMIO_AND_NO_PAGES_VMA_MASK is big enough
6388 + if (vma_size < size) {
6389 + pr_err("vma (%u bytes) smaller than provided buffer (%u bytes)\n", vma_size, size);
6393 + // Get the physical address of user_address
6394 + ret = follow_pfn(vma, virt_addr, &pfn);
6396 + pr_err("follow_pfn failed with %d\n", ret);
6399 + phys_addr = __pfn_to_phys(pfn) + offset_in_page(virt_addr);
6401 + // Make sure the physical memory is contiguous
6402 + for (i = 1; i < num_pages; ++i) {
6403 + ret = follow_pfn(vma, virt_addr + (i << PAGE_SHIFT), &next_pfn);
6405 + pr_err("follow_pfn failed with %d\n", ret);
6408 + if (next_pfn != pfn + 1) {
6409 + pr_err("non-contiguous physical memory\n");
6415 + // phys_addr to dma
6416 + // TODO: need dma_map_resource here? doesn't work currently (we get dma_mapping_error on the returned dma addr)
6418 + mmio_dma_address = (dma_addr_t)phys_addr;
6420 + // Create a page-less scatterlist.
6421 + ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
6426 + sg_assign_page(sgt->sgl, NULL);
6427 + sg_dma_address(sgt->sgl) = mmio_dma_address;
6428 + sg_dma_len(sgt->sgl) = size;
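The num_pages arithmetic near the top of this function counts every page the buffer touches, including partial first and last pages. A self-contained sketch of that PFN_UP/PFN_DOWN span count, with illustrative values (4 KiB pages assumed):

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_PAGE_SHIFT 12 /* assumption: 4 KiB pages */

    /* Sketch of the page-span count used above:
     * PFN_UP(virt_addr + size) - PFN_DOWN(virt_addr). */
    static uintptr_t num_pages_spanned(uintptr_t virt_addr, uint32_t size)
    {
        uintptr_t first = virt_addr >> SKETCH_PAGE_SHIFT; /* PFN_DOWN */
        uintptr_t last = (virt_addr + size + (1u << SKETCH_PAGE_SHIFT) - 1)
                         >> SKETCH_PAGE_SHIFT;            /* PFN_UP */
        return last - first;
    }

    int main(void)
    {
        /* 0x2000 bytes starting 0x800 into a page straddle three pages,
         * so three consecutive pfns must be verified for contiguity. */
        assert(num_pages_spanned(0x10000800u, 0x2000u) == 3);
        return 0;
    }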
6433 +static int prepare_sg_table(struct sg_table *sg_table, void __user *user_address, u32 size,
6434 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
6436 + int ret = -EINVAL;
6437 + int pinned_pages = 0;
6438 + size_t npages = 0;
6439 + struct page **pages = NULL;
6441 + struct scatterlist *sg_alloc_res = NULL;
6443 + npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
6444 + pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
6449 +    // Check whether we are mapping a user-allocated buffer or a driver-allocated low-memory buffer
6450 + if (NULL == low_mem_driver_allocated_buffer) {
6451 + mmap_read_lock(current->mm);
6452 + pinned_pages = get_user_pages_compact((unsigned long)user_address,
6453 + npages, FOLL_WRITE | FOLL_FORCE, pages);
6454 + mmap_read_unlock(current->mm);
6456 + if (pinned_pages < 0) {
6457 + pr_err("get_user_pages failed with %d\n", pinned_pages);
6458 + ret = pinned_pages;
6460 + } else if (pinned_pages != npages) {
6461 + pr_err("Pinned %d out of %zu\n", pinned_pages, npages);
6463 + goto release_pages;
6466 +        // Sanity check in case the user provides the wrong buffer
6467 + if (npages != low_mem_driver_allocated_buffer->pages_count) {
6468 + pr_err("Received wrong amount of pages %zu to map expected %zu\n",
6469 + npages, low_mem_driver_allocated_buffer->pages_count);
6474 + for (i = 0; i < npages; i++) {
6475 + pages[i] = virt_to_page(low_mem_driver_allocated_buffer->pages_address[i]);
6476 + get_page(pages[i]);
6480 + sg_alloc_res = sg_alloc_table_from_pages_segment_compat(sg_table, pages, npages,
6481 + 0, size, SGL_MAX_SEGMENT_SIZE, NULL, 0, GFP_KERNEL);
6482 + if (IS_ERR(sg_alloc_res)) {
6483 + ret = PTR_ERR(sg_alloc_res);
6484 + pr_err("sg table alloc failed (err %d)..\n", ret);
6485 + goto release_pages;
6491 + for (i = 0; i < pinned_pages; i++) {
6492 + if (!PageReserved(pages[i])) {
6493 + SetPageDirty(pages[i]);
6495 + put_page(pages[i]);
6502 +static void clear_sg_table(struct sg_table *sgt)
6504 + struct sg_page_iter iter;
6505 + struct page *page = NULL;
6507 + for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
6508 + page = sg_page_iter_page(&iter);
6510 + if (!PageReserved(page)) {
6511 + SetPageDirty(page);
6517 + sg_free_table(sgt);
6520 +++ b/drivers/media/pci/hailo/vdma/memory.h
6522 +// SPDX-License-Identifier: GPL-2.0
6524 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
6527 + * vDMA memory utility (including allocation and mappings)
6530 +#ifndef _HAILO_VDMA_MEMORY_H_
6531 +#define _HAILO_VDMA_MEMORY_H_
6533 +#include "vdma/vdma.h"
6535 +struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
6536 + void __user *user_address, size_t size, enum dma_data_direction direction,
6537 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
6538 +void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf);
6539 +void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf);
6541 +void hailo_vdma_buffer_sync(struct hailo_vdma_controller *controller,
6542 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6543 + size_t offset, size_t size);
6544 +void hailo_vdma_buffer_sync_cyclic(struct hailo_vdma_controller *controller,
6545 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6546 + size_t offset, size_t size);
6548 +struct hailo_vdma_buffer* hailo_vdma_find_mapped_user_buffer(struct hailo_vdma_file_context *context,
6549 + size_t buffer_handle);
6550 +void hailo_vdma_clear_mapped_user_buffer_list(struct hailo_vdma_file_context *context,
6551 + struct hailo_vdma_controller *controller);
6553 +int hailo_desc_list_create(struct device *dev, u32 descriptors_count, u16 desc_page_size,
6554 + uintptr_t desc_handle, bool is_circular, struct hailo_descriptors_list_buffer *descriptors);
6555 +void hailo_desc_list_release(struct device *dev, struct hailo_descriptors_list_buffer *descriptors);
6556 +struct hailo_descriptors_list_buffer* hailo_vdma_find_descriptors_buffer(struct hailo_vdma_file_context *context,
6557 + uintptr_t desc_handle);
6558 +void hailo_vdma_clear_descriptors_buffer_list(struct hailo_vdma_file_context *context,
6559 + struct hailo_vdma_controller *controller);
6561 +int hailo_vdma_low_memory_buffer_alloc(size_t size, struct hailo_vdma_low_memory_buffer *low_memory_buffer);
6562 +void hailo_vdma_low_memory_buffer_free(struct hailo_vdma_low_memory_buffer *low_memory_buffer);
6563 +struct hailo_vdma_low_memory_buffer* hailo_vdma_find_low_memory_buffer(struct hailo_vdma_file_context *context,
6564 + uintptr_t buf_handle);
6565 +void hailo_vdma_clear_low_memory_buffer_list(struct hailo_vdma_file_context *context);
6567 +int hailo_vdma_continuous_buffer_alloc(struct device *dev, size_t size,
6568 + struct hailo_vdma_continuous_buffer *continuous_buffer);
6569 +void hailo_vdma_continuous_buffer_free(struct device *dev,
6570 + struct hailo_vdma_continuous_buffer *continuous_buffer);
6571 +struct hailo_vdma_continuous_buffer* hailo_vdma_find_continuous_buffer(struct hailo_vdma_file_context *context,
6572 + uintptr_t buf_handle);
6573 +void hailo_vdma_clear_continuous_buffer_list(struct hailo_vdma_file_context *context,
6574 + struct hailo_vdma_controller *controller);
6575 +#endif /* _HAILO_VDMA_MEMORY_H_ */
6576 \ No newline at end of file
6578 +++ b/drivers/media/pci/hailo/vdma/vdma.c
6580 +// SPDX-License-Identifier: GPL-2.0
6582 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
6585 +#define pr_fmt(fmt) "hailo: " fmt
6588 +#include "memory.h"
6590 +#include "utils/logs.h"
6592 +#include <linux/sched.h>
6593 +#include <linux/version.h>
6595 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
6596 +#include <linux/dma-map-ops.h>
6598 +#include <linux/dma-mapping.h>
6602 +static struct hailo_vdma_engine* init_vdma_engines(struct device *dev,
6603 + struct hailo_resource *channel_registers_per_engine, size_t engines_count)
6605 + struct hailo_vdma_engine *engines = NULL;
6608 + engines = devm_kmalloc_array(dev, engines_count, sizeof(*engines), GFP_KERNEL);
6609 + if (NULL == engines) {
6610 + dev_err(dev, "Failed allocating vdma engines\n");
6611 + return ERR_PTR(-ENOMEM);
6614 + for (i = 0; i < engines_count; i++) {
6615 + hailo_vdma_engine_init(&engines[i], i, &channel_registers_per_engine[i]);
6621 +static int hailo_set_dma_mask(struct device *dev)
6623 + int err = -EINVAL;
6624 + /* Check and configure DMA length */
6625 + if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))) {
6626 + dev_notice(dev, "Probing: Enabled 64 bit dma\n");
6627 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))) {
6628 + dev_notice(dev, "Probing: Enabled 48 bit dma\n");
6629 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)))) {
6630 + dev_notice(dev, "Probing: Enabled 40 bit dma\n");
6631 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)))) {
6632 + dev_notice(dev, "Probing: Enabled 36 bit dma\n");
6633 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))) {
6634 + dev_notice(dev, "Probing: Enabled 32 bit dma\n");
6636 + dev_err(dev, "Probing: Error enabling dma %d\n", err);
6643 +int hailo_vdma_controller_init(struct hailo_vdma_controller *controller,
6644 + struct device *dev, struct hailo_vdma_hw *vdma_hw,
6645 + struct hailo_vdma_controller_ops *ops,
6646 + struct hailo_resource *channel_registers_per_engine, size_t engines_count)
6649 + controller->hw = vdma_hw;
6650 + controller->ops = ops;
6651 + controller->dev = dev;
6653 + controller->vdma_engines_count = engines_count;
6654 + controller->vdma_engines = init_vdma_engines(dev, channel_registers_per_engine, engines_count);
6655 + if (IS_ERR(controller->vdma_engines)) {
6656 + dev_err(dev, "Failed initialized vdma engines\n");
6657 + return PTR_ERR(controller->vdma_engines);
6660 + controller->used_by_filp = NULL;
6661 + spin_lock_init(&controller->interrupts_lock);
6662 + init_waitqueue_head(&controller->interrupts_wq);
6664 + /* Check and configure DMA length */
6665 + err = hailo_set_dma_mask(dev);
6670 + if (get_dma_ops(controller->dev)) {
6671 + hailo_dev_notice(controller->dev, "Probing: Using specialized dma_ops=%ps", get_dma_ops(controller->dev));
6677 +void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context)
6679 + atomic_set(&context->last_vdma_user_buffer_handle, 0);
6680 + INIT_LIST_HEAD(&context->mapped_user_buffer_list);
6682 + atomic_set(&context->last_vdma_handle, 0);
6683 + INIT_LIST_HEAD(&context->descriptors_buffer_list);
6684 + INIT_LIST_HEAD(&context->vdma_low_memory_buffer_list);
6685 + INIT_LIST_HEAD(&context->continuous_buffer_list);
6688 +void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
6689 + size_t engine_index)
6691 + struct hailo_vdma_engine *engine = &controller->vdma_engines[engine_index];
6692 + controller->ops->update_channel_interrupts(controller, engine_index, engine->enabled_channels);
6695 +void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
6696 + struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap)
6698 + unsigned long irq_saved_flags = 0;
6699 + // In case of FLR, the vdma registers will be NULL
6700 + const bool is_device_up = (NULL != controller->dev);
6702 + hailo_vdma_engine_disable_channel_interrupts(engine, channels_bitmap);
6703 + if (is_device_up) {
6704 + hailo_vdma_update_interrupts_mask(controller, engine_index);
6707 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
6708 + hailo_vdma_engine_clear_channel_interrupts(engine, channels_bitmap);
6709 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
6711 + hailo_dev_info(controller->dev, "Disabled interrupts for engine %u, channels bitmap 0x%x\n",
6712 + engine_index, channels_bitmap);
6715 +void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
6716 + struct hailo_vdma_controller *controller, struct file *filp)
6718 + size_t engine_index = 0;
6719 + struct hailo_vdma_engine *engine = NULL;
6720 + const u32 channels_bitmap = 0xFFFFFFFF; // disable all channel interrupts
6722 + if (filp == controller->used_by_filp) {
6723 + for_each_vdma_engine(controller, engine, engine_index) {
6724 + hailo_vdma_engine_interrupts_disable(controller, engine, engine_index, channels_bitmap);
6728 + hailo_vdma_clear_mapped_user_buffer_list(context, controller);
6729 + hailo_vdma_clear_descriptors_buffer_list(context, controller);
6730 + hailo_vdma_clear_low_memory_buffer_list(context);
6731 + hailo_vdma_clear_continuous_buffer_list(context, controller);
6733 + if (filp == controller->used_by_filp) {
6734 + controller->used_by_filp = NULL;
6738 +void hailo_vdma_irq_handler(struct hailo_vdma_controller *controller,
6739 + size_t engine_index, u32 channels_bitmap)
6741 + unsigned long irq_saved_flags = 0;
6742 + struct hailo_vdma_engine *engine = NULL;
6744 + BUG_ON(engine_index >= controller->vdma_engines_count);
6745 + engine = &controller->vdma_engines[engine_index];
6747 + hailo_vdma_engine_push_timestamps(engine, channels_bitmap);
6749 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
6750 + hailo_vdma_engine_set_channel_interrupts(engine, channels_bitmap);
6751 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
6753 + wake_up_interruptible_all(&controller->interrupts_wq);
6756 +long hailo_vdma_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
6757 + unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex)
6760 + case HAILO_VDMA_INTERRUPTS_ENABLE:
6761 + return hailo_vdma_interrupts_enable_ioctl(controller, arg);
6762 + case HAILO_VDMA_INTERRUPTS_DISABLE:
6763 + return hailo_vdma_interrupts_disable_ioctl(controller, arg);
6764 + case HAILO_VDMA_INTERRUPTS_WAIT:
6765 + return hailo_vdma_interrupts_wait_ioctl(controller, arg, mutex, should_up_board_mutex);
6766 + case HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS:
6767 + return hailo_vdma_interrupts_read_timestamps_ioctl(controller, arg);
6768 + case HAILO_VDMA_BUFFER_MAP:
6769 + return hailo_vdma_buffer_map_ioctl(context, controller, arg);
6770 + case HAILO_VDMA_BUFFER_UNMAP:
6771 + return hailo_vdma_buffer_unmap_ioctl(context, controller, arg);
6772 + case HAILO_VDMA_BUFFER_SYNC:
6773 + return hailo_vdma_buffer_sync_ioctl(context, controller, arg);
6774 + case HAILO_DESC_LIST_CREATE:
6775 + return hailo_desc_list_create_ioctl(context, controller, arg);
6776 + case HAILO_DESC_LIST_RELEASE:
6777 + return hailo_desc_list_release_ioctl(context, controller, arg);
6778 + case HAILO_DESC_LIST_BIND_VDMA_BUFFER:
6779 + return hailo_desc_list_bind_vdma_buffer(context, controller, arg);
6780 + case HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC:
6781 + return hailo_vdma_low_memory_buffer_alloc_ioctl(context, controller, arg);
6782 + case HAILO_VDMA_LOW_MEMORY_BUFFER_FREE:
6783 + return hailo_vdma_low_memory_buffer_free_ioctl(context, controller, arg);
6784 + case HAILO_MARK_AS_IN_USE:
6785 + return hailo_mark_as_in_use(controller, arg, filp);
6786 + case HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC:
6787 + return hailo_vdma_continuous_buffer_alloc_ioctl(context, controller, arg);
6788 + case HAILO_VDMA_CONTINUOUS_BUFFER_FREE:
6789 + return hailo_vdma_continuous_buffer_free_ioctl(context, controller, arg);
6790 + case HAILO_VDMA_LAUNCH_TRANSFER:
6791 + return hailo_vdma_launch_transfer_ioctl(context, controller, arg);
6793 + hailo_dev_err(controller->dev, "Invalid vDMA ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
6798 +static int desc_list_mmap(struct hailo_vdma_controller *controller,
6799 + struct hailo_descriptors_list_buffer *vdma_descriptors_buffer, struct vm_area_struct *vma)
6802 + unsigned long vsize = vma->vm_end - vma->vm_start;
6804 + if (vsize > vdma_descriptors_buffer->buffer_size) {
6805 + hailo_dev_err(controller->dev, "Requested size to map (%lx) is larger than the descriptor list size(%x)\n",
6806 + vsize, vdma_descriptors_buffer->buffer_size);
6810 + err = dma_mmap_coherent(controller->dev, vma, vdma_descriptors_buffer->kernel_address,
6811 + vdma_descriptors_buffer->dma_address, vsize);
6813 + hailo_dev_err(controller->dev, " Failed mmap descriptors %d\n", err);
6820 +static int low_memory_buffer_mmap(struct hailo_vdma_controller *controller,
6821 + struct hailo_vdma_low_memory_buffer *vdma_buffer, struct vm_area_struct *vma)
6825 + unsigned long vsize = vma->vm_end - vma->vm_start;
6826 + unsigned long orig_vm_start = vma->vm_start;
6827 + unsigned long orig_vm_end = vma->vm_end;
6828 + unsigned long page_fn = 0;
6830 + if (vsize != vdma_buffer->pages_count * PAGE_SIZE) {
6831 + hailo_dev_err(controller->dev, "mmap size should be %lu (given %lu)\n",
6832 + vdma_buffer->pages_count * PAGE_SIZE, vsize);
6836 +    for (i = 0; i < vdma_buffer->pages_count; i++) {
6838 + vma->vm_start = vma->vm_end;
6840 + vma->vm_end = vma->vm_start + PAGE_SIZE;
6842 +        page_fn = virt_to_phys(vdma_buffer->pages_address[i]) >> PAGE_SHIFT;
6843 + err = remap_pfn_range(vma, vma->vm_start, page_fn, PAGE_SIZE, vma->vm_page_prot);
6846 + hailo_dev_err(controller->dev, " fops_mmap failed mapping kernel page %d\n", err);
6851 + vma->vm_start = orig_vm_start;
6852 + vma->vm_end = orig_vm_end;
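The loop above temporarily narrows vma->vm_start/vm_end around each remap_pfn_range() call and restores them afterwards. A sketch of the more conventional equivalent, using an address cursor instead of rewriting the vma bounds (assumption: the two forms behave the same for page-sized chunks):

    /* Sketch: map each driver-allocated page at successive user addresses. */
    static int low_memory_buffer_mmap_sketch(struct hailo_vdma_low_memory_buffer *buf,
        struct vm_area_struct *vma)
    {
        unsigned long addr = vma->vm_start;
        size_t i;
        int err;

        for (i = 0; i < buf->pages_count; i++, addr += PAGE_SIZE) {
            unsigned long pfn = virt_to_phys(buf->pages_address[i]) >> PAGE_SHIFT;

            err = remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
            if (err)
                return err;
        }
        return 0;
    }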
6857 +static int continuous_buffer_mmap(struct hailo_vdma_controller *controller,
6858 + struct hailo_vdma_continuous_buffer *buffer, struct vm_area_struct *vma)
6861 + const unsigned long vsize = vma->vm_end - vma->vm_start;
6863 + if (vsize > buffer->size) {
6864 + hailo_dev_err(controller->dev, "mmap size should be less than %zu (given %lu)\n",
6865 + buffer->size, vsize);
6869 + err = dma_mmap_coherent(controller->dev, vma, buffer->kernel_address,
6870 + buffer->dma_address, vsize);
6872 + hailo_dev_err(controller->dev, " vdma_mmap failed dma_mmap_coherent %d\n", err);
6879 +int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
6880 + struct vm_area_struct *vma, uintptr_t vdma_handle)
6882 + struct hailo_descriptors_list_buffer *vdma_descriptors_buffer = NULL;
6883 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
6884 + struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
6886 + hailo_dev_info(controller->dev, "Map vdma_handle %llu\n", (u64)vdma_handle);
6887 + if (NULL != (vdma_descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, vdma_handle))) {
6888 + return desc_list_mmap(controller, vdma_descriptors_buffer, vma);
6890 + else if (NULL != (low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, vdma_handle))) {
6891 + return low_memory_buffer_mmap(controller, low_memory_buffer, vma);
6893 + else if (NULL != (continuous_buffer = hailo_vdma_find_continuous_buffer(context, vdma_handle))) {
6894 + return continuous_buffer_mmap(controller, continuous_buffer, vma);
6897 + hailo_dev_err(controller->dev, "Can't mmap vdma handle: %llu (not existing)\n", (u64)vdma_handle);
6902 +enum dma_data_direction get_dma_direction(enum hailo_dma_data_direction hailo_direction)
6904 + switch (hailo_direction) {
6905 + case HAILO_DMA_BIDIRECTIONAL:
6906 + return DMA_BIDIRECTIONAL;
6907 + case HAILO_DMA_TO_DEVICE:
6908 + return DMA_TO_DEVICE;
6909 + case HAILO_DMA_FROM_DEVICE:
6910 + return DMA_FROM_DEVICE;
6912 + pr_err("Invalid hailo direction %d\n", hailo_direction);
6917 +++ b/drivers/media/pci/hailo/vdma/vdma.h
6919 +// SPDX-License-Identifier: GPL-2.0
6921 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
6924 + * Hailo vdma engine definitions
6927 +#ifndef _HAILO_VDMA_VDMA_H_
6928 +#define _HAILO_VDMA_VDMA_H_
6930 +#include "hailo_ioctl_common.h"
6931 +#include "hailo_resource.h"
6932 +#include "vdma_common.h"
6934 +#include <linux/dma-mapping.h>
6935 +#include <linux/types.h>
6936 +#include <linux/semaphore.h>
6938 +#define VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
6939 + (((channel_index) << 5) + 0x0) : (((channel_index) << 5) + 0x10))
6940 +#define VDMA_CHANNEL_CONTROL_REG_ADDRESS(vdma_registers, channel_index, direction) \
6941 + ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction))
6943 +#define VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
6944 + (((channel_index) << 5) + 0x4) : (((channel_index) << 5) + 0x14))
6945 +#define VDMA_CHANNEL_NUM_PROC_ADDRESS(vdma_registers, channel_index, direction) \
6946 + ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction))
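These macros give each channel a 32-byte register window: the H2D (DMA_TO_DEVICE) control register sits at +0x0 and the D2H control register at +0x10, with the num-proc registers at +0x4 and +0x14 respectively. A worked example of the offsets produced for channel_index = 3:

    /* VDMA_CHANNEL_CONTROL_REG_OFFSET(3, DMA_TO_DEVICE)   == (3 << 5) + 0x0  == 0x60 */
    /* VDMA_CHANNEL_CONTROL_REG_OFFSET(3, DMA_FROM_DEVICE) == (3 << 5) + 0x10 == 0x70 */
    /* VDMA_CHANNEL_NUM_PROC_OFFSET(3, DMA_TO_DEVICE)      == (3 << 5) + 0x4  == 0x64 */
    /* VDMA_CHANNEL_NUM_PROC_OFFSET(3, DMA_FROM_DEVICE)    == (3 << 5) + 0x14 == 0x74 */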
6949 +struct hailo_vdma_buffer {
6950 + struct list_head mapped_user_buffer_list;
6954 + struct device *device;
6956 + void __user *user_address;
6958 + enum dma_data_direction data_direction;
6959 + struct sg_table sg_table;
6961 +    // If this flag is set, the buffer pointed to by sg_table is not backed by
6962 +    // 'struct page' (only by a pure pfn). In this case, accessing the page,
6963 +    // or calling APIs that access the page (e.g. dma_sync_sg_for_cpu) is not
6968 +// Continuous buffer that holds a descriptor list.
6969 +struct hailo_descriptors_list_buffer {
6970 + struct list_head descriptors_buffer_list;
6972 + void *kernel_address;
6973 + dma_addr_t dma_address;
6975 + struct hailo_vdma_descriptors_list desc_list;
6978 +struct hailo_vdma_low_memory_buffer {
6979 + struct list_head vdma_low_memory_buffer_list;
6981 + size_t pages_count;
6982 + void **pages_address;
6985 +struct hailo_vdma_continuous_buffer {
6986 + struct list_head continuous_buffer_list;
6988 + void *kernel_address;
6989 + dma_addr_t dma_address;
6993 +struct hailo_vdma_controller;
6994 +struct hailo_vdma_controller_ops {
6995 + void (*update_channel_interrupts)(struct hailo_vdma_controller *controller, size_t engine_index,
6996 + u32 channels_bitmap);
6999 +struct hailo_vdma_controller {
7000 + struct hailo_vdma_hw *hw;
7001 + struct hailo_vdma_controller_ops *ops;
7002 + struct device *dev;
7004 + size_t vdma_engines_count;
7005 + struct hailo_vdma_engine *vdma_engines;
7007 + spinlock_t interrupts_lock;
7008 + wait_queue_head_t interrupts_wq;
7010 + struct file *used_by_filp;
7012 + // Putting big IOCTL structures here to avoid stack allocation.
7013 + struct hailo_vdma_interrupts_read_timestamp_params read_interrupt_timestamps_params;
7016 +#define for_each_vdma_engine(controller, engine, engine_index) \
7017 + _for_each_element_array(controller->vdma_engines, controller->vdma_engines_count, \
7018 + engine, engine_index)
7020 +struct hailo_vdma_file_context {
7021 + atomic_t last_vdma_user_buffer_handle;
7022 + struct list_head mapped_user_buffer_list;
7024 +    // last_vdma_handle works as a handle for the vdma descriptor list and for the vdma buffer -
7025 + // there will be no collisions between the two
7026 + atomic_t last_vdma_handle;
7027 + struct list_head descriptors_buffer_list;
7028 + struct list_head vdma_low_memory_buffer_list;
7029 + struct list_head continuous_buffer_list;
7033 +int hailo_vdma_controller_init(struct hailo_vdma_controller *controller,
7034 + struct device *dev, struct hailo_vdma_hw *vdma_hw,
7035 + struct hailo_vdma_controller_ops *ops,
7036 + struct hailo_resource *channel_registers_per_engine, size_t engines_count);
7038 +void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
7039 + size_t engine_index);
7041 +void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
7042 + struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap);
7044 +void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context);
7045 +void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
7046 + struct hailo_vdma_controller *controller, struct file *filp);
7048 +void hailo_vdma_irq_handler(struct hailo_vdma_controller *controller, size_t engine_index,
7049 + u32 channels_bitmap);
7051 +// TODO: reduce params count
7052 +long hailo_vdma_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
7053 + unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex);
7055 +int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
7056 + struct vm_area_struct *vma, uintptr_t vdma_handle);
7058 +enum dma_data_direction get_dma_direction(enum hailo_dma_data_direction hailo_direction);
7059 +void hailo_vdma_disable_vdma_channels(struct hailo_vdma_controller *controller, const bool should_close_channels);
7061 +#endif /* _HAILO_VDMA_VDMA_H_ */
7062 \ No newline at end of file