1 From b01457f2cabf7e9b16f217ef7e4cb739655c407b Mon Sep 17 00:00:00 2001
2 From: Naushir Patuck <naush@raspberrypi.com>
3 Date: Tue, 21 May 2024 12:56:17 +0100
4 Subject: [PATCH 1104/1135] drivers: media: pci: Add Hailo accelerator device
5 drivers
6
7 Add version 4.17.1 of the Hailo PCIe device drivers.
8 Sourced from https://github.com/hailo-ai/hailort-drivers/
9
10 Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
11 ---
12 drivers/media/pci/Kconfig | 1 +
13 drivers/media/pci/Makefile | 3 +-
14 drivers/media/pci/hailo/Kconfig | 6 +
15 drivers/media/pci/hailo/Makefile | 32 +
16 drivers/media/pci/hailo/common/fw_operation.c | 103 ++
17 drivers/media/pci/hailo/common/fw_operation.h | 25 +
18 .../media/pci/hailo/common/fw_validation.c | 112 ++
19 .../media/pci/hailo/common/fw_validation.h | 66 ++
20 .../pci/hailo/common/hailo_ioctl_common.h | 575 ++++++++++
21 .../pci/hailo/common/hailo_pcie_version.h | 13 +
22 .../media/pci/hailo/common/hailo_resource.c | 128 +++
23 .../media/pci/hailo/common/hailo_resource.h | 39 +
24 drivers/media/pci/hailo/common/pcie_common.c | 641 +++++++++++
25 drivers/media/pci/hailo/common/pcie_common.h | 128 +++
26 drivers/media/pci/hailo/common/utils.h | 39 +
27 drivers/media/pci/hailo/common/vdma_common.c | 684 +++++++++++
28 drivers/media/pci/hailo/common/vdma_common.h | 243 ++++
29 .../pci/hailo/include/hailo_pcie_version.h | 14 +
30 drivers/media/pci/hailo/src/fops.c | 736 ++++++++++++
31 drivers/media/pci/hailo/src/fops.h | 21 +
32 drivers/media/pci/hailo/src/pcie.c | 1012 +++++++++++++++++
33 drivers/media/pci/hailo/src/pcie.h | 82 ++
34 drivers/media/pci/hailo/src/sysfs.c | 36 +
35 drivers/media/pci/hailo/src/sysfs.h | 13 +
36 drivers/media/pci/hailo/src/utils.c | 27 +
37 drivers/media/pci/hailo/src/utils.h | 21 +
38 drivers/media/pci/hailo/utils/compact.h | 153 +++
39 drivers/media/pci/hailo/utils/fw_common.h | 19 +
40 drivers/media/pci/hailo/utils/logs.c | 8 +
41 drivers/media/pci/hailo/utils/logs.h | 45 +
42 drivers/media/pci/hailo/vdma/ioctl.c | 698 ++++++++++++
43 drivers/media/pci/hailo/vdma/ioctl.h | 37 +
44 drivers/media/pci/hailo/vdma/memory.c | 551 +++++++++
45 drivers/media/pci/hailo/vdma/memory.h | 54 +
46 drivers/media/pci/hailo/vdma/vdma.c | 336 ++++++
47 drivers/media/pci/hailo/vdma/vdma.h | 143 +++
48 39 files changed, 6849 insertions(+), 1 deletion(-)
49 create mode 100644 drivers/media/pci/hailo/Kconfig
50 create mode 100644 drivers/media/pci/hailo/Makefile
51 create mode 100644 drivers/media/pci/hailo/common/fw_operation.c
52 create mode 100644 drivers/media/pci/hailo/common/fw_operation.h
53 create mode 100644 drivers/media/pci/hailo/common/fw_validation.c
54 create mode 100644 drivers/media/pci/hailo/common/fw_validation.h
55 create mode 100644 drivers/media/pci/hailo/common/hailo_ioctl_common.h
56 create mode 100644 drivers/media/pci/hailo/common/hailo_pcie_version.h
57 create mode 100644 drivers/media/pci/hailo/common/hailo_resource.c
58 create mode 100644 drivers/media/pci/hailo/common/hailo_resource.h
59 create mode 100644 drivers/media/pci/hailo/common/pcie_common.c
60 create mode 100644 drivers/media/pci/hailo/common/pcie_common.h
61 create mode 100644 drivers/media/pci/hailo/common/utils.h
62 create mode 100644 drivers/media/pci/hailo/common/vdma_common.c
63 create mode 100644 drivers/media/pci/hailo/common/vdma_common.h
64 create mode 100755 drivers/media/pci/hailo/include/hailo_pcie_version.h
65 create mode 100644 drivers/media/pci/hailo/src/fops.c
66 create mode 100644 drivers/media/pci/hailo/src/fops.h
67 create mode 100644 drivers/media/pci/hailo/src/pcie.c
68 create mode 100644 drivers/media/pci/hailo/src/pcie.h
69 create mode 100644 drivers/media/pci/hailo/src/sysfs.c
70 create mode 100644 drivers/media/pci/hailo/src/sysfs.h
71 create mode 100644 drivers/media/pci/hailo/src/utils.c
72 create mode 100644 drivers/media/pci/hailo/src/utils.h
73 create mode 100644 drivers/media/pci/hailo/utils/compact.h
74 create mode 100644 drivers/media/pci/hailo/utils/fw_common.h
75 create mode 100644 drivers/media/pci/hailo/utils/logs.c
76 create mode 100644 drivers/media/pci/hailo/utils/logs.h
77 create mode 100644 drivers/media/pci/hailo/vdma/ioctl.c
78 create mode 100644 drivers/media/pci/hailo/vdma/ioctl.h
79 create mode 100644 drivers/media/pci/hailo/vdma/memory.c
80 create mode 100644 drivers/media/pci/hailo/vdma/memory.h
81 create mode 100644 drivers/media/pci/hailo/vdma/vdma.c
82 create mode 100644 drivers/media/pci/hailo/vdma/vdma.h
83
84 --- a/drivers/media/pci/Kconfig
85 +++ b/drivers/media/pci/Kconfig
86 @@ -74,6 +74,7 @@ config VIDEO_PCI_SKELETON
87 when developing new drivers.
88
89 source "drivers/media/pci/intel/Kconfig"
90 +source "drivers/media/pci/hailo/Kconfig"
91
92 endif #MEDIA_PCI_SUPPORT
93 endif #PCI
94 --- a/drivers/media/pci/Makefile
95 +++ b/drivers/media/pci/Makefile
96 @@ -17,7 +17,8 @@ obj-y += ttpci/ \
97 saa7146/ \
98 smipcie/ \
99 netup_unidvb/ \
100 - intel/
101 + intel/ \
102 + hailo/
103
104 # Please keep it alphabetically sorted by Kconfig name
105 # (e. g. LC_ALL=C sort Makefile)
106 --- /dev/null
107 +++ b/drivers/media/pci/hailo/Kconfig
108 @@ -0,0 +1,6 @@
109 +
110 +config MEDIA_PCI_HAILO
111 + tristate "Hailo AI accelerator PCIe driver"
112 + depends on PCI
113 + help
114 + Enable build of Hailo AI accelerator PCIe driver.
115 --- /dev/null
116 +++ b/drivers/media/pci/hailo/Makefile
117 @@ -0,0 +1,32 @@
118 +# SPDX-License-Identifier: GPL-2.0
119 +
120 +COMMON_SRC_DIRECTORY=common
121 +VDMA_SRC_DIRECTORY=vdma
122 +UTILS_SRC_DIRECTORY=utils
123 +
124 +obj-$(CONFIG_MEDIA_PCI_HAILO) := hailo_pci.o
125 +
126 +hailo_pci-objs += src/pcie.o
127 +hailo_pci-objs += src/fops.o
128 +hailo_pci-objs += src/utils.o
129 +hailo_pci-objs += src/sysfs.o
130 +
131 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_validation.o
132 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_operation.o
133 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/pcie_common.o
134 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/vdma_common.o
135 +hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/hailo_resource.o
136 +
137 +hailo_pci-objs += $(UTILS_SRC_DIRECTORY)/logs.o
138 +
139 +hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/vdma.o
140 +hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/memory.o
141 +hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/ioctl.o
142 +
143 +ccflags-y += -Werror
144 +ccflags-y += -DHAILO_RASBERRY_PIE
145 +ccflags-y += -I$(srctree)/$(src)
146 +ccflags-y += -I$(srctree)/$(src)/include
147 +ccflags-y += -I$(srctree)/$(src)/common
148 +
149 +clean-files := $(hailo_pci-objs)
150 --- /dev/null
151 +++ b/drivers/media/pci/hailo/common/fw_operation.c
152 @@ -0,0 +1,103 @@
153 +// SPDX-License-Identifier: GPL-2.0
154 +/**
155 + * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
156 +**/
157 +
158 +#include "fw_operation.h"
159 +
160 +#include <linux/errno.h>
161 +#include <linux/types.h>
162 +#include <linux/kernel.h>
163 +#include <linux/bug.h>
164 +
165 +typedef struct {
166 + u32 host_offset;
167 + u32 chip_offset;
168 +} FW_DEBUG_BUFFER_HEADER_t;
169 +
170 +#define DEBUG_BUFFER_DATA_SIZE (DEBUG_BUFFER_TOTAL_SIZE - sizeof(FW_DEBUG_BUFFER_HEADER_t))
171 +
172 +int hailo_read_firmware_notification(struct hailo_resource *resource, struct hailo_d2h_notification *notification)
173 +{
174 + hailo_d2h_buffer_details_t d2h_buffer_details = {0, 0};
175 + hailo_resource_read_buffer(resource, 0, sizeof(d2h_buffer_details),
176 + &d2h_buffer_details);
177 +
178 + if ((sizeof(notification->buffer) < d2h_buffer_details.buffer_len) || (0 == d2h_buffer_details.is_buffer_in_use)) {
179 + return -EINVAL;
180 + }
181 +
182 + notification->buffer_len = d2h_buffer_details.buffer_len;
183 + hailo_resource_read_buffer(resource, sizeof(d2h_buffer_details), notification->buffer_len, notification->buffer);
184 +
185 + // Write is_buffer_in_use = false
186 + hailo_resource_write16(resource, 0, 0);
187 + return 0;
188 +}
189 +
190 +static inline size_t calculate_log_ready_to_read(FW_DEBUG_BUFFER_HEADER_t *header)
191 +{
192 + size_t ready_to_read = 0;
193 + size_t host_offset = header->host_offset;
194 + size_t chip_offset = header->chip_offset;
195 +
196 + if (chip_offset >= host_offset) {
197 + ready_to_read = chip_offset - host_offset;
198 + } else {
199 + ready_to_read = DEBUG_BUFFER_DATA_SIZE - (host_offset - chip_offset);
200 + }
201 +
202 + return ready_to_read;
203 +}
204 +
205 +long hailo_read_firmware_log(struct hailo_resource *fw_logger_resource, struct hailo_read_log_params *params)
206 +{
207 + FW_DEBUG_BUFFER_HEADER_t debug_buffer_header = {0};
208 + size_t read_offset = 0;
209 + size_t ready_to_read = 0;
210 + size_t size_to_read = 0;
211 + uintptr_t user_buffer = (uintptr_t)params->buffer;
212 +
213 + if (params->buffer_size > ARRAY_SIZE(params->buffer)) {
214 + return -EINVAL;
215 + }
216 +
217 + hailo_resource_read_buffer(fw_logger_resource, 0, sizeof(debug_buffer_header),
218 + &debug_buffer_header);
219 +
220 + /* Point to the start of the data buffer. */
221 + ready_to_read = calculate_log_ready_to_read(&debug_buffer_header);
222 + if (0 == ready_to_read) {
223 + params->read_bytes = 0;
224 + return 0;
225 + }
226 + /* If ready to read is bigger than the buffer size, read only buffer size bytes. */
227 + ready_to_read = min(ready_to_read, params->buffer_size);
228 +
230 + /* Point to the data that is ready to be read by the host. */
230 + read_offset = sizeof(debug_buffer_header) + debug_buffer_header.host_offset;
231 + /* Check if the offset should cycle back to beginning. */
232 + if (DEBUG_BUFFER_DATA_SIZE <= debug_buffer_header.host_offset + ready_to_read) {
233 + size_to_read = DEBUG_BUFFER_DATA_SIZE - debug_buffer_header.host_offset;
234 + hailo_resource_read_buffer(fw_logger_resource, read_offset, size_to_read, (void*)user_buffer);
235 +
236 + user_buffer += size_to_read;
237 + size_to_read = ready_to_read - size_to_read;
238 + /* Point back to the beginning of the data buffer. */
239 + read_offset -= debug_buffer_header.host_offset;
240 + }
241 + else {
242 + size_to_read = ready_to_read;
243 + }
244 +
245 + /* size_to_read may become 0 if the read reached DEBUG_BUFFER_DATA_SIZE exactly */
246 + hailo_resource_read_buffer(fw_logger_resource, read_offset, size_to_read, (void*)user_buffer);
247 +
248 + /* Change current_offset to represent the new host offset. */
249 + read_offset += size_to_read;
250 + hailo_resource_write32(fw_logger_resource, offsetof(FW_DEBUG_BUFFER_HEADER_t, host_offset),
251 + (u32)(read_offset - sizeof(debug_buffer_header)));
252 +
253 + params->read_bytes = ready_to_read;
254 + return 0;
255 +}
256 \ No newline at end of file
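The log reader above treats the firmware debug area as a ring buffer: chip_offset is the producer cursor, host_offset the consumer cursor, and a read that would run past DEBUG_BUFFER_DATA_SIZE is split into two chunks. A standalone sketch of the same arithmetic (log_ready_to_read is an illustrative rename of calculate_log_ready_to_read, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    #define DEBUG_BUFFER_TOTAL_SIZE (4 * 1024)
    #define HEADER_SIZE (2 * sizeof(uint32_t)) /* host_offset + chip_offset */
    #define DEBUG_BUFFER_DATA_SIZE (DEBUG_BUFFER_TOTAL_SIZE - HEADER_SIZE)

    /* Bytes the host may consume before catching up with the chip. */
    static size_t log_ready_to_read(uint32_t host_offset, uint32_t chip_offset)
    {
        if (chip_offset >= host_offset)
            return chip_offset - host_offset; /* contiguous region */
        /* producer wrapped: data runs to the end, then from the start */
        return DEBUG_BUFFER_DATA_SIZE - (host_offset - chip_offset);
    }

    /* Example: host_offset = 4000, chip_offset = 100 (wrapped) gives
     * 4088 - 3900 = 188 bytes, read as 88 bytes up to the end of the
     * data area plus 100 bytes from its start. */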
257 --- /dev/null
258 +++ b/drivers/media/pci/hailo/common/fw_operation.h
259 @@ -0,0 +1,25 @@
260 +// SPDX-License-Identifier: GPL-2.0
261 +/**
262 + * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
263 +**/
264 +
265 +#ifndef _HAILO_COMMON_FIRMWARE_OPERATION_H_
266 +#define _HAILO_COMMON_FIRMWARE_OPERATION_H_
267 +
268 +#include "hailo_resource.h"
269 +
270 +#define DEBUG_BUFFER_TOTAL_SIZE (4*1024)
271 +
272 +#ifdef __cplusplus
273 +extern "C" {
274 +#endif
275 +
276 +int hailo_read_firmware_notification(struct hailo_resource *resource, struct hailo_d2h_notification *notification);
277 +
278 +long hailo_read_firmware_log(struct hailo_resource *fw_logger_resource, struct hailo_read_log_params *params);
279 +
280 +#ifdef __cplusplus
281 +}
282 +#endif
283 +
284 +#endif /* _HAILO_COMMON_FIRMWARE_OPERATION_H_ */
285 --- /dev/null
286 +++ b/drivers/media/pci/hailo/common/fw_validation.c
287 @@ -0,0 +1,112 @@
288 +// SPDX-License-Identifier: GPL-2.0
289 +/**
290 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
291 + **/
292 +
293 +#include "fw_validation.h"
294 +#include <linux/errno.h>
295 +#include <linux/types.h>
296 +
297 +
298 +
299 +/* when reading the firmware we don't want to read past the firmware_size,
300 + so we have a consumed_firmware_offset that is updated _before_ accessing data at that offset
301 + of firmware_base_address */
302 +#define CONSUME_FIRMWARE(__size, __err) do { \
303 + consumed_firmware_offset += (u32) (__size); \
304 + if ((firmware_size < (__size)) || (firmware_size < consumed_firmware_offset)) { \
305 + err = __err; \
306 + goto exit; \
307 + } \
308 + } while(0)
309 +
310 +int FW_VALIDATION__validate_fw_header(uintptr_t firmware_base_address,
311 + size_t firmware_size, u32 max_code_size, u32 *outer_consumed_firmware_offset,
312 + firmware_header_t **out_firmware_header, enum hailo_board_type board_type)
313 +{
314 + int err = -EINVAL;
315 + firmware_header_t *firmware_header = NULL;
316 + u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
317 + u32 expected_firmware_magic = 0;
318 +
319 + firmware_header = (firmware_header_t *) (firmware_base_address + consumed_firmware_offset);
320 + CONSUME_FIRMWARE(sizeof(firmware_header_t), -EINVAL);
321 +
322 + switch (board_type) {
323 + case HAILO_BOARD_TYPE_HAILO8:
324 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO8;
325 + break;
326 + case HAILO_BOARD_TYPE_HAILO15:
327 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
328 + break;
329 + case HAILO_BOARD_TYPE_PLUTO:
330 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_PLUTO;
331 + break;
332 + default:
333 + err = -EINVAL;
334 + goto exit;
335 + }
336 +
337 + if (expected_firmware_magic != firmware_header->magic) {
338 + err = -EINVAL;
339 + goto exit;
340 + }
341 +
342 + /* Validate that the firmware header version is supported */
343 + switch(firmware_header->header_version) {
344 + case FIRMWARE_HEADER_VERSION_INITIAL:
345 + break;
346 + default:
347 + err = -EINVAL;
348 + goto exit;
349 + break;
350 + }
351 +
352 + if (MINIMUM_FIRMWARE_CODE_SIZE > firmware_header->code_size) {
353 + err = -EINVAL;
354 + goto exit;
355 + }
356 +
357 + if (max_code_size < firmware_header->code_size) {
358 + err = -EINVAL;
359 + goto exit;
360 + }
361 +
362 + CONSUME_FIRMWARE(firmware_header->code_size, -EINVAL);
363 +
364 + *outer_consumed_firmware_offset = consumed_firmware_offset;
365 + *out_firmware_header = firmware_header;
366 + err = 0;
367 +
368 +exit:
369 + return err;
370 +}
371 +
372 +int FW_VALIDATION__validate_cert_header(uintptr_t firmware_base_address,
373 + size_t firmware_size, u32 *outer_consumed_firmware_offset, secure_boot_certificate_t **out_firmware_cert)
374 +{
375 +
376 + secure_boot_certificate_t *firmware_cert = NULL;
377 + int err = -EINVAL;
378 + u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
379 +
380 + firmware_cert = (secure_boot_certificate_t *) (firmware_base_address + consumed_firmware_offset);
381 + CONSUME_FIRMWARE(sizeof(secure_boot_certificate_t), -EINVAL);
382 +
383 + if ((MAXIMUM_FIRMWARE_CERT_KEY_SIZE < firmware_cert->key_size) ||
384 + (MAXIMUM_FIRMWARE_CERT_CONTENT_SIZE < firmware_cert->content_size)) {
385 + err = -EINVAL;
386 + goto exit;
387 + }
388 +
389 + CONSUME_FIRMWARE(firmware_cert->key_size, -EINVAL);
390 + CONSUME_FIRMWARE(firmware_cert->content_size, -EINVAL);
391 +
392 + *outer_consumed_firmware_offset = consumed_firmware_offset;
393 + *out_firmware_cert = firmware_cert;
394 + err = 0;
395 +
396 +exit:
397 + return err;
398 +}
399 +
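CONSUME_FIRMWARE advances consumed_firmware_offset before the data at that offset is touched, and rejects both a chunk larger than the whole image and an offset that has run past its end, so a truncated image stops the walk early. A standalone sketch of the same guard (the consume() helper is illustrative, not part of the patch):

    #include <stdint.h>
    #include <stddef.h>

    /* Mirrors CONSUME_FIRMWARE: advance first, then bounds-check. */
    static int consume(size_t firmware_size, uint32_t *offset, uint32_t chunk)
    {
        *offset += chunk;
        if (firmware_size < chunk || firmware_size < *offset)
            return -1; /* would read past the end of the image */
        return 0;
    }

    /* Example: a 100-byte image cannot supply a 24-byte chunk at offset
     * 90; the offset becomes 114 > 100 and the walk is rejected before
     * the chunk is dereferenced. */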
400 --- /dev/null
401 +++ b/drivers/media/pci/hailo/common/fw_validation.h
402 @@ -0,0 +1,66 @@
403 +// SPDX-License-Identifier: GPL-2.0
404 +/**
405 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
406 + **/
407 +
408 +#ifndef PCIE_COMMON_FIRMWARE_HEADER_UTILS_H_
409 +#define PCIE_COMMON_FIRMWARE_HEADER_UTILS_H_
410 +
411 +#include "hailo_ioctl_common.h"
412 +#include <linux/types.h>
413 +
414 +#define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
415 +#define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
416 +// TODO - HRT-11344 : change fw magic to pluto specific
417 +#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
418 +
419 +#ifndef HAILO_EMULATOR
420 +#define FIRMWARE_WAIT_TIMEOUT_MS (5000)
421 +#else /* ifndef HAILO_EMULATOR */
422 +#define FIRMWARE_WAIT_TIMEOUT_MS (500000)
423 +#endif /* ifndef HAILO_EMULATOR */
424 +
425 +typedef enum {
426 + FIRMWARE_HEADER_VERSION_INITIAL = 0,
427 +
428 + /* MUST BE LAST */
429 + FIRMWARE_HEADER_VERSION_COUNT
430 +} firmware_header_version_t;
431 +
432 +typedef struct {
433 + u32 magic;
434 + u32 header_version;
435 + u32 firmware_major;
436 + u32 firmware_minor;
437 + u32 firmware_revision;
438 + u32 code_size;
439 +} firmware_header_t;
440 +
441 +
442 +#ifdef _MSC_VER
443 +#pragma warning(push)
444 +#pragma warning(disable:4200)
445 +#endif /* _MSC_VER */
446 +
447 +typedef struct {
448 + u32 key_size;
449 + u32 content_size;
450 + u8 certificates_data[0];
451 +} secure_boot_certificate_t;
452 +
453 +#ifdef _MSC_VER
454 +#pragma warning(pop)
455 +#endif /* _MSC_VER */
456 +
457 +#define MINIMUM_FIRMWARE_CODE_SIZE (20*4)
458 +#define MAXIMUM_FIRMWARE_CERT_KEY_SIZE (0x1000)
459 +#define MAXIMUM_FIRMWARE_CERT_CONTENT_SIZE (0x1000)
460 +
461 +int FW_VALIDATION__validate_fw_header(uintptr_t firmware_base_address,
462 + size_t firmware_size, u32 max_code_size, u32 *outer_consumed_firmware_offset,
463 + firmware_header_t **out_firmware_header, enum hailo_board_type board_type);
464 +
465 +int FW_VALIDATION__validate_cert_header(uintptr_t firmware_base_address,
466 + size_t firmware_size, u32 *outer_consumed_firmware_offset, secure_boot_certificate_t **out_firmware_cert);
467 +
468 +#endif
469 \ No newline at end of file
470 --- /dev/null
471 +++ b/drivers/media/pci/hailo/common/hailo_ioctl_common.h
472 @@ -0,0 +1,575 @@
473 +// SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) AND MIT
474 +/**
475 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
476 + **/
477 +
478 +#ifndef _HAILO_IOCTL_COMMON_H_
479 +#define _HAILO_IOCTL_COMMON_H_
480 +
481 +
482 +// This value is not easily changeable.
483 +// For example: the channel interrupts ioctls assume we have up to 32 channels
484 +#define MAX_VDMA_CHANNELS_PER_ENGINE (32)
485 +#define MAX_VDMA_ENGINES (3)
486 +#define SIZE_OF_VDMA_DESCRIPTOR (16)
487 +#define VDMA_DEST_CHANNELS_START (16)
488 +
489 +#define HAILO_VDMA_MAX_ONGOING_TRANSFERS (128)
490 +#define HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK (HAILO_VDMA_MAX_ONGOING_TRANSFERS - 1)
491 +
492 +#define CHANNEL_IRQ_TIMESTAMPS_SIZE (HAILO_VDMA_MAX_ONGOING_TRANSFERS * 2)
493 +#define CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK (CHANNEL_IRQ_TIMESTAMPS_SIZE - 1)
494 +
495 +#define INVALID_DRIVER_HANDLE_VALUE ((uintptr_t)-1)
496 +
497 +// Used by the Windows and Unix drivers to raise the right CPU control handle to the FW. Same as in the pcie_service FW
498 +#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
499 +#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
500 +#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
501 +#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
502 +#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
503 +#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
504 +
505 +#define INVALID_VDMA_CHANNEL (0xff)
506 +
507 +#if !defined(__cplusplus) && defined(NTDDI_VERSION)
508 +#include <wdm.h>
509 +typedef ULONG uint32_t;
510 +typedef UCHAR uint8_t;
511 +typedef USHORT uint16_t;
512 +typedef ULONGLONG uint64_t;
513 +#endif /* !defined(__cplusplus) && defined(NTDDI_VERSION) */
514 +
515 +
516 +#ifdef _MSC_VER
517 +
518 +#include <initguid.h>
519 +
520 +#if !defined(bool) && !defined(__cplusplus)
521 +typedef uint8_t bool;
522 +#endif // !defined(bool) && !defined(__cplusplus)
523 +
524 +#if !defined(INT_MAX)
525 +#define INT_MAX 0x7FFFFFFF
526 +#endif // !defined(INT_MAX)
527 +
528 +
529 +// {d88d31f1-fede-4e71-ac2a-6ce0018c1501}
530 +DEFINE_GUID (GUID_DEVINTERFACE_HailoKM,
531 + 0xd88d31f1,0xfede,0x4e71,0xac,0x2a,0x6c,0xe0,0x01,0x8c,0x15,0x01);
532 +
533 +#define HAILO_GENERAL_IOCTL_MAGIC 0
534 +#define HAILO_VDMA_IOCTL_MAGIC 1
535 +#define HAILO_NON_LINUX_IOCTL_MAGIC 2
536 +
537 +#define HAILO_IOCTL_COMPATIBLE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x802, METHOD_BUFFERED, FILE_ANY_ACCESS)
538 +
539 +
540 +typedef struct tCompatibleHailoIoctlParam
541 +{
542 + union {
543 + struct {
544 + ULONG Size : 16;
545 + ULONG Code : 8;
546 + ULONG Type : 6;
547 + ULONG Read : 1;
548 + ULONG Write : 1;
549 + } bits;
550 + ULONG value;
551 + } u;
552 +} tCompatibleHailoIoctlParam;
553 +
554 +static ULONG FORCEINLINE _IOC_(ULONG nr, ULONG type, ULONG size, bool read, bool write)
555 +{
556 + struct tCompatibleHailoIoctlParam param;
557 + param.u.bits.Code = nr;
558 + param.u.bits.Size = size;
559 + param.u.bits.Type = type;
560 + param.u.bits.Read = read ? 1 : 0;
561 + param.u.bits.Write = write ? 1 : 0;
562 + return param.u.value;
563 +}
564 +
565 +#define _IOW_(type,nr,size) _IOC_(nr, type, sizeof(size), true, false)
566 +#define _IOR_(type,nr,size) _IOC_(nr, type, sizeof(size), false, true)
567 +#define _IOWR_(type,nr,size) _IOC_(nr, type, sizeof(size), true, true)
568 +#define _IO_(type,nr) _IOC_(nr, type, 0, false, false)
569 +
570 +#elif defined(__linux__) // #ifdef _MSC_VER
571 +#ifndef __KERNEL__
572 +// include the userspace headers only if this file is included by a user space program
573 +// It is discouraged to include them when compiling the driver (https://lwn.net/Articles/113349/)
574 +#include <stdint.h>
575 +#include <sys/types.h>
576 +#else
577 +#include <linux/types.h>
578 +#include <linux/limits.h>
579 +#include <linux/kernel.h>
580 +#endif // ifndef __KERNEL__
581 +
582 +#include <linux/ioctl.h>
583 +
584 +#define _IOW_ _IOW
585 +#define _IOR_ _IOR
586 +#define _IOWR_ _IOWR
587 +#define _IO_ _IO
588 +
589 +#define HAILO_GENERAL_IOCTL_MAGIC 'g'
590 +#define HAILO_VDMA_IOCTL_MAGIC 'v'
591 +#define HAILO_NON_LINUX_IOCTL_MAGIC 'w'
592 +
593 +#elif defined(__QNX__) // #ifdef _MSC_VER
594 +#include <devctl.h>
595 +#include <stdint.h>
596 +#include <sys/types.h>
597 +#include <sys/mman.h>
598 +#include <stdbool.h>
599 +
600 +// defines for devctl
601 +#define _IOW_ __DIOF
602 +#define _IOR_ __DIOT
603 +#define _IOWR_ __DIOTF
604 +#define _IO_ __DION
605 +#define HAILO_GENERAL_IOCTL_MAGIC _DCMD_ALL
606 +#define HAILO_VDMA_IOCTL_MAGIC _DCMD_MISC
607 +#define HAILO_NON_LINUX_IOCTL_MAGIC _DCMD_PROC
608 +
609 +#else // #ifdef _MSC_VER
610 +#error "unsupported platform!"
611 +#endif
612 +
613 +#pragma pack(push, 1)
614 +
615 +struct hailo_channel_interrupt_timestamp {
616 + uint64_t timestamp_ns;
617 + uint16_t desc_num_processed;
618 +};
619 +
620 +typedef struct {
621 + uint16_t is_buffer_in_use;
622 + uint16_t buffer_len;
623 +} hailo_d2h_buffer_details_t;
624 +
625 +// This enum is the same as `enum dma_data_direction` (defined in linux/dma-direction.h)
626 +enum hailo_dma_data_direction {
627 + HAILO_DMA_BIDIRECTIONAL = 0,
628 + HAILO_DMA_TO_DEVICE = 1,
629 + HAILO_DMA_FROM_DEVICE = 2,
630 + HAILO_DMA_NONE = 3,
631 +
632 + /** Max enum value to maintain ABI Integrity */
633 + HAILO_DMA_MAX_ENUM = INT_MAX,
634 +};
635 +
636 +// Enum that determines whether the buffer should be allocated from user space or by the driver
637 +enum hailo_allocation_mode {
638 + HAILO_ALLOCATION_MODE_USERSPACE = 0,
639 + HAILO_ALLOCATION_MODE_DRIVER = 1,
640 +
641 + /** Max enum value to maintain ABI Integrity */
642 + HAILO_ALLOCATION_MODE_MAX_ENUM = INT_MAX,
643 +};
644 +
645 +/* structure used in ioctl HAILO_VDMA_BUFFER_MAP */
646 +struct hailo_vdma_buffer_map_params {
647 +#if defined(__linux__) || defined(_MSC_VER)
648 + void* user_address; // in
649 +#elif defined(__QNX__)
650 + shm_handle_t shared_memory_handle; // in
651 +#else
652 +#error "unsupported platform!"
653 +#endif // __linux__
654 + size_t size; // in
655 + enum hailo_dma_data_direction data_direction; // in
656 + uintptr_t allocated_buffer_handle; // in
657 + size_t mapped_handle; // out
658 +};
659 +
660 +/* structure used in ioctl HAILO_VDMA_BUFFER_UNMAP */
661 +struct hailo_vdma_buffer_unmap_params {
662 + size_t mapped_handle;
663 +};
664 +
665 +/* structure used in ioctl HAILO_DESC_LIST_CREATE */
666 +struct hailo_desc_list_create_params {
667 + size_t desc_count; // in
668 + uint16_t desc_page_size; // in
669 + bool is_circular; // in
670 + uintptr_t desc_handle; // out
671 + uint64_t dma_address; // out
672 +};
673 +
674 +/* structure used in ioctl HAILO_DESC_LIST_RELEASE */
675 +struct hailo_desc_list_release_params {
676 + uintptr_t desc_handle; // in
677 +};
678 +
679 +/* structure used in ioctl HAILO_NON_LINUX_DESC_LIST_MMAP */
680 +struct hailo_non_linux_desc_list_mmap_params {
681 + uintptr_t desc_handle; // in
682 + size_t size; // in
683 + void* user_address; // out
684 +};
685 +
686 +/* structure used in ioctl HAILO_DESC_LIST_BIND_VDMA_BUFFER */
687 +struct hailo_desc_list_bind_vdma_buffer_params {
688 + size_t buffer_handle; // in
689 + size_t buffer_size; // in
690 + size_t buffer_offset; // in
691 + uintptr_t desc_handle; // in
692 + uint8_t channel_index; // in
693 + uint32_t starting_desc; // in
694 +};
695 +
696 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_ENABLE */
697 +struct hailo_vdma_interrupts_enable_params {
698 + uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
699 + bool enable_timestamps_measure; // in
700 +};
701 +
702 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_DISABLE */
703 +struct hailo_vdma_interrupts_disable_params {
704 + uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
705 +};
706 +
707 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_WAIT */
708 +struct hailo_vdma_interrupts_channel_data {
709 + uint8_t engine_index;
710 + uint8_t channel_index;
711 + bool is_active; // If not active, num_processed is ignored.
712 + uint16_t host_num_processed;
713 + uint8_t host_error; // Channel errors bits on source side
714 + uint8_t device_error; // Channel errors bits on dest side
715 + bool validation_success; // If the validation of the channel was successful
716 +};
717 +
718 +struct hailo_vdma_interrupts_wait_params {
719 + uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
720 + uint8_t channels_count; // out
721 + struct hailo_vdma_interrupts_channel_data
722 + irq_data[MAX_VDMA_CHANNELS_PER_ENGINE * MAX_VDMA_ENGINES]; // out
723 +};
724 +
725 +/* structure used in ioctl HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS */
726 +struct hailo_vdma_interrupts_read_timestamp_params {
727 + uint8_t engine_index; // in
728 + uint8_t channel_index; // in
729 + uint32_t timestamps_count; // out
730 + struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE]; // out
731 +};
732 +
733 +/* structure used in ioctl HAILO_FW_CONTROL */
734 +#define MAX_CONTROL_LENGTH (1500)
735 +#define PCIE_EXPECTED_MD5_LENGTH (16)
736 +
737 +
738 +/* structure used in ioctl HAILO_FW_CONTROL and HAILO_READ_LOG */
739 +enum hailo_cpu_id {
740 + HAILO_CPU_ID_CPU0 = 0,
741 + HAILO_CPU_ID_CPU1,
742 + HAILO_CPU_ID_NONE,
743 +
744 + /** Max enum value to maintain ABI Integrity */
745 + HAILO_CPU_MAX_ENUM = INT_MAX,
746 +};
747 +
748 +struct hailo_fw_control {
749 + // expected_md5+buffer_len+buffer must be in this order at the start of the struct
750 + uint8_t expected_md5[PCIE_EXPECTED_MD5_LENGTH];
751 + uint32_t buffer_len;
752 + uint8_t buffer[MAX_CONTROL_LENGTH];
753 + uint32_t timeout_ms;
754 + enum hailo_cpu_id cpu_id;
755 +};
756 +
757 +/* structure used in ioctl HAILO_MEMORY_TRANSFER */
758 +// Max BAR transfer size, taken from ATR0_TABLE_SIZE
759 +#define MAX_MEMORY_TRANSFER_LENGTH (4096)
760 +
761 +enum hailo_transfer_direction {
762 + TRANSFER_READ = 0,
763 + TRANSFER_WRITE,
764 +
765 + /** Max enum value to maintain ABI Integrity */
766 + TRANSFER_MAX_ENUM = INT_MAX,
767 +};
768 +
769 +enum hailo_transfer_memory_type {
770 + HAILO_TRANSFER_DEVICE_DIRECT_MEMORY,
771 +
772 + // vDMA memories
773 + HAILO_TRANSFER_MEMORY_VDMA0 = 0x100,
774 + HAILO_TRANSFER_MEMORY_VDMA1,
775 + HAILO_TRANSFER_MEMORY_VDMA2,
776 +
777 + // PCIe driver memories
778 + HAILO_TRANSFER_MEMORY_PCIE_BAR0 = 0x200,
779 + HAILO_TRANSFER_MEMORY_PCIE_BAR2 = 0x202,
780 + HAILO_TRANSFER_MEMORY_PCIE_BAR4 = 0x204,
781 +
782 + // DRAM DMA driver memories
783 + HAILO_TRANSFER_MEMORY_DMA_ENGINE0 = 0x300,
784 + HAILO_TRANSFER_MEMORY_DMA_ENGINE1,
785 + HAILO_TRANSFER_MEMORY_DMA_ENGINE2,
786 +
787 + /** Max enum value to maintain ABI Integrity */
788 + HAILO_TRANSFER_MEMORY_MAX_ENUM = INT_MAX,
789 +};
790 +
791 +struct hailo_memory_transfer_params {
792 + enum hailo_transfer_direction transfer_direction; // in
793 + enum hailo_transfer_memory_type memory_type; // in
794 + uint64_t address; // in
795 + size_t count; // in
796 + uint8_t buffer[MAX_MEMORY_TRANSFER_LENGTH]; // in/out
797 +};
798 +
799 +/* structure used in ioctl HAILO_VDMA_BUFFER_SYNC */
800 +enum hailo_vdma_buffer_sync_type {
801 + HAILO_SYNC_FOR_CPU,
802 + HAILO_SYNC_FOR_DEVICE,
803 +
804 + /** Max enum value to maintain ABI Integrity */
805 + HAILO_SYNC_MAX_ENUM = INT_MAX,
806 +};
807 +
808 +struct hailo_vdma_buffer_sync_params {
809 + size_t handle; // in
810 + enum hailo_vdma_buffer_sync_type sync_type; // in
811 + size_t offset; // in
812 + size_t count; // in
813 +};
814 +
815 +/* structure used in ioctl HAILO_READ_NOTIFICATION */
816 +#define MAX_NOTIFICATION_LENGTH (1500)
817 +
818 +struct hailo_d2h_notification {
819 + size_t buffer_len; // out
820 + uint8_t buffer[MAX_NOTIFICATION_LENGTH]; // out
821 +};
822 +
823 +enum hailo_board_type {
824 + HAILO_BOARD_TYPE_HAILO8 = 0,
825 + HAILO_BOARD_TYPE_HAILO15,
826 + HAILO_BOARD_TYPE_PLUTO,
827 + HAILO_BOARD_TYPE_COUNT,
828 +
829 + /** Max enum value to maintain ABI Integrity */
830 + HAILO_BOARD_TYPE_MAX_ENUM = INT_MAX
831 +};
832 +
833 +enum hailo_dma_type {
834 + HAILO_DMA_TYPE_PCIE,
835 + HAILO_DMA_TYPE_DRAM,
836 +
837 + /** Max enum value to maintain ABI Integrity */
838 + HAILO_DMA_TYPE_MAX_ENUM = INT_MAX,
839 +};
840 +
841 +struct hailo_device_properties {
842 + uint16_t desc_max_page_size;
843 + enum hailo_board_type board_type;
844 + enum hailo_allocation_mode allocation_mode;
845 + enum hailo_dma_type dma_type;
846 + size_t dma_engines_count;
847 + bool is_fw_loaded;
848 +#ifdef __QNX__
849 + pid_t resource_manager_pid;
850 +#endif // __QNX__
851 +};
852 +
853 +struct hailo_driver_info {
854 + uint32_t major_version;
855 + uint32_t minor_version;
856 + uint32_t revision_version;
857 +};
858 +
859 +/* structure used in ioctl HAILO_READ_LOG */
860 +#define MAX_FW_LOG_BUFFER_LENGTH (512)
861 +
862 +struct hailo_read_log_params {
863 + enum hailo_cpu_id cpu_id; // in
864 + uint8_t buffer[MAX_FW_LOG_BUFFER_LENGTH]; // out
865 + size_t buffer_size; // in
866 + size_t read_bytes; // out
867 +};
868 +
869 +/* structure used in ioctl HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC */
870 +struct hailo_allocate_low_memory_buffer_params {
871 + size_t buffer_size; // in
872 + uintptr_t buffer_handle; // out
873 +};
874 +
875 +/* structure used in ioctl HAILO_VDMA_LOW_MEMORY_BUFFER_FREE */
876 +struct hailo_free_low_memory_buffer_params {
877 + uintptr_t buffer_handle; // in
878 +};
879 +
880 +struct hailo_mark_as_in_use_params {
881 + bool in_use; // out
882 +};
883 +
884 +/* structure used in ioctl HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC */
885 +struct hailo_allocate_continuous_buffer_params {
886 + size_t buffer_size; // in
887 + uintptr_t buffer_handle; // out
888 + uint64_t dma_address; // out
889 +};
890 +
891 +/* structure used in ioctl HAILO_VDMA_CONTINUOUS_BUFFER_FREE */
892 +struct hailo_free_continuous_buffer_params {
893 + uintptr_t buffer_handle; // in
894 +};
895 +
896 +/* structures used in ioctl HAILO_VDMA_LAUNCH_TRANSFER */
897 +struct hailo_vdma_transfer_buffer {
898 + size_t mapped_buffer_handle; // in
899 + uint32_t offset; // in
900 + uint32_t size; // in
901 +};
902 +
903 +enum hailo_vdma_interrupts_domain {
904 + HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
905 + HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
906 + HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
907 +
908 + /** Max enum value to maintain ABI Integrity */
909 + HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
910 +};
911 +
912 +// We allow a maximum of 2 buffers per transfer, since an extra buffer may be
913 +// needed to keep each buffer aligned to the page size.
914 +#define HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER (2)
915 +
916 +struct hailo_vdma_launch_transfer_params {
917 + uint8_t engine_index; // in
918 + uint8_t channel_index; // in
919 +
920 + uintptr_t desc_handle; // in
921 + uint32_t starting_desc; // in
922 +
923 + bool should_bind; // in, if false, assumes buffer already bound.
924 + uint8_t buffers_count; // in
925 + struct hailo_vdma_transfer_buffer
926 + buffers[HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER]; // in
927 +
928 + enum hailo_vdma_interrupts_domain first_interrupts_domain; // in
929 + enum hailo_vdma_interrupts_domain last_interrupts_domain; // in
930 +
931 + bool is_debug; // in, if set, program hw to send
932 + // more info (e.g. desc complete status)
933 +
934 + uint32_t descs_programed; // out, number of descriptors programmed.
935 +};
936 +
937 +#ifdef _MSC_VER
938 +struct tCompatibleHailoIoctlData
939 +{
940 + tCompatibleHailoIoctlParam Parameters;
941 + ULONG_PTR Value;
942 + union {
943 + struct hailo_memory_transfer_params MemoryTransfer;
944 + struct hailo_vdma_interrupts_enable_params VdmaInterruptsEnable;
945 + struct hailo_vdma_interrupts_disable_params VdmaInterruptsDisable;
946 + struct hailo_vdma_interrupts_read_timestamp_params VdmaInterruptsReadTimestamps;
947 + struct hailo_vdma_interrupts_wait_params VdmaInterruptsWait;
948 + struct hailo_vdma_buffer_sync_params VdmaBufferSync;
949 + struct hailo_fw_control FirmwareControl;
950 + struct hailo_vdma_buffer_map_params VdmaBufferMap;
951 + struct hailo_vdma_buffer_unmap_params VdmaBufferUnmap;
952 + struct hailo_desc_list_create_params DescListCreate;
953 + struct hailo_desc_list_release_params DescListReleaseParam;
954 + struct hailo_desc_list_bind_vdma_buffer_params DescListBind;
955 + struct hailo_d2h_notification D2HNotification;
956 + struct hailo_device_properties DeviceProperties;
957 + struct hailo_driver_info DriverInfo;
958 + struct hailo_non_linux_desc_list_mmap_params DescListMmap;
959 + struct hailo_read_log_params ReadLog;
960 + struct hailo_mark_as_in_use_params MarkAsInUse;
961 + struct hailo_vdma_launch_transfer_params LaunchTransfer;
962 + } Buffer;
963 +};
964 +#endif // _MSC_VER
965 +
966 +#pragma pack(pop)
967 +
968 +enum hailo_general_ioctl_code {
969 + HAILO_MEMORY_TRANSFER_CODE,
970 + HAILO_FW_CONTROL_CODE,
971 + HAILO_READ_NOTIFICATION_CODE,
972 + HAILO_DISABLE_NOTIFICATION_CODE,
973 + HAILO_QUERY_DEVICE_PROPERTIES_CODE,
974 + HAILO_QUERY_DRIVER_INFO_CODE,
975 + HAILO_READ_LOG_CODE,
976 + HAILO_RESET_NN_CORE_CODE,
977 +
978 + // Must be last
979 + HAILO_GENERAL_IOCTL_MAX_NR,
980 +};
981 +
982 +#define HAILO_MEMORY_TRANSFER _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_MEMORY_TRANSFER_CODE, struct hailo_memory_transfer_params)
983 +#define HAILO_FW_CONTROL _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
984 +#define HAILO_READ_NOTIFICATION _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
985 +#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
986 +#define HAILO_QUERY_DEVICE_PROPERTIES _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DEVICE_PROPERTIES_CODE, struct hailo_device_properties)
987 +#define HAILO_QUERY_DRIVER_INFO _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DRIVER_INFO_CODE, struct hailo_driver_info)
988 +#define HAILO_READ_LOG _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
989 +#define HAILO_RESET_NN_CORE _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
990 +
991 +enum hailo_vdma_ioctl_code {
992 + HAILO_VDMA_INTERRUPTS_ENABLE_CODE,
993 + HAILO_VDMA_INTERRUPTS_DISABLE_CODE,
994 + HAILO_VDMA_INTERRUPTS_WAIT_CODE,
995 + HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE,
996 + HAILO_VDMA_BUFFER_MAP_CODE,
997 + HAILO_VDMA_BUFFER_UNMAP_CODE,
998 + HAILO_VDMA_BUFFER_SYNC_CODE,
999 + HAILO_DESC_LIST_CREATE_CODE,
1000 + HAILO_DESC_LIST_RELEASE_CODE,
1001 + HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE,
1002 + HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE,
1003 + HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE,
1004 + HAILO_MARK_AS_IN_USE_CODE,
1005 + HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE,
1006 + HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE,
1007 + HAILO_VDMA_LAUNCH_TRANSFER_CODE,
1008 +
1009 + // Must be last
1010 + HAILO_VDMA_IOCTL_MAX_NR,
1011 +};
1012 +
1013 +#define HAILO_VDMA_INTERRUPTS_ENABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_ENABLE_CODE, struct hailo_vdma_interrupts_enable_params)
1014 +#define HAILO_VDMA_INTERRUPTS_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_DISABLE_CODE, struct hailo_vdma_interrupts_disable_params)
1015 +#define HAILO_VDMA_INTERRUPTS_WAIT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_WAIT_CODE, struct hailo_vdma_interrupts_wait_params)
1016 +#define HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE, struct hailo_vdma_interrupts_read_timestamp_params)
1017 +
1018 +#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
1019 +#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
1020 +#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
1021 +
1022 +#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
1023 +#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
1024 +#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
1025 +
1026 +#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
1027 +#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
1028 +
1029 +#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
1030 +
1031 +#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
1032 +#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
1033 +
1034 +#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
1035 +
1036 +
1037 +enum hailo_non_linux_ioctl_code {
1038 + HAILO_NON_LINUX_DESC_LIST_MMAP_CODE,
1039 +
1040 + // Must be last
1041 + HAILO_NON_LINUX_IOCTL_MAX_NR,
1042 +};
1043 +
1044 +#define HAILO_NON_LINUX_DESC_LIST_MMAP _IOWR_(HAILO_NON_LINUX_IOCTL_MAGIC, HAILO_NON_LINUX_DESC_LIST_MMAP_CODE, struct hailo_non_linux_desc_list_mmap_params)
1045 +
1046 +
1047 +#endif /* _HAILO_IOCTL_COMMON_H_ */
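On Linux the _IOW_/_IOR_/_IOWR_/_IO_ wrappers reduce to the standard ioctl encoding macros with the 'g', 'v' and 'w' magics, so this header can be included unchanged from userspace. A minimal userspace sketch querying the driver version (the /dev/hailo0 node name is an assumption; this patch does not show how the character device is named):

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "hailo_ioctl_common.h"

    int main(void)
    {
        struct hailo_driver_info info = {0};
        int fd = open("/dev/hailo0", O_RDWR); /* node name assumed */

        if (fd < 0)
            return 1;
        if (ioctl(fd, HAILO_QUERY_DRIVER_INFO, &info) == 0)
            printf("driver %u.%u.%u\n", info.major_version,
                   info.minor_version, info.revision_version);
        close(fd);
        return 0;
    }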
1048 --- /dev/null
1049 +++ b/drivers/media/pci/hailo/common/hailo_pcie_version.h
1050 @@ -0,0 +1,13 @@
1051 +// SPDX-License-Identifier: GPL-2.0
1052 +/**
1053 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1054 + **/
1055 +
1056 +#ifndef _HAILO_COMMON_PCIE_VERSION_H_
1057 +#define _HAILO_COMMON_PCIE_VERSION_H_
1058 +
1059 +#define HAILO_DRV_VER_MAJOR 4
1060 +#define HAILO_DRV_VER_MINOR 17
1061 +#define HAILO_DRV_VER_REVISION 0
1062 +
1063 +#endif /* _HAILO_COMMON_PCIE_VERSION_H_ */
1064 \ No newline at end of file
1065 --- /dev/null
1066 +++ b/drivers/media/pci/hailo/common/hailo_resource.c
1067 @@ -0,0 +1,128 @@
1068 +// SPDX-License-Identifier: GPL-2.0
1069 +/**
1070 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1071 + **/
1072 +
1073 +#include "hailo_resource.h"
1074 +
1075 +#include <linux/io.h>
1076 +#include <linux/errno.h>
1077 +#include <linux/types.h>
1078 +#include <linux/kernel.h>
1079 +
1080 +
1081 +u8 hailo_resource_read8(struct hailo_resource *resource, size_t offset)
1082 +{
1083 + return ioread8((u8*)resource->address + offset);
1084 +}
1085 +
1086 +u16 hailo_resource_read16(struct hailo_resource *resource, size_t offset)
1087 +{
1088 + return ioread16((u8*)resource->address + offset);
1089 +}
1090 +
1091 +u32 hailo_resource_read32(struct hailo_resource *resource, size_t offset)
1092 +{
1093 + return ioread32((u8*)resource->address + offset);
1094 +}
1095 +
1096 +void hailo_resource_write8(struct hailo_resource *resource, size_t offset, u8 value)
1097 +{
1098 + iowrite8(value, (u8*)resource->address + offset);
1099 +}
1100 +
1101 +void hailo_resource_write16(struct hailo_resource *resource, size_t offset, u16 value)
1102 +{
1103 + iowrite16(value, (u8*)resource->address + offset);
1104 +}
1105 +
1106 +void hailo_resource_write32(struct hailo_resource *resource, size_t offset, u32 value)
1107 +{
1108 + iowrite32(value, (u8*)resource->address + offset);
1109 +}
1110 +
1111 +void hailo_resource_read_buffer(struct hailo_resource *resource, size_t offset, size_t count, void *to)
1112 +{
1113 + // Copied and modified from the Linux aarch64 implementation (using ioread32 instead of readq, which does not always work)
1114 + uintptr_t to_ptr = (uintptr_t)to;
1115 + while ((count > 0) && (!IS_ALIGNED(to_ptr, 4) || !IS_ALIGNED((uintptr_t)resource->address + offset, 4))) {
1116 + *(u8*)to_ptr = hailo_resource_read8(resource, offset);
1117 + to_ptr++;
1118 + offset++;
1119 + count--;
1120 + }
1121 +
1122 + while (count >= 4) {
1123 + *(u32*)to_ptr = hailo_resource_read32(resource, offset);
1124 + to_ptr += 4;
1125 + offset += 4;
1126 + count -= 4;
1127 + }
1128 +
1129 + while (count > 0) {
1130 + *(u8*)to_ptr = hailo_resource_read8(resource, offset);
1131 + to_ptr++;
1132 + offset++;
1133 + count--;
1134 + }
1135 +}
1136 +
1137 +int hailo_resource_write_buffer(struct hailo_resource *resource, size_t offset, size_t count, const void *from)
1138 +{
1139 + // Read the bytes back after writing them to flush the data. This also checks whether the PCIe link
1140 + // is broken.
1141 + uintptr_t from_ptr = (uintptr_t)from;
1142 + while (count && (!IS_ALIGNED(resource->address + offset, 4) || !IS_ALIGNED(from_ptr, 4))) {
1143 + hailo_resource_write8(resource, offset, *(u8*)from_ptr);
1144 + if (hailo_resource_read8(resource, offset) != *(u8*)from_ptr) {
1145 + return -EIO;
1146 + }
1147 + from_ptr++;
1148 + offset++;
1149 + count--;
1150 + }
1151 +
1152 + while (count >= 4) {
1153 + hailo_resource_write32(resource, offset, *(u32*)from_ptr);
1154 + if (hailo_resource_read32(resource, offset) != *(u32*)from_ptr) {
1155 + return -EIO;
1156 + }
1157 + from_ptr += 4;
1158 + offset += 4;
1159 + count -= 4;
1160 + }
1161 +
1162 + while (count) {
1163 + hailo_resource_write8(resource, offset, *(u8*)from_ptr);
1164 + if (hailo_resource_read8(resource, offset) != *(u8*)from_ptr) {
1165 + return -EIO;
1166 + }
1167 + from_ptr++;
1168 + offset++;
1169 + count--;
1170 + }
1171 +
1172 + return 0;
1173 +}
1174 +
1175 +int hailo_resource_transfer(struct hailo_resource *resource, struct hailo_memory_transfer_params *transfer)
1176 +{
1177 + // Check for transfer size (address is an offset in the resource's address space)
1178 + if ((transfer->address + transfer->count) > (u64)resource->size) {
1179 + return -EINVAL;
1180 + }
1181 +
1182 + if (transfer->count > ARRAY_SIZE(transfer->buffer)) {
1183 + return -EINVAL;
1184 + }
1185 +
1186 + switch (transfer->transfer_direction) {
1187 + case TRANSFER_READ:
1188 + hailo_resource_read_buffer(resource, (u32)transfer->address, transfer->count, transfer->buffer);
1189 + return 0;
1190 + case TRANSFER_WRITE:
1191 + return hailo_resource_write_buffer(resource, (u32)transfer->address, transfer->count, transfer->buffer);
1192 + default:
1193 + return -EINVAL;
1194 + }
1195 +}
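hailo_resource_transfer is the backend of the HAILO_MEMORY_TRANSFER ioctl: transfer->address is an offset inside the mapped resource, bounds-checked against both the resource size and the fixed in-struct buffer. A kernel-side usage sketch (example_read_bar is illustrative; 'resource' is assumed to hold an ioremap()ed BAR set up by the probe path):

    #include <linux/slab.h>

    /* Read 16 bytes from offset 0x100 of a mapped BAR. The params
     * struct carries a 4 KiB buffer, so allocate it off the stack. */
    static int example_read_bar(struct hailo_resource *resource)
    {
        struct hailo_memory_transfer_params *params;
        int err;

        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
            return -ENOMEM;

        params->transfer_direction = TRANSFER_READ;
        params->address = 0x100; /* offset in the resource, not a CPU address */
        params->count = 16;

        err = hailo_resource_transfer(resource, params);
        /* on success, params->buffer holds the bytes read */
        kfree(params);
        return err;
    }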
1196 --- /dev/null
1197 +++ b/drivers/media/pci/hailo/common/hailo_resource.h
1198 @@ -0,0 +1,39 @@
1199 +// SPDX-License-Identifier: GPL-2.0
1200 +/**
1201 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1202 + **/
1203 +
1204 +#ifndef _HAILO_COMMON_HAILO_RESOURCE_H_
1205 +#define _HAILO_COMMON_HAILO_RESOURCE_H_
1206 +
1207 +#include "hailo_ioctl_common.h"
1208 +#include <linux/types.h>
1209 +
1210 +struct hailo_resource {
1211 + uintptr_t address;
1212 + size_t size;
1213 +};
1214 +
1215 +#ifdef __cplusplus
1216 +extern "C" {
1217 +#endif
1218 +
1219 +// Implemented by the specific platform
1220 +u32 hailo_resource_read32(struct hailo_resource *resource, size_t offset);
1221 +u16 hailo_resource_read16(struct hailo_resource *resource, size_t offset);
1222 +u8 hailo_resource_read8(struct hailo_resource *resource, size_t offset);
1223 +void hailo_resource_write32(struct hailo_resource *resource, size_t offset, u32 value);
1224 +void hailo_resource_write16(struct hailo_resource *resource, size_t offset, u16 value);
1225 +void hailo_resource_write8(struct hailo_resource *resource, size_t offset, u8 value);
1226 +
1227 +void hailo_resource_read_buffer(struct hailo_resource *resource, size_t offset, size_t count, void *to);
1228 +int hailo_resource_write_buffer(struct hailo_resource *resource, size_t offset, size_t count, const void *from);
1229 +
1230 +// Transfer (read/write) the given resource into/from transfer params.
1231 +int hailo_resource_transfer(struct hailo_resource *resource, struct hailo_memory_transfer_params *transfer);
1232 +
1233 +#ifdef __cplusplus
1234 +}
1235 +#endif
1236 +
1237 +#endif /* _HAILO_COMMON_HAILO_RESOURCE_H_ */
1238 \ No newline at end of file
1239 --- /dev/null
1240 +++ b/drivers/media/pci/hailo/common/pcie_common.c
1241 @@ -0,0 +1,641 @@
1242 +// SPDX-License-Identifier: GPL-2.0
1243 +/**
1244 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1245 + **/
1246 +
1247 +#include "pcie_common.h"
1248 +#include "fw_operation.h"
1249 +
1250 +#include <linux/errno.h>
1251 +#include <linux/bug.h>
1252 +#include <linux/delay.h>
1253 +#include <linux/kernel.h>
1254 +
1255 +
1256 +#define BSC_IMASK_HOST (0x0188)
1257 +#define BCS_ISTATUS_HOST (0x018C)
1258 +#define BCS_SOURCE_INTERRUPT_PER_CHANNEL (0x400)
1259 +#define BCS_DESTINATION_INTERRUPT_PER_CHANNEL (0x500)
1260 +
1261 +#define PO2_ROUND_UP(size, alignment) (((size) + (alignment) - 1) & ~((alignment) - 1))
1262 +
1263 +#define ATR0_PARAM (0x17)
1264 +#define ATR0_SRC_ADDR (0x0)
1265 +#define ATR0_TRSL_ADDR2 (0x0)
1266 +#define ATR0_TRSL_PARAM (6)
1267 +
1268 +#define ATR0_PCIE_BRIDGE_OFFSET (0x700)
1269 +#define ATR0_TABLE_SIZE (0x1000u)
1270 +#define ATR0_TABLE_SIZE_MASK (0x1000u - 1)
1271 +
1272 +#define MAXIMUM_APP_FIRMWARE_CODE_SIZE (0x40000)
1273 +#define MAXIMUM_CORE_FIRMWARE_CODE_SIZE (0x20000)
1274 +
1275 +#define FIRMWARE_LOAD_WAIT_MAX_RETRIES (100)
1276 +#define FIRMWARE_LOAD_SLEEP_MS (50)
1277 +
1278 +#define PCIE_APP_CPU_DEBUG_OFFSET (8*1024)
1279 +#define PCIE_CORE_CPU_DEBUG_OFFSET (PCIE_APP_CPU_DEBUG_OFFSET + DEBUG_BUFFER_TOTAL_SIZE)
1280 +
1281 +#define PCIE_D2H_NOTIFICATION_SRAM_OFFSET (0x640 + 0x640)
1282 +#define PCIE_REQUEST_SIZE_OFFSET (0x640)
1283 +
1284 +#define PCIE_CONFIG_VENDOR_OFFSET (0x0098)
1285 +
1286 +#define HAILO_PCIE_HOST_DMA_DATA_ID (0)
1287 +#define HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 4)
1288 +#define HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK (1 << 5)
1289 +
1290 +typedef u32 hailo_ptr_t;
1291 +
1292 +struct hailo_fw_addresses {
1293 + u32 boot_fw_header;
1294 + u32 app_fw_code_ram_base;
1295 + u32 boot_key_cert;
1296 + u32 boot_cont_cert;
1297 + u32 boot_fw_trigger;
1298 + u32 core_code_ram_base;
1299 + u32 core_fw_header;
1300 + u32 atr0_trsl_addr1;
1301 + u32 raise_ready_offset;
1302 +};
1303 +
1304 +struct hailo_atr_config {
1305 + u32 atr_param;
1306 + u32 atr_src;
1307 + u32 atr_trsl_addr_1;
1308 + u32 atr_trsl_addr_2;
1309 + u32 atr_trsl_param;
1310 +};
1311 +
1312 +struct hailo_board_compatibility {
1313 + struct hailo_fw_addresses fw_addresses;
1314 + const char *fw_filename;
1315 + const struct hailo_config_constants board_cfg;
1316 + const struct hailo_config_constants fw_cfg;
1317 +};
1318 +
1319 +static const struct hailo_board_compatibility compat[HAILO_BOARD_TYPE_COUNT] = {
1320 + [HAILO_BOARD_TYPE_HAILO8] = {
1321 + .fw_addresses = {
1322 + .boot_fw_header = 0xE0030,
1323 + .boot_fw_trigger = 0xE0980,
1324 + .boot_key_cert = 0xE0048,
1325 + .boot_cont_cert = 0xE0390,
1326 + .app_fw_code_ram_base = 0x60000,
1327 + .core_code_ram_base = 0xC0000,
1328 + .core_fw_header = 0xA0000,
1329 + .atr0_trsl_addr1 = 0x60000000,
1330 + .raise_ready_offset = 0x1684,
1331 + },
1332 + .fw_filename = "hailo/hailo8_fw.bin",
1333 + .board_cfg = {
1334 + .filename = "hailo/hailo8_board_cfg.bin",
1335 + .address = 0x60001000,
1336 + .max_size = PCIE_HAILO8_BOARD_CFG_MAX_SIZE,
1337 + },
1338 + .fw_cfg = {
1339 + .filename = "hailo/hailo8_fw_cfg.bin",
1340 + .address = 0x60001500,
1341 + .max_size = PCIE_HAILO8_FW_CFG_MAX_SIZE,
1342 + },
1343 + },
1344 + [HAILO_BOARD_TYPE_HAILO15] = {
1345 + .fw_addresses = {
1346 + .boot_fw_header = 0x88000,
1347 + .boot_fw_trigger = 0x88c98,
1348 + .boot_key_cert = 0x88018,
1349 + .boot_cont_cert = 0x886a8,
1350 + .app_fw_code_ram_base = 0x20000,
1351 + .core_code_ram_base = 0x60000,
1352 + .core_fw_header = 0xC0000,
1353 + .atr0_trsl_addr1 = 0x000BE000,
1354 + .raise_ready_offset = 0x1754,
1355 + },
1356 + .fw_filename = "hailo/hailo15_fw.bin",
1357 + .board_cfg = {
1358 + .filename = NULL,
1359 + .address = 0,
1360 + .max_size = 0,
1361 + },
1362 + .fw_cfg = {
1363 + .filename = NULL,
1364 + .address = 0,
1365 + .max_size = 0,
1366 + },
1367 + },
1368 + // HRT-11344 : none of these matter except raise_ready_offset, since we load the fw separately - not through the driver
1369 + // After implementing the bootloader, put the correct values here
1370 + [HAILO_BOARD_TYPE_PLUTO] = {
1371 + .fw_addresses = {
1372 + .boot_fw_header = 0x88000,
1373 + .boot_fw_trigger = 0x88c98,
1374 + .boot_key_cert = 0x88018,
1375 + .boot_cont_cert = 0x886a8,
1376 + .app_fw_code_ram_base = 0x20000,
1377 + .core_code_ram_base = 0x60000,
1378 + .core_fw_header = 0xC0000,
1379 + .atr0_trsl_addr1 = 0x000BE000,
1380 + // NOTE: After they update hw consts - check register fw_access_interrupt_w1s of pcie_config
1381 + .raise_ready_offset = 0x174c,
1382 + },
1383 + .fw_filename = "hailo/pluto_fw.bin",
1384 + .board_cfg = {
1385 + .filename = NULL,
1386 + .address = 0,
1387 + .max_size = 0,
1388 + },
1389 + .fw_cfg = {
1390 + .filename = NULL,
1391 + .address = 0,
1392 + .max_size = 0,
1393 + },
1394 + }
1395 +};
1396 +
1397 +
1398 +bool hailo_pcie_read_interrupt(struct hailo_pcie_resources *resources, struct hailo_pcie_interrupt_source *source)
1399 +{
1400 + u32 channel_data_source = 0;
1401 + u32 channel_data_dest = 0;
1402 + memset(source, 0, sizeof(*source));
1403 +
1404 + source->interrupt_bitmask = hailo_resource_read32(&resources->config, BCS_ISTATUS_HOST);
1405 + if (0 == source->interrupt_bitmask) {
1406 + return false;
1407 + }
1408 +
1409 + // clear signal
1410 + hailo_resource_write32(&resources->config, BCS_ISTATUS_HOST, source->interrupt_bitmask);
1411 +
1412 + if (source->interrupt_bitmask & BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK) {
1413 + channel_data_source = hailo_resource_read32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL);
1414 + hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, channel_data_source);
1415 + }
1416 + if (source->interrupt_bitmask & BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK) {
1417 + channel_data_dest = hailo_resource_read32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL);
1418 + hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, channel_data_dest);
1419 + }
1420 + source->vdma_channels_bitmap = channel_data_source | channel_data_dest;
1421 +
1422 + return true;
1423 +}
1424 +
1425 +int hailo_pcie_write_firmware_control(struct hailo_pcie_resources *resources, const struct hailo_fw_control *command)
1426 +{
1427 + int err = 0;
1428 + u32 request_size = 0;
1429 + u8 fw_access_value = FW_ACCESS_APP_CPU_CONTROL_MASK;
1430 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1431 +
1432 + if (!hailo_pcie_is_firmware_loaded(resources)) {
1433 + return -ENODEV;
1434 + }
1435 +
1436 + // Copy md5 + buffer_len + buffer
1437 + request_size = sizeof(command->expected_md5) + sizeof(command->buffer_len) + command->buffer_len;
1438 + err = hailo_resource_write_buffer(&resources->fw_access, 0, PO2_ROUND_UP(request_size, FW_CODE_SECTION_ALIGNMENT),
1439 + command);
1440 + if (err < 0) {
1441 + return err;
1442 + }
1443 +
1444 + // Raise the bit for the CPU that will handle the control
1445 + fw_access_value = (command->cpu_id == HAILO_CPU_ID_CPU1) ? FW_ACCESS_CORE_CPU_CONTROL_MASK :
1446 + FW_ACCESS_APP_CPU_CONTROL_MASK;
1447 +
1448 + // Raise ready flag to FW
1449 + hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, (u32)fw_access_value);
1450 + return 0;
1451 +}
1452 +
1453 +int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command)
1454 +{
1455 + u32 response_header_size = 0;
1456 +
1457 + // Copy response md5 + buffer_len
1458 + response_header_size = sizeof(command->expected_md5) + sizeof(command->buffer_len);
1459 +
1460 + hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET, response_header_size, command);
1461 +
1462 + if (sizeof(command->buffer) < command->buffer_len) {
1463 + return -EINVAL;
1464 + }
1465 +
1466 + // Copy response buffer
1467 + hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET + (size_t)response_header_size,
1468 + command->buffer_len, &command->buffer);
1469 +
1470 + return 0;
1471 +}
1472 +
1473 +void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources)
1474 +{
1475 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1476 + const u32 fw_access_value = FW_ACCESS_DRIVER_SHUTDOWN_MASK;
1477 +
1478 + // Write shutdown flag to FW
1479 + hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, fw_access_value);
1480 +}
1481 +
1482 +int hailo_pcie_read_firmware_notification(struct hailo_pcie_resources *resources,
1483 + struct hailo_d2h_notification *notification)
1484 +{
1485 + struct hailo_resource notification_resource;
1486 +
1487 + if (PCIE_D2H_NOTIFICATION_SRAM_OFFSET > resources->fw_access.size) {
1488 + return -EINVAL;
1489 + }
1490 +
1491 + notification_resource.address = resources->fw_access.address + PCIE_D2H_NOTIFICATION_SRAM_OFFSET;
1492 + notification_resource.size = sizeof(struct hailo_d2h_notification);
1493 +
1494 + return hailo_read_firmware_notification(&notification_resource, notification);
1495 +}
1496 +
1497 +static void write_atr_table(struct hailo_pcie_resources *resources,
1498 + struct hailo_atr_config *atr)
1499 +{
1500 + hailo_resource_write_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
1501 + sizeof(*atr), (void*)atr);
1502 +}
1503 +
1504 +static void read_atr_table(struct hailo_pcie_resources *resources,
1505 + struct hailo_atr_config *atr)
1506 +{
1507 + hailo_resource_read_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
1508 + sizeof(*atr), (void*)atr);
1509 +}
1510 +
1511 +static void configure_atr_table(struct hailo_pcie_resources *resources,
1512 + hailo_ptr_t base_address)
1513 +{
1514 + struct hailo_atr_config atr = {
1515 + .atr_param = ATR0_PARAM,
1516 + .atr_src = ATR0_SRC_ADDR,
1517 + .atr_trsl_addr_1 = (u32)base_address,
1518 + .atr_trsl_addr_2 = ATR0_TRSL_ADDR2,
1519 + .atr_trsl_param = ATR0_TRSL_PARAM
1520 + };
1521 + write_atr_table(resources, &atr);
1522 +}
1523 +
1524 +static void write_memory_chunk(struct hailo_pcie_resources *resources,
1525 + hailo_ptr_t dest, u32 dest_offset, const void *src, u32 len)
1526 +{
1527 + BUG_ON(dest_offset + len > (u32)resources->fw_access.size);
1528 +
1529 + configure_atr_table(resources, dest);
1530 + (void)hailo_resource_write_buffer(&resources->fw_access, dest_offset, len, src);
1531 +}
1532 +
1533 +static void read_memory_chunk(
1534 + struct hailo_pcie_resources *resources, hailo_ptr_t src, u32 src_offset, void *dest, u32 len)
1535 +{
1536 + BUG_ON(src_offset + len > (u32)resources->fw_access.size);
1537 +
1538 + configure_atr_table(resources, src);
1539 + (void)hailo_resource_read_buffer(&resources->fw_access, src_offset, len, dest);
1540 +}
1541 +
1542 +// Note: this function modifies the device ATR table (which is also used by the firmware for control and vdma).
1543 +// Use with caution, and restore the original ATR if needed.
1544 +static void write_memory(struct hailo_pcie_resources *resources, hailo_ptr_t dest, const void *src, u32 len)
1545 +{
1546 + hailo_ptr_t base_address = dest & ~ATR0_TABLE_SIZE_MASK;
1547 + u32 chunk_len = 0;
1548 + u32 offset = 0;
1549 +
1550 + if (base_address != dest) {
1551 + // Data is not aligned, write the first chunk
1552 + chunk_len = min(base_address + ATR0_TABLE_SIZE - dest, len);
1553 + write_memory_chunk(resources, base_address, dest - base_address, src, chunk_len);
1554 + offset += chunk_len;
1555 + }
1556 +
1557 + while (offset < len) {
1558 + chunk_len = min(len - offset, ATR0_TABLE_SIZE);
1559 + write_memory_chunk(resources, dest + offset, 0, (const u8*)src + offset, chunk_len);
1560 + offset += chunk_len;
1561 + }
1562 +}
1563 +
1564 +// Note: this function modifies the device ATR table (which is also used by the firmware for control and vdma).
1565 +// Use with caution, and restore the original ATR if needed.
1566 +static void read_memory(struct hailo_pcie_resources *resources, hailo_ptr_t src, void *dest, u32 len)
1567 +{
1568 + hailo_ptr_t base_address = src & ~ATR0_TABLE_SIZE_MASK;
1569 + u32 chunk_len = 0;
1570 + u32 offset = 0;
1571 +
1572 + if (base_address != src) {
1573 + // Data is not aligned, read the first chunk
1574 + chunk_len = min(base_address + ATR0_TABLE_SIZE - src, len);
1575 + read_memory_chunk(resources, base_address, src - base_address, dest, chunk_len);
1576 + offset += chunk_len;
1577 + }
1578 +
1579 + while (offset < len) {
1580 + chunk_len = min(len - offset, ATR0_TABLE_SIZE);
1581 + read_memory_chunk(resources, src + offset, 0, (u8*)dest + offset, chunk_len);
1582 + offset += chunk_len;
1583 + }
1584 +}
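// Editor's note: a worked example of the chunked access above, assuming an
// ATR0 window of 0x1000 bytes (ATR0_TABLE_SIZE is defined elsewhere in this
// patch; the value here is illustrative). Writing len = 0x1800 bytes to
// dest = 0x80000800:
//   base_address = 0x80000800 & ~0xFFF = 0x80000000 (dest is unaligned)
//   chunk 1: window at 0x80000000, in-window offset 0x800, length 0x800
//   chunk 2: window at 0x80001000, in-window offset 0x0,   length 0x1000
// Each chunk re-programs ATR0 via configure_atr_table() before copying.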
1585 +
1586 +static void hailo_write_app_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header,
1587 + secure_boot_certificate_t *fw_cert)
1588 +{
1589 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1590 + void *fw_code = (void*)((u8*)fw_header + sizeof(firmware_header_t));
1591 + void *key_data = &fw_cert->certificates_data[0];
1592 + void *content_data = &fw_cert->certificates_data[fw_cert->key_size];
1593 +
1594 + write_memory(resources, fw_addresses->boot_fw_header, fw_header, sizeof(firmware_header_t));
1595 +
1596 + write_memory(resources, fw_addresses->app_fw_code_ram_base, fw_code, fw_header->code_size);
1597 +
1598 + write_memory(resources, fw_addresses->boot_key_cert, key_data, fw_cert->key_size);
1599 + write_memory(resources, fw_addresses->boot_cont_cert, content_data, fw_cert->content_size);
1600 +}
1601 +
1602 +static void hailo_write_core_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header)
1603 +{
1604 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1605 + void *fw_code = (void*)((u8*)fw_header + sizeof(firmware_header_t));
1606 +
1607 + write_memory(resources, fw_addresses->core_code_ram_base, fw_code, fw_header->code_size);
1608 + write_memory(resources, fw_addresses->core_fw_header, fw_header, sizeof(firmware_header_t));
1609 +}
1610 +
1611 +static void hailo_trigger_firmware_boot(struct hailo_pcie_resources *resources)
1612 +{
1613 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1614 + u32 pcie_finished = 1;
1615 +
1616 + write_memory(resources, fw_addresses->boot_fw_trigger,
1617 + (void*)&pcie_finished, sizeof(pcie_finished));
1618 +}
1619 +
1620 +/**
1621 +* Validates the FW headers.
1622 +* @param[in] firmware_base_address Address of the firmware.
1623 +* @param[in] firmware_size Size of the firmware.
1624 +* @param[out] out_app_firmware_header (optional) App firmware header
1625 +* @param[out] out_core_firmware_header (optional) Core firmware header
1626 +* @param[out] out_firmware_cert (optional) Firmware certificate header
1627 +*/
1628 +static int FW_VALIDATION__validate_fw_headers(uintptr_t firmware_base_address, size_t firmware_size,
1629 + firmware_header_t **out_app_firmware_header, firmware_header_t **out_core_firmware_header,
1630 + secure_boot_certificate_t **out_firmware_cert, enum hailo_board_type board_type)
1631 +{
1632 + firmware_header_t *app_firmware_header = NULL;
1633 + firmware_header_t *core_firmware_header = NULL;
1634 + secure_boot_certificate_t *firmware_cert = NULL;
1635 + int err = -EINVAL;
1636 + u32 consumed_firmware_offset = 0;
1637 +
1638 + err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_APP_FIRMWARE_CODE_SIZE,
1639 + &consumed_firmware_offset, &app_firmware_header, board_type);
1640 + if (0 != err) {
1641 + err = -EINVAL;
1642 + goto exit;
1643 + }
1644 +
1645 + err = FW_VALIDATION__validate_cert_header(firmware_base_address, firmware_size,
1646 + &consumed_firmware_offset, &firmware_cert);
1647 + if (0 != err) {
1648 + err = -EINVAL;
1649 + goto exit;
1650 + }
1651 +
1652 + err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_CORE_FIRMWARE_CODE_SIZE,
1653 + &consumed_firmware_offset, &core_firmware_header, board_type);
1654 + if (0 != err) {
1655 + err = -EINVAL;
1656 + goto exit;
1657 + }
1658 +
1659 + if (consumed_firmware_offset != firmware_size) {
1660 + /* it is an error if there is leftover data after the last firmware header */
1661 + err = -EINVAL;
1662 + goto exit;
1663 + }
1664 +
1665 + /* the out params are all optional */
1666 + if (NULL != out_app_firmware_header) {
1667 + *out_app_firmware_header = app_firmware_header;
1668 + }
1669 + if (NULL != out_firmware_cert) {
1670 + *out_firmware_cert = firmware_cert;
1671 + }
1672 + if (NULL != out_core_firmware_header) {
1673 + *out_core_firmware_header = core_firmware_header;
1674 + }
1675 + err = 0;
1676 +
1677 +exit:
1678 + return err;
1679 +}
1680 +
1681 +int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size)
1682 +{
1683 + firmware_header_t *app_firmware_header = NULL;
1684 + secure_boot_certificate_t *firmware_cert = NULL;
1685 + firmware_header_t *core_firmware_header = NULL;
1686 +
1687 + int err = FW_VALIDATION__validate_fw_headers((uintptr_t)fw_data, fw_size,
1688 + &app_firmware_header, &core_firmware_header, &firmware_cert, resources->board_type);
1689 + if (err < 0) {
1690 + return err;
1691 + }
1692 +
1693 + hailo_write_app_firmware(resources, app_firmware_header, firmware_cert);
1694 + hailo_write_core_firmware(resources, core_firmware_header);
1695 +
1696 + hailo_trigger_firmware_boot(resources);
1697 +
1698 + return 0;
1699 +}
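// Editor's note: a minimal, hypothetical usage sketch (not part of this
// patch) showing how a caller might fetch the firmware blob and boot the
// device using the functions above. It assumes <linux/firmware.h> is
// included; the function name and error handling are illustrative only.
static int hailo_example_boot_firmware(struct hailo_pcie_resources *resources,
    struct device *dev)
{
    const struct firmware *firmware = NULL;
    int err = request_firmware(&firmware,
        hailo_pcie_get_fw_filename(resources->board_type), dev);
    if (err < 0) {
        return err;
    }

    err = hailo_pcie_write_firmware(resources, firmware->data, firmware->size);
    release_firmware(firmware);
    if (err < 0) {
        return err;
    }

    // Poll until the firmware reconfigures ATR0 (see hailo_pcie_is_firmware_loaded).
    return hailo_pcie_wait_for_firmware(resources) ? 0 : -ETIMEDOUT;
}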
1700 +
1701 +bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources)
1702 +{
1703 + u32 offset = ATR0_PCIE_BRIDGE_OFFSET + offsetof(struct hailo_atr_config, atr_trsl_addr_1);
1704 + u32 atr_value = hailo_resource_read32(&resources->config, offset);
1705 + return atr_value == compat[resources->board_type].fw_addresses.atr0_trsl_addr1;
1706 +}
1707 +
1708 +bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources)
1709 +{
1710 + size_t retries;
1711 + for (retries = 0; retries < FIRMWARE_LOAD_WAIT_MAX_RETRIES; retries++) {
1712 + if (hailo_pcie_is_firmware_loaded(resources)) {
1713 + return true;
1714 + }
1715 +
1716 + msleep(FIRMWARE_LOAD_SLEEP_MS);
1717 + }
1718 +
1719 + return false;
1720 +}
1721 +
1722 +int hailo_pcie_write_config_common(struct hailo_pcie_resources *resources, const void* config_data,
1723 + const size_t config_size, const struct hailo_config_constants *config_consts)
1724 +{
1725 + if (config_size > config_consts->max_size) {
1726 + return -EINVAL;
1727 + }
1728 +
1729 + write_memory(resources, config_consts->address, config_data, (u32)config_size);
1730 + return 0;
1731 +}
1732 +
1733 +const struct hailo_config_constants* hailo_pcie_get_board_config_constants(const enum hailo_board_type board_type) {
1734 + BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
1735 + return &compat[board_type].board_cfg;
1736 +}
1737 +
1738 +const struct hailo_config_constants* hailo_pcie_get_user_config_constants(const enum hailo_board_type board_type) {
1739 + BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
1740 + return &compat[board_type].fw_cfg;
1741 +}
1742 +
1743 +const char* hailo_pcie_get_fw_filename(const enum hailo_board_type board_type) {
1744 + BUG_ON(board_type >= HAILO_BOARD_TYPE_COUNT || board_type < 0);
1745 + return compat[board_type].fw_filename;
1746 +}
1747 +
1748 +void hailo_pcie_update_channel_interrupts_mask(struct hailo_pcie_resources* resources, u32 channels_bitmap)
1749 +{
1750 + size_t i = 0;
1751 + u32 mask = hailo_resource_read32(&resources->config, BSC_IMASK_HOST);
1752 +
1753 + // Clear old channel interrupts
1754 + mask &= ~BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK;
1755 + mask &= ~BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK;
1756 + // Set interrupts according to the bitmap
1757 + for (i = 0; i < MAX_VDMA_CHANNELS_PER_ENGINE; ++i) {
1758 + if (hailo_test_bit(i, &channels_bitmap)) {
1759 + // based on 18.5.2 "vDMA Interrupt Registers" in PLDA documentation
1760 + u32 offset = (i < VDMA_DEST_CHANNELS_START) ? 0 : 8;
1761 + hailo_set_bit((((int)i*8) / MAX_VDMA_CHANNELS_PER_ENGINE) + offset, &mask);
1762 + }
1763 + }
1764 + hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
1765 +}
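// Editor's note: a worked example of the mask computation above, assuming
// MAX_VDMA_CHANNELS_PER_ENGINE == 32 and VDMA_DEST_CHANNELS_START == 16
// (both defined elsewhere in this patch; the values here are assumptions):
//   channel 5  (H2D): bit (5*8)/32  + 0 = 1
//   channel 20 (D2H): bit (20*8)/32 + 8 = 13
// H2D channels thus land in BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK (bits 0-7)
// and D2H channels in BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK (bits 8-15).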
1766 +
1767 +void hailo_pcie_enable_interrupts(struct hailo_pcie_resources *resources)
1768 +{
1769 + u32 mask = hailo_resource_read32(&resources->config, BSC_IMASK_HOST);
1770 +
1771 + hailo_resource_write32(&resources->config, BCS_ISTATUS_HOST, 0xFFFFFFFF);
1772 + hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
1773 + hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
1774 +
1775 + mask |= BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK | BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION | BCS_ISTATUS_HOST_DRIVER_DOWN;
1776 + hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
1777 +}
1778 +
1779 +void hailo_pcie_disable_interrupts(struct hailo_pcie_resources* resources)
1780 +{
1781 + hailo_resource_write32(&resources->config, BSC_IMASK_HOST, 0);
1782 +}
1783 +
1784 +long hailo_pcie_read_firmware_log(struct hailo_pcie_resources *resources, struct hailo_read_log_params *params)
1785 +{
1786 + long err = 0;
1787 + struct hailo_resource log_resource = {resources->fw_access.address, DEBUG_BUFFER_TOTAL_SIZE};
1788 +
1789 + if (HAILO_CPU_ID_CPU0 == params->cpu_id) {
1790 + log_resource.address += PCIE_APP_CPU_DEBUG_OFFSET;
1791 + } else if (HAILO_CPU_ID_CPU1 == params->cpu_id) {
1792 + log_resource.address += PCIE_CORE_CPU_DEBUG_OFFSET;
1793 + } else {
1794 + return -EINVAL;
1795 + }
1796 +
1797 + if (0 == params->buffer_size) {
1798 + params->read_bytes = 0;
1799 + return 0;
1800 + }
1801 +
1802 + err = hailo_read_firmware_log(&log_resource, params);
1803 + if (0 != err) {
1804 + return err;
1805 + }
1806 +
1807 + return 0;
1808 +}
1809 +
1810 +static int direct_memory_transfer(struct hailo_pcie_resources *resources,
1811 + struct hailo_memory_transfer_params *params)
1812 +{
1813 + int err = -EINVAL;
1814 + struct hailo_atr_config previous_atr = {0};
1815 +
1816 + if (params->address > U32_MAX) {
1817 + return -EFAULT;
1818 + }
1819 +
1820 + // Store the previous ATR (the read/write paths below modify it).
1821 + read_atr_table(resources, &previous_atr);
1822 +
1823 + switch (params->transfer_direction) {
1824 + case TRANSFER_READ:
1825 + read_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
1826 + break;
1827 + case TRANSFER_WRITE:
1828 + write_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
1829 + break;
1830 + default:
1831 + err = -EINVAL;
1832 + goto restore_atr;
1833 + }
1834 +
1835 + err = 0;
1836 +restore_atr:
1837 + write_atr_table(resources, &previous_atr);
1838 + return err;
1839 +}
1840 +
1841 +int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params)
1842 +{
1843 + if (params->count > ARRAY_SIZE(params->buffer)) {
1844 + return -EINVAL;
1845 + }
1846 +
1847 + switch (params->memory_type) {
1848 + case HAILO_TRANSFER_DEVICE_DIRECT_MEMORY:
1849 + return direct_memory_transfer(resources, params);
1850 + case HAILO_TRANSFER_MEMORY_PCIE_BAR0:
1851 + return hailo_resource_transfer(&resources->config, params);
1852 + case HAILO_TRANSFER_MEMORY_PCIE_BAR2:
1853 + case HAILO_TRANSFER_MEMORY_VDMA0:
1854 + return hailo_resource_transfer(&resources->vdma_registers, params);
1855 + case HAILO_TRANSFER_MEMORY_PCIE_BAR4:
1856 + return hailo_resource_transfer(&resources->fw_access, params);
1857 + default:
1858 + return -EINVAL;
1859 + }
1860 +}
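// Editor's note: a minimal sketch (not part of this patch) of driving the
// direct-memory path above. Field names come from struct
// hailo_memory_transfer_params as used in this file; the function name is
// illustrative, and <linux/slab.h>/<linux/string.h> are assumed available.
static int hailo_example_direct_read(struct hailo_pcie_resources *resources,
    u32 device_address, void *out, size_t count)
{
    int err;
    // The params struct embeds the transfer buffer, so allocate it on the
    // heap rather than on the kernel stack.
    struct hailo_memory_transfer_params *params = kzalloc(sizeof(*params), GFP_KERNEL);
    if (!params) {
        return -ENOMEM;
    }
    if (count > sizeof(params->buffer)) {
        kfree(params);
        return -EINVAL;
    }

    params->transfer_direction = TRANSFER_READ;
    params->memory_type = HAILO_TRANSFER_DEVICE_DIRECT_MEMORY;
    params->address = device_address;
    params->count = count;

    err = hailo_pcie_memory_transfer(resources, params);
    if (!err) {
        memcpy(out, params->buffer, count);
    }
    kfree(params);
    return err;
}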
1861 +
1862 +bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources)
1863 +{
1864 + return PCI_VENDOR_ID_HAILO == hailo_resource_read16(&resources->config, PCIE_CONFIG_VENDOR_OFFSET);
1865 +}
1866 +
1867 +// On PCIe, just return the address
1868 +static u64 encode_dma_address(dma_addr_t dma_address, u8 channel_id)
1869 +{
1870 + (void)channel_id;
1871 + return (u64)dma_address;
1872 +}
1873 +
1874 +struct hailo_vdma_hw hailo_pcie_vdma_hw = {
1875 + .hw_ops = {
1876 + .encode_desc_dma_address = encode_dma_address
1877 + },
1878 + .ddr_data_id = HAILO_PCIE_HOST_DMA_DATA_ID,
1879 + .device_interrupts_bitmask = HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK,
1880 + .host_interrupts_bitmask = HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK,
1881 +
1882 +};
1883 \ No newline at end of file
1884 --- /dev/null
1885 +++ b/drivers/media/pci/hailo/common/pcie_common.h
1886 @@ -0,0 +1,128 @@
1887 +// SPDX-License-Identifier: GPL-2.0
1888 +/**
1889 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1890 + **/
1891 +
1892 +#ifndef _HAILO_COMMON_PCIE_COMMON_H_
1893 +#define _HAILO_COMMON_PCIE_COMMON_H_
1894 +
1895 +#include "hailo_resource.h"
1896 +#include "hailo_ioctl_common.h"
1897 +#include "fw_validation.h"
1898 +#include "fw_operation.h"
1899 +#include "utils.h"
1900 +#include "vdma_common.h"
1901 +
1902 +#include <linux/types.h>
1903 +
1904 +
1905 +#define BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK (0x04000000)
1906 +#define BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION (0x02000000)
1907 +#define BCS_ISTATUS_HOST_DRIVER_DOWN (0x08000000)
1908 +#define BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK (0x000000FF)
1909 +#define BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK (0x0000FF00)
1910 +
1911 +#define PCIE_HAILO8_BOARD_CFG_MAX_SIZE (0x500)
1912 +#define PCIE_HAILO8_FW_CFG_MAX_SIZE (0x500)
1913 +
1914 +#define FW_CODE_SECTION_ALIGNMENT (4)
1915 +
1916 +#define HAILO_PCIE_CONFIG_BAR (0)
1917 +#define HAILO_PCIE_VDMA_REGS_BAR (2)
1918 +#define HAILO_PCIE_FW_ACCESS_BAR (4)
1919 +
1920 +#define HAILO_PCIE_DMA_ENGINES_COUNT (1)
1921 +
1922 +#define DRIVER_NAME "hailo"
1923 +
1924 +#define PCI_VENDOR_ID_HAILO 0x1e60
1925 +#define PCI_DEVICE_ID_HAILO_HAILO8 0x2864
1926 +#define PCI_DEVICE_ID_HAILO_HAILO15 0x45C4
1927 +#define PCI_DEVICE_ID_HAILO_PLUTO 0x43a2
1928 +
1929 +struct hailo_pcie_resources {
1930 + struct hailo_resource config; // BAR0
1931 + struct hailo_resource vdma_registers; // BAR2
1932 + struct hailo_resource fw_access; // BAR4
1933 + enum hailo_board_type board_type;
1934 +};
1935 +
1936 +enum hailo_pcie_interrupt_masks {
1937 + FW_CONTROL = BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK,
1938 + FW_NOTIFICATION = BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION,
1939 + DRIVER_DOWN = BCS_ISTATUS_HOST_DRIVER_DOWN,
1940 + VDMA_SRC_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK,
1941 + VDMA_DEST_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK
1942 +};
1943 +
1944 +struct hailo_pcie_interrupt_source {
1945 + u32 interrupt_bitmask;
1946 + u32 vdma_channels_bitmap;
1947 +};
1948 +
1949 +struct hailo_config_constants {
1950 + const char *filename;
1951 + u32 address;
1952 + size_t max_size;
1953 +};
1954 +
1955 +// TODO: HRT-6144 - Align Windows/Linux to QNX
1956 +#ifdef __QNX__
1957 +enum hailo_bar_index {
1958 + BAR0 = 0,
1959 + BAR2,
1960 + BAR4,
1961 + MAX_BAR
1962 +};
1963 +#else
1964 +enum hailo_bar_index {
1965 + BAR0 = 0,
1966 + BAR1,
1967 + BAR2,
1968 + BAR3,
1969 + BAR4,
1970 + BAR5,
1971 + MAX_BAR
1972 +};
1973 +#endif // ifdef (__QNX__)
1974 +
1975 +#ifdef __cplusplus
1976 +extern "C" {
1977 +#endif
1978 +
1979 +extern struct hailo_vdma_hw hailo_pcie_vdma_hw;
1980 +
1981 +// Reads the interrupt source from the BARs; returns false if there is no interrupt.
1982 +// Note: this function clears the interrupt signals.
1983 +bool hailo_pcie_read_interrupt(struct hailo_pcie_resources *resources, struct hailo_pcie_interrupt_source *source);
1984 +void hailo_pcie_update_channel_interrupts_mask(struct hailo_pcie_resources *resources, u32 channels_bitmap);
1985 +void hailo_pcie_enable_interrupts(struct hailo_pcie_resources *resources);
1986 +void hailo_pcie_disable_interrupts(struct hailo_pcie_resources *resources);
1987 +
1988 +int hailo_pcie_write_firmware_control(struct hailo_pcie_resources *resources, const struct hailo_fw_control *command);
1989 +int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command);
1990 +
1991 +int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size);
1992 +bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources);
1993 +bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources);
1994 +
1995 +int hailo_pcie_read_firmware_notification(struct hailo_pcie_resources *resources,
1996 + struct hailo_d2h_notification *notification);
1997 +
1998 +int hailo_pcie_write_config_common(struct hailo_pcie_resources *resources, const void* config_data,
1999 + const size_t config_size, const struct hailo_config_constants *config_consts);
2000 +const struct hailo_config_constants* hailo_pcie_get_board_config_constants(const enum hailo_board_type board_type);
2001 +const struct hailo_config_constants* hailo_pcie_get_user_config_constants(const enum hailo_board_type board_type);
2002 +const char* hailo_pcie_get_fw_filename(const enum hailo_board_type board_type);
2003 +
2004 +long hailo_pcie_read_firmware_log(struct hailo_pcie_resources *resources, struct hailo_read_log_params *params);
2005 +int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params);
2006 +
2007 +bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources);
2008 +void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources);
2009 +
2010 +#ifdef __cplusplus
2011 +}
2012 +#endif
2013 +
2014 +#endif /* _HAILO_COMMON_PCIE_COMMON_H_ */
2015 \ No newline at end of file
2016 --- /dev/null
2017 +++ b/drivers/media/pci/hailo/common/utils.h
2018 @@ -0,0 +1,39 @@
2019 +// SPDX-License-Identifier: GPL-2.0
2020 +/**
2021 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2022 + **/
2023 +
2024 +#ifndef _HAILO_DRIVER_UTILS_H_
2025 +#define _HAILO_DRIVER_UTILS_H_
2026 +
2027 +#include <linux/bitops.h>
2028 +
2029 +#define hailo_clear_bit(bit, pval) { *(pval) &= ~(1 << (bit)); }
2030 +#define hailo_test_bit(pos, var_addr) ((*(var_addr)) & (1 << (pos)))
2031 +
2032 +#ifdef __cplusplus
2033 +extern "C"
2034 +{
2035 +#endif
2036 +
2037 +static inline bool is_powerof2(size_t v) {
2038 + // Bit trick: a power of two shares no set bits with (v - 1).
2039 + return (v & (v - 1)) == 0;
2040 +}
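// Editor's note: examples -- is_powerof2(1) and is_powerof2(64) are true,
// is_powerof2(12) is false. Edge case: is_powerof2(0) also returns true,
// so callers must reject zero separately when it matters.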
2041 +
2042 +static inline void hailo_set_bit(int nr, u32* addr) {
2043 + u32 mask = BIT_MASK(nr);
2044 + u32 *p = addr + BIT_WORD(nr);
2045 +
2046 + *p |= mask;
2047 +}
2048 +
2049 +#ifndef DIV_ROUND_UP
2050 +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
2051 +#endif
2052 +
2053 +#ifdef __cplusplus
2054 +}
2055 +#endif
2056 +
2057 +#endif // _HAILO_DRIVER_UTILS_H_
2058 \ No newline at end of file
2059 --- /dev/null
2060 +++ b/drivers/media/pci/hailo/common/vdma_common.c
2061 @@ -0,0 +1,684 @@
2062 +// SPDX-License-Identifier: GPL-2.0
2063 +/**
2064 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2065 + **/
2066 +
2067 +#include "vdma_common.h"
2068 +
2069 +#include <linux/types.h>
2070 +#include <linux/errno.h>
2071 +#include <linux/bug.h>
2072 +#include <linux/circ_buf.h>
2073 +#include <linux/ktime.h>
2074 +#include <linux/timekeeping.h>
2075 +#include <linux/kernel.h>
2076 +#include <linux/kconfig.h>
2077 +#include <linux/printk.h>
2078 +
2079 +
2080 +#define CHANNEL_BASE_OFFSET(channel_index) ((channel_index) << 5)
2081 +#define CHANNEL_HOST_OFFSET(channel_index) (CHANNEL_BASE_OFFSET(channel_index) + \
2082 + ((channel_index) < VDMA_DEST_CHANNELS_START ? 0 : 0x10))
2083 +#define CHANNEL_DEVICE_OFFSET(channel_index) (CHANNEL_BASE_OFFSET(channel_index) + \
2084 + ((channel_index) < VDMA_DEST_CHANNELS_START ? 0x10 : 0))
2085 +
2086 +#define CHANNEL_CONTROL_OFFSET (0x0)
2087 +#define CHANNEL_NUM_AVAIL_OFFSET (0x2)
2088 +#define CHANNEL_NUM_PROC_OFFSET (0x4)
2089 +#define CHANNEL_ERROR_OFFSET (0x8)
2090 +
2091 +#define VDMA_CHANNEL_CONTROL_START (0x1)
2092 +#define VDMA_CHANNEL_CONTROL_ABORT (0b00)
2093 +#define VDMA_CHANNEL_CONTROL_ABORT_PAUSE (0b10)
2094 +#define VDMA_CHANNEL_CONTROL_START_ABORT_PAUSE_RESUME_BITMASK (0x3)
2095 +#define VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK (0x1)
2096 +
2097 +#define DESCRIPTOR_PAGE_SIZE_SHIFT (8)
2098 +#define DESCRIPTOR_DESC_CONTROL (0x2)
2099 +#define DESCRIPTOR_ADDR_L_MASK (0xFFFFFFC0)
2100 +
2101 +#define DESCRIPTOR_DESC_STATUS_DONE_BIT (0x0)
2102 +#define DESCRIPTOR_DESC_STATUS_ERROR_BIT (0x1)
2103 +#define DESCRIPTOR_DESC_STATUS_MASK (0xFF)
2104 +
2105 +#define DESC_STATUS_REQ (1 << 0)
2106 +#define DESC_STATUS_REQ_ERR (1 << 1)
2107 +#define DESC_REQUEST_IRQ_PROCESSED (1 << 2)
2108 +#define DESC_REQUEST_IRQ_ERR (1 << 3)
2109 +
2110 +
2111 +#define DWORD_SIZE (4)
2112 +#define WORD_SIZE (2)
2113 +#define BYTE_SIZE (1)
2114 +
2115 +#define TIMESTAMPS_CIRC_SPACE(timestamp_list) \
2116 + CIRC_SPACE((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
2117 +#define TIMESTAMPS_CIRC_CNT(timestamp_list) \
2118 + CIRC_CNT((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
2119 +
2120 +#define ONGOING_TRANSFERS_CIRC_SPACE(transfers_list) \
2121 + CIRC_SPACE((transfers_list).head, (transfers_list).tail, HAILO_VDMA_MAX_ONGOING_TRANSFERS)
2122 +#define ONGOING_TRANSFERS_CIRC_CNT(transfers_list) \
2123 + CIRC_CNT((transfers_list).head, (transfers_list).tail, HAILO_VDMA_MAX_ONGOING_TRANSFERS)
2124 +
2125 +#ifndef for_each_sgtable_dma_sg
2126 +#define for_each_sgtable_dma_sg(sgt, sg, i) \
2127 + for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
2128 +#endif /* for_each_sgtable_dma_sg */
2129 +
2130 +
2131 +static int ongoing_transfer_push(struct hailo_vdma_channel *channel,
2132 + struct hailo_ongoing_transfer *ongoing_transfer)
2133 +{
2134 + struct hailo_ongoing_transfers_list *transfers = &channel->ongoing_transfers;
2135 + if (!ONGOING_TRANSFERS_CIRC_SPACE(*transfers)) {
2136 + return -EFAULT;
2137 + }
2138 +
2139 + if (ongoing_transfer->dirty_descs_count > ARRAY_SIZE(ongoing_transfer->dirty_descs)) {
2140 + return -EFAULT;
2141 + }
2142 +
2143 + transfers->transfers[transfers->head] = *ongoing_transfer;
2144 + transfers->head = (transfers->head + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK;
2145 + return 0;
2146 +}
2147 +
2148 +static int ongoing_transfer_pop(struct hailo_vdma_channel *channel,
2149 + struct hailo_ongoing_transfer *ongoing_transfer)
2150 +{
2151 + struct hailo_ongoing_transfers_list *transfers = &channel->ongoing_transfers;
2152 + if (!ONGOING_TRANSFERS_CIRC_CNT(*transfers)) {
2153 + return -EFAULT;
2154 + }
2155 +
2156 + if (ongoing_transfer) {
2157 + *ongoing_transfer = transfers->transfers[transfers->tail];
2158 + }
2159 + transfers->tail = (transfers->tail + 1) & HAILO_VDMA_MAX_ONGOING_TRANSFERS_MASK;
2160 + return 0;
2161 +}
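// Editor's note: head/tail above form a standard linux circ_buf ring, so
// HAILO_VDMA_MAX_ONGOING_TRANSFERS must be a power of two (its mask is used
// for the wrap-around) and one slot is always left empty to distinguish a
// full ring from an empty one.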
2162 +
2163 +static void clear_dirty_desc(struct hailo_vdma_descriptors_list *desc_list, u16 desc)
2164 +{
2165 + desc_list->desc_list[desc].PageSize_DescControl =
2166 + (u32)((desc_list->desc_page_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
2167 +}
2168 +
2169 +static void clear_dirty_descs(struct hailo_vdma_channel *channel,
2170 + struct hailo_ongoing_transfer *ongoing_transfer)
2171 +{
2172 + u8 i = 0;
2173 + struct hailo_vdma_descriptors_list *desc_list = channel->last_desc_list;
2174 + BUG_ON(ongoing_transfer->dirty_descs_count > ARRAY_SIZE(ongoing_transfer->dirty_descs));
2175 + for (i = 0; i < ongoing_transfer->dirty_descs_count; i++) {
2176 + clear_dirty_desc(desc_list, ongoing_transfer->dirty_descs[i]);
2177 + }
2178 +}
2179 +
2180 +static bool validate_last_desc_status(struct hailo_vdma_channel *channel,
2181 + struct hailo_ongoing_transfer *ongoing_transfer)
2182 +{
2183 + u16 last_desc = ongoing_transfer->last_desc;
2184 + u32 last_desc_control = channel->last_desc_list->desc_list[last_desc].RemainingPageSize_Status &
2185 + DESCRIPTOR_DESC_STATUS_MASK;
2186 + if (!hailo_test_bit(DESCRIPTOR_DESC_STATUS_DONE_BIT, &last_desc_control)) {
2187 + pr_err("Expecting desc %d to be done\n", last_desc);
2188 + return false;
2189 + }
2190 + if (hailo_test_bit(DESCRIPTOR_DESC_STATUS_ERROR_BIT, &last_desc_control)) {
2191 + pr_err("Got unexpected error on desc %d\n", last_desc);
2192 + return false;
2193 + }
2194 +
2195 + return true;
2196 +}
2197 +
2198 +void hailo_vdma_program_descriptor(struct hailo_vdma_descriptor *descriptor, u64 dma_address, size_t page_size,
2199 + u8 data_id)
2200 +{
2201 + descriptor->PageSize_DescControl = (u32)((page_size << DESCRIPTOR_PAGE_SIZE_SHIFT) +
2202 + DESCRIPTOR_DESC_CONTROL);
2203 + descriptor->AddrL_rsvd_DataID = (u32)(((dma_address & DESCRIPTOR_ADDR_L_MASK)) | data_id);
2204 + descriptor->AddrH = (u32)(dma_address >> 32);
2205 + descriptor->RemainingPageSize_Status = 0;
2206 +}
2207 +
2208 +static u8 get_channel_id(u8 channel_index)
2209 +{
2210 + if (channel_index < VDMA_DEST_CHANNELS_START) {
2211 + // H2D channel
2212 + return channel_index;
2213 + }
2214 + else if ((channel_index >= VDMA_DEST_CHANNELS_START) &&
2215 + (channel_index < MAX_VDMA_CHANNELS_PER_ENGINE)) {
2216 + // D2H channel
2217 + return channel_index - VDMA_DEST_CHANNELS_START;
2218 + }
2219 + else {
2220 + return INVALID_VDMA_CHANNEL;
2221 + }
2222 +}
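// Editor's note: assuming VDMA_DEST_CHANNELS_START == 16 (defined elsewhere
// in this patch), H2D channel 3 keeps id 3 and D2H channel 19 also maps to
// id 3 -- i.e. the H2D/D2H pair (i, i + 16) shares one hardware channel id.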
2223 +
2224 +static int program_descriptors_in_chunk(
2225 + struct hailo_vdma_hw *vdma_hw,
2226 + dma_addr_t chunk_addr,
2227 + unsigned int chunk_size,
2228 + struct hailo_vdma_descriptors_list *desc_list,
2229 + u32 desc_index,
2230 + u32 max_desc_index,
2231 + u8 channel_id)
2232 +{
2233 + const u32 desc_per_chunk = DIV_ROUND_UP(chunk_size, desc_list->desc_page_size);
2234 + struct hailo_vdma_descriptor *dma_desc = NULL;
2235 + u16 size_to_program = 0;
2236 + u32 index = 0;
2237 + u64 encoded_addr = 0;
2238 +
2239 + for (index = 0; index < desc_per_chunk; index++) {
2240 + if (desc_index > max_desc_index) {
2241 + return -ERANGE;
2242 + }
2243 +
2244 + encoded_addr = vdma_hw->hw_ops.encode_desc_dma_address(chunk_addr, channel_id);
2245 + if (INVALID_VDMA_ADDRESS == encoded_addr) {
2246 + return -EFAULT;
2247 + }
2248 +
2249 + dma_desc = &desc_list->desc_list[desc_index % desc_list->desc_count];
2250 + size_to_program = chunk_size > desc_list->desc_page_size ?
2251 + desc_list->desc_page_size : (u16)chunk_size;
2252 + hailo_vdma_program_descriptor(dma_desc, encoded_addr, size_to_program, vdma_hw->ddr_data_id);
2253 +
2254 + chunk_addr += size_to_program;
2255 + chunk_size -= size_to_program;
2256 + desc_index++;
2257 + }
2258 +
2259 + return (int)desc_per_chunk;
2260 +}
2261 +
2262 +int hailo_vdma_program_descriptors_list(
2263 + struct hailo_vdma_hw *vdma_hw,
2264 + struct hailo_vdma_descriptors_list *desc_list,
2265 + u32 starting_desc,
2266 + struct hailo_vdma_mapped_transfer_buffer *buffer,
2267 + u8 channel_index)
2268 +{
2269 + const u8 channel_id = get_channel_id(channel_index);
2270 + int desc_programmed = 0;
2271 + u32 max_desc_index = 0;
2272 + u32 chunk_size = 0;
2273 + struct scatterlist *sg_entry = NULL;
2274 + unsigned int i = 0;
2275 + int ret = 0;
2276 + size_t buffer_current_offset = 0;
2277 + dma_addr_t chunk_start_addr = 0;
2278 + u32 program_size = buffer->size;
2279 +
2280 + if (starting_desc >= desc_list->desc_count) {
2281 + return -EFAULT;
2282 + }
2283 +
2284 + if (buffer->offset % desc_list->desc_page_size != 0) {
2285 + return -EFAULT;
2286 + }
2287 +
2288 + // On a circular buffer, allow programming desc_count descriptors (starting
2289 + // from starting_desc). On a non-circular buffer, don't allow it to pass desc_count.
2290 + max_desc_index = desc_list->is_circular ?
2291 + starting_desc + desc_list->desc_count - 1 :
2292 + desc_list->desc_count - 1;
2293 + for_each_sgtable_dma_sg(buffer->sg_table, sg_entry, i) {
2294 + // Skip sg entries until we reach the requested buffer offset; the offset can fall in the middle of an sg entry.
2295 + if (buffer_current_offset + sg_dma_len(sg_entry) < buffer->offset) {
2296 + buffer_current_offset += sg_dma_len(sg_entry);
2297 + continue;
2298 + }
2299 + chunk_start_addr = (buffer_current_offset < buffer->offset) ?
2300 + sg_dma_address(sg_entry) + (buffer->offset - buffer_current_offset) :
2301 + sg_dma_address(sg_entry);
2302 + chunk_size = (buffer_current_offset < buffer->offset) ?
2303 + (u32)(sg_dma_len(sg_entry) - (buffer->offset - buffer_current_offset)) :
2304 + (u32)(sg_dma_len(sg_entry));
2305 + chunk_size = min((u32)program_size, chunk_size);
2306 +
2307 + ret = program_descriptors_in_chunk(vdma_hw, chunk_start_addr, chunk_size, desc_list,
2308 + starting_desc, max_desc_index, channel_id);
2309 + if (ret < 0) {
2310 + return ret;
2311 + }
2312 +
2313 + desc_programmed += ret;
2314 + starting_desc = starting_desc + ret;
2315 + program_size -= chunk_size;
2316 + buffer_current_offset += sg_dma_len(sg_entry);
2317 + }
2318 +
2319 + if (program_size != 0) {
2320 + // We didn't program the whole buffer.
2321 + return -EFAULT;
2322 + }
2323 +
2324 + return desc_programmed;
2325 +}
2326 +
2327 +static bool channel_control_reg_is_active(u8 control)
2328 +{
2329 + return (control & VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK) == VDMA_CHANNEL_CONTROL_START;
2330 +}
2331 +
2332 +static int validate_channel_state(struct hailo_vdma_channel *channel)
2333 +{
2334 + const u8 control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
2335 + const u16 hw_num_avail = ioread16(channel->host_regs + CHANNEL_NUM_AVAIL_OFFSET);
2336 +
2337 + if (!channel_control_reg_is_active(control)) {
2338 + pr_err("Channel %d is not active\n", channel->index);
2339 + return -EBUSY;
2340 + }
2341 +
2342 + if (hw_num_avail != channel->state.num_avail) {
2343 + pr_err("Channel %d hw state out of sync. num available is %d, expected %d\n",
2344 + channel->index, hw_num_avail, channel->state.num_avail);
2345 + return -EFAULT;
2346 + }
2347 +
2348 + return 0;
2349 +}
2350 +
2351 +static unsigned long get_interrupts_bitmask(struct hailo_vdma_hw *vdma_hw,
2352 + enum hailo_vdma_interrupts_domain interrupts_domain, bool is_debug)
2353 +{
2354 + unsigned long bitmask = 0;
2355 +
2356 + if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE & interrupts_domain)) {
2357 + bitmask |= vdma_hw->device_interrupts_bitmask;
2358 + }
2359 + if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_HOST & interrupts_domain)) {
2360 + bitmask |= vdma_hw->host_interrupts_bitmask;
2361 + }
2362 +
2363 + if (bitmask != 0) {
2364 + bitmask |= DESC_REQUEST_IRQ_PROCESSED | DESC_REQUEST_IRQ_ERR;
2365 + if (is_debug) {
2366 + bitmask |= DESC_STATUS_REQ | DESC_STATUS_REQ_ERR;
2367 + }
2368 + }
2369 +
2370 + return bitmask;
2371 +}
2372 +
2373 +static void set_num_avail(u8 __iomem *host_regs, u16 num_avail)
2374 +{
2375 + iowrite16(num_avail, host_regs + CHANNEL_NUM_AVAIL_OFFSET);
2376 +}
2377 +
2378 +static u16 get_num_proc(u8 __iomem *host_regs)
2379 +{
2380 + return ioread16(host_regs + CHANNEL_NUM_PROC_OFFSET);
2381 +}
2382 +
2383 +static int program_last_desc(
2384 + struct hailo_vdma_descriptors_list *desc_list,
2385 + u32 starting_desc,
2386 + struct hailo_vdma_mapped_transfer_buffer *transfer_buffer)
2387 +{
2388 + u32 total_descs = DIV_ROUND_UP(transfer_buffer->size, desc_list->desc_page_size);
2389 + u32 last_desc = (starting_desc + total_descs - 1) % desc_list->desc_count;
2390 + u32 last_desc_size = transfer_buffer->size - (total_descs - 1) * desc_list->desc_page_size;
2391 +
2392 + // Configure only the last descriptor with the residue size
2393 + desc_list->desc_list[last_desc].PageSize_DescControl = (u32)
2394 + ((last_desc_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
2395 + return (int)total_descs;
2396 +}
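// Editor's note: residue arithmetic example for program_last_desc(), with
// desc_page_size == 512, transfer_buffer->size == 10000 and starting_desc == 0:
//   total_descs    = DIV_ROUND_UP(10000, 512) = 20
//   last_desc_size = 10000 - 19 * 512 = 272
// Descriptors 0..18 keep the full page size; only descriptor 19 is
// reprogrammed with the 272-byte residue.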
2397 +
2398 +int hailo_vdma_launch_transfer(
2399 + struct hailo_vdma_hw *vdma_hw,
2400 + struct hailo_vdma_channel *channel,
2401 + struct hailo_vdma_descriptors_list *desc_list,
2402 + u32 starting_desc,
2403 + u8 buffers_count,
2404 + struct hailo_vdma_mapped_transfer_buffer *buffers,
2405 + bool should_bind,
2406 + enum hailo_vdma_interrupts_domain first_interrupts_domain,
2407 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
2408 + bool is_debug)
2409 +{
2410 + int ret = -EFAULT;
2411 + u32 total_descs = 0;
2412 + u32 first_desc = starting_desc;
2413 + u32 last_desc = U32_MAX;
2414 + u16 new_num_avail = 0;
2415 + struct hailo_ongoing_transfer ongoing_transfer = {0};
2416 + u8 i = 0;
2417 +
2418 + channel->state.desc_count_mask = (desc_list->desc_count - 1);
2419 +
2420 + if (NULL == channel->last_desc_list) {
2421 + // First transfer on this active channel, store desc list.
2422 + channel->last_desc_list = desc_list;
2423 + } else if (desc_list != channel->last_desc_list) {
2424 + // Shouldn't happen, desc list may change only after channel deactivation.
2425 + pr_err("Inconsistent desc list given to channel %d\n", channel->index);
2426 + return -EINVAL;
2427 + }
2428 +
2429 + if (channel->state.num_avail != (u16)starting_desc) {
2430 + pr_err("Channel %d state out of sync. num available is %d, expected %d\n",
2431 + channel->index, channel->state.num_avail, (u16)starting_desc);
2432 + return -EFAULT;
2433 + }
2434 +
2435 + if (buffers_count > HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER) {
2436 + pr_err("Too many buffers %u for single transfer\n", buffers_count);
2437 + return -EINVAL;
2438 + }
2439 +
2440 + if (is_debug) {
2441 + ret = validate_channel_state(channel);
2442 + if (ret < 0) {
2443 + return ret;
2444 + }
2445 + }
2446 +
2447 + BUILD_BUG_ON_MSG((HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1) != ARRAY_SIZE(ongoing_transfer.dirty_descs),
2448 + "Unexpected amount of dirty descriptors");
2449 + ongoing_transfer.dirty_descs_count = buffers_count + 1;
2450 + ongoing_transfer.dirty_descs[0] = (u16)starting_desc;
2451 +
2452 + for (i = 0; i < buffers_count; i++) {
2453 + ret = should_bind ?
2454 + hailo_vdma_program_descriptors_list(vdma_hw, desc_list, starting_desc, &buffers[i], channel->index) :
2455 + program_last_desc(desc_list, starting_desc, &buffers[i]);
2456 + if (ret < 0) {
2457 + return ret;
2458 + }
2459 + total_descs += ret;
2460 + last_desc = (starting_desc + ret - 1) % desc_list->desc_count;
2461 + starting_desc = (starting_desc + ret) % desc_list->desc_count;
2462 +
2463 + ongoing_transfer.dirty_descs[i+1] = (u16)last_desc;
2464 + ongoing_transfer.buffers[i] = buffers[i];
2465 + }
2466 + ongoing_transfer.buffers_count = buffers_count;
2467 +
2468 + desc_list->desc_list[first_desc].PageSize_DescControl |=
2469 + get_interrupts_bitmask(vdma_hw, first_interrupts_domain, is_debug);
2470 + desc_list->desc_list[last_desc].PageSize_DescControl |=
2471 + get_interrupts_bitmask(vdma_hw, last_desc_interrupts, is_debug);
2472 +
2473 + ongoing_transfer.last_desc = (u16)last_desc;
2474 + ongoing_transfer.is_debug = is_debug;
2475 + ret = ongoing_transfer_push(channel, &ongoing_transfer);
2476 + if (ret < 0) {
2477 + pr_err("Failed push ongoing transfer to channel %d\n", channel->index);
2478 + return ret;
2479 + }
2480 +
2481 + new_num_avail = (u16)((last_desc + 1) % desc_list->desc_count);
2482 + channel->state.num_avail = new_num_avail;
2483 + set_num_avail(channel->host_regs, new_num_avail);
2484 +
2485 + return (int)total_descs;
2486 +}
2487 +
2488 +static void hailo_vdma_push_timestamp(struct hailo_vdma_channel *channel)
2489 +{
2490 + struct hailo_channel_interrupt_timestamp_list *timestamp_list = &channel->timestamp_list;
2491 + const u16 num_proc = get_num_proc(channel->host_regs);
2492 + if (TIMESTAMPS_CIRC_SPACE(*timestamp_list) != 0) {
2493 + timestamp_list->timestamps[timestamp_list->head].timestamp_ns = ktime_get_ns();
2494 + timestamp_list->timestamps[timestamp_list->head].desc_num_processed = num_proc;
2495 + timestamp_list->head = (timestamp_list->head + 1) & CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK;
2496 + }
2497 +}
2498 +
2499 +// Returns false if there are no items
2500 +static bool hailo_vdma_pop_timestamp(struct hailo_channel_interrupt_timestamp_list *timestamp_list,
2501 + struct hailo_channel_interrupt_timestamp *out_timestamp)
2502 +{
2503 + if (0 == TIMESTAMPS_CIRC_CNT(*timestamp_list)) {
2504 + return false;
2505 + }
2506 +
2507 + *out_timestamp = timestamp_list->timestamps[timestamp_list->tail];
2508 + timestamp_list->tail = (timestamp_list->tail+1) & CHANNEL_IRQ_TIMESTAMPS_SIZE_MASK;
2509 + return true;
2510 +}
2511 +
2512 +static void hailo_vdma_pop_timestamps_to_response(struct hailo_vdma_channel *channel,
2513 + struct hailo_vdma_interrupts_read_timestamp_params *result)
2514 +{
2515 + const u32 max_timestamps = ARRAY_SIZE(result->timestamps);
2516 + u32 i = 0;
2517 +
2518 + while ((i < max_timestamps) &&
2519 + hailo_vdma_pop_timestamp(&channel->timestamp_list, &result->timestamps[i])) {
2520 + // Although the hw_num_processed should be a number between 0 and
2521 + // desc_count-1, if desc_count < 0x10000 (the maximum desc size),
2522 + // the actual hw_num_processed is a number between 1 and desc_count.
2523 + // Therefore the value can be desc_count, in which case the masking below
2524 + // changes it to zero.
2525 + result->timestamps[i].desc_num_processed = result->timestamps[i].desc_num_processed &
2526 + channel->state.desc_count_mask;
2527 + i++;
2528 + }
2529 +
2530 + result->timestamps_count = i;
2531 +}
2532 +
2533 +static void channel_state_init(struct hailo_vdma_channel_state *state)
2534 +{
2535 + state->num_avail = state->num_proc = 0;
2536 +
2537 + // Special value used when the channel is not active.
2538 + state->desc_count_mask = U32_MAX;
2539 +}
2540 +
2541 +void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
2542 + const struct hailo_resource *channel_registers)
2543 +{
2544 + u8 channel_index = 0;
2545 + struct hailo_vdma_channel *channel;
2546 +
2547 + engine->index = engine_index;
2548 + engine->enabled_channels = 0x0;
2549 + engine->interrupted_channels = 0x0;
2550 +
2551 + for_each_vdma_channel(engine, channel, channel_index) {
2552 + u8 __iomem *regs_base = (u8 __iomem *)channel_registers->address;
2553 + channel->host_regs = regs_base + CHANNEL_HOST_OFFSET(channel_index);
2554 + channel->device_regs = regs_base + CHANNEL_DEVICE_OFFSET(channel_index);
2555 + channel->index = channel_index;
2556 + channel->timestamp_measure_enabled = false;
2557 +
2558 + channel_state_init(&channel->state);
2559 + channel->last_desc_list = NULL;
2560 +
2561 + channel->ongoing_transfers.head = 0;
2562 + channel->ongoing_transfers.tail = 0;
2563 + }
2564 +}
2565 +
2566 +void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
2567 + bool measure_timestamp)
2568 +{
2569 + struct hailo_vdma_channel *channel = NULL;
2570 + u8 channel_index = 0;
2571 +
2572 + for_each_vdma_channel(engine, channel, channel_index) {
2573 + if (hailo_test_bit(channel_index, &bitmap)) {
2574 + channel->timestamp_measure_enabled = measure_timestamp;
2575 + channel->timestamp_list.head = channel->timestamp_list.tail = 0;
2576 + }
2577 + }
2578 +
2579 + engine->enabled_channels |= bitmap;
2580 +}
2581 +
2582 +void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
2583 +{
2584 + struct hailo_vdma_channel *channel = NULL;
2585 + u8 channel_index = 0;
2586 +
2587 + engine->enabled_channels &= ~bitmap;
2588 +
2589 + for_each_vdma_channel(engine, channel, channel_index) {
2590 + channel_state_init(&channel->state);
2591 +
2592 + while (ONGOING_TRANSFERS_CIRC_CNT(channel->ongoing_transfers) > 0) {
2593 + struct hailo_ongoing_transfer transfer;
2594 + ongoing_transfer_pop(channel, &transfer);
2595 +
2596 + if (channel->last_desc_list == NULL) {
2597 + pr_err("Channel %d has ongoing transfers but no desc list\n", channel->index);
2598 + continue;
2599 + }
2600 +
2601 + clear_dirty_descs(channel, &transfer);
2602 + }
2603 +
2604 + channel->last_desc_list = NULL;
2605 + }
2606 +}
2607 +
2608 +void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap)
2609 +{
2610 + struct hailo_vdma_channel *channel = NULL;
2611 + u8 channel_index = 0;
2612 +
2613 + for_each_vdma_channel(engine, channel, channel_index) {
2614 + if (unlikely(hailo_test_bit(channel_index, &bitmap) &&
2615 + channel->timestamp_measure_enabled)) {
2616 + hailo_vdma_push_timestamp(channel);
2617 + }
2618 + }
2619 +}
2620 +
2621 +int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
2622 + struct hailo_vdma_interrupts_read_timestamp_params *params)
2623 +{
2624 + struct hailo_vdma_channel *channel = NULL;
2625 +
2626 + if (params->channel_index >= MAX_VDMA_CHANNELS_PER_ENGINE) {
2627 + return -EINVAL;
2628 + }
2629 +
2630 + channel = &engine->channels[params->channel_index];
2631 + hailo_vdma_pop_timestamps_to_response(channel, params);
2632 + return 0;
2633 +}
2634 +
2635 +void hailo_vdma_engine_clear_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
2636 +{
2637 + engine->interrupted_channels &= ~bitmap;
2638 +}
2639 +
2640 +void hailo_vdma_engine_set_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
2641 +{
2642 + engine->interrupted_channels |= bitmap;
2643 +}
2644 +
2645 +static void fill_channel_irq_data(struct hailo_vdma_interrupts_channel_data *irq_data,
2646 + struct hailo_vdma_engine *engine, struct hailo_vdma_channel *channel, u16 num_proc,
2647 + bool validation_success)
2648 +{
2649 + u8 host_control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
2650 + u8 device_control = ioread8(channel->device_regs + CHANNEL_CONTROL_OFFSET);
2651 +
2652 + irq_data->engine_index = engine->index;
2653 + irq_data->channel_index = channel->index;
2654 +
2655 + irq_data->is_active = channel_control_reg_is_active(host_control) &&
2656 + channel_control_reg_is_active(device_control);
2657 +
2658 + irq_data->host_num_processed = num_proc;
2659 + irq_data->host_error = ioread8(channel->host_regs + CHANNEL_ERROR_OFFSET);
2660 + irq_data->device_error = ioread8(channel->device_regs + CHANNEL_ERROR_OFFSET);
2661 + irq_data->validation_success = validation_success;
2662 +}
2663 +
2664 +static bool is_desc_between(u16 begin, u16 end, u16 desc)
2665 +{
2666 + if (begin == end) {
2667 + // There is nothing between
2668 + return false;
2669 + }
2670 + if (begin < end) {
2671 + // desc needs to be in [begin, end)
2672 + return (begin <= desc) && (desc < end);
2673 + }
2674 + else {
2675 + // desc needs to be in [0, end) or [begin, desc_count-1]
2676 + return (desc < end) || (begin <= desc);
2677 + }
2678 +}
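// Editor's note: wrap-around example -- on an assumed 256-descriptor ring
// with begin == 250 and end == 10, descriptors 250..255 and 0..9 are
// "between": is_desc_between(250, 10, 3) is true while
// is_desc_between(250, 10, 100) is false.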
2679 +
2680 +static bool is_transfer_complete(struct hailo_vdma_channel *channel,
2681 + struct hailo_ongoing_transfer *transfer, u16 hw_num_proc)
2682 +{
2683 + if (channel->state.num_avail == hw_num_proc) {
2684 + return true;
2685 + }
2686 +
2687 + return is_desc_between(channel->state.num_proc, hw_num_proc, transfer->last_desc);
2688 +}
2689 +
2690 +int hailo_vdma_engine_fill_irq_data(struct hailo_vdma_interrupts_wait_params *irq_data,
2691 + struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
2692 + transfer_done_cb_t transfer_done, void *transfer_done_opaque)
2693 +{
2694 + struct hailo_vdma_channel *channel = NULL;
2695 + u8 channel_index = 0;
2696 + bool validation_success = true;
2697 +
2698 + for_each_vdma_channel(engine, channel, channel_index) {
2699 + u16 hw_num_proc = U16_MAX;
2700 + if (!hailo_test_bit(channel->index, &irq_channels_bitmap)) {
2701 + continue;
2702 + }
2703 +
2704 + if (channel->last_desc_list == NULL) {
2705 + // Channel not active or no transfer, skipping.
2706 + continue;
2707 + }
2708 +
2709 + if (irq_data->channels_count >= ARRAY_SIZE(irq_data->irq_data)) {
2710 + return -EINVAL;
2711 + }
2712 +
2713 + // Although the hw_num_processed should be a number between 0 and
2714 + // desc_count-1, if desc_count < 0x10000 (the maximum desc size),
2715 + // the actual hw_num_processed is a number between 1 and desc_count.
2716 + // Therefore the value can be desc_count, in which case the masking below
2717 + // changes it to zero.
2718 + hw_num_proc = get_num_proc(channel->host_regs) & channel->state.desc_count_mask;
2719 +
2720 + while (ONGOING_TRANSFERS_CIRC_CNT(channel->ongoing_transfers) > 0) {
2721 + struct hailo_ongoing_transfer *cur_transfer =
2722 + &channel->ongoing_transfers.transfers[channel->ongoing_transfers.tail];
2723 + if (!is_transfer_complete(channel, cur_transfer, hw_num_proc)) {
2724 + break;
2725 + }
2726 +
2727 + if (cur_transfer->is_debug &&
2728 + !validate_last_desc_status(channel, cur_transfer)) {
2729 + validation_success = false;
2730 + }
2731 +
2732 + clear_dirty_descs(channel, cur_transfer);
2733 + transfer_done(cur_transfer, transfer_done_opaque);
2734 + channel->state.num_proc = (u16)((cur_transfer->last_desc + 1) & channel->state.desc_count_mask);
2735 +
2736 + ongoing_transfer_pop(channel, NULL);
2737 + }
2738 +
2739 + fill_channel_irq_data(&irq_data->irq_data[irq_data->channels_count],
2740 + engine, channel, hw_num_proc, validation_success);
2741 + irq_data->channels_count++;
2742 + }
2743 +
2744 + return 0;
2745 +}
2746 \ No newline at end of file
2747 --- /dev/null
2748 +++ b/drivers/media/pci/hailo/common/vdma_common.h
2749 @@ -0,0 +1,243 @@
2750 +// SPDX-License-Identifier: GPL-2.0
2751 +/**
2752 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2753 + **/
2754 +
2755 +#ifndef _HAILO_COMMON_VDMA_COMMON_H_
2756 +#define _HAILO_COMMON_VDMA_COMMON_H_
2757 +
2758 +#include "hailo_resource.h"
2759 +#include "utils.h"
2760 +
2761 +#include <linux/types.h>
2762 +#include <linux/scatterlist.h>
2763 +#include <linux/io.h>
2764 +
2765 +#define VDMA_DESCRIPTOR_LIST_ALIGN (1 << 16)
2766 +#define INVALID_VDMA_ADDRESS (0)
2767 +
2768 +#ifdef __cplusplus
2769 +extern "C"
2770 +{
2771 +#endif
2772 +
2773 +struct hailo_vdma_descriptor {
2774 + u32 PageSize_DescControl;
2775 + u32 AddrL_rsvd_DataID;
2776 + u32 AddrH;
2777 + u32 RemainingPageSize_Status;
2778 +};
2779 +
2780 +struct hailo_vdma_descriptors_list {
2781 + struct hailo_vdma_descriptor *desc_list;
2782 + u32 desc_count; // Must be power of 2 if is_circular is set.
2783 + u16 desc_page_size;
2784 + bool is_circular;
2785 +};
2786 +
2787 +struct hailo_channel_interrupt_timestamp_list {
2788 + int head;
2789 + int tail;
2790 + struct hailo_channel_interrupt_timestamp timestamps[CHANNEL_IRQ_TIMESTAMPS_SIZE];
2791 +};
2792 +
2793 +
2794 +// For each buffer in a transfer, the last descriptor will be programmed with
2795 +// the residue size. In addition, if configured, the first descriptor (of the
2796 +// whole transfer) may be programmed with interrupts.
2797 +#define MAX_DIRTY_DESCRIPTORS_PER_TRANSFER \
2798 + (HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1)
2799 +
2800 +struct hailo_vdma_mapped_transfer_buffer {
2801 + struct sg_table *sg_table;
2802 + u32 size;
2803 + u32 offset;
2804 + void *opaque; // Drivers can set any opaque data here.
2805 +};
2806 +
2807 +struct hailo_ongoing_transfer {
2808 + uint16_t last_desc;
2809 +
2810 + u8 buffers_count;
2811 + struct hailo_vdma_mapped_transfer_buffer buffers[HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER];
2812 +
2813 + // Contains all descriptors that were programmed with non-default values
2814 + // for the transfer (by non-default we mean - different size or different
2815 + // interrupts domain).
2816 + uint8_t dirty_descs_count;
2817 + uint16_t dirty_descs[MAX_DIRTY_DESCRIPTORS_PER_TRANSFER];
2818 +
2819 + // If set, validate descriptors status on transfer completion.
2820 + bool is_debug;
2821 +};
2822 +
2823 +struct hailo_ongoing_transfers_list {
2824 + unsigned long head;
2825 + unsigned long tail;
2826 + struct hailo_ongoing_transfer transfers[HAILO_VDMA_MAX_ONGOING_TRANSFERS];
2827 +};
2828 +
2829 +struct hailo_vdma_channel_state {
2830 + // vdma channel counters. num_avail should be synchronized with the hw
2831 + // num_avail value. num_proc is the last num proc updated when the user
2832 + // reads interrupts.
2833 + u16 num_avail;
2834 + u16 num_proc;
2835 +
2836 + // Mask of the num-avail/num-proc counters.
2837 + u32 desc_count_mask;
2838 +};
2839 +
2840 +struct hailo_vdma_channel {
2841 + u8 index;
2842 +
2843 + u8 __iomem *host_regs;
2844 + u8 __iomem *device_regs;
2845 +
2846 + // Last descriptors list attached to the channel. When it changes, we
2847 + // assume that the channel was reset.
2848 + struct hailo_vdma_descriptors_list *last_desc_list;
2849 +
2850 + struct hailo_vdma_channel_state state;
2851 + struct hailo_ongoing_transfers_list ongoing_transfers;
2852 +
2853 + bool timestamp_measure_enabled;
2854 + struct hailo_channel_interrupt_timestamp_list timestamp_list;
2855 +};
2856 +
2857 +struct hailo_vdma_engine {
2858 + u8 index;
2859 + u32 enabled_channels;
2860 + u32 interrupted_channels;
2861 + struct hailo_vdma_channel channels[MAX_VDMA_CHANNELS_PER_ENGINE];
2862 +};
2863 +
2864 +struct hailo_vdma_hw_ops {
2865 + // Accepts a dma_addr_t mapped to the device and encodes it using a
2866 + // hw-specific encoding. Returns INVALID_VDMA_ADDRESS on failure.
2867 + u64 (*encode_desc_dma_address)(dma_addr_t dma_address, u8 channel_id);
2868 +};
2869 +
2870 +struct hailo_vdma_hw {
2871 + struct hailo_vdma_hw_ops hw_ops;
2872 +
2873 + // The data_id code of ddr addresses.
2874 + u8 ddr_data_id;
2875 +
2876 + // Bitmask that must be set on each descriptor to enable interrupts (host or device).
2877 + unsigned long host_interrupts_bitmask;
2878 + unsigned long device_interrupts_bitmask;
2879 +};
2880 +
2881 +#define _for_each_element_array(array, size, element, index) \
2882 + for (index = 0, element = &array[index]; index < size; index++, element = &array[index])
2883 +
2884 +#define for_each_vdma_channel(engine, channel, channel_index) \
2885 + _for_each_element_array(engine->channels, MAX_VDMA_CHANNELS_PER_ENGINE, \
2886 + channel, channel_index)
2887 +
2888 +void hailo_vdma_program_descriptor(struct hailo_vdma_descriptor *descriptor, u64 dma_address, size_t page_size,
2889 + u8 data_id);
2890 +
2891 +/**
2892 + * Program the given descriptors list to map the given buffer.
2893 + *
2894 + * @param vdma_hw vdma hw object
2895 + * @param desc_list descriptors list object to program
2896 + * @param starting_desc index of the first descriptor to program. If the list
2897 + * is circular, this function may wrap around the list.
2898 + * @param buffer buffer to program to the descriptors list.
2899 + * @param channel_index channel index of the channel attached.
2900 + *
2901 + * @return On success - the amount of descriptors programmed, negative value on error.
2902 + */
2903 +int hailo_vdma_program_descriptors_list(
2904 + struct hailo_vdma_hw *vdma_hw,
2905 + struct hailo_vdma_descriptors_list *desc_list,
2906 + u32 starting_desc,
2907 + struct hailo_vdma_mapped_transfer_buffer *buffer,
2908 + u8 channel_index);
2909 +
2910 +/**
2911 + * Launch a transfer on some vdma channel. Includes:
2912 + * 1. Binding the transfer buffers to the descriptors list.
2913 + * 2. Program the descriptors list.
2914 + * 3. Increase num available
2915 + *
2916 + * @param vdma_hw vdma hw object
2917 + * @param channel vdma channel object.
2918 + * @param desc_list descriptors list object to program.
2919 + * @param starting_desc index of the first descriptor to program.
2920 + * @param buffers_count amount of transfer mapped buffers to program.
2921 + * @param buffers array of buffers to program to the descriptors list.
2922 + * @param should_bind whether to bind the buffer to the descriptors list.
2923 + * @param first_interrupts_domain - interrupts settings on first descriptor.
2924 + * @param last_desc_interrupts - interrupts settings on last descriptor.
2925 + * @param is_debug program descriptors for debug run, adds some overhead (for
2926 + * example, hw will write desc complete status).
2927 + *
2928 + * @return On success - the amount of descriptors programmed, negative value on error.
2929 + */
2930 +int hailo_vdma_launch_transfer(
2931 + struct hailo_vdma_hw *vdma_hw,
2932 + struct hailo_vdma_channel *channel,
2933 + struct hailo_vdma_descriptors_list *desc_list,
2934 + u32 starting_desc,
2935 + u8 buffers_count,
2936 + struct hailo_vdma_mapped_transfer_buffer *buffers,
2937 + bool should_bind,
2938 + enum hailo_vdma_interrupts_domain first_interrupts_domain,
2939 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
2940 + bool is_debug);
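// Editor's note: a hypothetical call sketch (not part of this patch). It
// assumes the caller already activated `channel`, allocated `desc_list` and
// DMA-mapped the buffer's sg_table; HAILO_VDMA_INTERRUPTS_DOMAIN_NONE is
// assumed to be the enum's "no interrupt" value.
//
//   struct hailo_vdma_mapped_transfer_buffer buffer = {
//       .sg_table = mapped_sgt,
//       .size = transfer_size,
//       .offset = 0,
//   };
//   int descs = hailo_vdma_launch_transfer(&hailo_pcie_vdma_hw, channel,
//       desc_list, channel->state.num_avail, 1, &buffer, true /* bind */,
//       HAILO_VDMA_INTERRUPTS_DOMAIN_NONE, HAILO_VDMA_INTERRUPTS_DOMAIN_HOST,
//       false /* is_debug */);
//   if (descs < 0) {
//       // handle error
//   }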
2941 +
2942 +void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
2943 + const struct hailo_resource *channel_registers);
2944 +
2945 +// Enable/disable channel interrupts (does not update the interrupt mask because the
2946 +// implementation differs between PCIe and DRAM DMA; to support it, an
2947 +// ops struct could be added to the engine).
2948 +void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
2949 + bool measure_timestamp);
2950 +void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
2951 +
2952 +void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap);
2953 +int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
2954 + struct hailo_vdma_interrupts_read_timestamp_params *params);
2955 +
2956 +static inline bool hailo_vdma_engine_got_interrupt(struct hailo_vdma_engine *engine,
2957 + u32 channels_bitmap)
2958 +{
2959 + // Reading interrupts without a lock is ok (a lock is needed only for writes)
2960 + const bool any_interrupt = (0 != (channels_bitmap & engine->interrupted_channels));
2961 + const bool any_disabled = (channels_bitmap != (channels_bitmap & engine->enabled_channels));
2962 + return (any_disabled || any_interrupt);
2963 +}
2964 +
2965 +// Set/clear/read channel interrupts; must be called under a lock (driver specific)
2966 +void hailo_vdma_engine_clear_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
2967 +void hailo_vdma_engine_set_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
2968 +
2969 +static inline u32 hailo_vdma_engine_read_interrupts(struct hailo_vdma_engine *engine,
2970 + u32 requested_bitmap)
2971 +{
2972 + // Interrupts only for channels that are requested and enabled.
2973 + u32 irq_channels_bitmap = requested_bitmap &
2974 + engine->enabled_channels &
2975 + engine->interrupted_channels;
2976 + engine->interrupted_channels &= ~irq_channels_bitmap;
2977 +
2978 + return irq_channels_bitmap;
2979 +}
2980 +
2981 +typedef void(*transfer_done_cb_t)(struct hailo_ongoing_transfer *transfer, void *opaque);
2982 +
2983 +// Assumes irq_data->channels_count contains the number of channels already
2984 +// written (used when there are multiple engines).
2985 +int hailo_vdma_engine_fill_irq_data(struct hailo_vdma_interrupts_wait_params *irq_data,
2986 + struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
2987 + transfer_done_cb_t transfer_done, void *transfer_done_opaque);
2988 +
2989 +#ifdef __cplusplus
2990 +}
2991 +#endif
2992 +#endif /* _HAILO_COMMON_VDMA_COMMON_H_ */
2993 --- /dev/null
2994 +++ b/drivers/media/pci/hailo/include/hailo_pcie_version.h
2995 @@ -0,0 +1,14 @@
2996 +// SPDX-License-Identifier: GPL-2.0
2997 +/**
2998 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2999 + **/
3000 +
3001 +#ifndef _HAILO_PCIE_VERSION_H_
3002 +#define _HAILO_PCIE_VERSION_H_
3003 +
3004 +#include <linux/stringify.h>
3005 +#include "../common/hailo_pcie_version.h"
3006 +
3007 +#define HAILO_DRV_VER __stringify(HAILO_DRV_VER_MAJOR) "." __stringify(HAILO_DRV_VER_MINOR) "." __stringify(HAILO_DRV_VER_REVISION)
3008 +
3009 +#endif /* _HAILO_PCIE_VERSION_H_ */
3010 --- /dev/null
3011 +++ b/drivers/media/pci/hailo/src/fops.c
3012 @@ -0,0 +1,736 @@
3013 +// SPDX-License-Identifier: GPL-2.0
3014 +/**
3015 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3016 + **/
3017 +
3018 +#include <linux/version.h>
3019 +#include <linux/pci.h>
3020 +#include <linux/interrupt.h>
3021 +#include <linux/sched.h>
3022 +#include <linux/pagemap.h>
3023 +#include <linux/uaccess.h>
3024 +#include <linux/scatterlist.h>
3025 +#include <linux/slab.h>
3026 +#include <linux/delay.h>
3027 +
3028 +#include <asm/thread_info.h>
3029 +
3030 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
3031 +#include <linux/sched/signal.h>
3032 +#endif
3033 +
3034 +#include "hailo_pcie_version.h"
3035 +#include "utils.h"
3036 +#include "fops.h"
3037 +#include "vdma_common.h"
3038 +#include "utils/logs.h"
3039 +#include "vdma/memory.h"
3040 +#include "vdma/ioctl.h"
3041 +#include "utils/compact.h"
3042 +
3043 +
3044 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 13, 0 )
3045 +#define wait_queue_t wait_queue_entry_t
3046 +#endif
3047 +
3048 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 15, 0 )
3049 +#define ACCESS_ONCE READ_ONCE
3050 +#endif
3051 +
3052 +#ifndef VM_RESERVED
3053 + #define VMEM_FLAGS (VM_IO | VM_DONTEXPAND | VM_DONTDUMP)
3054 +#else
3055 + #define VMEM_FLAGS (VM_IO | VM_RESERVED)
3056 +#endif
3057 +
3058 +#define IS_PO2_ALIGNED(size, alignment) (!((size) & ((alignment) - 1)))
3059 +
3060 +// On the PCIe driver there is only one DMA engine
3061 +#define DEFAULT_VDMA_ENGINE_INDEX (0)
3062 +
3063 +#if !defined(HAILO_EMULATOR)
3064 +#define DEFAULT_SHUTDOWN_TIMEOUT_MS (5)
3065 +#else /* !defined(HAILO_EMULATOR) */
3066 +#define DEFAULT_SHUTDOWN_TIMEOUT_MS (1000)
3067 +#endif /* !defined(HAILO_EMULATOR) */
3068 +
3069 +static long hailo_add_notification_wait(struct hailo_pcie_board *board, struct file *filp);
3070 +
3071 +static struct hailo_file_context *create_file_context(struct hailo_pcie_board *board, struct file *filp)
3072 +{
3073 + struct hailo_file_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
3074 + if (!context) {
3075 + hailo_err(board, "Failed to alloc file context (required size %zu)\n", sizeof(*context));
3076 + return ERR_PTR(-ENOMEM);
3077 + }
3078 +
3079 + context->filp = filp;
3080 + hailo_vdma_file_context_init(&context->vdma_context);
3081 + list_add(&context->open_files_list, &board->open_files_list);
3082 + context->is_valid = true;
3083 + return context;
3084 +}
3085 +
3086 +static void release_file_context(struct hailo_file_context *context)
3087 +{
3088 + context->is_valid = false;
3089 + list_del(&context->open_files_list);
3090 + kfree(context);
3091 +}
3092 +
3093 +static struct hailo_file_context *find_file_context(struct hailo_pcie_board *board, struct file *filp)
3094 +{
3095 + struct hailo_file_context *cur = NULL;
3096 + list_for_each_entry(cur, &board->open_files_list, open_files_list) {
3097 + if (cur->filp == filp) {
3098 + return cur;
3099 + }
3100 + }
3101 + return NULL;
3102 +}
3103 +
3104 +int hailo_pcie_fops_open(struct inode *inode, struct file *filp)
3105 +{
3106 + u32 major = MAJOR(inode->i_rdev);
3107 + u32 minor = MINOR(inode->i_rdev);
3108 + struct hailo_pcie_board *pBoard;
3109 + int err = 0;
3110 + pci_power_t previous_power_state = PCI_UNKNOWN;
3111 + bool interrupts_enabled_by_filp = false;
3112 + struct hailo_file_context *context = NULL;
3113 +
3114 + pr_debug(DRIVER_NAME ": (%d: %d-%d): fops_open\n", current->tgid, major, minor);
3115 +
3116 + // allow multiple processes to open a device, count references in hailo_pcie_get_board_index.
3117 + if (!(pBoard = hailo_pcie_get_board_index(minor))) {
3118 + pr_err(DRIVER_NAME ": fops_open: PCIe board not found for /dev/hailo%d node.\n", minor);
3119 + err = -ENODEV;
3120 + goto l_exit;
3121 + }
3122 +
3123 + filp->private_data = pBoard;
3124 +
3125 + if (down_interruptible(&pBoard->mutex)) {
3126 + hailo_err(pBoard, "fops_open down_interruptible fail tgid:%d\n", current->tgid);
3127 + err = -ERESTARTSYS;
3128 + goto l_decrease_ref_count;
3129 + }
3130 +
3131 + context = create_file_context(pBoard, filp);
3132 + if (IS_ERR(context)) {
3133 + err = PTR_ERR(context);
3134 + goto l_release_mutex;
3135 + }
3136 +
3137 + previous_power_state = pBoard->pDev->current_state;
3138 + if (PCI_D0 != previous_power_state) {
3139 + hailo_info(pBoard, "Waking up board");
3140 + err = pci_set_power_state(pBoard->pDev, PCI_D0);
3141 + if (err < 0) {
3142 + hailo_err(pBoard, "Failed waking up board %d", err);
3143 + goto l_free_context;
3144 + }
3145 + }
3146 +
3147 + if (!hailo_pcie_is_device_connected(&pBoard->pcie_resources)) {
3148 + hailo_err(pBoard, "Device disconnected while opening device\n");
3149 + err = -ENXIO;
3150 + goto l_revert_power_state;
3151 + }
3152 +
3153 + // enable interrupts
3154 + if (!pBoard->interrupts_enabled) {
3155 + err = hailo_enable_interrupts(pBoard);
3156 + if (err < 0) {
3157 + hailo_err(pBoard, "Failed Enabling interrupts %d\n", err);
3158 + goto l_revert_power_state;
3159 + }
3160 + interrupts_enabled_by_filp = true;
3161 + }
3162 +
3163 + err = hailo_add_notification_wait(pBoard, filp);
3164 + if (err < 0) {
3165 + goto l_release_irq;
3166 + }
3167 +
3168 + hailo_dbg(pBoard, "(%d: %d-%d): fops_open: SUCCESS on /dev/hailo%d\n", current->tgid,
3169 + major, minor, minor);
3170 +
3171 + up(&pBoard->mutex);
3172 + return 0;
3173 +
3174 +l_release_irq:
3175 + if (interrupts_enabled_by_filp) {
3176 + hailo_disable_interrupts(pBoard);
3177 + }
3178 +
3179 +l_revert_power_state:
3180 + if (pBoard->pDev->current_state != previous_power_state) {
3181 + if (pci_set_power_state(pBoard->pDev, previous_power_state) < 0) {
3182 + hailo_err(pBoard, "Failed setting power state back to %d\n", (int)previous_power_state);
3183 + }
3184 + }
3185 +l_free_context:
3186 + release_file_context(context);
3187 +l_release_mutex:
3188 + up(&pBoard->mutex);
3189 +l_decrease_ref_count:
3190 + atomic_dec(&pBoard->ref_count);
3191 +l_exit:
3192 + return err;
3193 +}
3194 +
3195 +int hailo_pcie_driver_down(struct hailo_pcie_board *board)
3196 +{
3197 + long completion_result = 0;
3198 + int err = 0;
3199 +
3200 + reinit_completion(&board->driver_down.reset_completed);
3201 +
3202 + hailo_pcie_write_firmware_driver_shutdown(&board->pcie_resources);
3203 +
3204 + // Wait for response
3205 + completion_result =
3206 + wait_for_completion_timeout(&board->driver_down.reset_completed, msecs_to_jiffies(DEFAULT_SHUTDOWN_TIMEOUT_MS));
3207 + if (completion_result <= 0) {
3208 + if (0 == completion_result) {
3209 + hailo_err(board, "hailo_pcie_driver_down, timeout waiting for shutdown response (timeout_ms=%d)\n", DEFAULT_SHUTDOWN_TIMEOUT_MS);
3210 + err = -ETIMEDOUT;
3211 + } else {
3212 + hailo_info(board, "hailo_pcie_driver_down, wait for completion failed with err=%ld (process was interrupted or killed)\n",
3213 + completion_result);
3214 + err = completion_result;
3215 + }
3216 + goto l_exit;
3217 + }
3218 +
3219 +l_exit:
3220 + return err;
3221 +}
3222 +
3223 +int hailo_pcie_fops_release(struct inode *inode, struct file *filp)
3224 +{
3225 + struct hailo_pcie_board *pBoard = (struct hailo_pcie_board *)filp->private_data;
3226 + struct hailo_file_context *context = NULL;
3227 +
3228 + u32 major = MAJOR(inode->i_rdev);
3229 + u32 minor = MINOR(inode->i_rdev);
3230 +
3231 + if (pBoard) {
3232 + hailo_info(pBoard, "(%d: %d-%d): fops_release\n", current->tgid, major, minor);
3233 +
3234 + if (down_interruptible(&pBoard->mutex)) {
3235 + hailo_err(pBoard, "fops_release down_interruptible failed");
3236 + return -ERESTARTSYS;
3237 + }
3238 +
3239 + context = find_file_context(pBoard, filp);
3240 + if (NULL == context) {
3241 + hailo_err(pBoard, "Invalid driver state, file context does not exist\n");
3242 + up(&pBoard->mutex);
3243 + return -EINVAL;
3244 + }
3245 +
3246 + if (false == context->is_valid) {
3247 + // The file context is invalid but still open; it is OK to finalize and release it anyway.
3248 + hailo_err(pBoard, "Invalid file context\n");
3249 + }
3250 +
3251 + hailo_pcie_clear_notification_wait_list(pBoard, filp);
3252 +
3253 + if (filp == pBoard->vdma.used_by_filp) {
3254 + if (hailo_pcie_driver_down(pBoard)) {
3255 + hailo_err(pBoard, "Failed sending FW shutdown event");
3256 + }
3257 + }
3258 +
3259 + hailo_vdma_file_context_finalize(&context->vdma_context, &pBoard->vdma, filp);
3260 + release_file_context(context);
3261 +
3262 + if (atomic_dec_and_test(&pBoard->ref_count)) {
3263 + // Disable interrupts
3264 + hailo_disable_interrupts(pBoard);
3265 +
3266 + if (power_mode_enabled()) {
3267 + if (pBoard->pDev && pci_set_power_state(pBoard->pDev, PCI_D3hot) < 0) {
3268 + hailo_err(pBoard, "Failed setting power state to D3hot");
3269 + }
3270 + }
3271 +
3272 + // deallocate board if already removed
3273 + if (!pBoard->pDev) {
3274 + hailo_dbg(pBoard, "fops_close, freed board\n");
3275 + up(&pBoard->mutex);
3276 + kfree(pBoard);
3277 + pBoard = NULL;
3278 + } else {
3279 +
3280 + hailo_dbg(pBoard, "fops_close, released resources for board\n");
3281 + up(&pBoard->mutex);
3282 + }
3283 + } else {
3284 + up(&pBoard->mutex);
3285 + }
3286 +
3287 + if (pBoard) // pBoard may have been freed and NULLed above on last close after removal
3288 + hailo_dbg(pBoard, "(%d: %d-%d): fops_close: SUCCESS on /dev/hailo%d\n", current->tgid, major, minor, minor);
3289 + }
3290 +
3291 + return 0;
3292 +}
3293 +
3294 +static long hailo_memory_transfer_ioctl(struct hailo_pcie_board *board, unsigned long arg)
3295 +{
3296 + long err = 0;
3297 + struct hailo_memory_transfer_params* transfer = &board->memory_transfer_params;
3298 +
3299 + hailo_dbg(board, "Start memory transfer ioctl\n");
3300 +
3301 + if (copy_from_user(transfer, (void __user*)arg, sizeof(*transfer))) {
3302 + hailo_err(board, "copy_from_user fail\n");
3303 + return -ENOMEM;
3304 + }
3305 +
3306 + err = hailo_pcie_memory_transfer(&board->pcie_resources, transfer);
3307 + if (err < 0) {
3308 + hailo_err(board, "memory transfer failed %ld", err);
3309 + }
3310 +
3311 + if (copy_to_user((void __user*)arg, transfer, sizeof(*transfer))) {
3312 + hailo_err(board, "copy_to_user fail\n");
3313 + return -ENOMEM;
3314 + }
3315 +
3316 + return err;
3317 +}
3318 +
3319 +static long hailo_read_log_ioctl(struct hailo_pcie_board *pBoard, unsigned long arg)
3320 +{
3321 + long err = 0;
3322 + struct hailo_read_log_params params;
3323 +
3324 + if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
3325 + hailo_err(pBoard, "HAILO_READ_LOG, copy_from_user fail\n");
3326 + return -ENOMEM;
3327 + }
3328 +
3329 + if (0 > (err = hailo_pcie_read_firmware_log(&pBoard->pcie_resources, &params))) {
3330 + hailo_err(pBoard, "HAILO_READ_LOG, reading from log failed with error: %ld\n", err);
3331 + return err;
3332 + }
3333 +
3334 + if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
3335 + return -ENOMEM;
3336 + }
3337 +
3338 + return 0;
3339 +}
3340 +
3341 +static void firmware_notification_irq_handler(struct hailo_pcie_board *board)
3342 +{
3343 + struct hailo_notification_wait *notif_wait_cursor = NULL;
3344 + int err = 0;
3345 + unsigned long irq_saved_flags = 0;
3346 +
3347 + spin_lock_irqsave(&board->notification_read_spinlock, irq_saved_flags);
3348 + err = hailo_pcie_read_firmware_notification(&board->pcie_resources, &board->notification_cache);
3349 + spin_unlock_irqrestore(&board->notification_read_spinlock, irq_saved_flags);
3350 +
3351 + if (err < 0) {
3352 + hailo_err(board, "Failed reading firmware notification");
3353 + }
3354 + else {
3355 + rcu_read_lock();
3356 + list_for_each_entry_rcu(notif_wait_cursor, &board->notification_wait_list, notification_wait_list)
3357 + {
3358 + complete(&notif_wait_cursor->notification_completion);
3359 + }
3360 + rcu_read_unlock();
3361 + }
3362 +}
3363 +
3364 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
3365 +irqreturn_t hailo_irqhandler(int irq, void *dev_id, struct pt_regs *regs)
3366 +#else
3367 +irqreturn_t hailo_irqhandler(int irq, void *dev_id)
3368 +#endif
3369 +{
3370 + irqreturn_t return_value = IRQ_NONE;
3371 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_id;
3372 + bool got_interrupt = false;
3373 + struct hailo_pcie_interrupt_source irq_source = {0};
3374 +
3375 + hailo_dbg(board, "hailo_irqhandler\n");
3376 +
3377 + while (true) {
3378 + if (!hailo_pcie_is_device_connected(&board->pcie_resources)) {
3379 + hailo_err(board, "Device disconnected while handling irq\n");
3380 + break;
3381 + }
3382 +
3383 + got_interrupt = hailo_pcie_read_interrupt(&board->pcie_resources, &irq_source);
3384 + if (!got_interrupt) {
3385 + break;
3386 + }
3387 +
3388 + return_value = IRQ_HANDLED;
3389 +
3390 + // wake fw_control if needed
3391 + if (irq_source.interrupt_bitmask & FW_CONTROL) {
3392 + complete(&board->fw_control.completion);
3393 + }
3394 +
3395 + // wake driver_down if needed
3396 + if (irq_source.interrupt_bitmask & DRIVER_DOWN) {
3397 + complete(&board->driver_down.reset_completed);
3398 + }
3399 +
3400 + if (irq_source.interrupt_bitmask & FW_NOTIFICATION) {
3401 + if (!completion_done(&board->fw_loaded_completion)) {
3402 + // Complete firmware loaded completion
3403 + complete_all(&board->fw_loaded_completion);
3404 + } else {
3405 + firmware_notification_irq_handler(board);
3406 + }
3407 + }
3408 +
3409 + if (0 != irq_source.vdma_channels_bitmap) {
3410 + hailo_vdma_irq_handler(&board->vdma, DEFAULT_VDMA_ENGINE_INDEX,
3411 + irq_source.vdma_channels_bitmap);
3412 + }
3413 + }
3414 +
3415 + return return_value;
3416 +}
3417 +
3418 +static long hailo_get_notification_wait_thread(struct hailo_pcie_board *pBoard, struct file *filp,
3419 + struct hailo_notification_wait **current_waiting_thread)
3420 +{
3421 + struct hailo_notification_wait *cursor = NULL;
3422 + // note: safe to traverse without RCU because the notification_wait_list is only modified on open/release, both of which hold the board mutex
3423 + list_for_each_entry(cursor, &pBoard->notification_wait_list, notification_wait_list)
3424 + {
3425 + if ((current->tgid == cursor->tgid) && (filp == cursor->filp)) {
3426 + *current_waiting_thread = cursor;
3427 + return 0;
3428 + }
3429 + }
3430 +
3431 + return -EFAULT;
3432 +}
3433 +
3434 +static long hailo_add_notification_wait(struct hailo_pcie_board *board, struct file *filp)
3435 +{
3436 + struct hailo_notification_wait *new_notification_wait = NULL;
3437 + if (!(new_notification_wait = kmalloc(sizeof(*new_notification_wait), GFP_KERNEL))) {
3438 + hailo_err(board, "Failed to allocate notification wait structure.\n");
3439 + return -ENOMEM;
3440 + }
3441 + new_notification_wait->tgid = current->tgid;
3442 + new_notification_wait->filp = filp;
3443 + new_notification_wait->is_disabled = false;
3444 + init_completion(&new_notification_wait->notification_completion);
3445 + list_add_rcu(&new_notification_wait->notification_wait_list, &board->notification_wait_list);
3446 + return 0;
3447 +}
3448 +
3449 +static long hailo_read_notification_ioctl(struct hailo_pcie_board *pBoard, unsigned long arg, struct file *filp,
3450 + bool* should_up_board_mutex)
3451 +{
3452 + long err = 0;
3453 + struct hailo_notification_wait *current_waiting_thread = NULL;
3454 + struct hailo_d2h_notification *notification = &pBoard->notification_to_user;
3455 + unsigned long irq_saved_flags;
3456 +
3457 + err = hailo_get_notification_wait_thread(pBoard, filp, &current_waiting_thread);
3458 + if (0 != err) {
3459 + goto l_exit;
3460 + }
3461 + up(&pBoard->mutex);
3462 +
3463 + if (0 > (err = wait_for_completion_interruptible(&current_waiting_thread->notification_completion))) {
3464 + hailo_info(pBoard,
3465 + "HAILO_READ_NOTIFICATION - wait_for_completion_interruptible error. err=%ld. tgid=%d (process was interrupted or killed)\n",
3466 + err, current_waiting_thread->tgid);
3467 + *should_up_board_mutex = false;
3468 + goto l_exit;
3469 + }
3470 +
3471 + if (down_interruptible(&pBoard->mutex)) {
3472 + hailo_info(pBoard, "HAILO_READ_NOTIFICATION - down_interruptible error (process was interrupted or killed)\n");
3473 + *should_up_board_mutex = false;
3474 + err = -ERESTARTSYS;
3475 + goto l_exit;
3476 + }
3477 +
3478 + // Check if the wait was disabled while we were blocked
3479 + if (current_waiting_thread->is_disabled) {
3480 + hailo_info(pBoard, "HAILO_READ_NOTIFICATION, can't find notification wait for tgid=%d\n", current->tgid);
3481 + err = -EINVAL;
3482 + goto l_exit;
3483 + }
3484 +
3485 + reinit_completion(&current_waiting_thread->notification_completion);
3486 +
3487 + spin_lock_irqsave(&pBoard->notification_read_spinlock, irq_saved_flags);
3488 + notification->buffer_len = pBoard->notification_cache.buffer_len;
3489 + memcpy(notification->buffer, pBoard->notification_cache.buffer, notification->buffer_len);
3490 + spin_unlock_irqrestore(&pBoard->notification_read_spinlock, irq_saved_flags);
3491 +
3492 + if (copy_to_user((void __user*)arg, notification, sizeof(*notification))) {
3493 + hailo_err(pBoard, "HAILO_READ_NOTIFICATION copy_to_user fail\n");
3494 + err = -ENOMEM;
3495 + goto l_exit;
3496 + }
3497 +
3498 +l_exit:
3499 + return err;
3500 +}
3501 +
3502 +static long hailo_disable_notification(struct hailo_pcie_board *pBoard, struct file *filp)
3503 +{
3504 + struct hailo_notification_wait *cursor = NULL;
3505 +
3506 + hailo_info(pBoard, "HAILO_DISABLE_NOTIFICATION: disable notification");
3507 + rcu_read_lock();
3508 + list_for_each_entry_rcu(cursor, &pBoard->notification_wait_list, notification_wait_list) {
3509 + if ((current->tgid == cursor->tgid) && (filp == cursor->filp)) {
3510 + cursor->is_disabled = true;
3511 + complete(&cursor->notification_completion);
3512 + break;
3513 + }
3514 + }
3515 + rcu_read_unlock();
3516 +
3517 + return 0;
3518 +}
3519 +
3520 +static int hailo_fw_control(struct hailo_pcie_board *pBoard, unsigned long arg, bool* should_up_board_mutex)
3521 +{
3522 + struct hailo_fw_control *command = &pBoard->fw_control.command;
3523 + long completion_result = 0;
3524 + int err = 0;
3525 +
3526 + up(&pBoard->mutex);
3527 + *should_up_board_mutex = false;
3528 +
3529 + if (down_interruptible(&pBoard->fw_control.mutex)) {
3530 + hailo_info(pBoard, "hailo_fw_control down_interruptible fail tgid:%d (process was interrupted or killed)\n", current->tgid);
3531 + return -ERESTARTSYS;
3532 + }
3533 +
3534 + if (copy_from_user(command, (void __user*)arg, sizeof(*command))) {
3535 + hailo_err(pBoard, "hailo_fw_control, copy_from_user fail\n");
3536 + err = -ENOMEM;
3537 + goto l_exit;
3538 + }
3539 +
3540 + reinit_completion(&pBoard->fw_control.completion);
3541 +
3542 + err = hailo_pcie_write_firmware_control(&pBoard->pcie_resources, command);
3543 + if (err < 0) {
3544 + hailo_err(pBoard, "Failed writing fw control to pcie\n");
3545 + goto l_exit;
3546 + }
3547 +
3548 + // Wait for response
3549 + completion_result = wait_for_completion_interruptible_timeout(&pBoard->fw_control.completion, msecs_to_jiffies(command->timeout_ms));
3550 + if (completion_result <= 0) {
3551 + if (0 == completion_result) {
3552 + hailo_err(pBoard, "hailo_fw_control, timeout waiting for control (timeout_ms=%d)\n", command->timeout_ms);
3553 + err = -ETIMEDOUT;
3554 + } else {
3555 + hailo_info(pBoard, "hailo_fw_control, wait for completion failed with err=%ld (process was interrupted or killed)\n", completion_result);
3556 + err = -EINTR;
3557 + }
3558 + goto l_exit;
3559 + }
3560 +
3561 + err = hailo_pcie_read_firmware_control(&pBoard->pcie_resources, command);
3562 + if (err < 0) {
3563 + hailo_err(pBoard, "Failed reading fw control from pcie\n");
3564 + goto l_exit;
3565 + }
3566 +
3567 + if (copy_to_user((void __user*)arg, command, sizeof(*command))) {
3568 + hailo_err(pBoard, "hailo_fw_control, copy_to_user fail\n");
3569 + err = -ENOMEM;
3570 + goto l_exit;
3571 + }
3572 +
3573 +l_exit:
3574 + up(&pBoard->fw_control.mutex);
3575 + return err;
3576 +}
3577 +
3578 +static long hailo_query_device_properties(struct hailo_pcie_board *board, unsigned long arg)
3579 +{
3580 + struct hailo_device_properties props = {
3581 + .desc_max_page_size = board->desc_max_page_size,
3582 + .allocation_mode = board->allocation_mode,
3583 + .dma_type = HAILO_DMA_TYPE_PCIE,
3584 + .dma_engines_count = board->vdma.vdma_engines_count,
3585 + .is_fw_loaded = hailo_pcie_is_firmware_loaded(&board->pcie_resources),
3586 + };
3587 +
3588 + hailo_info(board, "HAILO_QUERY_DEVICE_PROPERTIES: desc_max_page_size=%u\n", props.desc_max_page_size);
3589 +
3590 + if (copy_to_user((void __user*)arg, &props, sizeof(props))) {
3591 + hailo_err(board, "HAILO_QUERY_DEVICE_PROPERTIES, copy_to_user failed\n");
3592 + return -ENOMEM;
3593 + }
3594 +
3595 + return 0;
3596 +}
3597 +
3598 +static long hailo_query_driver_info(struct hailo_pcie_board *board, unsigned long arg)
3599 +{
3600 + struct hailo_driver_info info = {
3601 + .major_version = HAILO_DRV_VER_MAJOR,
3602 + .minor_version = HAILO_DRV_VER_MINOR,
3603 + .revision_version = HAILO_DRV_VER_REVISION
3604 + };
3605 +
3606 + hailo_info(board, "HAILO_QUERY_DRIVER_INFO: major=%u, minor=%u, revision=%u\n",
3607 + info.major_version, info.minor_version, info.revision_version);
3608 +
3609 + if (copy_to_user((void __user*)arg, &info, sizeof(info))) {
3610 + hailo_err(board, "HAILO_QUERY_DRIVER_INFO, copy_to_user failed\n");
3611 + return -ENOMEM;
3612 + }
3613 +
3614 + return 0;
3615 +}
3616 +
3617 +static long hailo_general_ioctl(struct hailo_file_context *context, struct hailo_pcie_board *board,
3618 + unsigned int cmd, unsigned long arg, struct file *filp, bool *should_up_board_mutex)
3619 +{
3620 + switch (cmd) {
3621 + case HAILO_MEMORY_TRANSFER:
3622 + return hailo_memory_transfer_ioctl(board, arg);
3623 + case HAILO_FW_CONTROL:
3624 + return hailo_fw_control(board, arg, should_up_board_mutex);
3625 + case HAILO_READ_NOTIFICATION:
3626 + return hailo_read_notification_ioctl(board, arg, filp, should_up_board_mutex);
3627 + case HAILO_DISABLE_NOTIFICATION:
3628 + return hailo_disable_notification(board, filp);
3629 + case HAILO_QUERY_DEVICE_PROPERTIES:
3630 + return hailo_query_device_properties(board, arg);
3631 + case HAILO_QUERY_DRIVER_INFO:
3632 + return hailo_query_driver_info(board, arg);
3633 + case HAILO_READ_LOG:
3634 + return hailo_read_log_ioctl(board, arg);
3635 + default:
3636 + hailo_err(board, "Invalid general ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
3637 + return -ENOTTY;
3638 + }
3639 +}
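For orientation, this is roughly how the dispatch above is exercised from
userspace, using the HAILO_QUERY_DRIVER_INFO branch. The struct layout and the
ioctl encoding below are assumptions inferred from the handler code; the
authoritative definitions live in common/hailo_ioctl_common.h, which this
sketch does not reproduce:

/* Hypothetical userspace caller for the HAILO_QUERY_DRIVER_INFO branch.
 * The magic and request number are placeholders, NOT the real values. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

struct hailo_driver_info {      /* assumed layout, mirrors the fields filled above */
    uint32_t major_version;
    uint32_t minor_version;
    uint32_t revision_version;
};

#define HAILO_GENERAL_IOCTL_MAGIC 'g' /* placeholder */
#define HAILO_QUERY_DRIVER_INFO _IOR(HAILO_GENERAL_IOCTL_MAGIC, 0, struct hailo_driver_info)

int main(void)
{
    struct hailo_driver_info info = { 0 };
    int fd = open("/dev/hailo0", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    /* On success the driver fills info via copy_to_user(). */
    if (ioctl(fd, HAILO_QUERY_DRIVER_INFO, &info) < 0) {
        perror("ioctl");
        close(fd);
        return 1;
    }
    printf("driver %u.%u.%u\n", info.major_version, info.minor_version, info.revision_version);
    close(fd);
    return 0;
}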
3640 +
3641 +long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg)
3642 +{
3643 + long err = 0;
3644 + struct hailo_pcie_board* board = (struct hailo_pcie_board*) filp->private_data;
3645 + struct hailo_file_context *context = NULL;
3646 + bool should_up_board_mutex = true;
3647 +
3648 +
3649 + if (!board || !board->pDev) return -ENODEV;
3650 +
3651 + hailo_dbg(board, "(%d): fops_unlockedioctl. cmd:%d\n", current->tgid, _IOC_NR(cmd));
3652 +
3653 + if (_IOC_DIR(cmd) & _IOC_READ)
3654 + {
3655 + err = !compatible_access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
3656 + }
3657 + else if (_IOC_DIR(cmd) & _IOC_WRITE)
3658 + {
3659 + err = !compatible_access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
3660 + }
3661 +
3662 + if (err) {
3663 + hailo_err(board, "Invalid ioctl parameter access 0x%x", cmd);
3664 + return -EFAULT;
3665 + }
3666 +
3667 + if (down_interruptible(&board->mutex)) {
3668 + hailo_err(board, "unlockedioctl down_interruptible failed");
3669 + return -ERESTARTSYS;
3670 + }
3671 + BUG_ON(board->mutex.count != 0);
3672 +
3673 + context = find_file_context(board, filp);
3674 + if (NULL == context) {
3675 + hailo_err(board, "Invalid driver state, file context does not exist\n");
3676 + up(&board->mutex);
3677 + return -EINVAL;
3678 + }
3679 +
3680 + if (false == context->is_valid) {
3681 + hailo_err(board, "Invalid file context\n");
3682 + up(&board->mutex);
3683 + return -EINVAL;
3684 + }
3685 +
3686 + switch (_IOC_TYPE(cmd)) {
3687 + case HAILO_GENERAL_IOCTL_MAGIC:
3688 + err = hailo_general_ioctl(context, board, cmd, arg, filp, &should_up_board_mutex);
3689 + break;
3690 + case HAILO_VDMA_IOCTL_MAGIC:
3691 + err = hailo_vdma_ioctl(&context->vdma_context, &board->vdma, cmd, arg, filp, &board->mutex,
3692 + &should_up_board_mutex);
3693 + break;
3694 + default:
3695 + hailo_err(board, "Invalid ioctl type %d\n", _IOC_TYPE(cmd));
3696 + err = -ENOTTY;
3697 + }
3698 +
3699 + if (should_up_board_mutex) {
3700 + up(&board->mutex);
3701 + }
3702 +
3703 + hailo_dbg(board, "(%d): fops_unlockedioctl: done, err=%ld\n", current->tgid, err);
3704 + return err;
3705 +
3706 +}
3707 +
3708 +int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma)
3709 +{
3710 + int err = 0;
3711 +
3712 + uintptr_t vdma_handle = vma->vm_pgoff << PAGE_SHIFT;
3713 +
3714 + struct hailo_pcie_board* board = (struct hailo_pcie_board*)filp->private_data;
3715 + struct hailo_file_context *context = NULL;
3716 +
3717 + BUILD_BUG_ON_MSG(sizeof(vma->vm_pgoff) < sizeof(vdma_handle),
3718 + "If this expression fails to compile it means the target HW is not compatible with our approach to use "
3719 + "the page offset parameter of 'mmap' to pass the driver the 'handle' of the desired descriptor");
3720 +
3721 + vma->vm_pgoff = 0; // vm_pgoff contains vdma_handle page offset, the actual offset from the phys addr is 0
3722 +
3723 + if (!board || !board->pDev) return -ENODEV;
3724 +
3725 + hailo_info(board, "%d fops_mmap\n", current->tgid);
3726 +
3727 + if (down_interruptible(&board->mutex)) {
3728 + hailo_err(board, "hailo_pcie_fops_mmap down_interruptible fail tgid:%d\n", current->tgid);
3729 + return -ERESTARTSYS;
3730 + }
3731 +
3732 + context = find_file_context(board, filp);
3733 + if (NULL == context) {
3734 + up(&board->mutex);
3735 + hailo_err(board, "Invalid driver state, file context does not exist\n");
3736 + return -EINVAL;
3737 + }
3738 +
3739 + if (false == context->is_valid) {
3740 + up(&board->mutex);
3741 + hailo_err(board, "Invalid file context\n");
3742 + return -EINVAL;
3743 + }
3744 +
3745 + err = hailo_vdma_mmap(&context->vdma_context, &board->vdma, vma, vdma_handle);
3746 + up(&board->mutex);
3747 + return err;
3748 +}
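A note on the vm_pgoff trick in hailo_pcie_fops_mmap() above: userspace smuggles
the vDMA buffer handle to the driver through the mmap offset argument, and the
driver zeroes vm_pgoff afterwards so the actual mapping starts at physical
offset 0. A hypothetical userspace counterpart (the handle value and size are
made up; a real handle would come from one of the vDMA ioctls):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
    /* Hypothetical handle; it must be page aligned since it rides in the offset. */
    const off_t vdma_handle = 0x10000;
    const size_t size = 4096;

    int fd = open("/dev/hailo0", O_RDWR);
    if (fd < 0) { perror("open"); return 1; }

    /* The driver recovers the handle as vma->vm_pgoff << PAGE_SHIFT. */
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, vdma_handle);
    if (p == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    munmap(p, size);
    close(fd);
    return 0;
}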
3749 --- /dev/null
3750 +++ b/drivers/media/pci/hailo/src/fops.h
3751 @@ -0,0 +1,21 @@
3752 +// SPDX-License-Identifier: GPL-2.0
3753 +/**
3754 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3755 + **/
3756 +
3757 +#ifndef _HAILO_PCI_FOPS_H_
3758 +#define _HAILO_PCI_FOPS_H_
3759 +
3760 +int hailo_pcie_fops_open(struct inode* inode, struct file* filp);
3761 +int hailo_pcie_fops_release(struct inode* inode, struct file* filp);
3762 +long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg);
3763 +int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma);
3764 +int hailo_pcie_driver_down(struct hailo_pcie_board *board);
3765 +
3766 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
3767 +irqreturn_t hailo_irqhandler(int irq, void* dev_id, struct pt_regs *regs);
3768 +#else
3769 +irqreturn_t hailo_irqhandler(int irq, void* dev_id);
3770 +#endif
3771 +
3772 +#endif /* _HAILO_PCI_FOPS_H_ */
3773 --- /dev/null
3774 +++ b/drivers/media/pci/hailo/src/pcie.c
3775 @@ -0,0 +1,1012 @@
3776 +// SPDX-License-Identifier: GPL-2.0
3777 +/**
3778 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
3779 + **/
3780 +
3781 +#include <linux/version.h>
3782 +#include <linux/init.h>
3783 +#include <linux/module.h>
3784 +#include <linux/pci.h>
3785 +#include <linux/pci_regs.h>
3786 +#include <linux/interrupt.h>
3787 +#include <linux/sched.h>
3788 +#include <linux/pagemap.h>
3789 +#include <linux/firmware.h>
3790 +#include <linux/kthread.h>
3791 +
3792 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
3793 +#include <linux/dma-direct.h>
3794 +#endif
3795 +
3796 +#define KERNEL_CODE 1
3797 +
3798 +#include "hailo_pcie_version.h"
3799 +#include "hailo_ioctl_common.h"
3800 +#include "pcie.h"
3801 +#include "fops.h"
3802 +#include "sysfs.h"
3803 +#include "utils/logs.h"
3804 +#include "utils/compact.h"
3805 +#include "vdma/vdma.h"
3806 +
3807 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 5, 4, 0 )
3808 +#include <linux/pci-aspm.h>
3809 +#endif
3810 +
3811 +// Values for the driver parameter that forces vdma buffer allocation from the driver,
3812 +// from userspace, or leaves the decision to the driver.
3813 +enum hailo_allocate_driver_buffer_driver_param {
3814 + HAILO_NO_FORCE_BUFFER = 0,
3815 + HAILO_FORCE_BUFFER_FROM_USERSPACE = 1,
3816 + HAILO_FORCE_BUFFER_FROM_DRIVER = 2,
3817 +};
3818 +
3819 +// Debug flag
3820 +static int force_desc_page_size = 0;
3821 +static bool g_is_power_mode_enabled = true;
3822 +static int force_allocation_from_driver = HAILO_NO_FORCE_BUFFER;
3823 +
3824 +#define DEVICE_NODE_NAME "hailo"
3825 +static int char_major = 0;
3826 +static struct class *chardev_class;
3827 +
3828 +static LIST_HEAD(g_hailo_board_list);
3829 +static struct semaphore g_hailo_add_board_mutex = __SEMAPHORE_INITIALIZER(g_hailo_add_board_mutex, 1);
3830 +
3831 +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22))
3832 +#define HAILO_IRQ_FLAGS (SA_SHIRQ | SA_INTERRUPT)
3833 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
3834 +#define HAILO_IRQ_FLAGS (IRQF_SHARED | IRQF_DISABLED)
3835 +#else
3836 +#define HAILO_IRQ_FLAGS (IRQF_SHARED)
3837 +#endif
3838 +
3839 + /* ****************************
3840 + ******************************* */
3841 +bool power_mode_enabled(void)
3842 +{
3843 +#if !defined(HAILO_EMULATOR)
3844 + return g_is_power_mode_enabled;
3845 +#else /* !defined(HAILO_EMULATOR) */
3846 + return false;
3847 +#endif /* !defined(HAILO_EMULATOR) */
3848 +}
3849 +
3850 +
3851 +/**
3852 + * Due to a HW bug, on systems with a low MaxReadReq (< 512) we need to use a different descriptor size.
3853 + * Writes the chosen max descriptor page size to out_page_size; returns 0 on success or a negative error.
3854 + */
3855 +static int hailo_get_desc_page_size(struct pci_dev *pdev, u32 *out_page_size)
3856 +{
3857 + u16 pcie_device_control = 0;
3858 + int err = 0;
3859 + // The default page size must be smaller/equal to 32K (due to PLDA registers limit).
3860 + const u32 max_page_size = 32u * 1024u;
3861 + const u32 default_page_size = min((u32)PAGE_SIZE, max_page_size);
3862 +
3863 + if (force_desc_page_size != 0) {
3864 + // The user passed desc_page_size as a module parameter
3865 + if ((force_desc_page_size & (force_desc_page_size - 1)) != 0) {
3866 + pci_err(pdev, "force_desc_page_size must be a power of 2\n");
3867 + return -EINVAL;
3868 + }
3869 +
3870 + if (force_desc_page_size > max_page_size) {
3871 + pci_err(pdev, "force_desc_page_size %d must not be larger than %u\n", force_desc_page_size, max_page_size);
3872 + return -EINVAL;
3873 + }
3874 +
3875 + pci_notice(pdev, "Probing: Force setting max_desc_page_size to %d (recommended value is %lu)\n",
3876 + force_desc_page_size, PAGE_SIZE);
3877 + *out_page_size = force_desc_page_size;
3878 + return 0;
3879 + }
3880 +
3881 + err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_device_control);
3882 + if (err < 0) {
3883 + pci_err(pdev, "Couldn't read DEVCTL capability\n");
3884 + return err;
3885 + }
3886 +
3887 + switch (pcie_device_control & PCI_EXP_DEVCTL_READRQ) {
3888 + case PCI_EXP_DEVCTL_READRQ_128B:
3889 + pci_notice(pdev, "Probing: Setting max_desc_page_size to 128 (recommended value is %u)\n", default_page_size);
3890 + *out_page_size = 128;
3891 + return 0;
3892 + case PCI_EXP_DEVCTL_READRQ_256B:
3893 + pci_notice(pdev, "Probing: Setting max_desc_page_size to 256 (recommended value is %u)\n", default_page_size);
3894 + *out_page_size = 256;
3895 + return 0;
3896 + default:
3897 + pci_notice(pdev, "Probing: Setting max_desc_page_size to %u (page_size=%lu)\n", default_page_size, PAGE_SIZE);
3898 + *out_page_size = default_page_size;
3899 + return 0;
3900 + }
3901 +}
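The module-parameter branch above enforces two constraints on
force_desc_page_size: it must be a power of two, and it must not exceed the 32K
PLDA register limit. Illustration only, with made-up candidate values, of the
same checks in isolation:

#include <stdio.h>
#include <stdint.h>

/* The driver's bit trick, plus an explicit zero guard. */
static int is_pow2(uint32_t x)
{
    return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
    const uint32_t max_page_size = 32u * 1024u; /* PLDA register limit */
    const uint32_t candidates[] = { 4096, 3000, 65536 };

    for (int i = 0; i < 3; i++) {
        const uint32_t s = candidates[i];
        printf("%6u -> %s\n", s,
               !is_pow2(s)       ? "rejected: not a power of 2" :
               s > max_page_size ? "rejected: above the 32K limit" :
                                   "accepted");
    }
    return 0;
}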
3902 +
3903 +// should be called only from fops_open (once)
3904 +struct hailo_pcie_board* hailo_pcie_get_board_index(u32 index)
3905 +{
3906 + struct hailo_pcie_board *pBoard, *pRet = NULL;
3907 +
3908 + down(&g_hailo_add_board_mutex);
3909 + list_for_each_entry(pBoard, &g_hailo_board_list, board_list)
3910 + {
3911 + if ( index == pBoard->board_index )
3912 + {
3913 + atomic_inc(&pBoard->ref_count);
3914 + pRet = pBoard;
3915 + break;
3916 + }
3917 + }
3918 + up(&g_hailo_add_board_mutex);
3919 +
3920 + return pRet;
3921 +}
3922 +
3923 +/**
3924 + * hailo_pcie_disable_aspm - Disable ASPM states
3925 + * @board: pointer to PCI board struct
3926 + * @state: bit-mask of ASPM states to disable
3927 + * @locked: indication if this context holds pci_bus_sem locked.
3928 + *
3929 + * Some devices *must* have certain ASPM states disabled per hardware errata.
3930 + **/
3931 +static int hailo_pcie_disable_aspm(struct hailo_pcie_board *board, u16 state, bool locked)
3932 +{
3933 + struct pci_dev *pdev = board->pDev;
3934 + struct pci_dev *parent = pdev->bus->self;
3935 + u16 aspm_dis_mask = 0;
3936 + u16 pdev_aspmc = 0;
3937 + u16 parent_aspmc = 0;
3938 + int err = 0;
3939 +
3940 + switch (state) {
3941 + case PCIE_LINK_STATE_L0S:
3942 + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
3943 + break;
3944 + case PCIE_LINK_STATE_L1:
3945 + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
3946 + break;
3947 + default:
3948 + break;
3949 + }
3950 +
3951 + err = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
3952 + if (err < 0) {
3953 + hailo_err(board, "Couldn't read LNKCTL capability\n");
3954 + return err;
3955 + }
3956 +
3957 + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
3958 +
3959 + if (parent) {
3960 + err = pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_aspmc);
3961 + if (err < 0) {
3962 + hailo_err(board, "Couldn't read slot LNKCTL capability\n");
3963 + return err;
3964 + }
3965 + parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
3966 + }
3967 +
3968 + hailo_notice(board, "Disabling ASPM %s %s\n",
3969 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
3970 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
3971 +
3972 + // Disable L0s even if it is currently disabled as ASPM states can be enabled by the kernel when changing power modes
3973 +#ifdef CONFIG_PCIEASPM
3974 + if (locked) {
3975 + // Older kernel versions (<5.2.21) don't return a value from these functions, so we try manual disabling anyway
3976 + (void)pci_disable_link_state_locked(pdev, state);
3977 + } else {
3978 + (void)pci_disable_link_state(pdev, state);
3979 + }
3980 +
3981 + /* Double-check ASPM control. If not disabled by the above, the
3982 + * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
3983 + * not enabled); override by writing PCI config space directly.
3984 + */
3985 + err = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
3986 + if (err < 0) {
3987 + hailo_err(board, "Couldn't read LNKCTL capability\n");
3988 + return err;
3989 + }
3990 + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
3991 +
3992 + if (!(aspm_dis_mask & pdev_aspmc)) {
3993 + hailo_notice(board, "Successfully disabled ASPM %s %s\n",
3994 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
3995 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
3996 + return 0;
3997 + }
3998 +#endif
3999 +
4000 + /* Both device and parent should have the same ASPM setting.
4001 + * Disable ASPM in downstream component first and then upstream.
4002 + */
4003 + err = pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
4004 + if (err < 0) {
4005 + hailo_err(board, "Couldn't clear LNKCTL ASPM bits\n");
4006 + return err;
4007 + }
4008 + if (parent) {
4009 + err = pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, aspm_dis_mask);
4010 + if (err < 0) {
4011 + hailo_err(board, "Couldn't clear slot LNKCTL ASPM bits\n");
4012 + return err;
4013 + }
4014 + }
4015 + hailo_notice(board, "Manually disabled ASPM %s %s\n",
4016 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
4017 + (aspm_dis_mask & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
4018 +
4019 + return 0;
4020 +}
4021 +
4022 +static void hailo_pcie_insert_board(struct hailo_pcie_board* pBoard)
4023 +{
4024 + u32 index = 0;
4025 + struct hailo_pcie_board *pCurrent, *pNext;
4026 +
4027 +
4028 + down(&g_hailo_add_board_mutex);
4029 + if ( list_empty(&g_hailo_board_list) ||
4030 + list_first_entry(&g_hailo_board_list, struct hailo_pcie_board, board_list)->board_index > 0)
4031 + {
4032 + pBoard->board_index = 0;
4033 + list_add(&pBoard->board_list, &g_hailo_board_list);
4034 +
4035 + up(&g_hailo_add_board_mutex);
4036 + return;
4037 + }
4038 +
4039 + list_for_each_entry_safe(pCurrent, pNext, &g_hailo_board_list, board_list)
4040 + {
4041 + index = pCurrent->board_index+1;
4042 + if( list_is_last(&pCurrent->board_list, &g_hailo_board_list) || (index != pNext->board_index))
4043 + {
4044 + break;
4045 + }
4046 + }
4047 +
4048 + pBoard->board_index = index;
4049 + list_add(&pBoard->board_list, &pCurrent->board_list);
4050 +
4051 + up(&g_hailo_add_board_mutex);
4052 +
4053 + return;
4054 +}
4055 +
4056 +static void hailo_pcie_remove_board(struct hailo_pcie_board* pBoard)
4057 +{
4058 + down(&g_hailo_add_board_mutex);
4059 + if (pBoard)
4060 + {
4061 + list_del(&pBoard->board_list);
4062 + }
4063 + up(&g_hailo_add_board_mutex);
4064 +}
4065 +
4066 +static int hailo_write_config(struct hailo_pcie_resources *resources, struct device *dev,
4067 + const struct hailo_config_constants *config_consts)
4068 +{
4069 + const struct firmware *config = NULL;
4070 + int err = 0;
4071 +
4072 + if (NULL == config_consts->filename) {
4073 + // Config not supported for platform
4074 + return 0;
4075 + }
4076 +
4077 + err = request_firmware_direct(&config, config_consts->filename, dev);
4078 + if (err < 0) {
4079 + hailo_dev_info(dev, "Config %s not found\n", config_consts->filename);
4080 + return 0;
4081 + }
4082 +
4083 + hailo_dev_notice(dev, "Writing config %s\n", config_consts->filename);
4084 +
4085 + err = hailo_pcie_write_config_common(resources, config->data, config->size, config_consts);
4086 + if (err < 0) {
4087 + if (-EINVAL == err) {
4088 + hailo_dev_warn(dev, "Config size %zu is bigger than max %zu\n", config->size, config_consts->max_size);
4089 + }
4090 + release_firmware(config);
4091 + return err;
4092 + }
4093 +
4094 + release_firmware(config);
4095 + return 0;
4096 +}
4097 +
4098 +static bool wait_for_firmware_completion(struct completion *fw_load_completion)
4099 +{
4100 + return (0 != wait_for_completion_timeout(fw_load_completion, msecs_to_jiffies(FIRMWARE_WAIT_TIMEOUT_MS)));
4101 +}
4102 +
4103 +static int hailo_load_firmware(struct hailo_pcie_resources *resources,
4104 + struct device *dev, struct completion *fw_load_completion)
4105 +{
4106 + const struct firmware *firmware = NULL;
4107 + int err = 0;
4108 +
4109 + if (hailo_pcie_is_firmware_loaded(resources)) {
4110 + hailo_dev_warn(dev, "Firmware was already loaded\n");
4111 + return 0;
4112 + }
4113 +
4114 + reinit_completion(fw_load_completion);
4115 +
4116 + err = hailo_write_config(resources, dev, hailo_pcie_get_board_config_constants(resources->board_type));
4117 + if (err < 0) {
4118 + hailo_dev_err(dev, "Failed writing board config");
4119 + return err;
4120 + }
4121 +
4122 + err = hailo_write_config(resources, dev, hailo_pcie_get_user_config_constants(resources->board_type));
4123 + if (err < 0) {
4124 + hailo_dev_err(dev, "Failed writing fw config");
4125 + return err;
4126 + }
4127 +
4128 + // read firmware file
4129 + err = request_firmware_direct(&firmware, hailo_pcie_get_fw_filename(resources->board_type), dev);
4130 + if (err < 0) {
4131 + hailo_dev_warn(dev, "Firmware file not found (/lib/firmware/%s), please upload the firmware manually\n",
4132 + hailo_pcie_get_fw_filename(resources->board_type));
4133 + return 0;
4134 + }
4135 +
4136 + err = hailo_pcie_write_firmware(resources, firmware->data, firmware->size);
4137 + if (err < 0) {
4138 + hailo_dev_err(dev, "Failed writing firmware. err %d\n", err);
4139 + release_firmware(firmware);
4140 + return err;
4141 + }
4142 +
4143 + release_firmware(firmware);
4144 +
4145 + if (!wait_for_firmware_completion(fw_load_completion)) {
4146 + hailo_dev_err(dev, "Timeout waiting for firmware load\n");
4147 + return -ETIMEDOUT;
4148 + }
4149 +
4150 + hailo_dev_notice(dev, "Firmware was loaded successfully\n");
4151 + return 0;
4152 +}
4153 +
4154 +static int hailo_activate_board(struct hailo_pcie_board *board)
4155 +{
4156 + int err = 0;
4157 +
4158 + (void)hailo_pcie_disable_aspm(board, PCIE_LINK_STATE_L0S, false);
4159 +
4160 + err = hailo_enable_interrupts(board);
4161 + if (err < 0) {
4162 + hailo_err(board, "Failed Enabling interrupts %d\n", err);
4163 + return err;
4164 + }
4165 +
4166 + err = hailo_load_firmware(&board->pcie_resources, &board->pDev->dev,
4167 + &board->fw_loaded_completion);
4168 + if (err < 0) {
4169 + hailo_err(board, "Firmware load failed\n");
4170 + hailo_disable_interrupts(board);
4171 + return err;
4172 + }
4173 +
4174 + hailo_disable_interrupts(board);
4175 +
4176 + if (power_mode_enabled()) {
4177 + // Setting the device to low power state, until the user opens the device
4178 + err = pci_set_power_state(board->pDev, PCI_D3hot);
4179 + if (err < 0) {
4180 + hailo_err(board, "Set power state failed %d\n", err);
4181 + return err;
4182 + }
4183 + }
4184 +
4185 + return 0;
4186 +}
4187 +
4188 +int hailo_enable_interrupts(struct hailo_pcie_board *board)
4189 +{
4190 + int err = 0;
4191 +
4192 + if (board->interrupts_enabled) {
4193 + hailo_crit(board, "Failed enabling interrupts (already enabled)\n");
4194 + return -EINVAL;
4195 + }
4196 +
4197 + // TODO HRT-2253: use new api for enabling msi: (pci_alloc_irq_vectors)
4198 + if ((err = pci_enable_msi(board->pDev))) {
4199 + hailo_err(board, "Failed to enable MSI %d\n", err);
4200 + return err;
4201 + }
4202 + hailo_info(board, "Enabled MSI interrupt\n");
4203 +
4204 + err = request_irq(board->pDev->irq, hailo_irqhandler, HAILO_IRQ_FLAGS, DRIVER_NAME, board);
4205 + if (err) {
4206 + hailo_err(board, "request_irq failed %d\n", err);
4207 + pci_disable_msi(board->pDev);
4208 + return err;
4209 + }
4210 + hailo_info(board, "irq enabled %u\n", board->pDev->irq);
4211 +
4212 + hailo_pcie_enable_interrupts(&board->pcie_resources);
4213 +
4214 + board->interrupts_enabled = true;
4215 + return 0;
4216 +}
4217 +
4218 +void hailo_disable_interrupts(struct hailo_pcie_board *board)
4219 +{
4220 + // Sanity Check
4221 + if ((NULL == board) || (NULL == board->pDev)) {
4222 + pr_err("Failed to access board or device\n");
4223 + return;
4224 + }
4225 +
4226 + if (!board->interrupts_enabled) {
4227 + return;
4228 + }
4229 +
4230 + board->interrupts_enabled = false;
4231 + hailo_pcie_disable_interrupts(&board->pcie_resources);
4232 + free_irq(board->pDev->irq, board);
4233 + pci_disable_msi(board->pDev);
4234 +}
4235 +
4236 +static int hailo_bar_iomap(struct pci_dev *pdev, int bar, struct hailo_resource *resource)
4237 +{
4238 + resource->size = pci_resource_len(pdev, bar);
4239 + resource->address = (uintptr_t)(pci_iomap(pdev, bar, resource->size));
4240 +
4241 + if (!resource->size || !resource->address) {
4242 + pci_err(pdev, "Probing: Invalid PCIe BAR %d", bar);
4243 + return -EINVAL;
4244 + }
4245 +
4246 + pci_notice(pdev, "Probing: mapped bar %d - %p %zu\n", bar,
4247 + (void*)resource->address, resource->size);
4248 + return 0;
4249 +}
4250 +
4251 +static void hailo_bar_iounmap(struct pci_dev *pdev, struct hailo_resource *resource)
4252 +{
4253 + if (resource->address) {
4254 + pci_iounmap(pdev, (void*)resource->address);
4255 + resource->address = 0;
4256 + resource->size = 0;
4257 + }
4258 +}
4259 +
4260 +static int pcie_resources_init(struct pci_dev *pdev, struct hailo_pcie_resources *resources,
4261 + enum hailo_board_type board_type)
4262 +{
4263 + int err = -EINVAL;
4264 + if (board_type >= HAILO_BOARD_TYPE_COUNT) {
4265 + pci_err(pdev, "Probing: Invalid board type %d\n", (int)board_type);
4266 + err = -EINVAL;
4267 + goto failure_exit;
4268 + }
4269 +
4270 + err = pci_request_regions(pdev, DRIVER_NAME);
4271 + if (err < 0) {
4272 + pci_err(pdev, "Probing: Error allocating bars %d\n", err);
4273 + goto failure_exit;
4274 + }
4275 +
4276 + err = hailo_bar_iomap(pdev, HAILO_PCIE_CONFIG_BAR, &resources->config);
4277 + if (err < 0) {
4278 + goto failure_release_regions;
4279 + }
4280 +
4281 + err = hailo_bar_iomap(pdev, HAILO_PCIE_VDMA_REGS_BAR, &resources->vdma_registers);
4282 + if (err < 0) {
4283 + goto failure_release_config;
4284 + }
4285 +
4286 + err = hailo_bar_iomap(pdev, HAILO_PCIE_FW_ACCESS_BAR, &resources->fw_access);
4287 + if (err < 0) {
4288 + goto failure_release_vdma_regs;
4289 + }
4290 +
4291 + resources->board_type = board_type;
4292 +
4293 + if (!hailo_pcie_is_device_connected(resources)) {
4294 + pci_err(pdev, "Probing: Failed reading device BARs, device may be disconnected\n");
4295 + err = -ENODEV;
4296 + goto failure_release_fw_access;
4297 + }
4298 +
4299 + return 0;
4300 +
4301 +failure_release_fw_access:
4302 + hailo_bar_iounmap(pdev, &resources->fw_access);
4303 +failure_release_vdma_regs:
4304 + hailo_bar_iounmap(pdev, &resources->vdma_registers);
4305 +failure_release_config:
4306 + hailo_bar_iounmap(pdev, &resources->config);
4307 +failure_release_regions:
4308 + pci_release_regions(pdev);
4309 +failure_exit:
4310 + return err;
4311 +}
4312 +
4313 +static void pcie_resources_release(struct pci_dev *pdev, struct hailo_pcie_resources *resources)
4314 +{
4315 + hailo_bar_iounmap(pdev, &resources->config);
4316 + hailo_bar_iounmap(pdev, &resources->vdma_registers);
4317 + hailo_bar_iounmap(pdev, &resources->fw_access);
4318 + pci_release_regions(pdev);
4319 +}
4320 +
4321 +static void update_channel_interrupts(struct hailo_vdma_controller *controller,
4322 + size_t engine_index, u32 channels_bitmap)
4323 +{
4324 + struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(controller->dev);
4325 + if (engine_index >= board->vdma.vdma_engines_count) {
4326 + hailo_err(board, "Invalid engine index %zu", engine_index);
4327 + return;
4328 + }
4329 +
4330 + hailo_pcie_update_channel_interrupts_mask(&board->pcie_resources, channels_bitmap);
4331 +}
4332 +
4333 +static struct hailo_vdma_controller_ops pcie_vdma_controller_ops = {
4334 + .update_channel_interrupts = update_channel_interrupts,
4335 +};
4336 +
4337 +
4338 +static int hailo_pcie_vdma_controller_init(struct hailo_vdma_controller *controller,
4339 + struct device *dev, struct hailo_resource *vdma_registers)
4340 +{
4341 + const size_t engines_count = 1;
4342 + return hailo_vdma_controller_init(controller, dev, &hailo_pcie_vdma_hw,
4343 + &pcie_vdma_controller_ops, vdma_registers, engines_count);
4344 +}
4345 +
4346 +// Checks whether an address allocated with kmalloc is DMA capable.
4347 +// If a kmalloc address is not DMA capable, we assume other addresses
4348 +// won't be DMA capable either.
4349 +static bool is_kmalloc_dma_capable(struct device *dev)
4350 +{
4351 + void *check_addr = NULL;
4352 + dma_addr_t dma_addr = 0;
4353 + phys_addr_t phys_addr = 0;
4354 + bool capable = false;
4355 +
4356 + if (!dev->dma_mask) {
4357 + return false;
4358 + }
4359 +
4360 + check_addr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4361 + if (NULL == check_addr) {
4362 + dev_err(dev, "failed allocating page!\n");
4363 + return false;
4364 + }
4365 +
4366 + phys_addr = virt_to_phys(check_addr);
4367 + dma_addr = phys_to_dma(dev, phys_addr);
4368 +
4369 + capable = is_dma_capable(dev, dma_addr, PAGE_SIZE);
4370 + kfree(check_addr);
4371 + return capable;
4372 +}
4373 +
4374 +static int hailo_get_allocation_mode(struct pci_dev *pdev, enum hailo_allocation_mode *allocation_mode)
4375 +{
4376 + // Check if a module parameter was given to override the driver's choice
4377 + if (HAILO_NO_FORCE_BUFFER != force_allocation_from_driver) {
4378 + if (HAILO_FORCE_BUFFER_FROM_USERSPACE == force_allocation_from_driver) {
4379 + *allocation_mode = HAILO_ALLOCATION_MODE_USERSPACE;
4380 + pci_notice(pdev, "Probing: Using userspace allocated vdma buffers\n");
4381 + }
4382 + else if (HAILO_FORCE_BUFFER_FROM_DRIVER == force_allocation_from_driver) {
4383 + *allocation_mode = HAILO_ALLOCATION_MODE_DRIVER;
4384 + pci_notice(pdev, "Probing: Using driver allocated vdma buffers\n");
4385 + }
4386 + else {
4387 + pci_err(pdev, "Invalid value for the force_allocation_from_driver parameter: %d\n",
4388 + force_allocation_from_driver);
4389 + return -EINVAL;
4390 + }
4391 +
4392 + return 0;
4393 + }
4394 +
4395 + if (is_kmalloc_dma_capable(&pdev->dev)) {
4396 + *allocation_mode = HAILO_ALLOCATION_MODE_USERSPACE;
4397 + pci_notice(pdev, "Probing: Using userspace allocated vdma buffers\n");
4398 + } else {
4399 + *allocation_mode = HAILO_ALLOCATION_MODE_DRIVER;
4400 + pci_notice(pdev, "Probing: Using driver allocated vdma buffers\n");
4401 + }
4402 +
4403 + return 0;
4404 +}
4405 +
4406 +static int hailo_pcie_probe(struct pci_dev* pDev, const struct pci_device_id* id)
4407 +{
4408 + struct hailo_pcie_board * pBoard;
4409 + struct device *char_device = NULL;
4410 + int err = -EINVAL;
4411 +
4412 + pci_notice(pDev, "Probing on: %04x:%04x...\n", pDev->vendor, pDev->device);
4413 +#ifdef HAILO_EMULATOR
4414 + pci_notice(pDev, "PCIe driver was compiled in emulator mode\n");
4415 +#endif /* HAILO_EMULATOR */
4416 + if (!g_is_power_mode_enabled) {
4417 + pci_notice(pDev, "PCIe driver was compiled with power modes disabled\n");
4418 + }
4419 +
4420 + /* Initialize device extension for the board */
4421 + pci_notice(pDev, "Probing: Allocate memory for device extension, %zu\n", sizeof(struct hailo_pcie_board));
4422 + pBoard = (struct hailo_pcie_board*) kzalloc( sizeof(struct hailo_pcie_board), GFP_KERNEL);
4423 + if (pBoard == NULL)
4424 + {
4425 + pci_err(pDev, "Probing: Failed to allocate memory for device extension structure\n");
4426 + err = -ENOMEM;
4427 + goto probe_exit;
4428 + }
4429 +
4430 + pBoard->pDev = pDev;
4431 +
4432 + if ( (err = pci_enable_device(pDev)) )
4433 + {
4434 + pci_err(pDev, "Probing: Failed calling pci_enable_device %d\n", err);
4435 + goto probe_free_board;
4436 + }
4437 + pci_notice(pDev, "Probing: Device enabled\n");
4438 +
4439 + pci_set_master(pDev);
4440 +
4441 + err = pcie_resources_init(pDev, &pBoard->pcie_resources, id->driver_data);
4442 + if (err < 0) {
4443 + pci_err(pDev, "Probing: Failed init pcie resources");
4444 + goto probe_disable_device;
4445 + }
4446 +
4447 + err = hailo_get_desc_page_size(pDev, &pBoard->desc_max_page_size);
4448 + if (err < 0) {
4449 + goto probe_release_pcie_resources;
4450 + }
4451 +
4452 + pBoard->interrupts_enabled = false;
4453 + init_completion(&pBoard->fw_loaded_completion);
4454 +
4455 + sema_init(&pBoard->mutex, 1);
4456 + atomic_set(&pBoard->ref_count, 0);
4457 + INIT_LIST_HEAD(&pBoard->open_files_list);
4458 +
4459 + sema_init(&pBoard->fw_control.mutex, 1);
4460 + spin_lock_init(&pBoard->notification_read_spinlock);
4461 + init_completion(&pBoard->fw_control.completion);
4462 +
4463 + init_completion(&pBoard->driver_down.reset_completed);
4464 +
4465 + INIT_LIST_HEAD(&pBoard->notification_wait_list);
4466 +
4467 + memset(&pBoard->notification_cache, 0, sizeof(pBoard->notification_cache));
4468 + memset(&pBoard->memory_transfer_params, 0, sizeof(pBoard->memory_transfer_params));
4469 +
4470 + err = hailo_pcie_vdma_controller_init(&pBoard->vdma, &pBoard->pDev->dev,
4471 + &pBoard->pcie_resources.vdma_registers);
4472 + if (err < 0) {
4473 + hailo_err(pBoard, "Failed init vdma controller %d\n", err);
4474 + goto probe_release_pcie_resources;
4475 + }
4476 +
4477 + // Checks the dma mask => it must be called after the device's dma_mask is set by hailo_pcie_vdma_controller_init
4478 + err = hailo_get_allocation_mode(pDev, &pBoard->allocation_mode);
4479 + if (err < 0) {
4480 + pci_err(pDev, "Failed determining buffer allocation mode, err %d\n", err);
4481 + goto probe_release_pcie_resources;
4482 + }
4483 +
4484 + err = hailo_activate_board(pBoard);
4485 + if (err < 0) {
4486 + hailo_err(pBoard, "Failed activating board %d\n", err);
4487 + goto probe_release_pcie_resources;
4488 + }
4489 +
4490 + /* Keep track of the device so it can be removed later */
4491 + pci_set_drvdata(pDev, pBoard);
4492 + hailo_pcie_insert_board(pBoard);
4493 +
4494 + /* Create dynamically the device node*/
4495 + char_device = device_create_with_groups(chardev_class, NULL,
4496 + MKDEV(char_major, pBoard->board_index),
4497 + pBoard,
4498 + g_hailo_dev_groups,
4499 + DEVICE_NODE_NAME"%d", pBoard->board_index);
4500 + if (IS_ERR(char_device)) {
4501 + hailo_err(pBoard, "Failed creating dynamic device %d\n", pBoard->board_index);
4502 + err = PTR_ERR(char_device);
4503 + goto probe_remove_board;
4504 + }
4505 +
4506 + hailo_notice(pBoard, "Probing: Added board %04x:%04x, /dev/hailo%d\n", pDev->vendor, pDev->device, pBoard->board_index);
4507 +
4508 + return 0;
4509 +
4510 +probe_remove_board:
4511 + hailo_pcie_remove_board(pBoard);
4512 +
4513 +probe_release_pcie_resources:
4514 + pcie_resources_release(pBoard->pDev, &pBoard->pcie_resources);
4515 +
4516 +probe_disable_device:
4517 + pci_disable_device(pDev);
4518 +
4519 +probe_free_board:
4520 + kfree(pBoard);
4521 +
4522 +probe_exit:
4523 +
4524 + return err;
4525 +}
4526 +
4527 +static void hailo_pcie_remove(struct pci_dev* pDev)
4528 +{
4529 + struct hailo_pcie_board* pBoard = (struct hailo_pcie_board*) pci_get_drvdata(pDev);
4530 + struct hailo_notification_wait *cursor = NULL;
4531 +
4532 + pci_notice(pDev, "Remove: Releasing board\n");
4533 +
4534 + if (pBoard)
4535 + {
4536 +
4537 + // lock board to wait for any pending operations and for synchronization with open
4538 + down(&pBoard->mutex);
4539 +
4540 +
4541 + // remove board from active boards list
4542 + hailo_pcie_remove_board(pBoard);
4543 +
4544 +
4545 + /* Delete the device node */
4546 + device_destroy(chardev_class, MKDEV(char_major, pBoard->board_index));
4547 +
4548 + // disable interrupts - will only disable if they have not been disabled in release already
4549 + hailo_disable_interrupts(pBoard);
4550 +
4551 + pcie_resources_release(pBoard->pDev, &pBoard->pcie_resources);
4552 +
4553 + // Dissociate the device from the board; fops_release detects removal via a NULL pDev
4554 + pBoard->pDev = NULL;
4555 +
4556 + pBoard->vdma.dev = NULL;
4557 +
4558 + pci_disable_device(pDev);
4559 +
4560 + pci_set_drvdata(pDev, NULL);
4561 +
4562 + // Under rcu_read_lock, complete every waiter on the notification_wait_list so they wake up on removal
4563 + rcu_read_lock();
4564 + list_for_each_entry_rcu(cursor, &pBoard->notification_wait_list, notification_wait_list) {
4565 + cursor->is_disabled = true;
4566 + complete(&cursor->notification_completion);
4567 + }
4568 + rcu_read_unlock();
4569 +
4570 + up(&pBoard->mutex);
4571 +
4572 + if ( 0 == atomic_read(&pBoard->ref_count) )
4573 + {
4574 + // nobody has the board open - free
4575 + pci_notice(pDev, "Remove: Freed board, /dev/hailo%d\n", pBoard->board_index);
4576 + kfree(pBoard);
4577 + }
4578 + else
4579 + {
4580 + // board resources are freed on last close
4581 + pci_notice(pDev, "Remove: Scheduled for board removal, /dev/hailo%d\n", pBoard->board_index);
4582 + }
4583 + }
4584 +
4585 +}
4586 +
4587 +#ifdef CONFIG_PM_SLEEP
4588 +static int hailo_pcie_suspend(struct device *dev)
4589 +{
4590 + struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(dev);
4591 + struct hailo_file_context *cur = NULL;
4592 + int err = 0;
4593 +
4594 + // lock board to wait for any pending operations
4595 + down(&board->mutex);
4596 +
4597 + // Disable all interrupts. All interrupts from the Hailo chip will be masked.
4598 + hailo_disable_interrupts(board);
4599 +
4600 + // Close all vDMA channels
4601 + if (board->vdma.used_by_filp != NULL) {
4602 + err = hailo_pcie_driver_down(board);
4603 + if (err < 0) {
4604 + dev_notice(dev, "Error while trying to call FW to close vdma channels\n");
4605 + }
4606 + }
4607 +
4608 + // Invalidate all active file contexts so any further action returns an error to the user.
4609 + list_for_each_entry(cur, &board->open_files_list, open_files_list) {
4610 + cur->is_valid = false;
4611 + }
4612 +
4613 + // Release board
4614 + up(&board->mutex);
4615 +
4616 + dev_notice(dev, "PM suspend\n");
4617 + // Continue system suspend
4618 + return err;
4619 +}
4620 +
4621 +static int hailo_pcie_resume(struct device *dev)
4622 +{
4623 + struct hailo_pcie_board *board = (struct hailo_pcie_board*) dev_get_drvdata(dev);
4624 + int err = 0;
4625 +
4626 + if ((err = hailo_activate_board(board)) < 0) {
4627 + dev_err(dev, "Failed activating board %d\n", err);
4628 + return err;
4629 + }
4630 +
4631 + dev_notice(dev, "PM resume\n");
4632 + return 0;
4633 +}
4634 +#endif /* CONFIG_PM_SLEEP */
4635 +
4636 +static SIMPLE_DEV_PM_OPS(hailo_pcie_pm_ops, hailo_pcie_suspend, hailo_pcie_resume);
4637 +
4638 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 )
4639 +static void hailo_pci_reset_prepare(struct pci_dev *pdev)
4640 +{
4641 + struct hailo_pcie_board* board = (struct hailo_pcie_board*) pci_get_drvdata(pdev);
4642 + int err = 0;
4643 + /* Reset preparation logic goes here */
4644 + pci_err(pdev, "Reset preparation for PCI device\n");
4645 +
4646 + if (board)
4647 + {
4648 + // lock board to wait for any pending operations and for synchronization with open
4649 + down(&board->mutex);
4650 + if (board->vdma.used_by_filp != NULL) {
4651 + // Try to close all vDMA channels before reset
4652 + err = hailo_pcie_driver_down(board);
4653 + if (err < 0) {
4654 + pci_err(pdev, "Error while trying to call FW to close vdma channels (errno %d)\n", err);
4655 + }
4656 + }
4657 + up(&board->mutex);
4658 + }
4659 +}
4660 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 ) */
4661 +
4662 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 4, 13, 0 ) && LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 16, 0 )
4663 +static void hailo_pci_reset_notify(struct pci_dev *pdev, bool prepare)
4664 +{
4665 + if (prepare) {
4666 + hailo_pci_reset_prepare(pdev);
4667 + }
4668 +}
4669 +#endif
4670 +
4671 +static const struct pci_error_handlers hailo_pcie_err_handlers = {
4672 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 3, 16, 0 )
4673 +/* No FLR callback */
4674 +#elif LINUX_VERSION_CODE < KERNEL_VERSION( 4, 13, 0 )
4675 +/* FLR Callback is reset_notify */
4676 + .reset_notify = hailo_pci_reset_notify,
4677 +#else
4678 +/* FLR Callback is reset_prepare */
4679 + .reset_prepare = hailo_pci_reset_prepare,
4680 +#endif
4681 +};
4682 +
4683 +static struct pci_device_id hailo_pcie_id_table[] =
4684 +{
4685 + {PCI_DEVICE_DATA(HAILO, HAILO8, HAILO_BOARD_TYPE_HAILO8)},
4686 + {PCI_DEVICE_DATA(HAILO, HAILO15, HAILO_BOARD_TYPE_HAILO15)},
4687 + {PCI_DEVICE_DATA(HAILO, PLUTO, HAILO_BOARD_TYPE_PLUTO)},
4688 +    { 0, },
4689 +};
4690 +
4691 +static struct file_operations hailo_pcie_fops =
4692 +{
4693 +    .owner = THIS_MODULE,
4694 +    .unlocked_ioctl = hailo_pcie_fops_unlockedioctl,
4695 +    .mmap = hailo_pcie_fops_mmap,
4696 +    .open = hailo_pcie_fops_open,
4697 +    .release = hailo_pcie_fops_release
4698 +};
4699 +
4700 +
4701 +static struct pci_driver hailo_pci_driver =
4702 +{
4703 +    .name = DRIVER_NAME,
4704 +    .id_table = hailo_pcie_id_table,
4705 +    .probe = hailo_pcie_probe,
4706 +    .remove = hailo_pcie_remove,
4707 +    .driver = {
4708 +        .pm = &hailo_pcie_pm_ops,
4709 +    },
4710 +    .err_handler = &hailo_pcie_err_handlers,
4711 +};
4712 +
4713 +MODULE_DEVICE_TABLE (pci, hailo_pcie_id_table);
4714 +
4715 +static int hailo_pcie_register_chrdev(unsigned int major, const char *name)
4716 +{
4717 +    int char_major;
4718 +
4719 +    char_major = register_chrdev(major, name, &hailo_pcie_fops);
+    if (char_major < 0) {
+        return char_major;
+    }
4720 +
4721 +    chardev_class = class_create_compat("hailo_chardev");
+    if (IS_ERR(chardev_class)) {
+        unregister_chrdev(char_major, name);
+        return PTR_ERR(chardev_class);
+    }
4722 +
4723 +    return char_major;
4724 +}
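+// Note (editor): passing major == 0 - as module init does below - makes
+// register_chrdev() allocate a major number dynamically and return it on
+// success; the class created above presumably backs the /dev/hailoN device
+// nodes created at probe time.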
4725 +
4726 +static void hailo_pcie_unregister_chrdev(unsigned int major, const char *name)
4727 +{
4728 + class_destroy(chardev_class);
4729 + unregister_chrdev(major, name);
4730 +}
4731 +
4732 +static int __init hailo_pcie_module_init(void)
4733 +{
4734 + int err;
4735 +
4736 + pr_notice(DRIVER_NAME ": Init module. driver version %s\n", HAILO_DRV_VER);
4737 +
4738 + if ( 0 > (char_major = hailo_pcie_register_chrdev(0, DRIVER_NAME)) )
4739 + {
4740 + pr_err(DRIVER_NAME ": Init Error, failed to call register_chrdev.\n");
4741 +
4742 + return char_major;
4743 + }
4744 +
4745 + if ( 0 != (err = pci_register_driver(&hailo_pci_driver)))
4746 + {
4747 + pr_err(DRIVER_NAME ": Init Error, failed to call pci_register_driver.\n");
4749 + hailo_pcie_unregister_chrdev(char_major, DRIVER_NAME);
4750 + return err;
4751 + }
4752 +
4753 + return 0;
4754 +}
4755 +
4756 +static void __exit hailo_pcie_module_exit(void)
4757 +{
4758 +
4759 + pr_notice(DRIVER_NAME ": Exit module.\n");
4760 +
4761 + // Unregister the driver from pci bus
4762 + pci_unregister_driver(&hailo_pci_driver);
4763 + hailo_pcie_unregister_chrdev(char_major, DRIVER_NAME);
4764 +
4765 + pr_notice(DRIVER_NAME ": Hailo PCIe driver unloaded.\n");
4766 +}
4767 +
4768 +
4769 +module_init(hailo_pcie_module_init);
4770 +module_exit(hailo_pcie_module_exit);
4771 +
4772 +module_param(o_dbg, int, S_IRUGO | S_IWUSR);
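+// Editor's note: description inferred from utils/logs.h, where messages are
+// emitted only when their LOGLEVEL_* value is <= o_dbg.
+MODULE_PARM_DESC(o_dbg, "Maximum kernel log level to emit (LOGLEVEL_* value)");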
4773 +
4774 +module_param_named(no_power_mode, g_is_power_mode_enabled, invbool, S_IRUGO);
4775 +MODULE_PARM_DESC(no_power_mode, "Disables automatic D0->D3 PCIe transactions");
4776 +
4777 +module_param(force_allocation_from_driver, int, S_IRUGO);
4778 +MODULE_PARM_DESC(force_allocation_from_driver, "Determines whether to force buffer allocation from driver or userspace");
4779 +
4780 +module_param(force_desc_page_size, int, S_IRUGO);
4781 +MODULE_PARM_DESC(force_desc_page_size, "Determines the maximum DMA descriptor page size (must be a power of 2)");
4782 +
4783 +MODULE_AUTHOR("Hailo Technologies Ltd.");
4784 +MODULE_DESCRIPTION("Hailo PCIe driver");
4785 +MODULE_LICENSE("GPL v2");
4786 +MODULE_VERSION(HAILO_DRV_VER);
4787 +
4788 --- /dev/null
4789 +++ b/drivers/media/pci/hailo/src/pcie.h
4790 @@ -0,0 +1,82 @@
4791 +// SPDX-License-Identifier: GPL-2.0
4792 +/**
4793 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4794 + **/
4795 +
4796 +#ifndef _HAILO_PCI_PCIE_H_
4797 +#define _HAILO_PCI_PCIE_H_
4798 +
4799 +#include "vdma/vdma.h"
4800 +#include "hailo_ioctl_common.h"
4801 +#include "pcie_common.h"
4802 +#include "utils/fw_common.h"
4803 +
4804 +#include <linux/pci.h>
4805 +#include <linux/fs.h>
4806 +#include <linux/interrupt.h>
4807 +#include <linux/circ_buf.h>
4808 +#include <linux/device.h>
4809 +
4810 +#include <linux/ioctl.h>
4811 +
4812 +struct hailo_fw_control_info {
4813 +    // ensures that only one fw control is sent at a time
4814 + struct semaphore mutex;
4815 + // called from the interrupt handler to notify that a response is ready
4816 + struct completion completion;
4817 + // the command we are currently handling
4818 + struct hailo_fw_control command;
4819 +};
4820 +
4821 +struct hailo_pcie_driver_down_info {
4822 + // called from the interrupt handler to notify that FW completed reset
4823 + struct completion reset_completed;
4824 +};
4825 +
4826 +struct hailo_fw_boot {
4827 +    // The filp that enabled interrupts for fw boot; the interrupt is enabled iff this is not NULL
4828 + struct file *filp;
4829 + // called from the interrupt handler to notify that an interrupt was raised
4830 + struct completion completion;
4831 +};
4832 +
4833 +
4834 +// Context for each open file handle
4835 +// TODO: store board and use as actual context
4836 +struct hailo_file_context {
4837 + struct list_head open_files_list;
4838 + struct file *filp;
4839 + struct hailo_vdma_file_context vdma_context;
4840 + bool is_valid;
4841 +};
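+// Note: is_valid is cleared under board->mutex when the board suspends (see
+// hailo_pcie_suspend() in pcie.c) so that ioctls issued on a stale handle
+// fail fast; presumably the fops check it before touching hardware.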
4842 +
4843 +struct hailo_pcie_board {
4844 + struct list_head board_list;
4845 + struct pci_dev *pDev;
4846 + u32 board_index;
4847 + atomic_t ref_count;
4848 + struct list_head open_files_list;
4849 + struct hailo_pcie_resources pcie_resources;
4850 + struct hailo_fw_control_info fw_control;
4851 + struct hailo_pcie_driver_down_info driver_down;
4852 + struct semaphore mutex;
4853 + struct hailo_vdma_controller vdma;
4854 + spinlock_t notification_read_spinlock;
4855 + struct list_head notification_wait_list;
4856 + struct hailo_d2h_notification notification_cache;
4857 + struct hailo_d2h_notification notification_to_user;
4858 + struct hailo_memory_transfer_params memory_transfer_params;
4859 + u32 desc_max_page_size;
4860 + enum hailo_allocation_mode allocation_mode;
4861 + struct completion fw_loaded_completion;
4862 + bool interrupts_enabled;
4863 +};
4864 +
4865 +bool power_mode_enabled(void);
4866 +
4867 +struct hailo_pcie_board* hailo_pcie_get_board_index(u32 index);
4868 +void hailo_disable_interrupts(struct hailo_pcie_board *board);
4869 +int hailo_enable_interrupts(struct hailo_pcie_board *board);
4870 +
4871 +#endif /* _HAILO_PCI_PCIE_H_ */
4872 +
4873 --- /dev/null
4874 +++ b/drivers/media/pci/hailo/src/sysfs.c
4875 @@ -0,0 +1,36 @@
4876 +// SPDX-License-Identifier: GPL-2.0
4877 +/**
4878 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4879 + **/
4880 +
4881 +#include "sysfs.h"
4882 +#include "pcie.h"
4883 +
4884 +#include <linux/device.h>
4885 +#include <linux/sysfs.h>
4886 +
4887 +static ssize_t board_location_show(struct device *dev, struct device_attribute *_attr,
4888 + char *buf)
4889 +{
4890 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
4891 + const char *dev_info = pci_name(board->pDev);
4892 + return sprintf(buf, "%s", dev_info);
4893 +}
4894 +static DEVICE_ATTR_RO(board_location);
4895 +
4896 +static ssize_t device_id_show(struct device *dev, struct device_attribute *_attr,
4897 + char *buf)
4898 +{
4899 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
4900 + return sprintf(buf, "%x:%x", board->pDev->vendor, board->pDev->device);
4901 +}
4902 +static DEVICE_ATTR_RO(device_id);
4903 +
4904 +static struct attribute *hailo_dev_attrs[] = {
4905 + &dev_attr_board_location.attr,
4906 + &dev_attr_device_id.attr,
4907 + NULL
4908 +};
4909 +
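+// ATTRIBUTE_GROUPS(hailo_dev) expands to a 'hailo_dev_group' struct wrapping
+// hailo_dev_attrs plus a NULL-terminated 'hailo_dev_groups' array; the alias
+// below exports it, presumably for use when the character device is created.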
4910 +ATTRIBUTE_GROUPS(hailo_dev);
4911 +const struct attribute_group **g_hailo_dev_groups = hailo_dev_groups;
4912 --- /dev/null
4913 +++ b/drivers/media/pci/hailo/src/sysfs.h
4914 @@ -0,0 +1,13 @@
4915 +// SPDX-License-Identifier: GPL-2.0
4916 +/**
4917 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4918 + **/
4919 +
4920 +#ifndef _HAILO_PCI_SYSFS_H_
4921 +#define _HAILO_PCI_SYSFS_H_
4922 +
4923 +#include <linux/sysfs.h>
4924 +
4925 +extern const struct attribute_group **g_hailo_dev_groups;
4926 +
4927 +#endif /* _HAILO_PCI_SYSFS_H_ */
4928 --- /dev/null
4929 +++ b/drivers/media/pci/hailo/src/utils.c
4930 @@ -0,0 +1,27 @@
4931 +// SPDX-License-Identifier: GPL-2.0
4932 +/**
4933 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4934 + **/
4935 +
4936 +#include <linux/version.h>
4937 +#include <linux/init.h>
4938 +#include <linux/module.h>
4939 +#include <linux/pci.h>
4940 +
4941 +#include "hailo_pcie_version.h"
4942 +#include "pcie.h"
4943 +#include "utils.h"
4944 +#include "utils/logs.h"
4945 +
4946 +
4947 +void hailo_pcie_clear_notification_wait_list(struct hailo_pcie_board *pBoard, struct file *filp)
4948 +{
4949 + struct hailo_notification_wait *cur = NULL, *next = NULL;
4950 + list_for_each_entry_safe(cur, next, &pBoard->notification_wait_list, notification_wait_list) {
4951 + if (cur->filp == filp) {
4952 + list_del_rcu(&cur->notification_wait_list);
4953 + synchronize_rcu();
4954 + kfree(cur);
4955 + }
4956 + }
4957 +}
4958 --- /dev/null
4959 +++ b/drivers/media/pci/hailo/src/utils.h
4960 @@ -0,0 +1,21 @@
4961 +// SPDX-License-Identifier: GPL-2.0
4962 +/**
4963 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4964 + **/
4965 +
4966 +#ifndef _HAILO_PCI_UTILS_H_
4967 +#define _HAILO_PCI_UTILS_H_
4968 +
4969 +#include <linux/version.h>
4970 +#include <linux/init.h>
4971 +#include <linux/module.h>
4972 +#include <linux/pci.h>
4973 +#include <linux/interrupt.h>
4974 +#include <linux/sched.h>
4975 +#include <linux/pagemap.h>
4976 +
4977 +#include "pcie.h"
4978 +
4979 +void hailo_pcie_clear_notification_wait_list(struct hailo_pcie_board *pBoard, struct file *filp);
4980 +
4981 +#endif /* _HAILO_PCI_UTILS_H_ */
4982 --- /dev/null
4983 +++ b/drivers/media/pci/hailo/utils/compact.h
4984 @@ -0,0 +1,153 @@
4985 +// SPDX-License-Identifier: GPL-2.0
4986 +/**
4987 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
4988 + **/
4989 +
4990 +#ifndef _HAILO_PCI_COMPACT_H_
4991 +#define _HAILO_PCI_COMPACT_H_
4992 +
4993 +#include <linux/version.h>
4994 +#include <linux/scatterlist.h>
4995 +#include <linux/vmalloc.h>
4996 +
4997 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0)
4998 +#define class_create_compat class_create
4999 +#else
5000 +#define class_create_compat(name) class_create(THIS_MODULE, name)
5001 +#endif
5002 +
5003 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
5004 +#define pci_printk(level, pdev, fmt, arg...) \
5005 + dev_printk(level, &(pdev)->dev, fmt, ##arg)
5006 +#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg)
5007 +#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg)
5008 +#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg)
5009 +#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg)
5010 +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg)
5011 +#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg)
5012 +#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
5013 +#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
5014 +#endif
5015 +
5016 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
5017 +#define get_user_pages_compact get_user_pages
5018 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
5019 +#define get_user_pages_compact(start, nr_pages, gup_flags, pages) \
5020 + get_user_pages(start, nr_pages, gup_flags, pages, NULL)
5021 +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 168)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
5022 +#define get_user_pages_compact(start, nr_pages, gup_flags, pages) \
5023 + get_user_pages(current, current->mm, start, nr_pages, gup_flags, pages, NULL)
5024 +#else
5025 +static inline long get_user_pages_compact(unsigned long start, unsigned long nr_pages,
5026 + unsigned int gup_flags, struct page **pages)
5027 +{
5028 + int write = !!((gup_flags & FOLL_WRITE) == FOLL_WRITE);
5029 + int force = !!((gup_flags & FOLL_FORCE) == FOLL_FORCE);
5030 + return get_user_pages(current, current->mm, start, nr_pages, write, force,
5031 + pages, NULL);
5032 +}
5033 +#endif
5034 +
5035 +#ifndef _LINUX_MMAP_LOCK_H
5036 +static inline void mmap_read_lock(struct mm_struct *mm)
5037 +{
5038 + down_read(&mm->mmap_sem);
5039 +}
5040 +
5041 +static inline void mmap_read_unlock(struct mm_struct *mm)
5042 +{
5043 + up_read(&mm->mmap_sem);
5044 +}
5045 +#endif /* _LINUX_MMAP_LOCK_H */
5046 +
5047 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)
5048 +#define sg_alloc_table_from_pages_segment_compat __sg_alloc_table_from_pages
5049 +#else
5050 +static inline struct scatterlist *sg_alloc_table_from_pages_segment_compat(struct sg_table *sgt,
5051 + struct page **pages, unsigned int n_pages, unsigned int offset,
5052 + unsigned long size, unsigned int max_segment,
5053 + struct scatterlist *prv, unsigned int left_pages,
5054 + gfp_t gfp_mask)
5055 +{
5056 + int res = 0;
5057 +
5058 + if (NULL != prv) {
5059 +        // prv not supported
5060 + return ERR_PTR(-EINVAL);
5061 + }
5062 +
5063 + if (0 != left_pages) {
5064 + // Left pages not supported
5065 + return ERR_PTR(-EINVAL);
5066 + }
5067 +
5068 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
5069 + res = sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, size, max_segment, gfp_mask);
5070 +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
5071 + res = __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, max_segment, gfp_mask);
5072 +#else
5073 + res = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, gfp_mask);
5074 +#endif
5075 + if (res < 0) {
5076 + return ERR_PTR(res);
5077 + }
5078 +
5079 + return sgt->sgl;
5080 +}
5081 +#endif
5082 +
5083 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 5, 0, 0 )
5084 +#define compatible_access_ok(a,b,c) access_ok(b, c)
5085 +#else
5086 +#define compatible_access_ok(a,b,c) access_ok(a, b, c)
5087 +#endif
5088 +
5089 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
5090 +#define PCI_DEVICE_DATA(vend, dev, data) \
5091 + .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
5092 + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
5093 + .driver_data = (kernel_ulong_t)(data)
5094 +#endif
5095 +
5096 +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
5097 +// On kernels < 4.12.0, kvmalloc and kvfree are not implemented. For simplicity, instead of implementing our own
5098 +// kvmalloc/kvfree, we just use vmalloc and vfree (this may reduce allocation/access performance, but the simplicity is worth it).
5099 +static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
5100 +{
5101 + (void)flags; //ignore
5102 + return vmalloc(n * size);
5103 +}
5104 +
5105 +#define kvfree vfree
5106 +#endif
5107 +
5108 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
5109 +static inline bool is_dma_capable(struct device *dev, dma_addr_t dma_addr, size_t size)
5110 +{
5111 +// Raspberry Pi kernels from 5.4.83 up to 5.5.0 already renamed bus_dma_mask -> bus_dma_limit (mainline renamed it in 5.5)
5112 +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (defined(HAILO_RASBERRY_PIE) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 83))
5113 + const u64 bus_dma_limit = dev->bus_dma_limit;
5114 +#else
5115 + const u64 bus_dma_limit = dev->bus_dma_mask;
5116 +#endif
5117 +
5118 + return (dma_addr <= min_not_zero(*dev->dma_mask, bus_dma_limit));
5119 +}
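+// min_not_zero() above picks the smaller of the two limits while treating 0
+// as "no limit": a zero bus_dma_limit means the bus imposes no extra cap, so
+// only *dev->dma_mask constrains the address.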
5120 +#else
5121 +static inline bool is_dma_capable(struct device *dev, dma_addr_t dma_addr, size_t size)
5122 +{
5123 + // Implementation of dma_capable from linux kernel
5124 + const u64 bus_dma_limit = (*dev->dma_mask + 1) & ~(*dev->dma_mask);
5125 + if (bus_dma_limit && size > bus_dma_limit) {
5126 + return false;
5127 + }
5128 +
5129 + if ((dma_addr | (dma_addr + size - 1)) & ~(*dev->dma_mask)) {
5130 + return false;
5131 + }
5132 +
5133 + return true;
5134 +}
5135 +#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
5136 +
5137 +#endif /* _HAILO_PCI_COMPACT_H_ */
5138 \ No newline at end of file
5139 --- /dev/null
5140 +++ b/drivers/media/pci/hailo/utils/fw_common.h
5141 @@ -0,0 +1,19 @@
5142 +// SPDX-License-Identifier: GPL-2.0
5143 +/**
5144 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5145 + **/
5146 +
5147 +#ifndef _HAILO_LINUX_COMMON_H_
5148 +#define _HAILO_LINUX_COMMON_H_
5149 +
5150 +#include "hailo_ioctl_common.h"
5151 +
5152 +struct hailo_notification_wait {
5153 + struct list_head notification_wait_list;
5154 + int tgid;
5155 + struct file* filp;
5156 + struct completion notification_completion;
5157 + bool is_disabled;
5158 +};
5159 +
5160 +#endif /* _HAILO_LINUX_COMMON_H_ */
5161 \ No newline at end of file
5162 --- /dev/null
5163 +++ b/drivers/media/pci/hailo/utils/logs.c
5164 @@ -0,0 +1,8 @@
5165 +// SPDX-License-Identifier: GPL-2.0
5166 +/**
5167 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5168 + **/
5169 +
5170 +#include "logs.h"
5171 +
5172 +int o_dbg = LOGLEVEL_NOTICE;
5173 --- /dev/null
5174 +++ b/drivers/media/pci/hailo/utils/logs.h
5175 @@ -0,0 +1,45 @@
5176 +// SPDX-License-Identifier: GPL-2.0
5177 +/**
5178 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5179 + **/
5180 +
5181 +#ifndef _COMMON_LOGS_H_
5182 +#define _COMMON_LOGS_H_
5183 +
5184 +#include <linux/kern_levels.h>
5185 +
5186 +// Should be used only by "module_param".
5187 +// Specify the current debug level for the logs
5188 +extern int o_dbg;
5189 +
5190 +
5191 +// Logging, same interface as dev_*, uses o_dbg to filter
5192 +// log messages
5193 +#define hailo_printk(level, dev, fmt, ...) \
5194 + do { \
5195 + int __level = (level[1] - '0'); \
5196 + if (__level <= o_dbg) { \
5197 + dev_printk((level), dev, fmt, ##__VA_ARGS__); \
5198 + } \
5199 + } while (0)
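+// The level[1] trick works because the KERN_* constants expand to a two-byte
+// string: an SOH byte followed by the ASCII level digit. For example
+// KERN_NOTICE is "\0015", so level[1] - '0' == 5 == LOGLEVEL_NOTICE, and the
+// message is printed only when that value is <= o_dbg.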
5200 +
5201 +#define hailo_emerg(board, fmt, ...) hailo_printk(KERN_EMERG, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5202 +#define hailo_alert(board, fmt, ...) hailo_printk(KERN_ALERT, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5203 +#define hailo_crit(board, fmt, ...) hailo_printk(KERN_CRIT, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5204 +#define hailo_err(board, fmt, ...) hailo_printk(KERN_ERR, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5205 +#define hailo_warn(board, fmt, ...) hailo_printk(KERN_WARNING, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5206 +#define hailo_notice(board, fmt, ...) hailo_printk(KERN_NOTICE, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5207 +#define hailo_info(board, fmt, ...) hailo_printk(KERN_INFO, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5208 +#define hailo_dbg(board, fmt, ...) hailo_printk(KERN_DEBUG, &(board)->pDev->dev, fmt, ##__VA_ARGS__)
5209 +
5210 +#define hailo_dev_emerg(dev, fmt, ...) hailo_printk(KERN_EMERG, dev, fmt, ##__VA_ARGS__)
5211 +#define hailo_dev_alert(dev, fmt, ...) hailo_printk(KERN_ALERT, dev, fmt, ##__VA_ARGS__)
5212 +#define hailo_dev_crit(dev, fmt, ...) hailo_printk(KERN_CRIT, dev, fmt, ##__VA_ARGS__)
5213 +#define hailo_dev_err(dev, fmt, ...) hailo_printk(KERN_ERR, dev, fmt, ##__VA_ARGS__)
5214 +#define hailo_dev_warn(dev, fmt, ...) hailo_printk(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
5215 +#define hailo_dev_notice(dev, fmt, ...) hailo_printk(KERN_NOTICE, dev, fmt, ##__VA_ARGS__)
5216 +#define hailo_dev_info(dev, fmt, ...) hailo_printk(KERN_INFO, dev, fmt, ##__VA_ARGS__)
5217 +#define hailo_dev_dbg(dev, fmt, ...) hailo_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
5218 +
5219 +
5220 +#endif //_COMMON_LOGS_H_
5221 \ No newline at end of file
5222 --- /dev/null
5223 +++ b/drivers/media/pci/hailo/vdma/ioctl.c
5224 @@ -0,0 +1,698 @@
5225 +// SPDX-License-Identifier: GPL-2.0
5226 +/**
5227 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5228 + **/
5229 +
5230 +#include "ioctl.h"
5231 +#include "memory.h"
5232 +#include "utils/logs.h"
5233 +#include "utils.h"
5234 +
5235 +#include <linux/slab.h>
5236 +#include <linux/uaccess.h>
5237 +
5238 +
5239 +long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
5240 +{
5241 + struct hailo_vdma_interrupts_enable_params input;
5242 + struct hailo_vdma_engine *engine = NULL;
5243 + u8 engine_index = 0;
5244 + u32 channels_bitmap = 0;
5245 +
5246 +    if (copy_from_user(&input, (void __user*)arg, sizeof(input))) {
5247 +        hailo_dev_err(controller->dev, "copy_from_user fail\n");
5248 +        return -EFAULT;
5249 + }
5250 +
5251 + // Validate params (ignoring engine_index >= controller->vdma_engines_count).
5252 + for_each_vdma_engine(controller, engine, engine_index) {
5253 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5254 + if (0 != (channels_bitmap & engine->enabled_channels)) {
5255 + hailo_dev_err(controller->dev, "Trying to enable channels that are already enabled\n");
5256 + return -EINVAL;
5257 + }
5258 + }
5259 +
5260 + for_each_vdma_engine(controller, engine, engine_index) {
5261 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5262 + hailo_vdma_engine_enable_channel_interrupts(engine, channels_bitmap,
5263 + input.enable_timestamps_measure);
5264 + hailo_vdma_update_interrupts_mask(controller, engine_index);
5265 + hailo_dev_info(controller->dev, "Enabled interrupts for engine %u, channels bitmap 0x%x\n",
5266 + engine_index, channels_bitmap);
5267 + }
5268 +
5269 + return 0;
5270 +}
5271 +
5272 +long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
5273 +{
5274 + struct hailo_vdma_interrupts_disable_params input;
5275 + struct hailo_vdma_engine *engine = NULL;
5276 + u8 engine_index = 0;
5277 + u32 channels_bitmap = 0;
5278 +
5279 +    if (copy_from_user(&input, (void __user*)arg, sizeof(input))) {
5280 +        hailo_dev_err(controller->dev, "copy_from_user fail\n");
5281 +        return -EFAULT;
5282 + }
5283 +
5284 + // Validate params (ignoring engine_index >= controller->vdma_engines_count).
5285 + for_each_vdma_engine(controller, engine, engine_index) {
5286 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5287 + if (channels_bitmap != (channels_bitmap & engine->enabled_channels)) {
5288 + hailo_dev_err(controller->dev, "Trying to disable channels that were not enabled\n");
5289 + return -EINVAL;
5290 + }
5291 + }
5292 +
5293 + for_each_vdma_engine(controller, engine, engine_index) {
5294 + channels_bitmap = input.channels_bitmap_per_engine[engine_index];
5295 + hailo_vdma_engine_interrupts_disable(controller, engine, engine_index,
5296 + channels_bitmap);
5297 + }
5298 +
5299 + // Wake up threads waiting
5300 + wake_up_interruptible_all(&controller->interrupts_wq);
5301 +
5302 + return 0;
5303 +}
5304 +
5305 +static bool got_interrupt(struct hailo_vdma_controller *controller,
5306 + u32 channels_bitmap_per_engine[MAX_VDMA_ENGINES])
5307 +{
5308 + struct hailo_vdma_engine *engine = NULL;
5309 + u8 engine_index = 0;
5310 + for_each_vdma_engine(controller, engine, engine_index) {
5311 + if (hailo_vdma_engine_got_interrupt(engine,
5312 + channels_bitmap_per_engine[engine_index])) {
5313 + return true;
5314 + }
5315 + }
5316 + return false;
5317 +}
5318 +
5319 +static void transfer_done(struct hailo_ongoing_transfer *transfer, void *opaque)
5320 +{
5321 + u8 i = 0;
5322 + struct hailo_vdma_controller *controller = (struct hailo_vdma_controller *)opaque;
5323 + for (i = 0; i < transfer->buffers_count; i++) {
5324 + struct hailo_vdma_buffer *mapped_buffer = (struct hailo_vdma_buffer *)transfer->buffers[i].opaque;
5325 + hailo_vdma_buffer_sync_cyclic(controller, mapped_buffer, HAILO_SYNC_FOR_CPU,
5326 + transfer->buffers[i].offset, transfer->buffers[i].size);
5327 + }
5328 +}
5329 +
5330 +long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
5331 + struct semaphore *mutex, bool *should_up_board_mutex)
5332 +{
5333 + long err = 0;
5334 + struct hailo_vdma_interrupts_wait_params params = {0};
5335 + struct hailo_vdma_engine *engine = NULL;
5336 + bool bitmap_not_empty = false;
5337 + u8 engine_index = 0;
5338 + u32 irq_bitmap = 0;
5339 + unsigned long irq_saved_flags = 0;
5340 +
5341 +    if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5342 +        hailo_dev_err(controller->dev, "HAILO_VDMA_INTERRUPTS_WAIT, copy_from_user fail\n");
5343 +        return -EFAULT;
5344 + }
5345 +
5346 + // We don't need to validate that channels_bitmap_per_engine are enabled -
5347 + // If the channel is not enabled we just return an empty interrupts list.
5348 +
5349 + // Validate params (ignoring engine_index >= controller->vdma_engines_count).
5350 +    // It is ok to wait on a disabled channel - the wait will just exit.
5351 + for_each_vdma_engine(controller, engine, engine_index) {
5352 + if (0 != params.channels_bitmap_per_engine[engine_index]) {
5353 + bitmap_not_empty = true;
5354 + }
5355 + }
5356 + if (!bitmap_not_empty) {
5357 + hailo_dev_err(controller->dev, "Got an empty bitmap for wait interrupts\n");
5358 + return -EINVAL;
5359 + }
5360 +
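+    // Drop the board mutex while sleeping so other ioctls can run - in
+    // particular interrupts_disable, which is what wakes this wait queue when
+    // a channel is torn down. The mutex is re-taken after the wakeup.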
5361 + up(mutex);
5362 + err = wait_event_interruptible(controller->interrupts_wq,
5363 + got_interrupt(controller, params.channels_bitmap_per_engine));
5364 + if (err < 0) {
5365 + hailo_dev_info(controller->dev,
5366 + "wait channel interrupts failed with err=%ld (process was interrupted or killed)\n", err);
5367 + *should_up_board_mutex = false;
5368 + return err;
5369 + }
5370 +
5371 + if (down_interruptible(mutex)) {
5372 + hailo_dev_info(controller->dev, "down_interruptible error (process was interrupted or killed)\n");
5373 + *should_up_board_mutex = false;
5374 + return -ERESTARTSYS;
5375 + }
5376 +
5377 + params.channels_count = 0;
5378 + for_each_vdma_engine(controller, engine, engine_index) {
5379 +
5380 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
5381 + irq_bitmap = hailo_vdma_engine_read_interrupts(engine,
5382 + params.channels_bitmap_per_engine[engine->index]);
5383 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
5384 +
5385 + err = hailo_vdma_engine_fill_irq_data(&params, engine, irq_bitmap,
5386 + transfer_done, controller);
5387 + if (err < 0) {
5388 + hailo_dev_err(controller->dev, "Failed fill irq data %ld", err);
5389 + return err;
5390 + }
5391 + }
5392 +
5393 + if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5394 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5395 +        return -EFAULT;
5396 + }
5397 +
5398 + return 0;
5399 +}
5400 +
5401 +static uintptr_t hailo_get_next_vdma_handle(struct hailo_vdma_file_context *context)
5402 +{
5403 +    // Note: the kernel right-shifts the 'offset' param of the user-space mmap call by PAGE_SHIFT bits and
5404 +    // stores the result in 'vm_area_struct.vm_pgoff'. We pass the desc_handle to mmap in the offset param,
5405 +    // so we pre-shift the handle left by PAGE_SHIFT here to counter that. See also the mmap fop.
5406 + uintptr_t next_handle = 0;
5407 + next_handle = atomic_inc_return(&context->last_vdma_handle);
5408 + return (next_handle << PAGE_SHIFT);
5409 +}
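+/*
+ * Example of the round trip (hypothetical user-space call):
+ *     mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, handle);
+ * The kernel stores vm_pgoff = handle >> PAGE_SHIFT, which recovers exactly
+ * the counter value produced by atomic_inc_return() above.
+ */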
5410 +
5411 +long hailo_vdma_buffer_map_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5412 + unsigned long arg)
5413 +{
5414 + struct hailo_vdma_buffer_map_params buf_info;
5415 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5416 + enum dma_data_direction direction = DMA_NONE;
5417 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
5418 +
5419 + if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
5420 + hailo_dev_err(controller->dev, "copy from user fail\n");
5421 + return -EFAULT;
5422 + }
5423 +
5424 + hailo_dev_info(controller->dev, "address %px tgid %d size: %zu\n",
5425 + buf_info.user_address, current->tgid, buf_info.size);
5426 +
5427 + direction = get_dma_direction(buf_info.data_direction);
5428 + if (DMA_NONE == direction) {
5429 + hailo_dev_err(controller->dev, "invalid data direction %d\n", buf_info.data_direction);
5430 + return -EINVAL;
5431 + }
5432 +
5433 + low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, buf_info.allocated_buffer_handle);
5434 +
5435 + mapped_buffer = hailo_vdma_buffer_map(controller->dev,
5436 + buf_info.user_address, buf_info.size, direction, low_memory_buffer);
5437 + if (IS_ERR(mapped_buffer)) {
5438 + hailo_dev_err(controller->dev, "failed map buffer %px\n",
5439 + buf_info.user_address);
5440 + return PTR_ERR(mapped_buffer);
5441 + }
5442 +
5443 + mapped_buffer->handle = atomic_inc_return(&context->last_vdma_user_buffer_handle);
5444 + buf_info.mapped_handle = mapped_buffer->handle;
5445 + if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
5446 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5447 + hailo_vdma_buffer_put(mapped_buffer);
5448 + return -EFAULT;
5449 + }
5450 +
5451 + list_add(&mapped_buffer->mapped_user_buffer_list, &context->mapped_user_buffer_list);
5452 + hailo_dev_info(controller->dev, "buffer %px (handle %zu) is mapped\n",
5453 + buf_info.user_address, buf_info.mapped_handle);
5454 + return 0;
5455 +}
5456 +
5457 +long hailo_vdma_buffer_unmap_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5458 + unsigned long arg)
5459 +{
5460 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5461 + struct hailo_vdma_buffer_unmap_params buffer_unmap_params;
5462 +
5463 + if (copy_from_user(&buffer_unmap_params, (void __user*)arg, sizeof(buffer_unmap_params))) {
5464 + hailo_dev_err(controller->dev, "copy from user fail\n");
5465 + return -EFAULT;
5466 + }
5467 +
5468 + hailo_dev_info(controller->dev, "unmap user buffer handle %zu\n", buffer_unmap_params.mapped_handle);
5469 +
5470 + mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, buffer_unmap_params.mapped_handle);
5471 + if (mapped_buffer == NULL) {
5472 + hailo_dev_warn(controller->dev, "buffer handle %zu not found\n", buffer_unmap_params.mapped_handle);
5473 + return -EINVAL;
5474 + }
5475 +
5476 + list_del(&mapped_buffer->mapped_user_buffer_list);
5477 + hailo_vdma_buffer_put(mapped_buffer);
5478 + return 0;
5479 +}
5480 +
5481 +long hailo_vdma_buffer_sync_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
5482 +{
5483 + struct hailo_vdma_buffer_sync_params sync_info = {};
5484 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5485 +
5486 + if (copy_from_user(&sync_info, (void __user*)arg, sizeof(sync_info))) {
5487 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5488 + return -EFAULT;
5489 + }
5490 +
5491 + if (!(mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, sync_info.handle))) {
5492 + hailo_dev_err(controller->dev, "buffer handle %zu doesn't exist\n", sync_info.handle);
5493 + return -EINVAL;
5494 + }
5495 +
5496 + if ((sync_info.sync_type != HAILO_SYNC_FOR_CPU) && (sync_info.sync_type != HAILO_SYNC_FOR_DEVICE)) {
5497 + hailo_dev_err(controller->dev, "Invalid sync_type given for vdma buffer sync.\n");
5498 + return -EINVAL;
5499 + }
5500 +
5501 + if (sync_info.offset + sync_info.count > mapped_buffer->size) {
5502 + hailo_dev_err(controller->dev, "Invalid offset/count given for vdma buffer sync. offset %zu count %zu buffer size %u\n",
5503 + sync_info.offset, sync_info.count, mapped_buffer->size);
5504 + return -EINVAL;
5505 + }
5506 +
5507 + hailo_vdma_buffer_sync(controller, mapped_buffer, sync_info.sync_type,
5508 + sync_info.offset, sync_info.count);
5509 + return 0;
5510 +}
5511 +
5512 +long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5513 + unsigned long arg)
5514 +{
5515 + struct hailo_desc_list_create_params params;
5516 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5517 + uintptr_t next_handle = 0;
5518 + long err = -EINVAL;
5519 +
5520 + if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5521 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5522 + return -EFAULT;
5523 + }
5524 +
5525 + if (params.is_circular && !is_powerof2(params.desc_count)) {
5526 + hailo_dev_err(controller->dev, "Invalid desc count given : %zu , circular descriptors count must be power of 2\n",
5527 + params.desc_count);
5528 + return -EINVAL;
5529 + }
5530 +
5531 + if (!is_powerof2(params.desc_page_size)) {
5532 + hailo_dev_err(controller->dev, "Invalid desc page size given : %u\n",
5533 + params.desc_page_size);
5534 + return -EINVAL;
5535 + }
5536 +
5537 + hailo_dev_info(controller->dev,
5538 + "Create desc list desc_count: %zu desc_page_size: %u\n",
5539 + params.desc_count, params.desc_page_size);
5540 +
5541 + descriptors_buffer = kzalloc(sizeof(*descriptors_buffer), GFP_KERNEL);
5542 + if (NULL == descriptors_buffer) {
5543 + hailo_dev_err(controller->dev, "Failed to allocate buffer for descriptors list struct\n");
5544 + return -ENOMEM;
5545 + }
5546 +
5547 + next_handle = hailo_get_next_vdma_handle(context);
5548 +
5549 + err = hailo_desc_list_create(controller->dev, params.desc_count,
5550 + params.desc_page_size, next_handle, params.is_circular,
5551 + descriptors_buffer);
5552 + if (err < 0) {
5553 + hailo_dev_err(controller->dev, "failed to allocate descriptors buffer\n");
5554 + kfree(descriptors_buffer);
5555 + return err;
5556 + }
5557 +
5558 + list_add(&descriptors_buffer->descriptors_buffer_list, &context->descriptors_buffer_list);
5559 +
5560 + // Note: The physical address is required for CONTEXT_SWITCH firmware controls
5561 + BUILD_BUG_ON(sizeof(params.dma_address) < sizeof(descriptors_buffer->dma_address));
5562 + params.dma_address = descriptors_buffer->dma_address;
5563 + params.desc_handle = descriptors_buffer->handle;
5564 +
5565 +    if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5566 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5567 + list_del(&descriptors_buffer->descriptors_buffer_list);
5568 + hailo_desc_list_release(controller->dev, descriptors_buffer);
5569 + kfree(descriptors_buffer);
5570 + return -EFAULT;
5571 + }
5572 +
5573 + hailo_dev_info(controller->dev, "Created desc list, handle 0x%llu\n",
5574 + (u64)params.desc_handle);
5575 + return 0;
5576 +}
5577 +
5578 +long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5579 + unsigned long arg)
5580 +{
5581 + struct hailo_desc_list_release_params params;
5582 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5583 +
5584 + if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5585 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5586 + return -EFAULT;
5587 + }
5588 +
5589 + descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.desc_handle);
5590 + if (descriptors_buffer == NULL) {
5591 + hailo_dev_warn(controller->dev, "not found desc handle %llu\n", (unsigned long long)params.desc_handle);
5592 + return -EINVAL;
5593 + }
5594 +
5595 + list_del(&descriptors_buffer->descriptors_buffer_list);
5596 + hailo_desc_list_release(controller->dev, descriptors_buffer);
5597 + kfree(descriptors_buffer);
5598 + return 0;
5599 +}
5600 +
5601 +long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5602 + unsigned long arg)
5603 +{
5604 + struct hailo_desc_list_bind_vdma_buffer_params configure_info;
5605 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5606 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5607 + struct hailo_vdma_mapped_transfer_buffer transfer_buffer = {0};
5608 +
5609 + if (copy_from_user(&configure_info, (void __user*)arg, sizeof(configure_info))) {
5610 + hailo_dev_err(controller->dev, "copy from user fail\n");
5611 + return -EFAULT;
5612 + }
5613 + hailo_dev_info(controller->dev, "config buffer_handle=%zu desc_handle=%llu starting_desc=%u\n",
5614 + configure_info.buffer_handle, (u64)configure_info.desc_handle, configure_info.starting_desc);
5615 +
5616 + mapped_buffer = hailo_vdma_find_mapped_user_buffer(context, configure_info.buffer_handle);
5617 + descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, configure_info.desc_handle);
5618 + if (mapped_buffer == NULL || descriptors_buffer == NULL) {
5619 + hailo_dev_err(controller->dev, "invalid user/descriptors buffer\n");
5620 + return -EFAULT;
5621 + }
5622 +
5623 + if (configure_info.buffer_size > mapped_buffer->size) {
5624 + hailo_dev_err(controller->dev, "invalid buffer size. \n");
5625 + return -EFAULT;
5626 + }
5627 +
5628 + transfer_buffer.sg_table = &mapped_buffer->sg_table;
5629 + transfer_buffer.size = configure_info.buffer_size;
5630 + transfer_buffer.offset = configure_info.buffer_offset;
5631 +
5632 + return hailo_vdma_program_descriptors_list(
5633 + controller->hw,
5634 + &descriptors_buffer->desc_list,
5635 + configure_info.starting_desc,
5636 + &transfer_buffer,
5637 + configure_info.channel_index
5638 + );
5639 +}
5640 +
5641 +long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5642 + unsigned long arg)
5643 +{
5644 + struct hailo_allocate_low_memory_buffer_params buf_info = {0};
5645 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
5646 + long err = -EINVAL;
5647 +
5648 + if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
5649 + hailo_dev_err(controller->dev, "copy from user fail\n");
5650 + return -EFAULT;
5651 + }
5652 +
5653 + low_memory_buffer = kzalloc(sizeof(*low_memory_buffer), GFP_KERNEL);
5654 + if (NULL == low_memory_buffer) {
5655 + hailo_dev_err(controller->dev, "memory alloc failed\n");
5656 + return -ENOMEM;
5657 + }
5658 +
5659 + err = hailo_vdma_low_memory_buffer_alloc(buf_info.buffer_size, low_memory_buffer);
5660 + if (err < 0) {
5661 + kfree(low_memory_buffer);
5662 + hailo_dev_err(controller->dev, "failed allocating buffer from driver\n");
5663 + return err;
5664 + }
5665 +
5666 + // Get handle for allocated buffer
5667 + low_memory_buffer->handle = hailo_get_next_vdma_handle(context);
5668 +
5669 + list_add(&low_memory_buffer->vdma_low_memory_buffer_list, &context->vdma_low_memory_buffer_list);
5670 +
5671 + buf_info.buffer_handle = low_memory_buffer->handle;
5672 + if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
5673 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5674 + list_del(&low_memory_buffer->vdma_low_memory_buffer_list);
5675 + hailo_vdma_low_memory_buffer_free(low_memory_buffer);
5676 + kfree(low_memory_buffer);
5677 + return -EFAULT;
5678 + }
5679 +
5680 + return 0;
5681 +}
5682 +
5683 +long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5684 + unsigned long arg)
5685 +{
5686 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
5687 + struct hailo_free_low_memory_buffer_params params = {0};
5688 +
5689 + if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5690 + hailo_dev_err(controller->dev, "copy from user fail\n");
5691 + return -EFAULT;
5692 + }
5693 +
5694 + low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, params.buffer_handle);
5695 + if (NULL == low_memory_buffer) {
5696 + hailo_dev_warn(controller->dev, "vdma buffer handle %lx not found\n", params.buffer_handle);
5697 + return -EINVAL;
5698 + }
5699 +
5700 + list_del(&low_memory_buffer->vdma_low_memory_buffer_list);
5701 + hailo_vdma_low_memory_buffer_free(low_memory_buffer);
5702 + kfree(low_memory_buffer);
5703 + return 0;
5704 +}
5705 +
5706 +long hailo_mark_as_in_use(struct hailo_vdma_controller *controller, unsigned long arg, struct file *filp)
5707 +{
5708 + struct hailo_mark_as_in_use_params params = {0};
5709 +
5710 +    // If the device is used by this same FD, report in_use = false - from this FD's perspective it is free
5711 + if (filp == controller->used_by_filp) {
5712 + params.in_use = false;
5713 + } else if (NULL != controller->used_by_filp) {
5714 + params.in_use = true;
5715 + } else {
5716 + controller->used_by_filp = filp;
5717 + params.in_use = false;
5718 + }
5719 +
5720 + if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5721 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5722 + return -EFAULT;
5723 + }
5724 +
5725 + return 0;
5726 +}
5727 +
5728 +long hailo_vdma_continuous_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
5729 +{
5730 + struct hailo_allocate_continuous_buffer_params buf_info = {0};
5731 + struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
5732 + long err = -EINVAL;
5733 + size_t aligned_buffer_size = 0;
5734 +
5735 + if (copy_from_user(&buf_info, (void __user*)arg, sizeof(buf_info))) {
5736 + hailo_dev_err(controller->dev, "copy from user fail\n");
5737 + return -EFAULT;
5738 + }
5739 +
5740 + continuous_buffer = kzalloc(sizeof(*continuous_buffer), GFP_KERNEL);
5741 + if (NULL == continuous_buffer) {
5742 + hailo_dev_err(controller->dev, "memory alloc failed\n");
5743 + return -ENOMEM;
5744 + }
5745 +
5746 + // We use PAGE_ALIGN to support mmap
5747 + aligned_buffer_size = PAGE_ALIGN(buf_info.buffer_size);
5748 + err = hailo_vdma_continuous_buffer_alloc(controller->dev, aligned_buffer_size, continuous_buffer);
5749 + if (err < 0) {
5750 + kfree(continuous_buffer);
5751 + return err;
5752 + }
5753 +
5754 + continuous_buffer->handle = hailo_get_next_vdma_handle(context);
5755 + list_add(&continuous_buffer->continuous_buffer_list, &context->continuous_buffer_list);
5756 +
5757 + buf_info.buffer_handle = continuous_buffer->handle;
5758 + buf_info.dma_address = continuous_buffer->dma_address;
5759 + if (copy_to_user((void __user*)arg, &buf_info, sizeof(buf_info))) {
5760 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5761 + list_del(&continuous_buffer->continuous_buffer_list);
5762 + hailo_vdma_continuous_buffer_free(controller->dev, continuous_buffer);
5763 + kfree(continuous_buffer);
5764 + return -EFAULT;
5765 + }
5766 +
5767 + return 0;
5768 +}
5769 +
5770 +long hailo_vdma_continuous_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg)
5771 +{
5772 + struct hailo_free_continuous_buffer_params params;
5773 + struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
5774 +
5775 + if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5776 + hailo_dev_err(controller->dev, "copy from user fail\n");
5777 + return -EFAULT;
5778 + }
5779 +
5780 + continuous_buffer = hailo_vdma_find_continuous_buffer(context, params.buffer_handle);
5781 + if (NULL == continuous_buffer) {
5782 + hailo_dev_warn(controller->dev, "vdma buffer handle %lx not found\n", params.buffer_handle);
5783 + return -EINVAL;
5784 + }
5785 +
5786 + list_del(&continuous_buffer->continuous_buffer_list);
5787 + hailo_vdma_continuous_buffer_free(controller->dev, continuous_buffer);
5788 + kfree(continuous_buffer);
5789 + return 0;
5790 +}
5791 +
5792 +long hailo_vdma_interrupts_read_timestamps_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
5793 +{
5794 + struct hailo_vdma_interrupts_read_timestamp_params *params = &controller->read_interrupt_timestamps_params;
5795 + struct hailo_vdma_engine *engine = NULL;
5796 + int err = -EINVAL;
5797 +
5798 + hailo_dev_dbg(controller->dev, "Start read interrupt timestamps ioctl\n");
5799 +
5800 + if (copy_from_user(params, (void __user*)arg, sizeof(*params))) {
5801 + hailo_dev_err(controller->dev, "copy_from_user fail\n");
5802 +        return -EFAULT;
5803 + }
5804 +
5805 + if (params->engine_index >= controller->vdma_engines_count) {
5806 + hailo_dev_err(controller->dev, "Invalid engine %u", params->engine_index);
5807 + return -EINVAL;
5808 + }
5809 + engine = &controller->vdma_engines[params->engine_index];
5810 +
5811 + err = hailo_vdma_engine_read_timestamps(engine, params);
5812 + if (err < 0) {
5813 + hailo_dev_err(controller->dev, "Failed read engine interrupts for %u:%u",
5814 + params->engine_index, params->channel_index);
5815 + return err;
5816 + }
5817 +
5818 + if (copy_to_user((void __user*)arg, params, sizeof(*params))) {
5819 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5820 +        return -EFAULT;
5821 + }
5822 +
5823 + return 0;
5824 +}
5825 +
5826 +long hailo_vdma_launch_transfer_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5827 + unsigned long arg)
5828 +{
5829 + struct hailo_vdma_launch_transfer_params params;
5830 + struct hailo_vdma_engine *engine = NULL;
5831 + struct hailo_vdma_channel *channel = NULL;
5832 + struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
5833 + struct hailo_vdma_mapped_transfer_buffer mapped_transfer_buffers[ARRAY_SIZE(params.buffers)] = {0};
5834 + int ret = -EINVAL;
5835 + u8 i = 0;
5836 +
5837 + if (copy_from_user(&params, (void __user*)arg, sizeof(params))) {
5838 + hailo_dev_err(controller->dev, "copy from user fail\n");
5839 + return -EFAULT;
5840 + }
5841 +
5842 + if (params.engine_index >= controller->vdma_engines_count) {
5843 + hailo_dev_err(controller->dev, "Invalid engine %u", params.engine_index);
5844 + return -EINVAL;
5845 + }
5846 + engine = &controller->vdma_engines[params.engine_index];
5847 +
5848 + if (params.channel_index >= ARRAY_SIZE(engine->channels)) {
5849 + hailo_dev_err(controller->dev, "Invalid channel %u", params.channel_index);
5850 + return -EINVAL;
5851 + }
5852 + channel = &engine->channels[params.channel_index];
5853 +
5854 + if (params.buffers_count > ARRAY_SIZE(params.buffers)) {
5855 + hailo_dev_err(controller->dev, "too many buffers %u\n", params.buffers_count);
5856 + return -EINVAL;
5857 + }
5858 +
5859 + descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.desc_handle);
5860 + if (descriptors_buffer == NULL) {
5861 + hailo_dev_err(controller->dev, "invalid descriptors list handle\n");
5862 + return -EFAULT;
5863 + }
5864 +
5865 + for (i = 0; i < params.buffers_count; i++) {
5866 + struct hailo_vdma_buffer *mapped_buffer =
5867 + hailo_vdma_find_mapped_user_buffer(context, params.buffers[i].mapped_buffer_handle);
5868 + if (mapped_buffer == NULL) {
5869 + hailo_dev_err(controller->dev, "invalid user buffer\n");
5870 + return -EFAULT;
5871 + }
5872 +
5873 + if (params.buffers[i].size > mapped_buffer->size) {
5874 + hailo_dev_err(controller->dev, "Syncing size %u while buffer size is %u\n",
5875 + params.buffers[i].size, mapped_buffer->size);
5876 + return -EINVAL;
5877 + }
5878 +
5879 + if (params.buffers[i].offset > mapped_buffer->size) {
5880 + hailo_dev_err(controller->dev, "Syncing offset %u while buffer size is %u\n",
5881 + params.buffers[i].offset, mapped_buffer->size);
5882 + return -EINVAL;
5883 + }
5884 +
5885 +        // Syncing the buffer to the device changes its ownership from host to device.
5886 + // We sync on D2H as well if the user owns the buffer since the buffer might have been changed by
5887 + // the host between the time it was mapped and the current async transfer.
5888 + hailo_vdma_buffer_sync_cyclic(controller, mapped_buffer, HAILO_SYNC_FOR_DEVICE,
5889 + params.buffers[i].offset, params.buffers[i].size);
5890 +
5891 + mapped_transfer_buffers[i].sg_table = &mapped_buffer->sg_table;
5892 + mapped_transfer_buffers[i].size = params.buffers[i].size;
5893 + mapped_transfer_buffers[i].offset = params.buffers[i].offset;
5894 + mapped_transfer_buffers[i].opaque = mapped_buffer;
5895 + }
5896 +
5897 + ret = hailo_vdma_launch_transfer(
5898 + controller->hw,
5899 + channel,
5900 + &descriptors_buffer->desc_list,
5901 + params.starting_desc,
5902 + params.buffers_count,
5903 + mapped_transfer_buffers,
5904 + params.should_bind,
5905 + params.first_interrupts_domain,
5906 + params.last_interrupts_domain,
5907 + params.is_debug
5908 + );
5909 + if (ret < 0) {
5910 + hailo_dev_err(controller->dev, "Failed launch transfer %d\n", ret);
5911 + return ret;
5912 + }
5913 +
5914 + params.descs_programed = ret;
5915 +
5916 + if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
5917 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
5918 + return -EFAULT;
5919 + }
5920 +
5921 + return 0;
5922 +}
5923 \ No newline at end of file
5924 --- /dev/null
5925 +++ b/drivers/media/pci/hailo/vdma/ioctl.h
5926 @@ -0,0 +1,37 @@
5927 +// SPDX-License-Identifier: GPL-2.0
5928 +/**
5929 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5930 + **/
5931 +
5932 +#ifndef _HAILO_VDMA_IOCTL_H_
5933 +#define _HAILO_VDMA_IOCTL_H_
5934 +
5935 +#include "vdma/vdma.h"
5936 +
5937 +long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
5938 +long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
5939 +long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
5940 + struct semaphore *mutex, bool *should_up_board_mutex);
5941 +
5942 +long hailo_vdma_buffer_map_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5943 +long hailo_vdma_buffer_unmap_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long handle);
5944 +long hailo_vdma_buffer_sync_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5945 +
5946 +long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5947 +long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5948 +long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5949 +
5950 +long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5951 +long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5952 +
5953 +long hailo_mark_as_in_use(struct hailo_vdma_controller *controller, unsigned long arg, struct file *filp);
5954 +
5955 +long hailo_vdma_continuous_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5956 +long hailo_vdma_continuous_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
5957 +
5958 +long hailo_vdma_interrupts_read_timestamps_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
5959 +
5960 +long hailo_vdma_launch_transfer_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
5961 + unsigned long arg);
5962 +
5963 +#endif /* _HAILO_VDMA_IOCTL_H_ */
5964 \ No newline at end of file
5965 --- /dev/null
5966 +++ b/drivers/media/pci/hailo/vdma/memory.c
5967 @@ -0,0 +1,551 @@
5968 +// SPDX-License-Identifier: GPL-2.0
5969 +/**
5970 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
5971 + **/
5972 +
5973 +#define pr_fmt(fmt) "hailo: " fmt
5974 +
5975 +#include "memory.h"
5976 +#include "utils/compact.h"
5977 +
5978 +#include <linux/slab.h>
5979 +#include <linux/scatterlist.h>
5980 +#include <linux/sched.h>
5981 +
5982 +
5983 +#define SGL_MAX_SEGMENT_SIZE (0x10000)
5984 +// See linux/mm.h
5985 +#define MMIO_AND_NO_PAGES_VMA_MASK (VM_IO | VM_PFNMAP)
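+// VM_IO marks a VMA as a memory-mapped I/O region and VM_PFNMAP marks one
+// mapped by raw page-frame numbers with no 'struct page' backing; a VMA with
+// both flags set cannot be pinned with get_user_pages(), hence the separate
+// map_mmio_address() path below.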
5986 +
5987 +static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
5988 + struct sg_table *sgt);
5989 +static int prepare_sg_table(struct sg_table *sg_table, void __user* user_address, u32 size,
5990 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
5991 +static void clear_sg_table(struct sg_table *sgt);
5992 +
5993 +struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
5994 + void __user *user_address, size_t size, enum dma_data_direction direction,
5995 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
5996 +{
5997 + int ret = -EINVAL;
5998 + struct hailo_vdma_buffer *mapped_buffer = NULL;
5999 + struct sg_table sgt = {0};
6000 + struct vm_area_struct *vma = NULL;
6001 + bool is_mmio = false;
6002 +
6003 + mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
6004 + if (NULL == mapped_buffer) {
6005 + dev_err(dev, "memory alloc failed\n");
6006 + ret = -ENOMEM;
6007 + goto cleanup;
6008 + }
6009 +
6010 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING)) {
6011 + vma = find_vma(current->mm, (uintptr_t)user_address);
6012 + if (NULL == vma) {
6013 + dev_err(dev, "no vma for virt_addr/size = 0x%08lx/0x%08zx\n", (uintptr_t)user_address, size);
6014 + ret = -EFAULT;
6015 + goto cleanup;
6016 + }
6017 + }
6018 +
6019 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) &&
6020 + (MMIO_AND_NO_PAGES_VMA_MASK == (vma->vm_flags & MMIO_AND_NO_PAGES_VMA_MASK))) {
6021 + // user_address represents memory mapped I/O and isn't backed by 'struct page' (only by pure pfn)
6022 + if (NULL != low_mem_driver_allocated_buffer) {
6023 +            // low_mem_driver_allocated_buffer buffers are backed by regular 'struct page' addresses, just in low memory
6024 + dev_err(dev, "low_mem_driver_allocated_buffer shouldn't be provided with an mmio address\n");
6025 + ret = -EINVAL;
6026 + goto free_buffer_struct;
6027 + }
6028 +
6029 + ret = map_mmio_address(user_address, size, vma, &sgt);
6030 + if (ret < 0) {
6031 + dev_err(dev, "failed to map mmio address %d\n", ret);
6032 + goto free_buffer_struct;
6033 + }
6034 +
6035 + is_mmio = true;
6036 + } else {
6037 + // user_address is a standard 'struct page' backed memory address
6038 + ret = prepare_sg_table(&sgt, user_address, size, low_mem_driver_allocated_buffer);
6039 + if (ret < 0) {
6040 + dev_err(dev, "failed to set sg list for user buffer %d\n", ret);
6041 + goto free_buffer_struct;
6042 + }
6043 + sgt.nents = dma_map_sg(dev, sgt.sgl, sgt.orig_nents, direction);
6044 + if (0 == sgt.nents) {
6045 + dev_err(dev, "failed to map sg list for user buffer\n");
6046 + ret = -ENXIO;
6047 + goto clear_sg_table;
6048 + }
6049 + }
6050 +
6051 + kref_init(&mapped_buffer->kref);
6052 + mapped_buffer->device = dev;
6053 + mapped_buffer->user_address = user_address;
6054 + mapped_buffer->size = size;
6055 + mapped_buffer->data_direction = direction;
6056 + mapped_buffer->sg_table = sgt;
6057 + mapped_buffer->is_mmio = is_mmio;
6058 +
6059 + return mapped_buffer;
6060 +
6061 +clear_sg_table:
6062 + clear_sg_table(&sgt);
6063 +free_buffer_struct:
6064 + kfree(mapped_buffer);
6065 +cleanup:
6066 + return ERR_PTR(ret);
6067 +}
6068 +
6069 +static void unmap_buffer(struct kref *kref)
6070 +{
6071 + struct hailo_vdma_buffer *buf = container_of(kref, struct hailo_vdma_buffer, kref);
6072 +
6073 + if (!buf->is_mmio) {
6074 + dma_unmap_sg(buf->device, buf->sg_table.sgl, buf->sg_table.orig_nents, buf->data_direction);
6075 + }
6076 +
6077 + clear_sg_table(&buf->sg_table);
6078 + kfree(buf);
6079 +}
6080 +
6081 +void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf)
6082 +{
6083 + kref_get(&buf->kref);
6084 +}
6085 +
6086 +void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf)
6087 +{
6088 + kref_put(&buf->kref, unmap_buffer);
6089 +}
6090 +
6091 +static void vdma_sync_entire_buffer(struct hailo_vdma_controller *controller,
6092 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type)
6093 +{
6094 + if (sync_type == HAILO_SYNC_FOR_CPU) {
6095 +        dma_sync_sg_for_cpu(controller->dev, mapped_buffer->sg_table.sgl, mapped_buffer->sg_table.orig_nents,
6096 +            mapped_buffer->data_direction);
6097 +    } else {
6098 +        dma_sync_sg_for_device(controller->dev, mapped_buffer->sg_table.sgl, mapped_buffer->sg_table.orig_nents,
6099 +            mapped_buffer->data_direction);
6100 + }
6101 +}
6102 +
6103 +typedef void (*dma_sync_single_callback)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
6104 +// Sync 'size' bytes starting at 'offset'
6105 +static void vdma_sync_buffer_interval(struct hailo_vdma_controller *controller,
6106 + struct hailo_vdma_buffer *mapped_buffer,
6107 + size_t offset, size_t size, enum hailo_vdma_buffer_sync_type sync_type)
6108 +{
6109 + size_t sync_start_offset = offset;
6110 + size_t sync_end_offset = offset + size;
6111 + dma_sync_single_callback dma_sync_single = (sync_type == HAILO_SYNC_FOR_CPU) ?
6112 + dma_sync_single_for_cpu :
6113 + dma_sync_single_for_device;
6114 + struct scatterlist* sg_entry = NULL;
6115 + size_t current_iter_offset = 0;
6116 + int i = 0;
6117 +
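+ // Note: each overlapping sg entry is synced in full; syncing more than the
+ // requested range is harmless, while syncing less would leave stale data.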
6118 + for_each_sg(mapped_buffer->sg_table.sgl, sg_entry, mapped_buffer->sg_table.nents, i) {
6119 + // Check if the intervals [current_iter_offset, current_iter_offset + sg_dma_len(sg_entry)] and
6120 + // [sync_start_offset, sync_end_offset] have any intersection. If offset isn't at the start of an sg_entry, we still want to sync it.
6121 + if (max(sync_start_offset, current_iter_offset) <= min(sync_end_offset, current_iter_offset + sg_dma_len(sg_entry))) {
6122 + dma_sync_single(controller->dev, sg_dma_address(sg_entry), sg_dma_len(sg_entry),
6123 + mapped_buffer->data_direction);
6124 + }
6125 +
6126 + current_iter_offset += sg_dma_len(sg_entry);
6127 + }
6128 +}
6129 +
6130 +void hailo_vdma_buffer_sync(struct hailo_vdma_controller *controller,
6131 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6132 + size_t offset, size_t size)
6133 +{
6134 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) && mapped_buffer->is_mmio) {
6135 + // MMIO buffers don't need to be sync'd
6136 + return;
6137 + }
6138 +
6139 + if ((offset == 0) && (size == mapped_buffer->size)) {
6140 + vdma_sync_entire_buffer(controller, mapped_buffer, sync_type);
6141 + } else {
6142 + vdma_sync_buffer_interval(controller, mapped_buffer, offset, size, sync_type);
6143 + }
6144 +}
6145 +
6146 +// Similar to hailo_vdma_buffer_sync, but allows a circular (wrap-around) sync of the buffer.
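+// For example, with mapped_buffer->size == 10, offset == 8 and size == 4, this
+// syncs [8, 10) and then wraps around to sync [0, 2).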
6147 +void hailo_vdma_buffer_sync_cyclic(struct hailo_vdma_controller *controller,
6148 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6149 + size_t offset, size_t size)
6150 +{
6151 + size_t size_to_end = min(size, mapped_buffer->size - offset);
6152 +
6153 + hailo_vdma_buffer_sync(controller, mapped_buffer, sync_type, offset, size_to_end);
6154 +
6155 + if (size_to_end < size) {
6156 + hailo_vdma_buffer_sync(controller, mapped_buffer, sync_type, 0, size - size_to_end);
6157 + }
6158 +}
6159 +
6160 +struct hailo_vdma_buffer* hailo_vdma_find_mapped_user_buffer(struct hailo_vdma_file_context *context,
6161 + size_t buffer_handle)
6162 +{
6163 + struct hailo_vdma_buffer *cur = NULL;
6164 + list_for_each_entry(cur, &context->mapped_user_buffer_list, mapped_user_buffer_list) {
6165 + if (cur->handle == buffer_handle) {
6166 + return cur;
6167 + }
6168 + }
6169 + return NULL;
6170 +}
6171 +
6172 +void hailo_vdma_clear_mapped_user_buffer_list(struct hailo_vdma_file_context *context,
6173 + struct hailo_vdma_controller *controller)
6174 +{
6175 + struct hailo_vdma_buffer *cur = NULL, *next = NULL;
6176 + list_for_each_entry_safe(cur, next, &context->mapped_user_buffer_list, mapped_user_buffer_list) {
6177 + list_del(&cur->mapped_user_buffer_list);
6178 + hailo_vdma_buffer_put(cur);
6179 + }
6180 +}
6181 +
6182 +
6183 +int hailo_desc_list_create(struct device *dev, u32 descriptors_count, u16 desc_page_size,
6184 + uintptr_t desc_handle, bool is_circular, struct hailo_descriptors_list_buffer *descriptors)
6185 +{
6186 + size_t buffer_size = 0;
6187 + const u64 align = VDMA_DESCRIPTOR_LIST_ALIGN; // First address must be 64 KB aligned (per the vDMA registers documentation)
6188 +
6189 + buffer_size = descriptors_count * sizeof(struct hailo_vdma_descriptor);
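+ // dma_alloc_coherent() guarantees alignment to the smallest page order that is
+ // >= the requested size, so rounding the size up to 64 KB below also yields a
+ // 64 KB aligned address.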
6190 + buffer_size = ALIGN(buffer_size, align);
6191 +
6192 + descriptors->kernel_address = dma_alloc_coherent(dev, buffer_size,
6193 + &descriptors->dma_address, GFP_KERNEL | __GFP_ZERO);
6194 + if (descriptors->kernel_address == NULL) {
6195 + dev_err(dev, "Failed to allocate descriptors list, desc_count 0x%x, buffer_size 0x%zx. This failure means there is not a sufficient amount of CMA memory "
6196 + "(contiguous physical memory) and is usually caused by a lack of general system memory. Please check that you have sufficient memory.\n",
6197 + descriptors_count, buffer_size);
6198 + return -ENOMEM;
6199 + }
6200 +
6201 + descriptors->buffer_size = buffer_size;
6202 + descriptors->handle = desc_handle;
6203 +
6204 + descriptors->desc_list.desc_list = descriptors->kernel_address;
6205 + descriptors->desc_list.desc_count = descriptors_count;
6206 + descriptors->desc_list.desc_page_size = desc_page_size;
6207 + descriptors->desc_list.is_circular = is_circular;
6208 +
6209 + return 0;
6210 +}
6211 +
6212 +void hailo_desc_list_release(struct device *dev, struct hailo_descriptors_list_buffer *descriptors)
6213 +{
6214 + dma_free_coherent(dev, descriptors->buffer_size, descriptors->kernel_address, descriptors->dma_address);
6215 +}
6216 +
6217 +struct hailo_descriptors_list_buffer* hailo_vdma_find_descriptors_buffer(struct hailo_vdma_file_context *context,
6218 + uintptr_t desc_handle)
6219 +{
6220 + struct hailo_descriptors_list_buffer *cur = NULL;
6221 + list_for_each_entry(cur, &context->descriptors_buffer_list, descriptors_buffer_list) {
6222 + if (cur->handle == desc_handle) {
6223 + return cur;
6224 + }
6225 + }
6226 + return NULL;
6227 +}
6228 +
6229 +void hailo_vdma_clear_descriptors_buffer_list(struct hailo_vdma_file_context *context,
6230 + struct hailo_vdma_controller *controller)
6231 +{
6232 + struct hailo_descriptors_list_buffer *cur = NULL, *next = NULL;
6233 + list_for_each_entry_safe(cur, next, &context->descriptors_buffer_list, descriptors_buffer_list) {
6234 + list_del(&cur->descriptors_buffer_list);
6235 + hailo_desc_list_release(controller->dev, cur);
6236 + kfree(cur);
6237 + }
6238 +}
6239 +
6240 +int hailo_vdma_low_memory_buffer_alloc(size_t size, struct hailo_vdma_low_memory_buffer *low_memory_buffer)
6241 +{
6242 + int ret = -EINVAL;
6243 + void *kernel_address = NULL;
6244 + size_t pages_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
6245 + size_t num_allocated = 0, i = 0;
6246 + void **pages = NULL;
6247 +
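+ // Allocate page by page so no physically contiguous block is required; the
+ // pages are later stitched into one buffer via prepare_sg_table().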
6248 + pages = kcalloc(pages_count, sizeof(*pages), GFP_KERNEL);
6249 + if (NULL == pages) {
6250 + pr_err("Failed to allocate pages for buffer (size %zu)\n", size);
6251 + ret = -ENOMEM;
6252 + goto cleanup;
6253 + }
6254 +
6255 + for (num_allocated = 0; num_allocated < pages_count; num_allocated++) {
6256 + // The __GFP_DMA32 flag limits the allocation to the lowest 4 GB of physical memory, guaranteeing that DMA
6257 + // operations will not have to use bounce buffers on certain architectures (e.g. architectures with 32-bit DMA)
6258 + kernel_address = (void*)__get_free_page(GFP_KERNEL | __GFP_DMA32);
6259 + if (NULL == kernel_address) {
6260 + pr_err("Failed to allocate a %zu byte page\n", (size_t)PAGE_SIZE);
6261 + ret = -ENOMEM;
6262 + goto cleanup;
6263 + }
6264 +
6265 + pages[num_allocated] = kernel_address;
6266 + }
6267 +
6268 + low_memory_buffer->pages_count = pages_count;
6269 + low_memory_buffer->pages_address = pages;
6270 +
6271 + return 0;
6272 +
6273 +cleanup:
6274 + if (NULL != pages) {
6275 + for (i = 0; i < num_allocated; i++) {
6276 + free_page((unsigned long)pages[i]);
6277 + }
6278 +
6279 + kfree(pages);
6280 + }
6281 +
6282 + return ret;
6283 +}
6284 +
6285 +void hailo_vdma_low_memory_buffer_free(struct hailo_vdma_low_memory_buffer *low_memory_buffer)
6286 +{
6287 + size_t i = 0;
6288 + if (NULL == low_memory_buffer) {
6289 + return;
6290 + }
6291 +
6292 + for (i = 0; i < low_memory_buffer->pages_count; i++) {
6293 + free_page((unsigned long)low_memory_buffer->pages_address[i]);
6294 + }
6295 +
6296 + kfree(low_memory_buffer->pages_address);
6297 +}
6298 +
6299 +struct hailo_vdma_low_memory_buffer* hailo_vdma_find_low_memory_buffer(struct hailo_vdma_file_context *context,
6300 + uintptr_t buf_handle)
6301 +{
6302 + struct hailo_vdma_low_memory_buffer *cur = NULL;
6303 + list_for_each_entry(cur, &context->vdma_low_memory_buffer_list, vdma_low_memory_buffer_list) {
6304 + if (cur->handle == buf_handle) {
6305 + return cur;
6306 + }
6307 + }
6308 +
6309 + return NULL;
6310 +}
6311 +
6312 +void hailo_vdma_clear_low_memory_buffer_list(struct hailo_vdma_file_context *context)
6313 +{
6314 + struct hailo_vdma_low_memory_buffer *cur = NULL, *next = NULL;
6315 + list_for_each_entry_safe(cur, next, &context->vdma_low_memory_buffer_list, vdma_low_memory_buffer_list) {
6316 + list_del(&cur->vdma_low_memory_buffer_list);
6317 + hailo_vdma_low_memory_buffer_free(cur);
6318 + kfree(cur);
6319 + }
6320 +}
6321 +
6322 +int hailo_vdma_continuous_buffer_alloc(struct device *dev, size_t size,
6323 + struct hailo_vdma_continuous_buffer *continuous_buffer)
6324 +{
6325 + dma_addr_t dma_address = 0;
6326 + void *kernel_address = NULL;
6327 +
6328 + kernel_address = dma_alloc_coherent(dev, size, &dma_address, GFP_KERNEL);
6329 + if (NULL == kernel_address) {
6330 + dev_warn(dev, "Failed to allocate continuous buffer, size 0x%zx. This failure means there is not a sufficient amount of CMA memory "
6331 + "(contiguous physical memory) and is usually caused by a lack of general system memory. Please check that you have sufficient memory.\n", size);
6332 + return -ENOMEM;
6333 + }
6334 +
6335 + continuous_buffer->kernel_address = kernel_address;
6336 + continuous_buffer->dma_address = dma_address;
6337 + continuous_buffer->size = size;
6338 + return 0;
6339 +}
6340 +
6341 +void hailo_vdma_continuous_buffer_free(struct device *dev,
6342 + struct hailo_vdma_continuous_buffer *continuous_buffer)
6343 +{
6344 + dma_free_coherent(dev, continuous_buffer->size, continuous_buffer->kernel_address,
6345 + continuous_buffer->dma_address);
6346 +}
6347 +
6348 +struct hailo_vdma_continuous_buffer* hailo_vdma_find_continuous_buffer(struct hailo_vdma_file_context *context,
6349 + uintptr_t buf_handle)
6350 +{
6351 + struct hailo_vdma_continuous_buffer *cur = NULL;
6352 + list_for_each_entry(cur, &context->continuous_buffer_list, continuous_buffer_list) {
6353 + if (cur->handle == buf_handle) {
6354 + return cur;
6355 + }
6356 + }
6357 +
6358 + return NULL;
6359 +}
6360 +
6361 +void hailo_vdma_clear_continuous_buffer_list(struct hailo_vdma_file_context *context,
6362 + struct hailo_vdma_controller *controller)
6363 +{
6364 + struct hailo_vdma_continuous_buffer *cur = NULL, *next = NULL;
6365 + list_for_each_entry_safe(cur, next, &context->continuous_buffer_list, continuous_buffer_list) {
6366 + list_del(&cur->continuous_buffer_list);
6367 + hailo_vdma_continuous_buffer_free(controller->dev, cur);
6368 + kfree(cur);
6369 + }
6370 +}
6371 +
6372 +// Assumes the provided user_address belongs to the vma and that MMIO_AND_NO_PAGES_VMA_MASK bits are set under
6373 +// vma->vm_flags. This is validated in hailo_vdma_buffer_map, and won't be checked here
6374 +static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
6375 + struct sg_table *sgt)
6376 +{
6377 + int ret = -EINVAL;
6378 + unsigned long i = 0;
6379 + unsigned long pfn = 0;
6380 + unsigned long next_pfn = 0;
6381 + phys_addr_t phys_addr = 0;
6382 + dma_addr_t mmio_dma_address = 0;
6383 + const uintptr_t virt_addr = (uintptr_t)user_address;
6384 + const u32 vma_size = vma->vm_end - vma->vm_start; // vm_end is exclusive, so the size is the plain difference
6385 + const uintptr_t num_pages = PFN_UP(virt_addr + size) - PFN_DOWN(virt_addr);
6386 +
6387 + // Check that the vma that was marked as MMIO_AND_NO_PAGES_VMA_MASK is big enough
6388 + if (vma_size < size) {
6389 + pr_err("vma (%u bytes) smaller than provided buffer (%u bytes)\n", vma_size, size);
6390 + return -EINVAL;
6391 + }
6392 +
6393 + // Get the physical address of user_address
6394 + ret = follow_pfn(vma, virt_addr, &pfn);
6395 + if (ret) {
6396 + pr_err("follow_pfn failed with %d\n", ret);
6397 + return ret;
6398 + }
6399 + phys_addr = __pfn_to_phys(pfn) + offset_in_page(virt_addr);
6400 +
6401 + // Make sure the physical memory is contiguous
6402 + for (i = 1; i < num_pages; ++i) {
6403 + ret = follow_pfn(vma, virt_addr + (i << PAGE_SHIFT), &next_pfn);
6404 + if (ret < 0) {
6405 + pr_err("follow_pfn failed with %d\n", ret);
6406 + return ret;
6407 + }
6408 + if (next_pfn != pfn + 1) {
6409 + pr_err("non-contiguous physical memory\n");
6410 + return -EFAULT;
6411 + }
6412 + pfn = next_pfn;
6413 + }
6414 +
6415 + // phys_addr to dma
6416 + // TODO: need dma_map_resource here? doesn't work currently (we get dma_mapping_error on the returned dma addr)
6417 + // (HRT-12521)
6418 + mmio_dma_address = (dma_addr_t)phys_addr;
6419 +
6420 + // Create a page-less scatterlist.
6421 + ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
6422 + if (ret < 0) {
6423 + return ret;
6424 + }
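+ // sg_page() will be NULL for this entry - only the DMA address and length are
+ // meaningful, which is why clear_sg_table() checks the page pointer before
+ // calling put_page().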
6425 +
6426 + sg_assign_page(sgt->sgl, NULL);
6427 + sg_dma_address(sgt->sgl) = mmio_dma_address;
6428 + sg_dma_len(sgt->sgl) = size;
6429 +
6430 + return 0;
6431 +}
6432 +
6433 +static int prepare_sg_table(struct sg_table *sg_table, void __user *user_address, u32 size,
6434 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
6435 +{
6436 + int ret = -EINVAL;
6437 + int pinned_pages = 0;
6438 + size_t npages = 0;
6439 + struct page **pages = NULL;
6440 + int i = 0;
6441 + struct scatterlist *sg_alloc_res = NULL;
6442 +
6443 + npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
6444 + pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
6445 + if (!pages) {
6446 + return -ENOMEM;
6447 + }
6448 +
6449 + // Check whether we are mapping a user-allocated buffer or a driver-allocated low-memory buffer
6450 + if (NULL == low_mem_driver_allocated_buffer) {
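+ // Pin the user pages so they cannot be migrated or swapped out while
+ // DMA to or from them is in flight.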
6451 + mmap_read_lock(current->mm);
6452 + pinned_pages = get_user_pages_compact((unsigned long)user_address,
6453 + npages, FOLL_WRITE | FOLL_FORCE, pages);
6454 + mmap_read_unlock(current->mm);
6455 +
6456 + if (pinned_pages < 0) {
6457 + pr_err("get_user_pages failed with %d\n", pinned_pages);
6458 + ret = pinned_pages;
6459 + goto exit;
6460 + } else if (pinned_pages != npages) {
6461 + pr_err("Pinned only %d of %zu pages\n", pinned_pages, npages);
6462 + ret = -EINVAL;
6463 + goto release_pages;
6464 + }
6465 + } else {
6466 + // Sanity check in case the user provided the wrong buffer
6467 + if (npages != low_mem_driver_allocated_buffer->pages_count) {
6468 + pr_err("Received wrong number of pages to map: got %zu, expected %zu\n",
6469 + npages, low_mem_driver_allocated_buffer->pages_count);
6470 + ret = -EINVAL;
6471 + goto exit;
6472 + }
6473 +
6474 + for (i = 0; i < npages; i++) {
6475 + pages[i] = virt_to_page(low_mem_driver_allocated_buffer->pages_address[i]);
6476 + get_page(pages[i]);
6477 + }
6478 + }
6479 +
6480 + sg_alloc_res = sg_alloc_table_from_pages_segment_compat(sg_table, pages, npages,
6481 + 0, size, SGL_MAX_SEGMENT_SIZE, NULL, 0, GFP_KERNEL);
6482 + if (IS_ERR(sg_alloc_res)) {
6483 + ret = PTR_ERR(sg_alloc_res);
6484 + pr_err("sg table alloc failed (err %d)\n", ret);
6485 + goto release_pages;
6486 + }
6487 +
6488 + ret = 0;
6489 + goto exit;
6490 +release_pages:
6491 + for (i = 0; i < pinned_pages; i++) {
6492 + if (!PageReserved(pages[i])) {
6493 + SetPageDirty(pages[i]);
6494 + }
6495 + put_page(pages[i]);
6496 + }
6497 +exit:
6498 + kvfree(pages);
6499 + return ret;
6500 +}
6501 +
6502 +static void clear_sg_table(struct sg_table *sgt)
6503 +{
6504 + struct sg_page_iter iter;
6505 + struct page *page = NULL;
6506 +
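+ // Mark pages dirty (the device may have written to them) and drop the
+ // reference taken when they were pinned or grabbed in prepare_sg_table().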
6507 + for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
6508 + page = sg_page_iter_page(&iter);
6509 + if (page) {
6510 + if (!PageReserved(page)) {
6511 + SetPageDirty(page);
6512 + }
6513 + put_page(page);
6514 + }
6515 + }
6516 +
6517 + sg_free_table(sgt);
6518 +}
6519 --- /dev/null
6520 +++ b/drivers/media/pci/hailo/vdma/memory.h
6521 @@ -0,0 +1,54 @@
6522 +// SPDX-License-Identifier: GPL-2.0
6523 +/**
6524 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
6525 + **/
6526 +/**
6527 + * vDMA memory utility (including allocation and mappings)
6528 + */
6529 +
6530 +#ifndef _HAILO_VDMA_MEMORY_H_
6531 +#define _HAILO_VDMA_MEMORY_H_
6532 +
6533 +#include "vdma/vdma.h"
6534 +
6535 +struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
6536 + void __user *user_address, size_t size, enum dma_data_direction direction,
6537 + struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
6538 +void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf);
6539 +void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf);
6540 +
6541 +void hailo_vdma_buffer_sync(struct hailo_vdma_controller *controller,
6542 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6543 + size_t offset, size_t size);
6544 +void hailo_vdma_buffer_sync_cyclic(struct hailo_vdma_controller *controller,
6545 + struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
6546 + size_t offset, size_t size);
6547 +
6548 +struct hailo_vdma_buffer* hailo_vdma_find_mapped_user_buffer(struct hailo_vdma_file_context *context,
6549 + size_t buffer_handle);
6550 +void hailo_vdma_clear_mapped_user_buffer_list(struct hailo_vdma_file_context *context,
6551 + struct hailo_vdma_controller *controller);
6552 +
6553 +int hailo_desc_list_create(struct device *dev, u32 descriptors_count, u16 desc_page_size,
6554 + uintptr_t desc_handle, bool is_circular, struct hailo_descriptors_list_buffer *descriptors);
6555 +void hailo_desc_list_release(struct device *dev, struct hailo_descriptors_list_buffer *descriptors);
6556 +struct hailo_descriptors_list_buffer* hailo_vdma_find_descriptors_buffer(struct hailo_vdma_file_context *context,
6557 + uintptr_t desc_handle);
6558 +void hailo_vdma_clear_descriptors_buffer_list(struct hailo_vdma_file_context *context,
6559 + struct hailo_vdma_controller *controller);
6560 +
6561 +int hailo_vdma_low_memory_buffer_alloc(size_t size, struct hailo_vdma_low_memory_buffer *low_memory_buffer);
6562 +void hailo_vdma_low_memory_buffer_free(struct hailo_vdma_low_memory_buffer *low_memory_buffer);
6563 +struct hailo_vdma_low_memory_buffer* hailo_vdma_find_low_memory_buffer(struct hailo_vdma_file_context *context,
6564 + uintptr_t buf_handle);
6565 +void hailo_vdma_clear_low_memory_buffer_list(struct hailo_vdma_file_context *context);
6566 +
6567 +int hailo_vdma_continuous_buffer_alloc(struct device *dev, size_t size,
6568 + struct hailo_vdma_continuous_buffer *continuous_buffer);
6569 +void hailo_vdma_continuous_buffer_free(struct device *dev,
6570 + struct hailo_vdma_continuous_buffer *continuous_buffer);
6571 +struct hailo_vdma_continuous_buffer* hailo_vdma_find_continuous_buffer(struct hailo_vdma_file_context *context,
6572 + uintptr_t buf_handle);
6573 +void hailo_vdma_clear_continuous_buffer_list(struct hailo_vdma_file_context *context,
6574 + struct hailo_vdma_controller *controller);
6575 +#endif /* _HAILO_VDMA_MEMORY_H_ */
6576 \ No newline at end of file
6577 --- /dev/null
6578 +++ b/drivers/media/pci/hailo/vdma/vdma.c
6579 @@ -0,0 +1,336 @@
6580 +// SPDX-License-Identifier: GPL-2.0
6581 +/**
6582 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
6583 + **/
6584 +
6585 +#define pr_fmt(fmt) "hailo: " fmt
6586 +
6587 +#include "vdma.h"
6588 +#include "memory.h"
6589 +#include "ioctl.h"
6590 +#include "utils/logs.h"
6591 +
6592 +#include <linux/sched.h>
6593 +#include <linux/version.h>
6594 +
6595 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
6596 +#include <linux/dma-map-ops.h>
6597 +#else
6598 +#include <linux/dma-mapping.h>
6599 +#endif
6600 +
6601 +
6602 +static struct hailo_vdma_engine* init_vdma_engines(struct device *dev,
6603 + struct hailo_resource *channel_registers_per_engine, size_t engines_count)
6604 +{
6605 + struct hailo_vdma_engine *engines = NULL;
6606 + u8 i = 0;
6607 +
6608 + engines = devm_kmalloc_array(dev, engines_count, sizeof(*engines), GFP_KERNEL);
6609 + if (NULL == engines) {
6610 + dev_err(dev, "Failed to allocate vdma engines\n");
6611 + return ERR_PTR(-ENOMEM);
6612 + }
6613 +
6614 + for (i = 0; i < engines_count; i++) {
6615 + hailo_vdma_engine_init(&engines[i], i, &channel_registers_per_engine[i]);
6616 + }
6617 +
6618 + return engines;
6619 +}
6620 +
6621 +static int hailo_set_dma_mask(struct device *dev)
6622 +{
6623 + int err = -EINVAL;
6624 + /* Check and configure DMA length */
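+ /* Try the widest mask first and fall back progressively -
+  * dma_set_mask_and_coherent() fails if the platform cannot address that range. */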
6625 + if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))) {
6626 + dev_notice(dev, "Probing: Enabled 64 bit dma\n");
6627 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))) {
6628 + dev_notice(dev, "Probing: Enabled 48 bit dma\n");
6629 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)))) {
6630 + dev_notice(dev, "Probing: Enabled 40 bit dma\n");
6631 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36)))) {
6632 + dev_notice(dev, "Probing: Enabled 36 bit dma\n");
6633 + } else if (!(err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))) {
6634 + dev_notice(dev, "Probing: Enabled 32 bit dma\n");
6635 + } else {
6636 + dev_err(dev, "Probing: Error enabling dma %d\n", err);
6637 + return err;
6638 + }
6639 +
6640 + return 0;
6641 +}
6642 +
6643 +int hailo_vdma_controller_init(struct hailo_vdma_controller *controller,
6644 + struct device *dev, struct hailo_vdma_hw *vdma_hw,
6645 + struct hailo_vdma_controller_ops *ops,
6646 + struct hailo_resource *channel_registers_per_engine, size_t engines_count)
6647 +{
6648 + int err = 0;
6649 + controller->hw = vdma_hw;
6650 + controller->ops = ops;
6651 + controller->dev = dev;
6652 +
6653 + controller->vdma_engines_count = engines_count;
6654 + controller->vdma_engines = init_vdma_engines(dev, channel_registers_per_engine, engines_count);
6655 + if (IS_ERR(controller->vdma_engines)) {
6656 + dev_err(dev, "Failed to initialize vdma engines\n");
6657 + return PTR_ERR(controller->vdma_engines);
6658 + }
6659 +
6660 + controller->used_by_filp = NULL;
6661 + spin_lock_init(&controller->interrupts_lock);
6662 + init_waitqueue_head(&controller->interrupts_wq);
6663 +
6664 + /* Check and configure DMA length */
6665 + err = hailo_set_dma_mask(dev);
6666 + if (0 > err) {
6667 + return err;
6668 + }
6669 +
6670 + if (get_dma_ops(controller->dev)) {
6671 + hailo_dev_notice(controller->dev, "Probing: Using specialized dma_ops=%ps\n", get_dma_ops(controller->dev));
6672 + }
6673 +
6674 + return 0;
6675 +}
6676 +
6677 +void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context)
6678 +{
6679 + atomic_set(&context->last_vdma_user_buffer_handle, 0);
6680 + INIT_LIST_HEAD(&context->mapped_user_buffer_list);
6681 +
6682 + atomic_set(&context->last_vdma_handle, 0);
6683 + INIT_LIST_HEAD(&context->descriptors_buffer_list);
6684 + INIT_LIST_HEAD(&context->vdma_low_memory_buffer_list);
6685 + INIT_LIST_HEAD(&context->continuous_buffer_list);
6686 +}
6687 +
6688 +void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
6689 + size_t engine_index)
6690 +{
6691 + struct hailo_vdma_engine *engine = &controller->vdma_engines[engine_index];
6692 + controller->ops->update_channel_interrupts(controller, engine_index, engine->enabled_channels);
6693 +}
6694 +
6695 +void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
6696 + struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap)
6697 +{
6698 + unsigned long irq_saved_flags = 0;
6699 + // In case of FLR, the vdma registers will be NULL
6700 + const bool is_device_up = (NULL != controller->dev);
6701 +
6702 + hailo_vdma_engine_disable_channel_interrupts(engine, channels_bitmap);
6703 + if (is_device_up) {
6704 + hailo_vdma_update_interrupts_mask(controller, engine_index);
6705 + }
6706 +
6707 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
6708 + hailo_vdma_engine_clear_channel_interrupts(engine, channels_bitmap);
6709 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
6710 +
6711 + hailo_dev_info(controller->dev, "Disabled interrupts for engine %u, channels bitmap 0x%x\n",
6712 + engine_index, channels_bitmap);
6713 +}
6714 +
6715 +void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
6716 + struct hailo_vdma_controller *controller, struct file *filp)
6717 +{
6718 + size_t engine_index = 0;
6719 + struct hailo_vdma_engine *engine = NULL;
6720 + const u32 channels_bitmap = 0xFFFFFFFF; // disable all channel interrupts
6721 +
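+ // If this file currently owns the device, quiesce all channel interrupts
+ // before tearing down its mappings below.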
6722 + if (filp == controller->used_by_filp) {
6723 + for_each_vdma_engine(controller, engine, engine_index) {
6724 + hailo_vdma_engine_interrupts_disable(controller, engine, engine_index, channels_bitmap);
6725 + }
6726 + }
6727 +
6728 + hailo_vdma_clear_mapped_user_buffer_list(context, controller);
6729 + hailo_vdma_clear_descriptors_buffer_list(context, controller);
6730 + hailo_vdma_clear_low_memory_buffer_list(context);
6731 + hailo_vdma_clear_continuous_buffer_list(context, controller);
6732 +
6733 + if (filp == controller->used_by_filp) {
6734 + controller->used_by_filp = NULL;
6735 + }
6736 +}
6737 +
6738 +void hailo_vdma_irq_handler(struct hailo_vdma_controller *controller,
6739 + size_t engine_index, u32 channels_bitmap)
6740 +{
6741 + unsigned long irq_saved_flags = 0;
6742 + struct hailo_vdma_engine *engine = NULL;
6743 +
6744 + BUG_ON(engine_index >= controller->vdma_engines_count);
6745 + engine = &controller->vdma_engines[engine_index];
6746 +
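+ // Record timestamps and mark the channels as interrupted under the lock, then
+ // wake any waiters (e.g. the HAILO_VDMA_INTERRUPTS_WAIT ioctl path).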
6747 + hailo_vdma_engine_push_timestamps(engine, channels_bitmap);
6748 +
6749 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
6750 + hailo_vdma_engine_set_channel_interrupts(engine, channels_bitmap);
6751 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
6752 +
6753 + wake_up_interruptible_all(&controller->interrupts_wq);
6754 +}
6755 +
6756 +long hailo_vdma_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
6757 + unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex)
6758 +{
6759 + switch (cmd) {
6760 + case HAILO_VDMA_INTERRUPTS_ENABLE:
6761 + return hailo_vdma_interrupts_enable_ioctl(controller, arg);
6762 + case HAILO_VDMA_INTERRUPTS_DISABLE:
6763 + return hailo_vdma_interrupts_disable_ioctl(controller, arg);
6764 + case HAILO_VDMA_INTERRUPTS_WAIT:
6765 + return hailo_vdma_interrupts_wait_ioctl(controller, arg, mutex, should_up_board_mutex);
6766 + case HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS:
6767 + return hailo_vdma_interrupts_read_timestamps_ioctl(controller, arg);
6768 + case HAILO_VDMA_BUFFER_MAP:
6769 + return hailo_vdma_buffer_map_ioctl(context, controller, arg);
6770 + case HAILO_VDMA_BUFFER_UNMAP:
6771 + return hailo_vdma_buffer_unmap_ioctl(context, controller, arg);
6772 + case HAILO_VDMA_BUFFER_SYNC:
6773 + return hailo_vdma_buffer_sync_ioctl(context, controller, arg);
6774 + case HAILO_DESC_LIST_CREATE:
6775 + return hailo_desc_list_create_ioctl(context, controller, arg);
6776 + case HAILO_DESC_LIST_RELEASE:
6777 + return hailo_desc_list_release_ioctl(context, controller, arg);
6778 + case HAILO_DESC_LIST_BIND_VDMA_BUFFER:
6779 + return hailo_desc_list_bind_vdma_buffer(context, controller, arg);
6780 + case HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC:
6781 + return hailo_vdma_low_memory_buffer_alloc_ioctl(context, controller, arg);
6782 + case HAILO_VDMA_LOW_MEMORY_BUFFER_FREE:
6783 + return hailo_vdma_low_memory_buffer_free_ioctl(context, controller, arg);
6784 + case HAILO_MARK_AS_IN_USE:
6785 + return hailo_mark_as_in_use(controller, arg, filp);
6786 + case HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC:
6787 + return hailo_vdma_continuous_buffer_alloc_ioctl(context, controller, arg);
6788 + case HAILO_VDMA_CONTINUOUS_BUFFER_FREE:
6789 + return hailo_vdma_continuous_buffer_free_ioctl(context, controller, arg);
6790 + case HAILO_VDMA_LAUNCH_TRANSFER:
6791 + return hailo_vdma_launch_transfer_ioctl(context, controller, arg);
6792 + default:
6793 + hailo_dev_err(controller->dev, "Invalid vDMA ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
6794 + return -ENOTTY;
6795 + }
6796 +}
6797 +
6798 +static int desc_list_mmap(struct hailo_vdma_controller *controller,
6799 + struct hailo_descriptors_list_buffer *vdma_descriptors_buffer, struct vm_area_struct *vma)
6800 +{
6801 + int err = 0;
6802 + unsigned long vsize = vma->vm_end - vma->vm_start;
6803 +
6804 + if (vsize > vdma_descriptors_buffer->buffer_size) {
6805 + hailo_dev_err(controller->dev, "Requested size to map (0x%lx) is larger than the descriptor list size (0x%x)\n",
6806 + vsize, vdma_descriptors_buffer->buffer_size);
6807 + return -EINVAL;
6808 + }
6809 +
6810 + err = dma_mmap_coherent(controller->dev, vma, vdma_descriptors_buffer->kernel_address,
6811 + vdma_descriptors_buffer->dma_address, vsize);
6812 + if (err != 0) {
6813 + hailo_dev_err(controller->dev, "Failed to mmap descriptors, err %d\n", err);
6814 + return err;
6815 + }
6816 +
6817 + return 0;
6818 +}
6819 +
6820 +static int low_memory_buffer_mmap(struct hailo_vdma_controller *controller,
6821 + struct hailo_vdma_low_memory_buffer *vdma_buffer, struct vm_area_struct *vma)
6822 +{
6823 + int err = 0;
6824 + size_t i = 0;
6825 + unsigned long vsize = vma->vm_end - vma->vm_start;
6826 + unsigned long orig_vm_start = vma->vm_start;
6827 + unsigned long orig_vm_end = vma->vm_end;
6828 + unsigned long page_fn = 0;
6829 +
6830 + if (vsize != vdma_buffer->pages_count * PAGE_SIZE) {
6831 + hailo_dev_err(controller->dev, "mmap size should be %lu (given %lu)\n",
6832 + vdma_buffer->pages_count * PAGE_SIZE, vsize);
6833 + return -EINVAL;
6834 + }
6835 +
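+ // remap_pfn_range() is called one page at a time, so temporarily slide the vma
+ // window forward by a page per iteration and restore the original bounds after.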
6836 + for (i = 0; i < vdma_buffer->pages_count; i++) {
6837 + if (i > 0) {
6838 + vma->vm_start = vma->vm_end;
6839 + }
6840 + vma->vm_end = vma->vm_start + PAGE_SIZE;
6841 +
6842 + page_fn = virt_to_phys(vdma_buffer->pages_address[i]) >> PAGE_SHIFT;
6843 + err = remap_pfn_range(vma, vma->vm_start, page_fn, PAGE_SIZE, vma->vm_page_prot);
6844 +
6845 + if (err != 0) {
6846 + hailo_dev_err(controller->dev, "fops_mmap failed to map kernel page, err %d\n", err);
6847 + return err;
6848 + }
6849 + }
6850 +
6851 + vma->vm_start = orig_vm_start;
6852 + vma->vm_end = orig_vm_end;
6853 +
6854 + return 0;
6855 +}
6856 +
6857 +static int continuous_buffer_mmap(struct hailo_vdma_controller *controller,
6858 + struct hailo_vdma_continuous_buffer *buffer, struct vm_area_struct *vma)
6859 +{
6860 + int err = 0;
6861 + const unsigned long vsize = vma->vm_end - vma->vm_start;
6862 +
6863 + if (vsize > buffer->size) {
6864 + hailo_dev_err(controller->dev, "mmap size must be at most %zu (given %lu)\n",
6865 + buffer->size, vsize);
6866 + return -EINVAL;
6867 + }
6868 +
6869 + err = dma_mmap_coherent(controller->dev, vma, buffer->kernel_address,
6870 + buffer->dma_address, vsize);
6871 + if (err < 0) {
6872 + hailo_dev_err(controller->dev, "vdma_mmap: dma_mmap_coherent failed, err %d\n", err);
6873 + return err;
6874 + }
6875 +
6876 + return 0;
6877 +}
6878 +
6879 +int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
6880 + struct vm_area_struct *vma, uintptr_t vdma_handle)
6881 +{
6882 + struct hailo_descriptors_list_buffer *vdma_descriptors_buffer = NULL;
6883 + struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
6884 + struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
6885 +
6886 + hailo_dev_info(controller->dev, "Map vdma_handle %llu\n", (u64)vdma_handle);
6887 + if (NULL != (vdma_descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, vdma_handle))) {
6888 + return desc_list_mmap(controller, vdma_descriptors_buffer, vma);
6889 + }
6890 + else if (NULL != (low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, vdma_handle))) {
6891 + return low_memory_buffer_mmap(controller, low_memory_buffer, vma);
6892 + }
6893 + else if (NULL != (continuous_buffer = hailo_vdma_find_continuous_buffer(context, vdma_handle))) {
6894 + return continuous_buffer_mmap(controller, continuous_buffer, vma);
6895 + }
6896 + else {
6897 + hailo_dev_err(controller->dev, "Can't mmap vdma handle: %llu (does not exist)\n", (u64)vdma_handle);
6898 + return -EINVAL;
6899 + }
6900 +}
6901 +
6902 +enum dma_data_direction get_dma_direction(enum hailo_dma_data_direction hailo_direction)
6903 +{
6904 + switch (hailo_direction) {
6905 + case HAILO_DMA_BIDIRECTIONAL:
6906 + return DMA_BIDIRECTIONAL;
6907 + case HAILO_DMA_TO_DEVICE:
6908 + return DMA_TO_DEVICE;
6909 + case HAILO_DMA_FROM_DEVICE:
6910 + return DMA_FROM_DEVICE;
6911 + default:
6912 + pr_err("Invalid hailo direction %d\n", hailo_direction);
6913 + return DMA_NONE;
6914 + }
6915 +}
6916 --- /dev/null
6917 +++ b/drivers/media/pci/hailo/vdma/vdma.h
6918 @@ -0,0 +1,143 @@
6919 +// SPDX-License-Identifier: GPL-2.0
6920 +/**
6921 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
6922 + **/
6923 +/**
6924 + * Hailo vdma engine definitions
6925 + */
6926 +
6927 +#ifndef _HAILO_VDMA_VDMA_H_
6928 +#define _HAILO_VDMA_VDMA_H_
6929 +
6930 +#include "hailo_ioctl_common.h"
6931 +#include "hailo_resource.h"
6932 +#include "vdma_common.h"
6933 +
6934 +#include <linux/dma-mapping.h>
6935 +#include <linux/types.h>
6936 +#include <linux/semaphore.h>
6937 +
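+// Each channel owns a 32-byte register block ((channel_index) << 5): the
+// host-to-device control/num_proc registers sit at offsets 0x0/0x4 and the
+// device-to-host registers at 0x10/0x14 within that block.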
6938 +#define VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
6939 + (((channel_index) << 5) + 0x0) : (((channel_index) << 5) + 0x10))
6940 +#define VDMA_CHANNEL_CONTROL_REG_ADDRESS(vdma_registers, channel_index, direction) \
6941 + ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction))
6942 +
6943 +#define VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
6944 + (((channel_index) << 5) + 0x4) : (((channel_index) << 5) + 0x14))
6945 +#define VDMA_CHANNEL_NUM_PROC_ADDRESS(vdma_registers, channel_index, direction) \
6946 + ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction))
6947 +
6948 +
6949 +struct hailo_vdma_buffer {
6950 + struct list_head mapped_user_buffer_list;
6951 + size_t handle;
6952 +
6953 + struct kref kref;
6954 + struct device *device;
6955 +
6956 + void __user *user_address;
6957 + u32 size;
6958 + enum dma_data_direction data_direction;
6959 + struct sg_table sg_table;
6960 +
6961 + // If this flag is set, the buffer pointed to by sg_table is not backed by
6962 + // 'struct page' (only by a pure pfn). In this case, accessing the page,
6963 + // or calling APIs that access the page (e.g. dma_sync_sg_for_cpu), is not
6964 + // allowed.
6965 + bool is_mmio;
6966 +};
6967 +
6968 +// Continuous buffer that holds a descriptor list.
6969 +struct hailo_descriptors_list_buffer {
6970 + struct list_head descriptors_buffer_list;
6971 + uintptr_t handle;
6972 + void *kernel_address;
6973 + dma_addr_t dma_address;
6974 + u32 buffer_size;
6975 + struct hailo_vdma_descriptors_list desc_list;
6976 +};
6977 +
6978 +struct hailo_vdma_low_memory_buffer {
6979 + struct list_head vdma_low_memory_buffer_list;
6980 + uintptr_t handle;
6981 + size_t pages_count;
6982 + void **pages_address;
6983 +};
6984 +
6985 +struct hailo_vdma_continuous_buffer {
6986 + struct list_head continuous_buffer_list;
6987 + uintptr_t handle;
6988 + void *kernel_address;
6989 + dma_addr_t dma_address;
6990 + size_t size;
6991 +};
6992 +
6993 +struct hailo_vdma_controller;
6994 +struct hailo_vdma_controller_ops {
6995 + void (*update_channel_interrupts)(struct hailo_vdma_controller *controller, size_t engine_index,
6996 + u32 channels_bitmap);
6997 +};
6998 +
6999 +struct hailo_vdma_controller {
7000 + struct hailo_vdma_hw *hw;
7001 + struct hailo_vdma_controller_ops *ops;
7002 + struct device *dev;
7003 +
7004 + size_t vdma_engines_count;
7005 + struct hailo_vdma_engine *vdma_engines;
7006 +
7007 + spinlock_t interrupts_lock;
7008 + wait_queue_head_t interrupts_wq;
7009 +
7010 + struct file *used_by_filp;
7011 +
7012 + // Putting big IOCTL structures here to avoid stack allocation.
7013 + struct hailo_vdma_interrupts_read_timestamp_params read_interrupt_timestamps_params;
7014 +};
7015 +
7016 +#define for_each_vdma_engine(controller, engine, engine_index) \
7017 + _for_each_element_array(controller->vdma_engines, controller->vdma_engines_count, \
7018 + engine, engine_index)
7019 +
7020 +struct hailo_vdma_file_context {
7021 + atomic_t last_vdma_user_buffer_handle;
7022 + struct list_head mapped_user_buffer_list;
7023 +
7024 + // last_vdma_handle serves as a handle for both the vdma descriptor list and the vdma buffer -
7025 + // there will be no collisions between the two
7026 + atomic_t last_vdma_handle;
7027 + struct list_head descriptors_buffer_list;
7028 + struct list_head vdma_low_memory_buffer_list;
7029 + struct list_head continuous_buffer_list;
7030 +};
7031 +
7032 +
7033 +int hailo_vdma_controller_init(struct hailo_vdma_controller *controller,
7034 + struct device *dev, struct hailo_vdma_hw *vdma_hw,
7035 + struct hailo_vdma_controller_ops *ops,
7036 + struct hailo_resource *channel_registers_per_engine, size_t engines_count);
7037 +
7038 +void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
7039 + size_t engine_index);
7040 +
7041 +void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
7042 + struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap);
7043 +
7044 +void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context);
7045 +void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
7046 + struct hailo_vdma_controller *controller, struct file *filp);
7047 +
7048 +void hailo_vdma_irq_handler(struct hailo_vdma_controller *controller, size_t engine_index,
7049 + u32 channels_bitmap);
7050 +
7051 +// TODO: reduce params count
7052 +long hailo_vdma_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
7053 + unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex);
7054 +
7055 +int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
7056 + struct vm_area_struct *vma, uintptr_t vdma_handle);
7057 +
7058 +enum dma_data_direction get_dma_direction(enum hailo_dma_data_direction hailo_direction);
7059 +void hailo_vdma_disable_vdma_channels(struct hailo_vdma_controller *controller, const bool should_close_channels);
7060 +
7061 +#endif /* _HAILO_VDMA_VDMA_H_ */
7062 \ No newline at end of file