]> git.ipfire.org Git - thirdparty/openwrt.git/blob
5f8cf8df017ac1caa5b41ce643f8fc1adc8f8271
[thirdparty/openwrt.git] /
1 From a44f17d8193b69aedb1beebf5ad885a88b1c6615 Mon Sep 17 00:00:00 2001
2 From: Naushir Patuck <naush@raspberrypi.com>
3 Date: Fri, 2 Aug 2024 11:01:24 +0100
4 Subject: [PATCH 1235/1350] drivers: media: pci: Update Hailo accelerator
5 device driver to v4.18.0
6
7 Sourced from https://github.com/hailo-ai/hailort-drivers/
8
9 Signed-off-by: Naushir Patuck <naush@raspberrypi.com>
10 ---
11 drivers/media/pci/hailo/Makefile | 2 +
12 drivers/media/pci/hailo/common/fw_operation.c | 2 +-
13 drivers/media/pci/hailo/common/fw_operation.h | 2 +-
14 .../media/pci/hailo/common/fw_validation.c | 10 +-
15 .../media/pci/hailo/common/fw_validation.h | 5 +-
16 .../pci/hailo/common/hailo_ioctl_common.h | 240 +++++++----
17 .../media/pci/hailo/common/hailo_resource.c | 2 +-
18 .../media/pci/hailo/common/hailo_resource.h | 2 +-
19 drivers/media/pci/hailo/common/pcie_common.c | 367 +++++++++++++----
20 drivers/media/pci/hailo/common/pcie_common.h | 42 +-
21 drivers/media/pci/hailo/common/utils.h | 24 +-
22 drivers/media/pci/hailo/common/vdma_common.c | 371 +++++++++++++-----
23 drivers/media/pci/hailo/common/vdma_common.h | 34 +-
24 drivers/media/pci/hailo/src/fops.c | 104 +++--
25 drivers/media/pci/hailo/src/fops.h | 1 +
26 drivers/media/pci/hailo/src/pci_soc_ioctl.c | 155 ++++++++
27 drivers/media/pci/hailo/src/pci_soc_ioctl.h | 19 +
28 drivers/media/pci/hailo/src/pcie.c | 93 ++++-
29 drivers/media/pci/hailo/src/pcie.h | 2 +
30 drivers/media/pci/hailo/src/sysfs.c | 9 +
31 drivers/media/pci/hailo/src/utils.c | 1 -
32 .../pci/hailo/utils/integrated_nnc_utils.c | 101 +++++
33 .../pci/hailo/utils/integrated_nnc_utils.h | 30 ++
34 drivers/media/pci/hailo/vdma/ioctl.c | 53 ++-
35 drivers/media/pci/hailo/vdma/ioctl.h | 6 +-
36 drivers/media/pci/hailo/vdma/memory.c | 148 ++++++-
37 drivers/media/pci/hailo/vdma/memory.h | 4 +-
38 drivers/media/pci/hailo/vdma/vdma.c | 80 ++--
39 drivers/media/pci/hailo/vdma/vdma.h | 30 +-
40 29 files changed, 1536 insertions(+), 403 deletions(-)
41 create mode 100755 drivers/media/pci/hailo/src/pci_soc_ioctl.c
42 create mode 100755 drivers/media/pci/hailo/src/pci_soc_ioctl.h
43 create mode 100755 drivers/media/pci/hailo/utils/integrated_nnc_utils.c
44 create mode 100755 drivers/media/pci/hailo/utils/integrated_nnc_utils.h
45
46 --- a/drivers/media/pci/hailo/Makefile
47 +++ b/drivers/media/pci/hailo/Makefile
48 @@ -10,6 +10,7 @@ hailo_pci-objs += src/pcie.o
49 hailo_pci-objs += src/fops.o
50 hailo_pci-objs += src/utils.o
51 hailo_pci-objs += src/sysfs.o
52 +hailo_pci-objs += src/pci_soc_ioctl.o
53
54 hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_validation.o
55 hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/fw_operation.o
56 @@ -18,6 +19,7 @@ hailo_pci-objs += $(COMMON_SRC_DIRECTORY
57 hailo_pci-objs += $(COMMON_SRC_DIRECTORY)/hailo_resource.o
58
59 hailo_pci-objs += $(UTILS_SRC_DIRECTORY)/logs.o
60 +hailo_pci-objs += $(UTILS_SRC_DIRECTORY)/integrated_nnc_utils.o
61
62 hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/vdma.o
63 hailo_pci-objs += $(VDMA_SRC_DIRECTORY)/memory.o
64 --- a/drivers/media/pci/hailo/common/fw_operation.c
65 +++ b/drivers/media/pci/hailo/common/fw_operation.c
66 @@ -1,4 +1,4 @@
67 -// SPDX-License-Identifier: GPL-2.0
68 +// SPDX-License-Identifier: MIT
69 /**
70 * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
71 **/
72 --- a/drivers/media/pci/hailo/common/fw_operation.h
73 +++ b/drivers/media/pci/hailo/common/fw_operation.h
74 @@ -1,4 +1,4 @@
75 -// SPDX-License-Identifier: GPL-2.0
76 +// SPDX-License-Identifier: MIT
77 /**
78 * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
79 **/
80 --- a/drivers/media/pci/hailo/common/fw_validation.c
81 +++ b/drivers/media/pci/hailo/common/fw_validation.c
82 @@ -1,4 +1,4 @@
83 -// SPDX-License-Identifier: GPL-2.0
84 +// SPDX-License-Identifier: MIT
85 /**
86 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
87 **/
88 @@ -28,16 +28,18 @@ int FW_VALIDATION__validate_fw_header(ui
89 firmware_header_t *firmware_header = NULL;
90 u32 consumed_firmware_offset = *outer_consumed_firmware_offset;
91 u32 expected_firmware_magic = 0;
92 -
93 +
94 firmware_header = (firmware_header_t *) (firmware_base_address + consumed_firmware_offset);
95 CONSUME_FIRMWARE(sizeof(firmware_header_t), -EINVAL);
96
97 switch (board_type) {
98 case HAILO_BOARD_TYPE_HAILO8:
99 - expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO8;
100 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO8;
101 break;
102 + case HAILO_BOARD_TYPE_HAILO10H_LEGACY:
103 case HAILO_BOARD_TYPE_HAILO15:
104 - expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
105 + case HAILO_BOARD_TYPE_HAILO10H:
106 + expected_firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
107 break;
108 case HAILO_BOARD_TYPE_PLUTO:
109 expected_firmware_magic = FIRMWARE_HEADER_MAGIC_PLUTO;
110 --- a/drivers/media/pci/hailo/common/fw_validation.h
111 +++ b/drivers/media/pci/hailo/common/fw_validation.h
112 @@ -1,4 +1,4 @@
113 -// SPDX-License-Identifier: GPL-2.0
114 +// SPDX-License-Identifier: MIT
115 /**
116 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
117 **/
118 @@ -11,8 +11,7 @@
119
120 #define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
121 #define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
122 -// TODO - HRT-11344 : change fw magic to pluto specific
123 -#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
124 +#define FIRMWARE_HEADER_MAGIC_PLUTO (0xF94739AB)
125
126 #ifndef HAILO_EMULATOR
127 #define FIRMWARE_WAIT_TIMEOUT_MS (5000)
128 --- a/drivers/media/pci/hailo/common/hailo_ioctl_common.h
129 +++ b/drivers/media/pci/hailo/common/hailo_ioctl_common.h
130 @@ -6,6 +6,14 @@
131 #ifndef _HAILO_IOCTL_COMMON_H_
132 #define _HAILO_IOCTL_COMMON_H_
133
134 +#define HAILO_DRV_VER_MAJOR 4
135 +#define HAILO_DRV_VER_MINOR 18
136 +#define HAILO_DRV_VER_REVISION 0
137 +
138 +#define _STRINGIFY_EXPANDED( x ) #x
139 +#define _STRINGIFY_NUMBER( x ) _STRINGIFY_EXPANDED(x)
140 +#define HAILO_DRV_VER _STRINGIFY_NUMBER(HAILO_DRV_VER_MAJOR) "." _STRINGIFY_NUMBER(HAILO_DRV_VER_MINOR) "." _STRINGIFY_NUMBER(HAILO_DRV_VER_REVISION)
141 +
142
143 // This value is not easily changeable.
144 // For example: the channel interrupts ioctls assume we have up to 32 channels
145 @@ -23,14 +31,17 @@
146 #define INVALID_DRIVER_HANDLE_VALUE ((uintptr_t)-1)
147
148 // Used by windows and unix driver to raise the right CPU control handle to the FW. The same as in pcie_service FW
149 -#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
150 -#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
151 -#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
152 -#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
153 -#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
154 -#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
155 +#define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
156 +#define FW_ACCESS_CORE_CPU_CONTROL_MASK (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
157 +#define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
158 +#define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
159 +#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
160 +#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
161 +#define FW_ACCESS_SOC_CONNECT_SHIFT (3)
162 +#define FW_ACCESS_SOC_CONNECT_MASK (1 << FW_ACCESS_SOC_CONNECT_SHIFT)
163 +
164 +#define INVALID_VDMA_CHANNEL (0xff)
165
166 -#define INVALID_VDMA_CHANNEL (0xff)
167
168 #if !defined(__cplusplus) && defined(NTDDI_VERSION)
169 #include <wdm.h>
170 @@ -53,14 +64,23 @@ typedef uint8_t bool;
171 #define INT_MAX 0x7FFFFFFF
172 #endif // !defined(INT_MAX)
173
174 +#if !defined(ECONNRESET)
175 +#define ECONNRESET 104 /* Connection reset by peer */
176 +#endif // !defined(ECONNRESET)
177
178 // {d88d31f1-fede-4e71-ac2a-6ce0018c1501}
179 -DEFINE_GUID (GUID_DEVINTERFACE_HailoKM,
180 +DEFINE_GUID (GUID_DEVINTERFACE_HailoKM_NNC,
181 0xd88d31f1,0xfede,0x4e71,0xac,0x2a,0x6c,0xe0,0x01,0x8c,0x15,0x01);
182
183 -#define HAILO_GENERAL_IOCTL_MAGIC 0
184 -#define HAILO_VDMA_IOCTL_MAGIC 1
185 -#define HAILO_NON_LINUX_IOCTL_MAGIC 2
186 +// {7f16047d-64b8-207a-0092-e970893970a2}
187 +DEFINE_GUID (GUID_DEVINTERFACE_HailoKM_SOC,
188 + 0x7f16047d,0x64b8,0x207a,0x00,0x92,0xe9,0x70,0x89,0x39,0x70,0xa2);
189 +
190 +#define HAILO_GENERAL_IOCTL_MAGIC 0
191 +#define HAILO_VDMA_IOCTL_MAGIC 1
192 +#define HAILO_SOC_IOCTL_MAGIC 2
193 +#define HAILO_PCI_EP_IOCTL_MAGIC 3
194 +#define HAILO_NNC_IOCTL_MAGIC 4
195
196 #define HAILO_IOCTL_COMPATIBLE CTL_CODE(FILE_DEVICE_UNKNOWN, 0x802, METHOD_BUFFERED, FILE_ANY_ACCESS)
197
198 @@ -114,9 +134,11 @@ static ULONG FORCEINLINE _IOC_(ULONG nr,
199 #define _IOWR_ _IOWR
200 #define _IO_ _IO
201
202 -#define HAILO_GENERAL_IOCTL_MAGIC 'g'
203 -#define HAILO_VDMA_IOCTL_MAGIC 'v'
204 -#define HAILO_NON_LINUX_IOCTL_MAGIC 'w'
205 +#define HAILO_GENERAL_IOCTL_MAGIC 'g'
206 +#define HAILO_VDMA_IOCTL_MAGIC 'v'
207 +#define HAILO_SOC_IOCTL_MAGIC 's'
208 +#define HAILO_NNC_IOCTL_MAGIC 'n'
209 +#define HAILO_PCI_EP_IOCTL_MAGIC 'p'
210
211 #elif defined(__QNX__) // #ifdef _MSC_VER
212 #include <devctl.h>
213 @@ -132,7 +154,6 @@ static ULONG FORCEINLINE _IOC_(ULONG nr,
214 #define _IO_ __DION
215 #define HAILO_GENERAL_IOCTL_MAGIC _DCMD_ALL
216 #define HAILO_VDMA_IOCTL_MAGIC _DCMD_MISC
217 -#define HAILO_NON_LINUX_IOCTL_MAGIC _DCMD_PROC
218
219 #else // #ifdef _MSC_VER
220 #error "unsupported platform!"
221 @@ -161,6 +182,16 @@ enum hailo_dma_data_direction {
222 HAILO_DMA_MAX_ENUM = INT_MAX,
223 };
224
225 +// Enum that states what type of buffer we are working with in the driver
226 +// TODO: HRT-13580 - Add specific type for user allocated and for driver allocated
227 +enum hailo_dma_buffer_type {
228 + HAILO_DMA_USER_PTR_BUFFER = 0,
229 + HAILO_DMA_DMABUF_BUFFER = 1,
230 +
231 + /** Max enum value to maintain ABI Integrity */
232 + HAILO_DMA_BUFFER_MAX_ENUM = INT_MAX,
233 +};
234 +
235 // Enum that determines if buffer should be allocated from user space or from driver
236 enum hailo_allocation_mode {
237 HAILO_ALLOCATION_MODE_USERSPACE = 0,
238 @@ -170,10 +201,19 @@ enum hailo_allocation_mode {
239 HAILO_ALLOCATION_MODE_MAX_ENUM = INT_MAX,
240 };
241
242 +enum hailo_vdma_interrupts_domain {
243 + HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
244 + HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
245 + HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
246 +
247 + /** Max enum value to maintain ABI Integrity */
248 + HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
249 +};
250 +
251 /* structure used in ioctl HAILO_VDMA_BUFFER_MAP */
252 struct hailo_vdma_buffer_map_params {
253 #if defined(__linux__) || defined(_MSC_VER)
254 - void* user_address; // in
255 + uintptr_t user_address; // in
256 #elif defined(__QNX__)
257 shm_handle_t shared_memory_handle; // in
258 #else
259 @@ -181,6 +221,7 @@ struct hailo_vdma_buffer_map_params {
260 #endif // __linux__
261 size_t size; // in
262 enum hailo_dma_data_direction data_direction; // in
263 + enum hailo_dma_buffer_type buffer_type; // in
264 uintptr_t allocated_buffer_handle; // in
265 size_t mapped_handle; // out
266 };
267 @@ -204,31 +245,27 @@ struct hailo_desc_list_release_params {
268 uintptr_t desc_handle; // in
269 };
270
271 -/* structure used in ioctl HAILO_NON_LINUX_DESC_LIST_MMAP */
272 -struct hailo_non_linux_desc_list_mmap_params {
273 - uintptr_t desc_handle; // in
274 - size_t size; // in
275 - void* user_address; // out
276 -};
277 -
278 /* structure used in ioctl HAILO_DESC_LIST_BIND_VDMA_BUFFER */
279 -struct hailo_desc_list_bind_vdma_buffer_params {
280 +struct hailo_desc_list_program_params {
281 size_t buffer_handle; // in
282 size_t buffer_size; // in
283 size_t buffer_offset; // in
284 uintptr_t desc_handle; // in
285 uint8_t channel_index; // in
286 uint32_t starting_desc; // in
287 + bool should_bind; // in
288 + enum hailo_vdma_interrupts_domain last_interrupts_domain; // in
289 + bool is_debug; // in
290 };
291
292 -/* structure used in ioctl HAILO_VDMA_INTERRUPTS_ENABLE */
293 -struct hailo_vdma_interrupts_enable_params {
294 +/* structure used in ioctl HAILO_VDMA_ENABLE_CHANNELS */
295 +struct hailo_vdma_enable_channels_params {
296 uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
297 bool enable_timestamps_measure; // in
298 };
299
300 -/* structure used in ioctl HAILO_VDMA_INTERRUPTS_DISABLE */
301 -struct hailo_vdma_interrupts_disable_params {
302 +/* structure used in ioctl HAILO_VDMA_DISABLE_CHANNELS */
303 +struct hailo_vdma_disable_channels_params {
304 uint32_t channels_bitmap_per_engine[MAX_VDMA_ENGINES]; // in
305 };
306
307 @@ -237,7 +274,7 @@ struct hailo_vdma_interrupts_channel_dat
308 uint8_t engine_index;
309 uint8_t channel_index;
310 bool is_active; // If not activate, num_processed is ignored.
311 - uint16_t host_num_processed;
312 + uint8_t transfers_completed; // Number of transfers completed.
313 uint8_t host_error; // Channel errors bits on source side
314 uint8_t device_error; // Channel errors bits on dest side
315 bool validation_success; // If the validation of the channel was successful
316 @@ -312,6 +349,10 @@ enum hailo_transfer_memory_type {
317 HAILO_TRANSFER_MEMORY_DMA_ENGINE1,
318 HAILO_TRANSFER_MEMORY_DMA_ENGINE2,
319
320 + // PCIe EP driver memories
321 + HAILO_TRANSFER_MEMORY_PCIE_EP_CONFIG = 0x400,
322 + HAILO_TRANSFER_MEMORY_PCIE_EP_BRIDGE,
323 +
324 /** Max enum value to maintain ABI Integrity */
325 HAILO_TRANSFER_MEMORY_MAX_ENUM = INT_MAX,
326 };
327 @@ -352,15 +393,26 @@ enum hailo_board_type {
328 HAILO_BOARD_TYPE_HAILO8 = 0,
329 HAILO_BOARD_TYPE_HAILO15,
330 HAILO_BOARD_TYPE_PLUTO,
331 + HAILO_BOARD_TYPE_HAILO10H,
332 + HAILO_BOARD_TYPE_HAILO10H_LEGACY,
333 HAILO_BOARD_TYPE_COUNT,
334
335 /** Max enum value to maintain ABI Integrity */
336 HAILO_BOARD_TYPE_MAX_ENUM = INT_MAX
337 };
338
339 +enum hailo_accelerator_type {
340 + HAILO_ACCELERATOR_TYPE_NNC,
341 + HAILO_ACCELERATOR_TYPE_SOC,
342 +
343 + /** Max enum value to maintain ABI Integrity */
344 + HAILO_ACCELERATOR_TYPE_MAX_ENUM = INT_MAX
345 +};
346 +
347 enum hailo_dma_type {
348 HAILO_DMA_TYPE_PCIE,
349 HAILO_DMA_TYPE_DRAM,
350 + HAILO_DMA_TYPE_PCI_EP,
351
352 /** Max enum value to maintain ABI Integrity */
353 HAILO_DMA_TYPE_MAX_ENUM = INT_MAX,
354 @@ -428,15 +480,6 @@ struct hailo_vdma_transfer_buffer {
355 uint32_t size; // in
356 };
357
358 -enum hailo_vdma_interrupts_domain {
359 - HAILO_VDMA_INTERRUPTS_DOMAIN_NONE = 0,
360 - HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE = (1 << 0),
361 - HAILO_VDMA_INTERRUPTS_DOMAIN_HOST = (1 << 1),
362 -
363 - /** Max enum value to maintain ABI Integrity */
364 - HAILO_VDMA_INTERRUPTS_DOMAIN_MAX_ENUM = INT_MAX,
365 -};
366 -
367 // We allow maximum 2 buffers per transfer since we may have an extra buffer
368 // to make sure each buffer is aligned to page size.
369 #define HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER (2)
370 @@ -460,6 +503,35 @@ struct hailo_vdma_launch_transfer_params
371 // more info (e.g desc complete status)
372
373 uint32_t descs_programed; // out, amount of descriptors programed.
374 + int launch_transfer_status; // out, status of the launch transfer call. (only used in case of error)
375 +};
376 +
377 +/* structure used in ioctl HAILO_SOC_CONNECT */
378 +struct hailo_soc_connect_params {
379 + uint8_t input_channel_index; // out
380 + uint8_t output_channel_index; // out
381 + uintptr_t input_desc_handle; // in
382 + uintptr_t output_desc_handle; // in
383 +};
384 +
385 +/* structure used in ioctl HAILO_SOC_CLOSE */
386 +struct hailo_soc_close_params {
387 + uint8_t input_channel_index; // in
388 + uint8_t output_channel_index; // in
389 +};
390 +
391 +/* structure used in ioctl HAILO_PCI_EP_ACCEPT */
392 +struct hailo_pci_ep_accept_params {
393 + uint8_t input_channel_index; // out
394 + uint8_t output_channel_index; // out
395 + uintptr_t input_desc_handle; // in
396 + uintptr_t output_desc_handle; // in
397 +};
398 +
399 +/* structure used in ioctl HAILO_PCI_EP_CLOSE */
400 +struct hailo_pci_ep_close_params {
401 + uint8_t input_channel_index; // in
402 + uint8_t output_channel_index; // in
403 };
404
405 #ifdef _MSC_VER
406 @@ -469,8 +541,8 @@ struct tCompatibleHailoIoctlData
407 ULONG_PTR Value;
408 union {
409 struct hailo_memory_transfer_params MemoryTransfer;
410 - struct hailo_vdma_interrupts_enable_params VdmaInterruptsEnable;
411 - struct hailo_vdma_interrupts_disable_params VdmaInterruptsDisable;
412 + struct hailo_vdma_enable_channels_params VdmaEnableChannels;
413 + struct hailo_vdma_disable_channels_params VdmaDisableChannels;
414 struct hailo_vdma_interrupts_read_timestamp_params VdmaInterruptsReadTimestamps;
415 struct hailo_vdma_interrupts_wait_params VdmaInterruptsWait;
416 struct hailo_vdma_buffer_sync_params VdmaBufferSync;
417 @@ -479,14 +551,17 @@ struct tCompatibleHailoIoctlData
418 struct hailo_vdma_buffer_unmap_params VdmaBufferUnmap;
419 struct hailo_desc_list_create_params DescListCreate;
420 struct hailo_desc_list_release_params DescListReleaseParam;
421 - struct hailo_desc_list_bind_vdma_buffer_params DescListBind;
422 + struct hailo_desc_list_program_params DescListProgram;
423 struct hailo_d2h_notification D2HNotification;
424 struct hailo_device_properties DeviceProperties;
425 struct hailo_driver_info DriverInfo;
426 - struct hailo_non_linux_desc_list_mmap_params DescListMmap;
427 struct hailo_read_log_params ReadLog;
428 struct hailo_mark_as_in_use_params MarkAsInUse;
429 struct hailo_vdma_launch_transfer_params LaunchTransfer;
430 + struct hailo_soc_connect_params ConnectParams;
431 + struct hailo_soc_close_params SocCloseParams;
432 + struct hailo_pci_ep_accept_params AcceptParams;
433 + struct hailo_pci_ep_close_params PciEpCloseParams;
434 } Buffer;
435 };
436 #endif // _MSC_VER
437 @@ -495,30 +570,20 @@ struct tCompatibleHailoIoctlData
438
439 enum hailo_general_ioctl_code {
440 HAILO_MEMORY_TRANSFER_CODE,
441 - HAILO_FW_CONTROL_CODE,
442 - HAILO_READ_NOTIFICATION_CODE,
443 - HAILO_DISABLE_NOTIFICATION_CODE,
444 HAILO_QUERY_DEVICE_PROPERTIES_CODE,
445 HAILO_QUERY_DRIVER_INFO_CODE,
446 - HAILO_READ_LOG_CODE,
447 - HAILO_RESET_NN_CORE_CODE,
448
449 // Must be last
450 HAILO_GENERAL_IOCTL_MAX_NR,
451 };
452
453 #define HAILO_MEMORY_TRANSFER _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_MEMORY_TRANSFER_CODE, struct hailo_memory_transfer_params)
454 -#define HAILO_FW_CONTROL _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
455 -#define HAILO_READ_NOTIFICATION _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
456 -#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
457 #define HAILO_QUERY_DEVICE_PROPERTIES _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DEVICE_PROPERTIES_CODE, struct hailo_device_properties)
458 #define HAILO_QUERY_DRIVER_INFO _IOW_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_QUERY_DRIVER_INFO_CODE, struct hailo_driver_info)
459 -#define HAILO_READ_LOG _IOWR_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
460 -#define HAILO_RESET_NN_CORE _IO_(HAILO_GENERAL_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
461
462 enum hailo_vdma_ioctl_code {
463 - HAILO_VDMA_INTERRUPTS_ENABLE_CODE,
464 - HAILO_VDMA_INTERRUPTS_DISABLE_CODE,
465 + HAILO_VDMA_ENABLE_CHANNELS_CODE,
466 + HAILO_VDMA_DISABLE_CHANNELS_CODE,
467 HAILO_VDMA_INTERRUPTS_WAIT_CODE,
468 HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE,
469 HAILO_VDMA_BUFFER_MAP_CODE,
470 @@ -526,7 +591,7 @@ enum hailo_vdma_ioctl_code {
471 HAILO_VDMA_BUFFER_SYNC_CODE,
472 HAILO_DESC_LIST_CREATE_CODE,
473 HAILO_DESC_LIST_RELEASE_CODE,
474 - HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE,
475 + HAILO_DESC_LIST_PROGRAM_CODE,
476 HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE,
477 HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE,
478 HAILO_MARK_AS_IN_USE_CODE,
479 @@ -538,38 +603,67 @@ enum hailo_vdma_ioctl_code {
480 HAILO_VDMA_IOCTL_MAX_NR,
481 };
482
483 -#define HAILO_VDMA_INTERRUPTS_ENABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_ENABLE_CODE, struct hailo_vdma_interrupts_enable_params)
484 -#define HAILO_VDMA_INTERRUPTS_DISABLE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_DISABLE_CODE, struct hailo_vdma_interrupts_disable_params)
485 +#define HAILO_VDMA_ENABLE_CHANNELS _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_ENABLE_CHANNELS_CODE, struct hailo_vdma_enable_channels_params)
486 +#define HAILO_VDMA_DISABLE_CHANNELS _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_DISABLE_CHANNELS_CODE, struct hailo_vdma_disable_channels_params)
487 #define HAILO_VDMA_INTERRUPTS_WAIT _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_WAIT_CODE, struct hailo_vdma_interrupts_wait_params)
488 #define HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS_CODE, struct hailo_vdma_interrupts_read_timestamp_params)
489
490 -#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
491 -#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
492 -#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
493 +#define HAILO_VDMA_BUFFER_MAP _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_MAP_CODE, struct hailo_vdma_buffer_map_params)
494 +#define HAILO_VDMA_BUFFER_UNMAP _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_UNMAP_CODE, struct hailo_vdma_buffer_unmap_params)
495 +#define HAILO_VDMA_BUFFER_SYNC _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_BUFFER_SYNC_CODE, struct hailo_vdma_buffer_sync_params)
496 +
497 +#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
498 +#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
499 +#define HAILO_DESC_LIST_PROGRAM _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_PROGRAM_CODE, struct hailo_desc_list_program_params)
500
501 -#define HAILO_DESC_LIST_CREATE _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_CREATE_CODE, struct hailo_desc_list_create_params)
502 -#define HAILO_DESC_LIST_RELEASE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_RELEASE_CODE, struct hailo_desc_list_release_params)
503 -#define HAILO_DESC_LIST_BIND_VDMA_BUFFER _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_DESC_LIST_BIND_VDMA_BUFFER_CODE, struct hailo_desc_list_bind_vdma_buffer_params)
504 +#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
505 +#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
506
507 -#define HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC_CODE, struct hailo_allocate_low_memory_buffer_params)
508 -#define HAILO_VDMA_LOW_MEMORY_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LOW_MEMORY_BUFFER_FREE_CODE, struct hailo_free_low_memory_buffer_params)
509 +#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
510
511 -#define HAILO_MARK_AS_IN_USE _IOW_(HAILO_VDMA_IOCTL_MAGIC, HAILO_MARK_AS_IN_USE_CODE, struct hailo_mark_as_in_use_params)
512 +#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
513 +#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
514
515 -#define HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_ALLOC_CODE, struct hailo_allocate_continuous_buffer_params)
516 -#define HAILO_VDMA_CONTINUOUS_BUFFER_FREE _IOR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_CONTINUOUS_BUFFER_FREE_CODE, struct hailo_free_continuous_buffer_params)
517 +#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
518
519 -#define HAILO_VDMA_LAUNCH_TRANSFER _IOWR_(HAILO_VDMA_IOCTL_MAGIC, HAILO_VDMA_LAUNCH_TRANSFER_CODE, struct hailo_vdma_launch_transfer_params)
520 +enum hailo_nnc_ioctl_code {
521 + HAILO_FW_CONTROL_CODE,
522 + HAILO_READ_NOTIFICATION_CODE,
523 + HAILO_DISABLE_NOTIFICATION_CODE,
524 + HAILO_READ_LOG_CODE,
525 + HAILO_RESET_NN_CORE_CODE,
526
527 + // Must be last
528 + HAILO_NNC_IOCTL_MAX_NR
529 +};
530
531 -enum hailo_non_linux_ioctl_code {
532 - HAILO_NON_LINUX_DESC_LIST_MMAP_CODE,
533 +#define HAILO_FW_CONTROL _IOWR_(HAILO_NNC_IOCTL_MAGIC, HAILO_FW_CONTROL_CODE, struct hailo_fw_control)
534 +#define HAILO_READ_NOTIFICATION _IOW_(HAILO_NNC_IOCTL_MAGIC, HAILO_READ_NOTIFICATION_CODE, struct hailo_d2h_notification)
535 +#define HAILO_DISABLE_NOTIFICATION _IO_(HAILO_NNC_IOCTL_MAGIC, HAILO_DISABLE_NOTIFICATION_CODE)
536 +#define HAILO_READ_LOG _IOWR_(HAILO_NNC_IOCTL_MAGIC, HAILO_READ_LOG_CODE, struct hailo_read_log_params)
537 +#define HAILO_RESET_NN_CORE _IO_(HAILO_NNC_IOCTL_MAGIC, HAILO_RESET_NN_CORE_CODE)
538 +
539 +enum hailo_soc_ioctl_code {
540 + HAILO_SOC_IOCTL_CONNECT_CODE,
541 + HAILO_SOC_IOCTL_CLOSE_CODE,
542
543 // Must be last
544 - HAILO_NON_LINUX_IOCTL_MAX_NR,
545 + HAILO_SOC_IOCTL_MAX_NR,
546 };
547
548 -#define HAILO_NON_LINUX_DESC_LIST_MMAP _IOWR_(HAILO_NON_LINUX_IOCTL_MAGIC, HAILO_NON_LINUX_DESC_LIST_MMAP_CODE, struct hailo_non_linux_desc_list_mmap_params)
549 +#define HAILO_SOC_CONNECT _IOWR_(HAILO_SOC_IOCTL_MAGIC, HAILO_SOC_IOCTL_CONNECT_CODE, struct hailo_soc_connect_params)
550 +#define HAILO_SOC_CLOSE _IOR_(HAILO_SOC_IOCTL_MAGIC, HAILO_SOC_IOCTL_CLOSE_CODE, struct hailo_soc_close_params)
551 +
552 +
553 +enum hailo_pci_ep_ioctl_code {
554 + HAILO_PCI_EP_ACCEPT_CODE,
555 + HAILO_PCI_EP_CLOSE_CODE,
556 +
557 + // Must be last
558 + HAILO_PCI_EP_IOCTL_MAX_NR,
559 +};
560
561 +#define HAILO_PCI_EP_ACCEPT _IOWR_(HAILO_PCI_EP_IOCTL_MAGIC, HAILO_PCI_EP_ACCEPT_CODE, struct hailo_pci_ep_accept_params)
562 +#define HAILO_PCI_EP_CLOSE _IOR_(HAILO_PCI_EP_IOCTL_MAGIC, HAILO_PCI_EP_CLOSE_CODE, struct hailo_pci_ep_close_params)
563
564 #endif /* _HAILO_IOCTL_COMMON_H_ */
565 --- a/drivers/media/pci/hailo/common/hailo_resource.c
566 +++ b/drivers/media/pci/hailo/common/hailo_resource.c
567 @@ -1,4 +1,4 @@
568 -// SPDX-License-Identifier: GPL-2.0
569 +// SPDX-License-Identifier: MIT
570 /**
571 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
572 **/
573 --- a/drivers/media/pci/hailo/common/hailo_resource.h
574 +++ b/drivers/media/pci/hailo/common/hailo_resource.h
575 @@ -1,4 +1,4 @@
576 -// SPDX-License-Identifier: GPL-2.0
577 +// SPDX-License-Identifier: MIT
578 /**
579 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
580 **/
581 --- a/drivers/media/pci/hailo/common/pcie_common.c
582 +++ b/drivers/media/pci/hailo/common/pcie_common.c
583 @@ -1,4 +1,4 @@
584 -// SPDX-License-Identifier: GPL-2.0
585 +// SPDX-License-Identifier: MIT
586 /**
587 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
588 **/
589 @@ -10,6 +10,8 @@
590 #include <linux/bug.h>
591 #include <linux/delay.h>
592 #include <linux/kernel.h>
593 +#include <linux/printk.h>
594 +#include <linux/device.h>
595
596
597 #define BSC_IMASK_HOST (0x0188)
598 @@ -19,14 +21,13 @@
599
600 #define PO2_ROUND_UP(size, alignment) ((size + alignment-1) & ~(alignment-1))
601
602 -#define ATR0_PARAM (0x17)
603 -#define ATR0_SRC_ADDR (0x0)
604 -#define ATR0_TRSL_ADDR2 (0x0)
605 -#define ATR0_TRSL_PARAM (6)
606 +#define ATR_PARAM (0x17)
607 +#define ATR_SRC_ADDR (0x0)
608 +#define ATR_TRSL_PARAM (6)
609 +#define ATR_TABLE_SIZE (0x1000u)
610 +#define ATR_TABLE_SIZE_MASK (0x1000u - 1)
611
612 #define ATR0_PCIE_BRIDGE_OFFSET (0x700)
613 -#define ATR0_TABLE_SIZE (0x1000u)
614 -#define ATR0_TABLE_SIZE_MASK (0x1000u - 1)
615
616 #define MAXIMUM_APP_FIRMWARE_CODE_SIZE (0x40000)
617 #define MAXIMUM_CORE_FIRMWARE_CODE_SIZE (0x20000)
618 @@ -45,8 +46,13 @@
619 #define HAILO_PCIE_HOST_DMA_DATA_ID (0)
620 #define HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK (1 << 4)
621 #define HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK (1 << 5)
622 +#define HAILO_PCIE_DMA_SRC_CHANNELS_BITMASK (0x0000FFFF)
623
624 -typedef u32 hailo_ptr_t;
625 +#define HAILO_PCIE_MAX_ATR_TABLE_INDEX (3)
626 +
627 +#define MAX_FILES_PER_STAGE (4)
628 +
629 +#define BOOT_STATUS_UNINITIALIZED (0x1)
630
631 struct hailo_fw_addresses {
632 u32 boot_fw_header;
633 @@ -58,14 +64,11 @@ struct hailo_fw_addresses {
634 u32 core_fw_header;
635 u32 atr0_trsl_addr1;
636 u32 raise_ready_offset;
637 + u32 boot_status;
638 };
639
640 -struct hailo_atr_config {
641 - u32 atr_param;
642 - u32 atr_src;
643 - u32 atr_trsl_addr_1;
644 - u32 atr_trsl_addr_2;
645 - u32 atr_trsl_param;
646 +struct loading_stage {
647 + const struct hailo_file_batch *batch;
648 };
649
650 struct hailo_board_compatibility {
651 @@ -73,6 +76,69 @@ struct hailo_board_compatibility {
652 const char *fw_filename;
653 const struct hailo_config_constants board_cfg;
654 const struct hailo_config_constants fw_cfg;
655 + const struct loading_stage stages[MAX_LOADING_STAGES];
656 +};
657 +
658 +static const struct hailo_file_batch hailo10h_files_stg1[] = {
659 + {
660 + .filename = "hailo/hailo10h/customer_certificate.bin",
661 + .address = 0xA0000,
662 + .max_size = 0x8004,
663 + .is_mandatory = true,
664 + .has_header = false
665 + },
666 + {
667 + .filename = "hailo/hailo10h/u-boot.dtb.signed",
668 + .address = 0xA8004,
669 + .max_size = 0x20000,
670 + .is_mandatory = true,
671 + .has_header = false
672 + },
673 + {
674 + .filename = "hailo/hailo10h/scu_fw.bin",
675 + .address = 0x20000,
676 + .max_size = 0x40000,
677 + .is_mandatory = true,
678 + .has_header = true
679 + },
680 + {
681 + .filename = NULL,
682 + .address = 0x00,
683 + .max_size = 0x00,
684 + .is_mandatory = false,
685 + .has_header = false
686 + }
687 +};
688 +
689 +static const struct hailo_file_batch hailo10h_files_stg2[] = {
690 + {
691 + .filename = "hailo/hailo10h/u-boot-spl.bin",
692 + .address = 0x85000000,
693 + .max_size = 0x1000000,
694 + .is_mandatory = true,
695 + .has_header = false
696 + },
697 + {
698 + .filename = "hailo/hailo10h/u-boot-tfa.itb",
699 + .address = 0x86000000,
700 + .max_size = 0x1000000,
701 + .is_mandatory = true,
702 + .has_header = false
703 + },
704 + {
705 + .filename = "hailo/hailo10h/fitImage",
706 + .address = 0x87000000,
707 + .max_size = 0x1000000,
708 + .is_mandatory = true,
709 + .has_header = false
710 + },
711 + {
712 + .filename = "hailo/hailo10h/core-image-minimal-hailo10-m2.ext4.gz",
713 + .address = 0x88000000,
714 + .max_size = 0x20000000, // Max size 512MB
715 + .is_mandatory = true,
716 + .has_header = false
717 + },
718 };
719
720 static const struct hailo_board_compatibility compat[HAILO_BOARD_TYPE_COUNT] = {
721 @@ -87,6 +153,7 @@ static const struct hailo_board_compatib
722 .core_fw_header = 0xA0000,
723 .atr0_trsl_addr1 = 0x60000000,
724 .raise_ready_offset = 0x1684,
725 + .boot_status = 0xe0000,
726 },
727 .fw_filename = "hailo/hailo8_fw.bin",
728 .board_cfg = {
729 @@ -100,7 +167,7 @@ static const struct hailo_board_compatib
730 .max_size = PCIE_HAILO8_FW_CFG_MAX_SIZE,
731 },
732 },
733 - [HAILO_BOARD_TYPE_HAILO15] = {
734 + [HAILO_BOARD_TYPE_HAILO10H_LEGACY] = {
735 .fw_addresses = {
736 .boot_fw_header = 0x88000,
737 .boot_fw_trigger = 0x88c98,
738 @@ -111,6 +178,7 @@ static const struct hailo_board_compatib
739 .core_fw_header = 0xC0000,
740 .atr0_trsl_addr1 = 0x000BE000,
741 .raise_ready_offset = 0x1754,
742 + .boot_status = 0x80000,
743 },
744 .fw_filename = "hailo/hailo15_fw.bin",
745 .board_cfg = {
746 @@ -124,6 +192,39 @@ static const struct hailo_board_compatib
747 .max_size = 0,
748 },
749 },
750 + [HAILO_BOARD_TYPE_HAILO10H] = {
751 + .fw_addresses = {
752 + .boot_fw_header = 0x88000,
753 + .boot_fw_trigger = 0x88c98,
754 + .boot_key_cert = 0x88018,
755 + .boot_cont_cert = 0x886a8,
756 + .app_fw_code_ram_base = 0x20000,
757 + .core_code_ram_base = 0,
758 + .core_fw_header = 0,
759 + .atr0_trsl_addr1 = 0x000BE000,
760 + .raise_ready_offset = 0x1754,
761 + .boot_status = 0x80000,
762 + },
763 + .fw_filename = NULL,
764 + .board_cfg = {
765 + .filename = NULL,
766 + .address = 0,
767 + .max_size = 0,
768 + },
769 + .fw_cfg = {
770 + .filename = NULL,
771 + .address = 0,
772 + .max_size = 0,
773 + },
774 + .stages = {
775 + {
776 + .batch = hailo10h_files_stg1,
777 + },
778 + {
779 + .batch = hailo10h_files_stg2,
780 + },
781 + },
782 + },
783 + // HRT-11344 : none of these matter except raise_ready_offset seeing as we load fw separately - not through driver
784 // After implementing bootloader put correct values here
785 [HAILO_BOARD_TYPE_PLUTO] = {
786 @@ -138,6 +239,7 @@ static const struct hailo_board_compatib
787 .atr0_trsl_addr1 = 0x000BE000,
788 // NOTE: After they update hw consts - check register fw_access_interrupt_w1s of pcie_config
789 .raise_ready_offset = 0x174c,
790 + .boot_status = 0x80000,
791 },
792 .fw_filename = "hailo/pluto_fw.bin",
793 .board_cfg = {
794 @@ -225,7 +327,7 @@ int hailo_pcie_read_firmware_control(str
795 // Copy response buffer
796 hailo_resource_read_buffer(&resources->fw_access, PCIE_REQUEST_SIZE_OFFSET + (size_t)response_header_size,
797 command->buffer_len, &command->buffer);
798 -
799 +
800 return 0;
801 }
802
803 @@ -253,93 +355,111 @@ int hailo_pcie_read_firmware_notificatio
804 return hailo_read_firmware_notification(&notification_resource, notification);
805 }
806
807 -static void write_atr_table(struct hailo_pcie_resources *resources,
808 - struct hailo_atr_config *atr)
809 +int hailo_pcie_configure_atr_table(struct hailo_resource *bridge_config, u64 trsl_addr, u32 atr_index)
810 {
811 - hailo_resource_write_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
812 - sizeof(*atr), (void*)atr);
813 -}
814 + size_t offset = 0;
815 + struct hailo_atr_config atr = {
816 + .atr_param = (ATR_PARAM | (atr_index << 12)),
817 + .atr_src = ATR_SRC_ADDR,
818 + .atr_trsl_addr_1 = (u32)(trsl_addr & 0xFFFFFFFF),
819 + .atr_trsl_addr_2 = (u32)(trsl_addr >> 32),
820 + .atr_trsl_param = ATR_TRSL_PARAM
821 + };
822
823 -static void read_atr_table(struct hailo_pcie_resources *resources,
824 - struct hailo_atr_config *atr)
825 -{
826 - hailo_resource_read_buffer(&resources->config, ATR0_PCIE_BRIDGE_OFFSET,
827 - sizeof(*atr), (void*)atr);
828 + BUG_ON(HAILO_PCIE_MAX_ATR_TABLE_INDEX < atr_index);
829 + offset = ATR0_PCIE_BRIDGE_OFFSET + (atr_index * 0x20);
830 +
831 + return hailo_resource_write_buffer(bridge_config, offset, sizeof(atr), (void*)&atr);
832 }
833
834 -static void configure_atr_table(struct hailo_pcie_resources *resources,
835 - hailo_ptr_t base_address)
836 +void hailo_pcie_read_atr_table(struct hailo_resource *bridge_config, struct hailo_atr_config *atr, u32 atr_index)
837 {
838 - struct hailo_atr_config atr = {
839 - .atr_param = ATR0_PARAM,
840 - .atr_src = ATR0_SRC_ADDR,
841 - .atr_trsl_addr_1 = (u32)base_address,
842 - .atr_trsl_addr_2 = ATR0_TRSL_ADDR2,
843 - .atr_trsl_param = ATR0_TRSL_PARAM
844 - };
845 - write_atr_table(resources, &atr);
846 + size_t offset = 0;
847 +
848 + BUG_ON(HAILO_PCIE_MAX_ATR_TABLE_INDEX < atr_index);
849 + offset = ATR0_PCIE_BRIDGE_OFFSET + (atr_index * 0x20);
850 +
851 + hailo_resource_read_buffer(bridge_config, offset, sizeof(*atr), (void*)atr);
852 }
853
854 static void write_memory_chunk(struct hailo_pcie_resources *resources,
855 hailo_ptr_t dest, u32 dest_offset, const void *src, u32 len)
856 {
857 + u32 ATR_INDEX = 0;
858 BUG_ON(dest_offset + len > (u32)resources->fw_access.size);
859
860 - configure_atr_table(resources, dest);
861 + (void)hailo_pcie_configure_atr_table(&resources->config, (u64)dest, ATR_INDEX);
862 (void)hailo_resource_write_buffer(&resources->fw_access, dest_offset, len, src);
863 }
864
865 static void read_memory_chunk(
866 struct hailo_pcie_resources *resources, hailo_ptr_t src, u32 src_offset, void *dest, u32 len)
867 {
868 + u32 ATR_INDEX = 0;
869 BUG_ON(src_offset + len > (u32)resources->fw_access.size);
870
871 - configure_atr_table(resources, src);
872 + (void)hailo_pcie_configure_atr_table(&resources->config, (u64)src, ATR_INDEX);
873 (void)hailo_resource_read_buffer(&resources->fw_access, src_offset, len, dest);
874 }
875
876 // Note: this function modify the device ATR table (that is also used by the firmware for control and vdma).
877 // Use with caution, and restore the original atr if needed.
878 -static void write_memory(struct hailo_pcie_resources *resources, hailo_ptr_t dest, const void *src, u32 len)
879 +void write_memory(struct hailo_pcie_resources *resources, hailo_ptr_t dest, const void *src, u32 len)
880 {
881 - hailo_ptr_t base_address = dest & ~ATR0_TABLE_SIZE_MASK;
882 + struct hailo_atr_config previous_atr = {0};
883 + hailo_ptr_t base_address = (dest & ~ATR_TABLE_SIZE_MASK);
884 u32 chunk_len = 0;
885 u32 offset = 0;
886 + u32 ATR_INDEX = 0;
887 +
888 + // Store previous ATR (Read/write modify the ATR).
889 + hailo_pcie_read_atr_table(&resources->config, &previous_atr, ATR_INDEX);
890
891 if (base_address != dest) {
892 // Data is not aligned, write the first chunk
893 - chunk_len = min(base_address + ATR0_TABLE_SIZE - dest, len);
894 + chunk_len = min(base_address + ATR_TABLE_SIZE - dest, len);
895 write_memory_chunk(resources, base_address, dest - base_address, src, chunk_len);
896 offset += chunk_len;
897 }
898
899 while (offset < len) {
900 - chunk_len = min(len - offset, ATR0_TABLE_SIZE);
901 + chunk_len = min(len - offset, ATR_TABLE_SIZE);
902 write_memory_chunk(resources, dest + offset, 0, (const u8*)src + offset, chunk_len);
903 offset += chunk_len;
904 }
905 +
906 + (void)hailo_pcie_configure_atr_table(&resources->config,
907 + (((u64)(previous_atr.atr_trsl_addr_2) << 32) | previous_atr.atr_trsl_addr_1), ATR_INDEX);
908 }
909
910 // Note: this function modify the device ATR table (that is also used by the firmware for control and vdma).
911 // Use with caution, and restore the original atr if needed.
912 static void read_memory(struct hailo_pcie_resources *resources, hailo_ptr_t src, void *dest, u32 len)
913 {
914 - hailo_ptr_t base_address = src & ~ATR0_TABLE_SIZE_MASK;
915 + struct hailo_atr_config previous_atr = {0};
916 + hailo_ptr_t base_address = (src & ~ATR_TABLE_SIZE_MASK);
917 u32 chunk_len = 0;
918 u32 offset = 0;
919 + u32 ATR_INDEX = 0;
920 +
921 + // Store previous ATR (Read/write modify the ATR).
922 + hailo_pcie_read_atr_table(&resources->config, &previous_atr, ATR_INDEX);
923
924 if (base_address != src) {
925 // Data is not aligned, write the first chunk
926 - chunk_len = min(base_address + ATR0_TABLE_SIZE - src, len);
927 + chunk_len = min(base_address + ATR_TABLE_SIZE - src, len);
928 read_memory_chunk(resources, base_address, src - base_address, dest, chunk_len);
929 offset += chunk_len;
930 }
931
932 while (offset < len) {
933 - chunk_len = min(len - offset, ATR0_TABLE_SIZE);
934 + chunk_len = min(len - offset, ATR_TABLE_SIZE);
935 read_memory_chunk(resources, src + offset, 0, (u8*)dest + offset, chunk_len);
936 offset += chunk_len;
937 }
938 +
939 + (void)hailo_pcie_configure_atr_table(&resources->config,
940 + (((u64)(previous_atr.atr_trsl_addr_2) << 32) | previous_atr.atr_trsl_addr_1), ATR_INDEX);
941 }
942
943 static void hailo_write_app_firmware(struct hailo_pcie_resources *resources, firmware_header_t *fw_header,
944 @@ -367,7 +487,7 @@ static void hailo_write_core_firmware(st
945 write_memory(resources, fw_addresses->core_fw_header, fw_header, sizeof(firmware_header_t));
946 }
947
948 -static void hailo_trigger_firmware_boot(struct hailo_pcie_resources *resources)
949 +void hailo_trigger_firmware_boot(struct hailo_pcie_resources *resources)
950 {
951 const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
952 u32 pcie_finished = 1;
953 @@ -376,6 +496,17 @@ static void hailo_trigger_firmware_boot(
954 (void*)&pcie_finished, sizeof(pcie_finished));
955 }
956
957 +u32 hailo_get_boot_status(struct hailo_pcie_resources *resources)
958 +{
959 + u32 boot_status = 0;
960 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
961 +
962 + read_memory(resources, fw_addresses->boot_status,
963 + &boot_status, sizeof(boot_status));
964 +
965 + return boot_status;
966 +}
967 +
968 /**
969 * Validates the FW headers.
970 * @param[in] address Address of the firmware.
971 @@ -408,11 +539,14 @@ static int FW_VALIDATION__validate_fw_he
972 goto exit;
973 }
974
975 - err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_CORE_FIRMWARE_CODE_SIZE,
976 - &consumed_firmware_offset, &core_firmware_header, board_type);
977 - if (0 != err) {
978 - err = -EINVAL;
979 - goto exit;
980 + // Not validating with HAILO10H since core firmware is not loaded over PCIe
981 + if (HAILO_BOARD_TYPE_HAILO10H != board_type) {
982 + err = FW_VALIDATION__validate_fw_header(firmware_base_address, firmware_size, MAXIMUM_CORE_FIRMWARE_CODE_SIZE,
983 + &consumed_firmware_offset, &core_firmware_header, board_type);
984 + if (0 != err) {
985 + err = -EINVAL;
986 + goto exit;
987 + }
988 }
989
990 if (consumed_firmware_offset != firmware_size) {
991 @@ -437,6 +571,70 @@ exit:
992 return err;
993 }
994
995 +static int write_single_file(struct hailo_pcie_resources *resources, const struct hailo_file_batch *files_batch, struct device *dev)
996 +{
997 + const struct firmware *firmware = NULL;
998 + firmware_header_t *app_firmware_header = NULL;
999 + secure_boot_certificate_t *firmware_cert = NULL;
1000 + firmware_header_t *core_firmware_header = NULL;
1001 + int err = 0;
1002 +
1003 + err = request_firmware_direct(&firmware, files_batch->filename, dev);
1004 + if (err < 0) {
1005 + return err;
1006 + }
1007 +
1008 + if (firmware->size > files_batch->max_size) {
1009 + release_firmware(firmware);
1010 + return -EFBIG;
1011 + }
1012 +
1013 + if (files_batch->has_header) {
1014 + err = FW_VALIDATION__validate_fw_headers((uintptr_t)firmware->data, firmware->size,
1015 + &app_firmware_header, &core_firmware_header, &firmware_cert, resources->board_type);
1016 + if (err < 0) {
1017 + release_firmware(firmware);
1018 + return err;
1019 + }
1020 +
1021 + hailo_write_app_firmware(resources, app_firmware_header, firmware_cert);
1022 + } else {
1023 + write_memory(resources, files_batch->address, (void*)firmware->data, firmware->size);
1024 + }
1025 +
1026 + release_firmware(firmware);
1027 +
1028 + return 0;
1029 +}
1030 +
1031 +int hailo_pcie_write_firmware_batch(struct device *dev, struct hailo_pcie_resources *resources, u32 stage)
1032 +{
1033 + const struct hailo_file_batch *files_batch = compat[resources->board_type].stages[stage].batch;
1034 + int file_index = 0;
1035 + int err = 0;
1036 +
1037 + for (file_index = 0; file_index < MAX_FILES_PER_STAGE; file_index++)
1038 + {
1039 + if (NULL == files_batch[file_index].filename) {
1040 + break;
1041 + }
1042 +
1043 + dev_notice(dev, "Writing file %s\n", files_batch[file_index].filename);
1044 +
1045 + err = write_single_file(resources, &files_batch[file_index], dev);
1046 + if (err < 0) {
1047 + pr_warn("Failed to write file %s\n", files_batch[file_index].filename);
1048 + if (files_batch[file_index].is_mandatory) {
1049 + return err;
1050 + }
1051 + }
1052 +
1053 + dev_notice(dev, "File %s written successfully\n", files_batch[file_index].filename);
1054 + }
1055 +
1056 + return 0;
1057 +}
1058 +
1059 int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size)
1060 {
1061 firmware_header_t *app_firmware_header = NULL;
1062 @@ -457,10 +655,25 @@ int hailo_pcie_write_firmware(struct hai
1063 return 0;
1064 }
1065
1066 +// TODO: HRT-14147 - remove this function
1067 +bool hailo_pcie_is_device_ready_for_boot(struct hailo_pcie_resources *resources)
1068 +{
1069 + return hailo_get_boot_status(resources) == BOOT_STATUS_UNINITIALIZED;
1070 +}
1071 +
1072 bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources)
1073 {
1074 - u32 offset = ATR0_PCIE_BRIDGE_OFFSET + offsetof(struct hailo_atr_config, atr_trsl_addr_1);
1075 - u32 atr_value = hailo_resource_read32(&resources->config, offset);
1076 + u32 offset;
1077 + u32 atr_value;
1078 +
1079 + // TODO: HRT-14147
1080 + if (HAILO_BOARD_TYPE_HAILO10H == resources->board_type) {
1081 + return !hailo_pcie_is_device_ready_for_boot(resources);
1082 + }
1083 +
1084 + offset = ATR0_PCIE_BRIDGE_OFFSET + offsetof(struct hailo_atr_config, atr_trsl_addr_1);
1085 + atr_value = hailo_resource_read32(&resources->config, offset);
1086 +
1087 return atr_value == compat[resources->board_type].fw_addresses.atr0_trsl_addr1;
1088 }
1089
1090 @@ -516,7 +729,7 @@ void hailo_pcie_update_channel_interrupt
1091 for (i = 0; i < MAX_VDMA_CHANNELS_PER_ENGINE; ++i) {
1092 if (hailo_test_bit(i, &channels_bitmap)) {
1093 // based on 18.5.2 "vDMA Interrupt Registers" in PLDA documentation
1094 - u32 offset = (i < VDMA_DEST_CHANNELS_START) ? 0 : 8;
1095 + u32 offset = (i & 16) ? 8 : 0;
1096 hailo_set_bit((((int)i*8) / MAX_VDMA_CHANNELS_PER_ENGINE) + offset, &mask);
1097 }
1098 }
1099 @@ -531,7 +744,8 @@ void hailo_pcie_enable_interrupts(struct
1100 hailo_resource_write32(&resources->config, BCS_DESTINATION_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
1101 hailo_resource_write32(&resources->config, BCS_SOURCE_INTERRUPT_PER_CHANNEL, 0xFFFFFFFF);
1102
1103 - mask |= BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK | BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION | BCS_ISTATUS_HOST_DRIVER_DOWN;
1104 + mask |= (BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK | BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION |
1105 + BCS_ISTATUS_HOST_DRIVER_DOWN | BCS_ISTATUS_SOC_CONNECT_ACCEPTED);
1106 hailo_resource_write32(&resources->config, BSC_IMASK_HOST, mask);
1107 }
1108
1109 @@ -569,16 +783,10 @@ long hailo_pcie_read_firmware_log(struct
1110 static int direct_memory_transfer(struct hailo_pcie_resources *resources,
1111 struct hailo_memory_transfer_params *params)
1112 {
1113 - int err = -EINVAL;
1114 - struct hailo_atr_config previous_atr = {0};
1115 -
1116 if (params->address > U32_MAX) {
1117 return -EFAULT;
1118 }
1119
1120 - // Store previous ATR (Read/write modify the ATR).
1121 - read_atr_table(resources, &previous_atr);
1122 -
1123 switch (params->transfer_direction) {
1124 case TRANSFER_READ:
1125 read_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
1126 @@ -587,14 +795,10 @@ static int direct_memory_transfer(struct
1127 write_memory(resources, (u32)params->address, params->buffer, (u32)params->count);
1128 break;
1129 default:
1130 - err = -EINVAL;
1131 - goto restore_atr;
1132 + return -EINVAL;
1133 }
1134
1135 - err = 0;
1136 -restore_atr:
1137 - write_atr_table(resources, &previous_atr);
1138 - return err;
1139 + return 0;
1140 }
1141
1142 int hailo_pcie_memory_transfer(struct hailo_pcie_resources *resources, struct hailo_memory_transfer_params *params)
1143 @@ -623,6 +827,24 @@ bool hailo_pcie_is_device_connected(stru
1144 return PCI_VENDOR_ID_HAILO == hailo_resource_read16(&resources->config, PCIE_CONFIG_VENDOR_OFFSET);
1145 }
1146
1147 +int hailo_set_device_type(struct hailo_pcie_resources *resources)
1148 +{
1149 + switch(resources->board_type) {
1150 + case HAILO_BOARD_TYPE_HAILO8:
1151 + case HAILO_BOARD_TYPE_HAILO10H_LEGACY:
1152 + case HAILO_BOARD_TYPE_PLUTO:
1153 + resources->accelerator_type = HAILO_ACCELERATOR_TYPE_NNC;
1154 + break;
1155 + case HAILO_BOARD_TYPE_HAILO10H:
1156 + resources->accelerator_type = HAILO_ACCELERATOR_TYPE_SOC;
1157 + break;
1158 + default:
1159 + return -EINVAL;
1160 + }
1161 +
1162 + return 0;
1163 +}
1164 +
1165 // On PCIe, just return the address
1166 static u64 encode_dma_address(dma_addr_t dma_address, u8 channel_id)
1167 {
1168 @@ -637,5 +859,14 @@ struct hailo_vdma_hw hailo_pcie_vdma_hw
1169 .ddr_data_id = HAILO_PCIE_HOST_DMA_DATA_ID,
1170 .device_interrupts_bitmask = HAILO_PCIE_DMA_DEVICE_INTERRUPTS_BITMASK,
1171 .host_interrupts_bitmask = HAILO_PCIE_DMA_HOST_INTERRUPTS_BITMASK,
1172 + .src_channels_bitmask = HAILO_PCIE_DMA_SRC_CHANNELS_BITMASK,
1173 +};
1174
1175 -};
1176 \ No newline at end of file
1177 +void hailo_soc_write_soc_connect(struct hailo_pcie_resources *resources)
1178 +{
1179 + const struct hailo_fw_addresses *fw_addresses = &(compat[resources->board_type].fw_addresses);
1180 + const u32 soc_connect_value = FW_ACCESS_SOC_CONNECT_MASK;
1181 +
1182 + // Write SoC connect flag to FW
1183 + hailo_resource_write32(&resources->fw_access, fw_addresses->raise_ready_offset, soc_connect_value);
1184 +}
1185 \ No newline at end of file
1186 --- a/drivers/media/pci/hailo/common/pcie_common.h
1187 +++ b/drivers/media/pci/hailo/common/pcie_common.h
1188 @@ -1,4 +1,4 @@
1189 -// SPDX-License-Identifier: GPL-2.0
1190 +// SPDX-License-Identifier: MIT
1191 /**
1192 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1193 **/
1194 @@ -14,11 +14,13 @@
1195 #include "vdma_common.h"
1196
1197 #include <linux/types.h>
1198 +#include <linux/firmware.h>
1199
1200
1201 #define BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK (0x04000000)
1202 #define BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION (0x02000000)
1203 #define BCS_ISTATUS_HOST_DRIVER_DOWN (0x08000000)
1204 +#define BCS_ISTATUS_SOC_CONNECT_ACCEPTED (0x10000000)
1205 #define BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK (0x000000FF)
1206 #define BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK (0x0000FF00)
1207
1208 @@ -40,17 +42,35 @@
1209 #define PCI_DEVICE_ID_HAILO_HAILO15 0x45C4
1210 #define PCI_DEVICE_ID_HAILO_PLUTO 0x43a2
1211
1212 +typedef u32 hailo_ptr_t;
1213 +
1214 struct hailo_pcie_resources {
1215 struct hailo_resource config; // BAR0
1216 struct hailo_resource vdma_registers; // BAR2
1217 struct hailo_resource fw_access; // BAR4
1218 enum hailo_board_type board_type;
1219 + enum hailo_accelerator_type accelerator_type;
1220 +};
1221 +
1222 +struct hailo_atr_config {
1223 + u32 atr_param;
1224 + u32 atr_src;
1225 + u32 atr_trsl_addr_1;
1226 + u32 atr_trsl_addr_2;
1227 + u32 atr_trsl_param;
1228 +};
1229 +
1230 +enum loading_stages {
1231 + FIRST_STAGE = 0,
1232 + SECOND_STAGE = 1,
1233 + MAX_LOADING_STAGES = 2
1234 };
1235
1236 enum hailo_pcie_interrupt_masks {
1237 FW_CONTROL = BCS_ISTATUS_HOST_FW_IRQ_CONTROL_MASK,
1238 FW_NOTIFICATION = BCS_ISTATUS_HOST_FW_IRQ_NOTIFICATION,
1239 DRIVER_DOWN = BCS_ISTATUS_HOST_DRIVER_DOWN,
1240 + SOC_CONNECT_ACCEPTED = BCS_ISTATUS_SOC_CONNECT_ACCEPTED,
1241 VDMA_SRC_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_SRC_IRQ_MASK,
1242 VDMA_DEST_IRQ_MASK = BCS_ISTATUS_HOST_VDMA_DEST_IRQ_MASK
1243 };
1244 @@ -66,6 +86,14 @@ struct hailo_config_constants {
1245 size_t max_size;
1246 };
1247
1248 +struct hailo_file_batch {
1249 + const char *filename;
1250 + u32 address;
1251 + size_t max_size;
1252 + bool is_mandatory;
1253 + bool has_header;
1254 +};
1255 +
1256 // TODO: HRT-6144 - Align Windows/Linux to QNX
1257 #ifdef __QNX__
1258 enum hailo_bar_index {
1259 @@ -103,6 +131,7 @@ int hailo_pcie_write_firmware_control(st
1260 int hailo_pcie_read_firmware_control(struct hailo_pcie_resources *resources, struct hailo_fw_control *command);
1261
1262 int hailo_pcie_write_firmware(struct hailo_pcie_resources *resources, const void *fw_data, size_t fw_size);
1263 +int hailo_pcie_write_firmware_batch(struct device *dev, struct hailo_pcie_resources *resources, u32 stage);
1264 bool hailo_pcie_is_firmware_loaded(struct hailo_pcie_resources *resources);
1265 bool hailo_pcie_wait_for_firmware(struct hailo_pcie_resources *resources);
1266
1267 @@ -120,6 +149,17 @@ int hailo_pcie_memory_transfer(struct ha
1268
1269 bool hailo_pcie_is_device_connected(struct hailo_pcie_resources *resources);
1270 void hailo_pcie_write_firmware_driver_shutdown(struct hailo_pcie_resources *resources);
1271 +void write_memory(struct hailo_pcie_resources *resources, hailo_ptr_t dest, const void *src, u32 len);
1272 +void hailo_trigger_firmware_boot(struct hailo_pcie_resources *resources);
1273 +
1274 +int hailo_set_device_type(struct hailo_pcie_resources *resources);
1275 +
1276 +u32 hailo_get_boot_status(struct hailo_pcie_resources *resources);
1277 +
1278 +int hailo_pcie_configure_atr_table(struct hailo_resource *bridge_config, u64 trsl_addr, u32 atr_index);
1279 +void hailo_pcie_read_atr_table(struct hailo_resource *bridge_config, struct hailo_atr_config *atr, u32 atr_index);
1280 +
1281 +void hailo_soc_write_soc_connect(struct hailo_pcie_resources *resources);
1282
1283 #ifdef __cplusplus
1284 }
1285 --- a/drivers/media/pci/hailo/common/utils.h
1286 +++ b/drivers/media/pci/hailo/common/utils.h
1287 @@ -1,4 +1,4 @@
1288 -// SPDX-License-Identifier: GPL-2.0
1289 +// SPDX-License-Identifier: MIT
1290 /**
1291 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1292 **/
1293 @@ -11,6 +11,12 @@
1294 #define hailo_clear_bit(bit, pval) { *(pval) &= ~(1 << bit); }
1295 #define hailo_test_bit(pos,var_addr) ((*var_addr) & (1<<(pos)))
1296
1297 +#define READ_BITS_AT_OFFSET(amount_bits, offset, initial_value) \
1298 + (((initial_value) >> (offset)) & ((1 << (amount_bits)) - 1))
1299 +#define WRITE_BITS_AT_OFFSET(amount_bits, offset, initial_value, value) \
1300 + (((initial_value) & ~(((1 << (amount_bits)) - 1) << (offset))) | \
1301 + (((value) & ((1 << (amount_bits)) - 1)) << (offset)))
1302 +
1303 #ifdef __cplusplus
1304 extern "C"
1305 {
1306 @@ -28,6 +34,22 @@ static inline void hailo_set_bit(int nr,
1307 *p |= mask;
1308 }
1309
1310 +static inline uint8_t ceil_log2(uint32_t n)
1311 +{
1312 + uint8_t result = 0;
1313 +
1314 + if (n <= 1) {
1315 + return 0;
1316 + }
1317 +
1318 + while (n > 1) {
1319 + result++;
1320 + n = (n + 1) >> 1;
1321 + }
1322 +
1323 + return result;
1324 +}
1325 +
1326 #ifndef DIV_ROUND_UP
1327 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
1328 #endif
1329 --- a/drivers/media/pci/hailo/common/vdma_common.c
1330 +++ b/drivers/media/pci/hailo/common/vdma_common.c
1331 @@ -1,4 +1,4 @@
1332 -// SPDX-License-Identifier: GPL-2.0
1333 +// SPDX-License-Identifier: MIT
1334 /**
1335 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1336 **/
1337 @@ -17,25 +17,37 @@
1338
1339
1340 #define CHANNEL_BASE_OFFSET(channel_index) ((channel_index) << 5)
1341 -#define CHANNEL_HOST_OFFSET(channel_index) CHANNEL_BASE_OFFSET(channel_index) + \
1342 - (channel_index < VDMA_DEST_CHANNELS_START ? 0 : 0x10)
1343 -#define CHANNEL_DEVICE_OFFSET(channel_index) CHANNEL_BASE_OFFSET(channel_index) + \
1344 - (channel_index < VDMA_DEST_CHANNELS_START ? 0x10 : 0)
1345
1346 #define CHANNEL_CONTROL_OFFSET (0x0)
1347 +#define CHANNEL_DEPTH_ID_OFFSET (0x1)
1348 #define CHANNEL_NUM_AVAIL_OFFSET (0x2)
1349 #define CHANNEL_NUM_PROC_OFFSET (0x4)
1350 #define CHANNEL_ERROR_OFFSET (0x8)
1351 +#define CHANNEL_DEST_REGS_OFFSET (0x10)
1352
1353 #define VDMA_CHANNEL_CONTROL_START (0x1)
1354 #define VDMA_CHANNEL_CONTROL_ABORT (0b00)
1355 #define VDMA_CHANNEL_CONTROL_ABORT_PAUSE (0b10)
1356 #define VDMA_CHANNEL_CONTROL_START_ABORT_PAUSE_RESUME_BITMASK (0x3)
1357 #define VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK (0x1)
1358 +#define VDMA_CHANNEL_CONTROL_MASK (0xFC)
1359 +#define VDMA_CHANNEL_CONTROL_START_RESUME (0b01)
1360 +#define VDMA_CHANNEL_CONTROL_START_PAUSE (0b11)
1361 +#define VDMA_CHANNEL_CONTROL_ABORT (0b00)
1362 +#define VDMA_CHANNEL_CONTROL_ABORT_PAUSE (0b10)
1363 +#define VDMA_CHANNEL_CONTROL_START_ABORT_PAUSE_RESUME_BITMASK (0x3)
1364 +#define VDMA_CHANNEL_DESC_DEPTH_WIDTH (4)
1365 +#define VDMA_CHANNEL_DESC_DEPTH_SHIFT (11)
1366 +#define VDMA_CHANNEL_DATA_ID_SHIFT (8)
1367 +#define VDMA_CHANNEL__MAX_CHECKS_CHANNEL_IS_IDLE (10000)
1368 +#define VDMA_CHANNEL__ADDRESS_L_OFFSET (0x0A)
1369 +#define VDMA_CHANNEL__ALIGNED_ADDRESS_L_OFFSET (0x8)
1370 +#define VDMA_CHANNEL__ADDRESS_H_OFFSET (0x0C)
1371
1372 #define DESCRIPTOR_PAGE_SIZE_SHIFT (8)
1373 #define DESCRIPTOR_DESC_CONTROL (0x2)
1374 #define DESCRIPTOR_ADDR_L_MASK (0xFFFFFFC0)
1375 +#define DESCRIPTOR_LIST_MAX_DEPTH (16)
1376
1377 #define DESCRIPTOR_DESC_STATUS_DONE_BIT (0x0)
1378 #define DESCRIPTOR_DESC_STATUS_ERROR_BIT (0x1)
1379 @@ -46,10 +58,14 @@
1380 #define DESC_REQUEST_IRQ_PROCESSED (1 << 2)
1381 #define DESC_REQUEST_IRQ_ERR (1 << 3)
1382
1383 +#define VDMA_CHANNEL_NUM_PROCESSED_WIDTH (16)
1384 +#define VDMA_CHANNEL_NUM_PROCESSED_MASK ((1 << VDMA_CHANNEL_NUM_PROCESSED_WIDTH) - 1)
1385 +#define VDMA_CHANNEL_NUM_ONGOING_MASK VDMA_CHANNEL_NUM_PROCESSED_MASK
1386
1387 #define DWORD_SIZE (4)
1388 #define WORD_SIZE (2)
1389 #define BYTE_SIZE (1)
1390 +#define BITS_IN_BYTE (8)
1391
1392 #define TIMESTAMPS_CIRC_SPACE(timestamp_list) \
1393 CIRC_SPACE((timestamp_list).head, (timestamp_list).tail, CHANNEL_IRQ_TIMESTAMPS_SIZE)
1394 @@ -146,18 +162,7 @@ void hailo_vdma_program_descriptor(struc
1395
1396 static u8 get_channel_id(u8 channel_index)
1397 {
1398 - if (channel_index < VDMA_DEST_CHANNELS_START) {
1399 - // H2D channel
1400 - return channel_index;
1401 - }
1402 - else if ((channel_index >= VDMA_DEST_CHANNELS_START) &&
1403 - (channel_index < MAX_VDMA_CHANNELS_PER_ENGINE)) {
1404 - // D2H channel
1405 - return channel_index - VDMA_DEST_CHANNELS_START;
1406 - }
1407 - else {
1408 - return INVALID_VDMA_CHANNEL;
1409 - }
1410 + return (channel_index < MAX_VDMA_CHANNELS_PER_ENGINE) ? (channel_index & 0xF) : INVALID_VDMA_CHANNEL;
1411 }
1412
1413 static int program_descriptors_in_chunk(
1414 @@ -198,12 +203,36 @@ static int program_descriptors_in_chunk(
1415 return (int)desc_per_chunk;
1416 }
1417
1418 -int hailo_vdma_program_descriptors_list(
1419 +static unsigned long get_interrupts_bitmask(struct hailo_vdma_hw *vdma_hw,
1420 + enum hailo_vdma_interrupts_domain interrupts_domain, bool is_debug)
1421 +{
1422 + unsigned long bitmask = 0;
1423 +
1424 + if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE & interrupts_domain)) {
1425 + bitmask |= vdma_hw->device_interrupts_bitmask;
1426 + }
1427 + if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_HOST & interrupts_domain)) {
1428 + bitmask |= vdma_hw->host_interrupts_bitmask;
1429 + }
1430 +
1431 + if (bitmask != 0) {
1432 + bitmask |= DESC_REQUEST_IRQ_PROCESSED | DESC_REQUEST_IRQ_ERR;
1433 + if (is_debug) {
1434 + bitmask |= DESC_STATUS_REQ | DESC_STATUS_REQ_ERR;
1435 + }
1436 + }
1437 +
1438 + return bitmask;
1439 +}
1440 +
1441 +static int bind_and_program_descriptors_list(
1442 struct hailo_vdma_hw *vdma_hw,
1443 struct hailo_vdma_descriptors_list *desc_list,
1444 u32 starting_desc,
1445 struct hailo_vdma_mapped_transfer_buffer *buffer,
1446 - u8 channel_index)
1447 + u8 channel_index,
1448 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
1449 + bool is_debug)
1450 {
1451 const u8 channel_id = get_channel_id(channel_index);
1452 int desc_programmed = 0;
1453 @@ -260,9 +289,49 @@ int hailo_vdma_program_descriptors_list(
1454 return -EFAULT;
1455 }
1456
1457 + desc_list->desc_list[(starting_desc - 1) % desc_list->desc_count].PageSize_DescControl |=
1458 + get_interrupts_bitmask(vdma_hw, last_desc_interrupts, is_debug);
1459 +
1460 return desc_programmed;
1461 }
1462
1463 +static int program_last_desc(
1464 + struct hailo_vdma_hw *vdma_hw,
1465 + struct hailo_vdma_descriptors_list *desc_list,
1466 + u32 starting_desc,
1467 + struct hailo_vdma_mapped_transfer_buffer *transfer_buffer,
1468 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
1469 + bool is_debug)
1470 +{
1471 + u8 control = (u8)(DESCRIPTOR_DESC_CONTROL | get_interrupts_bitmask(vdma_hw, last_desc_interrupts, is_debug));
1472 + u32 total_descs = DIV_ROUND_UP(transfer_buffer->size, desc_list->desc_page_size);
1473 + u32 last_desc = (starting_desc + total_descs - 1) % desc_list->desc_count;
1474 + u32 last_desc_size = transfer_buffer->size - (total_descs - 1) * desc_list->desc_page_size;
1475 +
1476 + // Configure only last descriptor with residue size
1477 + desc_list->desc_list[last_desc].PageSize_DescControl = (u32)
1478 + ((last_desc_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + control);
1479 + return (int)total_descs;
1480 +}
1481 +
1482 +int hailo_vdma_program_descriptors_list(
1483 + struct hailo_vdma_hw *vdma_hw,
1484 + struct hailo_vdma_descriptors_list *desc_list,
1485 + u32 starting_desc,
1486 + struct hailo_vdma_mapped_transfer_buffer *buffer,
1487 + bool should_bind,
1488 + u8 channel_index,
1489 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
1490 + bool is_debug)
1491 +{
1492 + return should_bind ?
1493 + bind_and_program_descriptors_list(vdma_hw, desc_list, starting_desc,
1494 + buffer, channel_index, last_desc_interrupts, is_debug) :
1495 + program_last_desc(vdma_hw, desc_list, starting_desc, buffer,
1496 + last_desc_interrupts, is_debug);
1497 +}
1498 +
1499 +
1500 static bool channel_control_reg_is_active(u8 control)
1501 {
1502 return (control & VDMA_CHANNEL_CONTROL_START_ABORT_BITMASK) == VDMA_CHANNEL_CONTROL_START;
1503 @@ -270,12 +339,12 @@ static bool channel_control_reg_is_activ
1504
1505 static int validate_channel_state(struct hailo_vdma_channel *channel)
1506 {
1507 - const u8 control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
1508 - const u16 hw_num_avail = ioread16(channel->host_regs + CHANNEL_NUM_AVAIL_OFFSET);
1509 + u32 host_regs_value = ioread32(channel->host_regs);
1510 + const u8 control = READ_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, host_regs_value);
1511 + const u16 hw_num_avail = READ_BITS_AT_OFFSET(WORD_SIZE * BITS_IN_BYTE, CHANNEL_NUM_AVAIL_OFFSET * BITS_IN_BYTE, host_regs_value);
1512
1513 if (!channel_control_reg_is_active(control)) {
1514 - pr_err("Channel %d is not active\n", channel->index);
1515 - return -EBUSY;
1516 + return -ECONNRESET;
1517 }
1518
1519 if (hw_num_avail != channel->state.num_avail) {
1520 @@ -287,51 +356,16 @@ static int validate_channel_state(struct
1521 return 0;
1522 }
1523
1524 -static unsigned long get_interrupts_bitmask(struct hailo_vdma_hw *vdma_hw,
1525 - enum hailo_vdma_interrupts_domain interrupts_domain, bool is_debug)
1526 -{
1527 - unsigned long bitmask = 0;
1528 -
1529 - if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_DEVICE & interrupts_domain)) {
1530 - bitmask |= vdma_hw->device_interrupts_bitmask;
1531 - }
1532 - if (0 != (HAILO_VDMA_INTERRUPTS_DOMAIN_HOST & interrupts_domain)) {
1533 - bitmask |= vdma_hw->host_interrupts_bitmask;
1534 - }
1535 -
1536 - if (bitmask != 0) {
1537 - bitmask |= DESC_REQUEST_IRQ_PROCESSED | DESC_REQUEST_IRQ_ERR;
1538 - if (is_debug) {
1539 - bitmask |= DESC_STATUS_REQ | DESC_STATUS_REQ_ERR;
1540 - }
1541 - }
1542 -
1543 - return bitmask;
1544 -}
1545 -
1546 static void set_num_avail(u8 __iomem *host_regs, u16 num_avail)
1547 {
1548 - iowrite16(num_avail, host_regs + CHANNEL_NUM_AVAIL_OFFSET);
1549 + u32 host_regs_val = ioread32(host_regs);
1550 + iowrite32(WRITE_BITS_AT_OFFSET(WORD_SIZE * BITS_IN_BYTE, CHANNEL_NUM_AVAIL_OFFSET * BITS_IN_BYTE, host_regs_val, num_avail),
1551 + host_regs);
1552 }
1553
1554 static u16 get_num_proc(u8 __iomem *host_regs)
1555 {
1556 - return ioread16(host_regs + CHANNEL_NUM_PROC_OFFSET);
1557 -}
1558 -
1559 -static int program_last_desc(
1560 - struct hailo_vdma_descriptors_list *desc_list,
1561 - u32 starting_desc,
1562 - struct hailo_vdma_mapped_transfer_buffer *transfer_buffer)
1563 -{
1564 - u32 total_descs = DIV_ROUND_UP(transfer_buffer->size, desc_list->desc_page_size);
1565 - u32 last_desc = (starting_desc + total_descs - 1) % desc_list->desc_count;
1566 - u32 last_desc_size = transfer_buffer->size - (total_descs - 1) * desc_list->desc_page_size;
1567 -
1568 - // Configure only last descriptor with residue size
1569 - desc_list->desc_list[last_desc].PageSize_DescControl = (u32)
1570 - ((last_desc_size << DESCRIPTOR_PAGE_SIZE_SHIFT) + DESCRIPTOR_DESC_CONTROL);
1571 - return (int)total_descs;
1572 + return READ_BITS_AT_OFFSET(WORD_SIZE * BITS_IN_BYTE, 0, ioread32(host_regs + CHANNEL_NUM_PROC_OFFSET));
1573 }
1574
1575 int hailo_vdma_launch_transfer(
1576 @@ -365,6 +399,11 @@ int hailo_vdma_launch_transfer(
1577 return -EINVAL;
1578 }
1579
1580 + ret = validate_channel_state(channel);
1581 + if (ret < 0) {
1582 + return ret;
1583 + }
1584 +
1585 if (channel->state.num_avail != (u16)starting_desc) {
1586 pr_err("Channel %d state out of sync. num available is %d, expected %d\n",
1587 channel->index, channel->state.num_avail, (u16)starting_desc);
1588 @@ -376,25 +415,17 @@ int hailo_vdma_launch_transfer(
1589 return -EINVAL;
1590 }
1591
1592 - if (is_debug) {
1593 - ret = validate_channel_state(channel);
1594 - if (ret < 0) {
1595 - return ret;
1596 - }
1597 - }
1598 -
1599 BUILD_BUG_ON_MSG((HAILO_MAX_BUFFERS_PER_SINGLE_TRANSFER + 1) != ARRAY_SIZE(ongoing_transfer.dirty_descs),
1600 "Unexpected amount of dirty descriptors");
1601 ongoing_transfer.dirty_descs_count = buffers_count + 1;
1602 ongoing_transfer.dirty_descs[0] = (u16)starting_desc;
1603
1604 for (i = 0; i < buffers_count; i++) {
1605 - ret = should_bind ?
1606 - hailo_vdma_program_descriptors_list(vdma_hw, desc_list, starting_desc, &buffers[i], channel->index) :
1607 - program_last_desc(desc_list, starting_desc, &buffers[i]);
1608 - if (ret < 0) {
1609 - return ret;
1610 - }
1611 + ret = hailo_vdma_program_descriptors_list(vdma_hw, desc_list,
1612 + starting_desc, &buffers[i], should_bind, channel->index,
1613 + (i == (buffers_count - 1) ? last_desc_interrupts : HAILO_VDMA_INTERRUPTS_DOMAIN_NONE),
1614 + is_debug);
1615 +
1616 total_descs += ret;
1617 last_desc = (starting_desc + ret - 1) % desc_list->desc_count;
1618 starting_desc = (starting_desc + ret) % desc_list->desc_count;
1619 @@ -406,8 +437,6 @@ int hailo_vdma_launch_transfer(
1620
1621 desc_list->desc_list[first_desc].PageSize_DescControl |=
1622 get_interrupts_bitmask(vdma_hw, first_interrupts_domain, is_debug);
1623 - desc_list->desc_list[last_desc].PageSize_DescControl |=
1624 - get_interrupts_bitmask(vdma_hw, last_desc_interrupts, is_debug);
1625
1626 ongoing_transfer.last_desc = (u16)last_desc;
1627 ongoing_transfer.is_debug = is_debug;
1628 @@ -477,8 +506,21 @@ static void channel_state_init(struct ha
1629 state->desc_count_mask = U32_MAX;
1630 }
1631
1632 +static u8 __iomem *get_channel_regs(u8 __iomem *regs_base, u8 channel_index, bool is_host_side, u32 src_channels_bitmask)
1633 +{
1634 + // Check if getting host side regs or device side
1635 + u8 __iomem *channel_regs_base = regs_base + CHANNEL_BASE_OFFSET(channel_index);
1636 + if (is_host_side) {
1637 + return hailo_test_bit(channel_index, &src_channels_bitmask) ? channel_regs_base :
1638 + (channel_regs_base + CHANNEL_DEST_REGS_OFFSET);
1639 + } else {
1640 + return hailo_test_bit(channel_index, &src_channels_bitmask) ? (channel_regs_base + CHANNEL_DEST_REGS_OFFSET) :
1641 + channel_regs_base;
1642 + }
1643 +}
1644 +
1645 void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
1646 - const struct hailo_resource *channel_registers)
1647 + const struct hailo_resource *channel_registers, u32 src_channels_bitmask)
1648 {
1649 u8 channel_index = 0;
1650 struct hailo_vdma_channel *channel;
1651 @@ -489,8 +531,8 @@ void hailo_vdma_engine_init(struct hailo
1652
1653 for_each_vdma_channel(engine, channel, channel_index) {
1654 u8 __iomem *regs_base = (u8 __iomem *)channel_registers->address;
1655 - channel->host_regs = regs_base + CHANNEL_HOST_OFFSET(channel_index);
1656 - channel->device_regs = regs_base + CHANNEL_DEVICE_OFFSET(channel_index);
1657 + channel->host_regs = get_channel_regs(regs_base, channel_index, true, src_channels_bitmask);
1658 + channel->device_regs = get_channel_regs(regs_base, channel_index, false, src_channels_bitmask);
1659 channel->index = channel_index;
1660 channel->timestamp_measure_enabled = false;
1661
1662 @@ -502,7 +544,15 @@ void hailo_vdma_engine_init(struct hailo
1663 }
1664 }
1665
1666 -void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
1667 +/**
1668 + * Enables the given channels bitmap in the given engine. Allows launching transfer
1669 + * and reading interrupts from the channels.
1670 + *
1671 + * @param engine - dma engine.
1672 + * @param bitmap - channels bitmap to enable.
1673 + * @param measure_timestamp - if set, allow interrupts timestamp measure.
1674 + */
1675 +void hailo_vdma_engine_enable_channels(struct hailo_vdma_engine *engine, u32 bitmap,
1676 bool measure_timestamp)
1677 {
1678 struct hailo_vdma_channel *channel = NULL;
1679 @@ -518,7 +568,14 @@ void hailo_vdma_engine_enable_channel_in
1680 engine->enabled_channels |= bitmap;
1681 }
1682
1683 -void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap)
1684 +/**
1685 + * Disables the given channels bitmap in the given engine.
1686 + *
1687 + * @param engine - dma engine.
1688 + * @param bitmap - channels bitmap to disable.
1689 + *
1690 + */
1691 +void hailo_vdma_engine_disable_channels(struct hailo_vdma_engine *engine, u32 bitmap)
1692 {
1693 struct hailo_vdma_channel *channel = NULL;
1694 u8 channel_index = 0;
1695 @@ -582,11 +639,11 @@ void hailo_vdma_engine_set_channel_inter
1696 }
1697
1698 static void fill_channel_irq_data(struct hailo_vdma_interrupts_channel_data *irq_data,
1699 - struct hailo_vdma_engine *engine, struct hailo_vdma_channel *channel, u16 num_proc,
1700 + struct hailo_vdma_engine *engine, struct hailo_vdma_channel *channel, u8 transfers_completed,
1701 bool validation_success)
1702 {
1703 - u8 host_control = ioread8(channel->host_regs + CHANNEL_CONTROL_OFFSET);
1704 - u8 device_control = ioread8(channel->device_regs + CHANNEL_CONTROL_OFFSET);
1705 + u8 host_control = READ_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, ioread32(channel->host_regs));
1706 + u8 device_control = READ_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, ioread32(channel->device_regs));
1707
1708 irq_data->engine_index = engine->index;
1709 irq_data->channel_index = channel->index;
1710 @@ -594,9 +651,9 @@ static void fill_channel_irq_data(struct
1711 irq_data->is_active = channel_control_reg_is_active(host_control) &&
1712 channel_control_reg_is_active(device_control);
1713
1714 - irq_data->host_num_processed = num_proc;
1715 - irq_data->host_error = ioread8(channel->host_regs + CHANNEL_ERROR_OFFSET);
1716 - irq_data->device_error = ioread8(channel->device_regs + CHANNEL_ERROR_OFFSET);
1717 + irq_data->transfers_completed = transfers_completed;
1718 + irq_data->host_error = READ_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, 0, ioread32(channel->host_regs + CHANNEL_ERROR_OFFSET));
1719 + irq_data->device_error = READ_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, 0, ioread32(channel->device_regs + CHANNEL_ERROR_OFFSET));
1720 irq_data->validation_success = validation_success;
1721 }
1722
1723 @@ -635,7 +692,12 @@ int hailo_vdma_engine_fill_irq_data(stru
1724 bool validation_success = true;
1725
1726 for_each_vdma_channel(engine, channel, channel_index) {
1727 + u8 transfers_completed = 0;
1728 u16 hw_num_proc = U16_MAX;
1729 +
1730 + BUILD_BUG_ON_MSG(HAILO_VDMA_MAX_ONGOING_TRANSFERS >= U8_MAX,
1731 + "HAILO_VDMA_MAX_ONGOING_TRANSFERS must be less than U8_MAX to use transfers_completed as u8");
1732 +
1733 if (!hailo_test_bit(channel->index, &irq_channels_bitmap)) {
1734 continue;
1735 }
1736 @@ -673,12 +735,143 @@ int hailo_vdma_engine_fill_irq_data(stru
1737 channel->state.num_proc = (u16)((cur_transfer->last_desc + 1) & channel->state.desc_count_mask);
1738
1739 ongoing_transfer_pop(channel, NULL);
1740 + transfers_completed++;
1741 }
1742
1743 fill_channel_irq_data(&irq_data->irq_data[irq_data->channels_count],
1744 - engine, channel, hw_num_proc, validation_success);
1745 + engine, channel, transfers_completed, validation_success);
1746 irq_data->channels_count++;
1747 }
1748
1749 return 0;
1750 +}
1751 +
1752 +// For all these functions - best way to optimize might be to not call the function when need to pause and then abort,
1753 +// Rather read value once and maybe save
1754 +// This function reads and writes the register - should try to make more optimized in future
1755 +static void start_vdma_control_register(u8 __iomem *host_regs)
1756 +{
1757 + u32 host_regs_value = ioread32(host_regs);
1758 + iowrite32(WRITE_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, host_regs_value,
1759 + VDMA_CHANNEL_CONTROL_START_RESUME), host_regs);
1760 +}
1761 +
1762 +static void hailo_vdma_channel_pause(u8 __iomem *host_regs)
1763 +{
1764 + u32 host_regs_value = ioread32(host_regs);
1765 + iowrite32(WRITE_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, host_regs_value,
1766 + VDMA_CHANNEL_CONTROL_START_PAUSE), host_regs);
1767 +}
1768 +
1769 +// This function reads and writes the register - should try to make more optimized in future
1770 +static void hailo_vdma_channel_abort(u8 __iomem *host_regs)
1771 +{
1772 + u32 host_regs_value = ioread32(host_regs);
1773 + iowrite32(WRITE_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, host_regs_value,
1774 + VDMA_CHANNEL_CONTROL_ABORT), host_regs);
1775 +}
1776 +
1777 +int hailo_vdma_start_channel(u8 __iomem *host_regs, uint64_t desc_dma_address, uint8_t desc_depth,
1778 + uint8_t data_id)
1779 +{
1780 + u16 dma_address_l = 0;
1781 + u32 dma_address_h = 0;
1782 + u32 desc_depth_data_id = 0;
1783 +
1784 + if (((desc_dma_address & 0xFFFF) != 0) ||
1785 + (desc_depth > DESCRIPTOR_LIST_MAX_DEPTH)) {
1786 + return -EINVAL;
1787 + }
1788 +
1789 + // According to spec, depth 16 is equivalent to depth 0.
1790 + if (DESCRIPTOR_LIST_MAX_DEPTH == desc_depth) {
1791 + desc_depth = 0;
1792 + }
1793 +
1794 + // Stop old channel state
1795 + hailo_vdma_stop_channel(host_regs);
1796 +
1797 + // Configure address, depth and id
1798 + dma_address_l = (uint16_t)((desc_dma_address >> 16) & 0xFFFF);
1799 + iowrite32(WRITE_BITS_AT_OFFSET(WORD_SIZE * BITS_IN_BYTE, (VDMA_CHANNEL__ADDRESS_L_OFFSET -
1800 + VDMA_CHANNEL__ALIGNED_ADDRESS_L_OFFSET) * BITS_IN_BYTE, ioread32(host_regs +
1801 + VDMA_CHANNEL__ALIGNED_ADDRESS_L_OFFSET), dma_address_l), host_regs + VDMA_CHANNEL__ALIGNED_ADDRESS_L_OFFSET);
1802 +
1803 + dma_address_h = (uint32_t)(desc_dma_address >> 32);
1804 + iowrite32(dma_address_h, host_regs + VDMA_CHANNEL__ADDRESS_H_OFFSET);
1805 +
1806 + desc_depth_data_id = (uint32_t)(desc_depth << VDMA_CHANNEL_DESC_DEPTH_SHIFT) |
1807 + (data_id << VDMA_CHANNEL_DATA_ID_SHIFT);
1808 + iowrite32(desc_depth_data_id, host_regs);
1809 +
1810 + start_vdma_control_register(host_regs);
1811 +
1812 + return 0;
1813 +}
1814 +
1815 +static bool hailo_vdma_channel_is_idle(u8 __iomem *host_regs, size_t host_side_max_desc_count)
1816 +{
1817 + // Num processed and ongoing are next to each other in the memory.
1818 + // Reading them both in order to save BAR reads.
1819 + u32 host_side_num_processed_ongoing = ioread32(host_regs + CHANNEL_NUM_PROC_OFFSET);
1820 + u16 host_side_num_processed = (host_side_num_processed_ongoing & VDMA_CHANNEL_NUM_PROCESSED_MASK);
1821 + u16 host_side_num_ongoing = (host_side_num_processed_ongoing >> VDMA_CHANNEL_NUM_PROCESSED_WIDTH) &
1822 + VDMA_CHANNEL_NUM_ONGOING_MASK;
1823 +
1824 + if ((host_side_num_processed % host_side_max_desc_count) == (host_side_num_ongoing % host_side_max_desc_count)) {
1825 + return true;
1826 + }
1827 +
1828 + return false;
1829 +}
1830 +
1831 +static int hailo_vdma_wait_until_channel_idle(u8 __iomem *host_regs)
1832 +{
1833 + bool is_idle = false;
1834 + uint32_t check_counter = 0;
1835 +
1836 + u8 depth = (uint8_t)(READ_BITS_AT_OFFSET(VDMA_CHANNEL_DESC_DEPTH_WIDTH, VDMA_CHANNEL_DESC_DEPTH_SHIFT,
1837 + ioread32(host_regs)));
1838 + size_t host_side_max_desc_count = (size_t)(1 << depth);
1839 +
1840 + for (check_counter = 0; check_counter < VDMA_CHANNEL__MAX_CHECKS_CHANNEL_IS_IDLE; check_counter++) {
1841 + is_idle = hailo_vdma_channel_is_idle(host_regs, host_side_max_desc_count);
1842 + if (is_idle) {
1843 + return 0;
1844 + }
1845 + }
1846 +
1847 + return -ETIMEDOUT;
1848 +}
1849 +
1850 +void hailo_vdma_stop_channel(u8 __iomem *host_regs)
1851 +{
1852 +    int err = 0;
1853 +    u8 host_side_channel_regs = READ_BITS_AT_OFFSET(BYTE_SIZE * BITS_IN_BYTE, CHANNEL_CONTROL_OFFSET * BITS_IN_BYTE, ioread32(host_regs));
1854 +
1855 +    if ((host_side_channel_regs & VDMA_CHANNEL_CONTROL_START_ABORT_PAUSE_RESUME_BITMASK) == VDMA_CHANNEL_CONTROL_ABORT_PAUSE) {
1856 +        // The channel is aborted (we set the channel to VDMA_CHANNEL_CONTROL_ABORT_PAUSE at the end of this function)
1857 +        return;
1858 +    }
1859 +
1860 +    // Pause the channel
1861 +    // The channel is paused to allow for "all transfers from fetched descriptors..." to be "...completed"
1862 +    // (from PLDA PCIe reference manual, "9.2.5 Starting a Channel and Transferring Data")
1863 +    hailo_vdma_channel_pause(host_regs);
1864 +
1865 +    // Even if channel is stuck and not idle, force abort and return error in the end
1866 +    err = hailo_vdma_wait_until_channel_idle(host_regs);
1867 +    // Success oriented - if an error occurred, log it but still abort the channel
1868 +    if (err < 0) {
1869 +        pr_err("Timeout occured while waiting for channel to become idle\n");
1870 +    }
1871 +
1872 +    // Abort the channel (even if hailo_vdma_wait_until_channel_idle fails)
1873 +    hailo_vdma_channel_abort(host_regs);
1874 +}
1875 +
1876 +bool hailo_check_channel_index(u8 channel_index, u32 src_channels_bitmask, bool is_input_channel)
1877 +{
1878 + return is_input_channel ? hailo_test_bit(channel_index, &src_channels_bitmask) :
1879 + (!hailo_test_bit(channel_index, &src_channels_bitmask));
1880 }
1881 \ No newline at end of file
1882 --- a/drivers/media/pci/hailo/common/vdma_common.h
1883 +++ b/drivers/media/pci/hailo/common/vdma_common.h
1884 @@ -1,4 +1,4 @@
1885 -// SPDX-License-Identifier: GPL-2.0
1886 +// SPDX-License-Identifier: MIT
1887 /**
1888 * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
1889 **/
1890 @@ -30,8 +30,8 @@ struct hailo_vdma_descriptor {
1891
1892 struct hailo_vdma_descriptors_list {
1893 struct hailo_vdma_descriptor *desc_list;
1894 - u32 desc_count; // Must be power of 2 if is_circular is set.
1895 - u16 desc_page_size;
1896 + u32 desc_count; // Must be power of 2 if is_circular is set.
1897 + u16 desc_page_size;
1898 bool is_circular;
1899 };
1900
1901 @@ -127,6 +127,9 @@ struct hailo_vdma_hw {
1902 // Bitmask needed to set on each descriptor to enable interrupts (either host/device).
1903 unsigned long host_interrupts_bitmask;
1904 unsigned long device_interrupts_bitmask;
1905 +
1906 + // Bitmask for each vdma hw, which channels are src side by index (on pcie/dram - 0x0000FFFF, pci ep - 0xFFFF0000)
1907 + u32 src_channels_bitmask;
1908 };
1909
1910 #define _for_each_element_array(array, size, element, index) \
1911 @@ -147,7 +150,11 @@ void hailo_vdma_program_descriptor(struc
1912 * @param starting_desc index of the first descriptor to program. If the list
1913 * is circular, this function may wrap around the list.
1914 * @param buffer buffer to program to the descriptors list.
1915 + * @param should_bind If false, assumes the buffer was already bound to the
1916 + * desc list. Used for optimization.
1917 * @param channel_index channel index of the channel attached.
1918 + * @param last_desc_interrupts - interrupts settings on last descriptor.
1919 + * @param is_debug program descriptors for debug run.
1920 *
1921 * @return On success - the amount of descriptors programmed, negative value on error.
1922 */
1923 @@ -156,7 +163,10 @@ int hailo_vdma_program_descriptors_list(
1924 struct hailo_vdma_descriptors_list *desc_list,
1925 u32 starting_desc,
1926 struct hailo_vdma_mapped_transfer_buffer *buffer,
1927 - u8 channel_index);
1928 + bool should_bind,
1929 + u8 channel_index,
1930 + enum hailo_vdma_interrupts_domain last_desc_interrupts,
1931 + bool is_debug);
1932
1933 /**
1934 * Launch a transfer on some vdma channel. Includes:
1935 @@ -191,14 +201,12 @@ int hailo_vdma_launch_transfer(
1936 bool is_debug);
1937
1938 void hailo_vdma_engine_init(struct hailo_vdma_engine *engine, u8 engine_index,
1939 - const struct hailo_resource *channel_registers);
1940 + const struct hailo_resource *channel_registers, u32 src_channels_bitmask);
1941
1942 -// enable/disable channels interrupt (does not update interrupts mask because the
1943 -// implementation is different between PCIe and DRAM DMA. To support it we
1944 -// can add some ops struct to the engine).
1945 -void hailo_vdma_engine_enable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap,
1946 +void hailo_vdma_engine_enable_channels(struct hailo_vdma_engine *engine, u32 bitmap,
1947 bool measure_timestamp);
1948 -void hailo_vdma_engine_disable_channel_interrupts(struct hailo_vdma_engine *engine, u32 bitmap);
1949 +
1950 +void hailo_vdma_engine_disable_channels(struct hailo_vdma_engine *engine, u32 bitmap);
1951
1952 void hailo_vdma_engine_push_timestamps(struct hailo_vdma_engine *engine, u32 bitmap);
1953 int hailo_vdma_engine_read_timestamps(struct hailo_vdma_engine *engine,
1954 @@ -237,6 +245,12 @@ int hailo_vdma_engine_fill_irq_data(stru
1955 struct hailo_vdma_engine *engine, u32 irq_channels_bitmap,
1956 transfer_done_cb_t transfer_done, void *transfer_done_opaque);
1957
1958 +int hailo_vdma_start_channel(u8 __iomem *host_regs, uint64_t desc_dma_address, uint8_t desc_depth, uint8_t data_id);
1959 +
1960 +void hailo_vdma_stop_channel(u8 __iomem *host_regs);
1961 +
1962 +bool hailo_check_channel_index(u8 channel_index, u32 src_channels_bitmask, bool is_input_channel);
1963 +
1964 #ifdef __cplusplus
1965 }
1966 #endif
1967 --- a/drivers/media/pci/hailo/src/fops.c
1968 +++ b/drivers/media/pci/hailo/src/fops.c
1969 @@ -19,7 +19,6 @@
1970 #include <linux/sched/signal.h>
1971 #endif
1972
1973 -#include "hailo_pcie_version.h"
1974 #include "utils.h"
1975 #include "fops.h"
1976 #include "vdma_common.h"
1977 @@ -27,6 +26,7 @@
1978 #include "vdma/memory.h"
1979 #include "vdma/ioctl.h"
1980 #include "utils/compact.h"
1981 +#include "pci_soc_ioctl.h"
1982
1983
1984 #if LINUX_VERSION_CODE >= KERNEL_VERSION( 4, 13, 0 )
1985 @@ -210,69 +210,66 @@ l_exit:
1986
1987 int hailo_pcie_fops_release(struct inode *inode, struct file *filp)
1988 {
1989 - struct hailo_pcie_board *pBoard = (struct hailo_pcie_board *)filp->private_data;
1990 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)filp->private_data;
1991 struct hailo_file_context *context = NULL;
1992
1993 u32 major = MAJOR(inode->i_rdev);
1994 u32 minor = MINOR(inode->i_rdev);
1995
1996 - if (pBoard) {
1997 - hailo_info(pBoard, "(%d: %d-%d): fops_release\n", current->tgid, major, minor);
1998 + if (board) {
1999 + hailo_info(board, "(%d: %d-%d): fops_release\n", current->tgid, major, minor);
2000
2001 - if (down_interruptible(&pBoard->mutex)) {
2002 - hailo_err(pBoard, "fops_release down_interruptible failed");
2003 - return -ERESTARTSYS;
2004 - }
2005
2006 - context = find_file_context(pBoard, filp);
2007 + down(&board->mutex);
2008 +
2009 + context = find_file_context(board, filp);
2010 if (NULL == context) {
2011 - hailo_err(pBoard, "Invalid driver state, file context does not exist\n");
2012 - up(&pBoard->mutex);
2013 + hailo_err(board, "Invalid driver state, file context does not exist\n");
2014 + up(&board->mutex);
2015 return -EINVAL;
2016 }
2017
2018 if (false == context->is_valid) {
2019 // File context is invalid, but open. It's OK to continue finalize and release it.
2020 - hailo_err(pBoard, "Invalid file context\n");
2021 + hailo_err(board, "Invalid file context\n");
2022 }
2023
2024 - hailo_pcie_clear_notification_wait_list(pBoard, filp);
2025 + hailo_pcie_clear_notification_wait_list(board, filp);
2026
2027 - if (filp == pBoard->vdma.used_by_filp) {
2028 - if (hailo_pcie_driver_down(pBoard)) {
2029 - hailo_err(pBoard, "Failed sending FW shutdown event");
2030 + if (filp == board->vdma.used_by_filp) {
2031 + if (hailo_pcie_driver_down(board)) {
2032 + hailo_err(board, "Failed sending FW shutdown event");
2033 }
2034 }
2035
2036 - hailo_vdma_file_context_finalize(&context->vdma_context, &pBoard->vdma, filp);
2037 + hailo_vdma_file_context_finalize(&context->vdma_context, &board->vdma, filp);
2038 release_file_context(context);
2039
2040 - if (atomic_dec_and_test(&pBoard->ref_count)) {
2041 + if (atomic_dec_and_test(&board->ref_count)) {
2042 // Disable interrupts
2043 - hailo_disable_interrupts(pBoard);
2044 + hailo_disable_interrupts(board);
2045
2046 if (power_mode_enabled()) {
2047 - if (pBoard->pDev && pci_set_power_state(pBoard->pDev, PCI_D3hot) < 0) {
2048 - hailo_err(pBoard, "Failed setting power state to D3hot");
2049 + if (board->pDev && pci_set_power_state(board->pDev, PCI_D3hot) < 0) {
2050 + hailo_err(board, "Failed setting power state to D3hot");
2051 }
2052 }
2053
2054 // deallocate board if already removed
2055 - if (!pBoard->pDev) {
2056 - hailo_dbg(pBoard, "fops_close, freed board\n");
2057 - up(&pBoard->mutex);
2058 - kfree(pBoard);
2059 - pBoard = NULL;
2060 + if (!board->pDev) {
2061 + hailo_dbg(board, "fops_release, freed board\n");
2062 + up(&board->mutex);
2063 + kfree(board);
2064 + board = NULL;
2065 } else {
2066 -
2067 - hailo_dbg(pBoard, "fops_close, released resources for board\n");
2068 - up(&pBoard->mutex);
2069 + hailo_dbg(board, "fops_release, released resources for board\n");
2070 + up(&board->mutex);
2071 }
2072 } else {
2073 - up(&pBoard->mutex);
2074 + up(&board->mutex);
2075 }
2076
2077 - hailo_dbg(pBoard, "(%d: %d-%d): fops_close: SUCCESS on /dev/hailo%d\n", current->tgid,
2078 + hailo_dbg(board, "(%d: %d-%d): fops_release: SUCCESS on /dev/hailo%d\n", current->tgid,
2079 major, minor, minor);
2080 }
2081
2082 @@ -394,6 +391,10 @@ irqreturn_t hailo_irqhandler(int irq, vo
2083 }
2084 }
2085
2086 + if (irq_source.interrupt_bitmask & SOC_CONNECT_ACCEPTED) {
2087 + complete_all(&board->soc_connect_accepted);
2088 + }
2089 +
2090 if (0 != irq_source.vdma_channels_bitmap) {
2091 hailo_vdma_irq_handler(&board->vdma, DEFAULT_VDMA_ENGINE_INDEX,
2092 irq_source.vdma_channels_bitmap);
2093 @@ -602,26 +603,35 @@ static long hailo_query_driver_info(stru
2094 return 0;
2095 }
2096
2097 -static long hailo_general_ioctl(struct hailo_file_context *context, struct hailo_pcie_board *board,
2098 - unsigned int cmd, unsigned long arg, struct file *filp, bool *should_up_board_mutex)
2099 +static long hailo_general_ioctl(struct hailo_pcie_board *board, unsigned int cmd, unsigned long arg)
2100 {
2101 switch (cmd) {
2102 case HAILO_MEMORY_TRANSFER:
2103 return hailo_memory_transfer_ioctl(board, arg);
2104 + case HAILO_QUERY_DEVICE_PROPERTIES:
2105 + return hailo_query_device_properties(board, arg);
2106 + case HAILO_QUERY_DRIVER_INFO:
2107 + return hailo_query_driver_info(board, arg);
2108 + default:
2109 + hailo_err(board, "Invalid general ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
2110 + return -ENOTTY;
2111 + }
2112 +}
2113 +
2114 +static long hailo_nnc_ioctl(struct hailo_pcie_board *board, unsigned int cmd, unsigned long arg,
2115 + struct file *filp, bool *should_up_board_mutex)
2116 +{
2117 + switch (cmd) {
2118 case HAILO_FW_CONTROL:
2119 return hailo_fw_control(board, arg, should_up_board_mutex);
2120 case HAILO_READ_NOTIFICATION:
2121 return hailo_read_notification_ioctl(board, arg, filp, should_up_board_mutex);
2122 case HAILO_DISABLE_NOTIFICATION:
2123 return hailo_disable_notification(board, filp);
2124 - case HAILO_QUERY_DEVICE_PROPERTIES:
2125 - return hailo_query_device_properties(board, arg);
2126 - case HAILO_QUERY_DRIVER_INFO:
2127 - return hailo_query_driver_info(board, arg);
2128 case HAILO_READ_LOG:
2129 return hailo_read_log_ioctl(board, arg);
2130 default:
2131 - hailo_err(board, "Invalid general ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
2132 + hailo_err(board, "Invalid nnc ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
2133 return -ENOTTY;
2134 }
2135 }
2136 @@ -673,12 +683,28 @@ long hailo_pcie_fops_unlockedioctl(struc
2137
2138 switch (_IOC_TYPE(cmd)) {
2139 case HAILO_GENERAL_IOCTL_MAGIC:
2140 - err = hailo_general_ioctl(context, board, cmd, arg, filp, &should_up_board_mutex);
2141 + err = hailo_general_ioctl(board, cmd, arg);
2142 break;
2143 case HAILO_VDMA_IOCTL_MAGIC:
2144 err = hailo_vdma_ioctl(&context->vdma_context, &board->vdma, cmd, arg, filp, &board->mutex,
2145 &should_up_board_mutex);
2146 break;
2147 + case HAILO_SOC_IOCTL_MAGIC:
2148 + if (HAILO_ACCELERATOR_TYPE_SOC != board->pcie_resources.accelerator_type) {
2149 + hailo_err(board, "Ioctl %d is not supported on this accelerator type\n", _IOC_TYPE(cmd));
2150 + err = -EINVAL;
2151 + } else {
2152 + err = hailo_soc_ioctl(board, &context->vdma_context, &board->vdma, cmd, arg);
2153 + }
2154 + break;
2155 + case HAILO_NNC_IOCTL_MAGIC:
2156 + if (HAILO_ACCELERATOR_TYPE_NNC != board->pcie_resources.accelerator_type) {
2157 + hailo_err(board, "Ioctl %d is not supported on this accelerator type\n", _IOC_TYPE(cmd));
2158 + err = -EINVAL;
2159 + } else {
2160 + err = hailo_nnc_ioctl(board, cmd, arg, filp, &should_up_board_mutex);
2161 + }
2162 + break;
2163 default:
2164 hailo_err(board, "Invalid ioctl type %d\n", _IOC_TYPE(cmd));
2165 err = -ENOTTY;
2166 --- a/drivers/media/pci/hailo/src/fops.h
2167 +++ b/drivers/media/pci/hailo/src/fops.h
2168 @@ -11,6 +11,7 @@ int hailo_pcie_fops_release(struct inode
2169 long hailo_pcie_fops_unlockedioctl(struct file* filp, unsigned int cmd, unsigned long arg);
2170 int hailo_pcie_fops_mmap(struct file* filp, struct vm_area_struct *vma);
2171 int hailo_pcie_driver_down(struct hailo_pcie_board *board);
2172 +void hailo_pcie_ep_init(struct hailo_pcie_board *board);
2173
2174 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
2175 irqreturn_t hailo_irqhandler(int irq, void* dev_id, struct pt_regs *regs);
2176 --- /dev/null
2177 +++ b/drivers/media/pci/hailo/src/pci_soc_ioctl.c
2178 @@ -0,0 +1,155 @@
2179 +// SPDX-License-Identifier: GPL-2.0
2180 +/**
2181 + * Copyright (c) 2019-2024 Hailo Technologies Ltd. All rights reserved.
2182 + **/
2183 +#include "pci_soc_ioctl.h"
2184 +
2185 +#include "utils.h"
2186 +#include "vdma_common.h"
2187 +#include "utils/logs.h"
2188 +#include "vdma/memory.h"
2189 +
2190 +#define PCI_SOC_VDMA_ENGINE_INDEX (0)
2191 +#define PCI_SOC_WAIT_FOR_CONNECT_TIMEOUT_MS (10000)
2192 +
2193 +long hailo_soc_ioctl(struct hailo_pcie_board *board, struct hailo_vdma_file_context *context,
2194 + struct hailo_vdma_controller *controller, unsigned int cmd, unsigned long arg)
2195 +{
2196 + switch (cmd) {
2197 + case HAILO_SOC_CONNECT:
2198 + return hailo_soc_connect_ioctl(board, context, controller, arg);
2199 + case HAILO_SOC_CLOSE:
2200 + return hailo_soc_close_ioctl(board, controller, arg);
2201 + default:
2202 + hailo_err(board, "Invalid pcie EP ioctl code 0x%x (nr: %d)\n", cmd, _IOC_NR(cmd));
2203 + return -ENOTTY;
2204 + }
2205 +}
2206 +
2207 +long hailo_soc_connect_ioctl(struct hailo_pcie_board *board, struct hailo_vdma_file_context *context,
2208 +    struct hailo_vdma_controller *controller, unsigned long arg)
2209 +{
2210 +    struct hailo_soc_connect_params params;
2211 +    struct hailo_vdma_channel *input_channel = NULL;
2212 +    struct hailo_vdma_channel *output_channel = NULL;
2213 +    struct hailo_vdma_engine *vdma_engine = NULL;
2214 +    struct hailo_descriptors_list_buffer *input_descriptors_buffer = NULL;
2215 +    struct hailo_descriptors_list_buffer *output_descriptors_buffer = NULL;
2216 +    uint8_t depth = 0;
2217 +    int err = 0;
2218 +    long completion_result = 0;
2219 +
2220 +    if (copy_from_user(&params, (void *)arg, sizeof(params))) {
2221 +        hailo_err(board, "copy_from_user fail\n");
2222 +        return -ENOMEM;
2223 +    }
2224 +
2225 +    // TODO: have pci_ep choose the channel indexes the soc will use - for now use 0 and 16
2226 +    params.input_channel_index = 0;
2227 +    params.output_channel_index = 16;
2228 +
2229 +    reinit_completion(&board->soc_connect_accepted);
2230 +    hailo_soc_write_soc_connect(&board->pcie_resources);
2231 +
2232 +    // Wait for accept; returns 0 on timeout, <0 if interrupted, >0 on success
2233 +    completion_result = wait_for_completion_interruptible_timeout(&board->soc_connect_accepted,
2234 +        msecs_to_jiffies(PCI_SOC_WAIT_FOR_CONNECT_TIMEOUT_MS));
2235 +    if (0 >= completion_result) {
2236 +        if (0 == completion_result) {
2237 +            hailo_err(board, "Timeout waiting for connect to be accepted (timeout_ms=%d)\n", PCI_SOC_WAIT_FOR_CONNECT_TIMEOUT_MS);
2238 +            return -ETIMEDOUT;
2239 +        } else {
2240 +            hailo_info(board, "soc connect failed with err=%ld (process was interrupted or killed)\n",
2241 +                completion_result);
2242 +            return -EINTR;
2243 +        }
2244 +    }
2245 +
2246 +    vdma_engine = &controller->vdma_engines[PCI_SOC_VDMA_ENGINE_INDEX];
2247 +    input_channel = &vdma_engine->channels[params.input_channel_index];
2248 +    output_channel = &vdma_engine->channels[params.output_channel_index];
2249 +
2250 +    input_descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.input_desc_handle);
2251 +    output_descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, params.output_desc_handle);
2252 +    if (NULL == input_descriptors_buffer || NULL == output_descriptors_buffer) {
2253 +        hailo_dev_err(&board->pDev->dev, "input / output descriptors buffer not found \n");
2254 +        return -EINVAL;
2255 +    }
2256 +
2257 +    // Make sure channels that we are accepting are not already enabled (test the channel's bit, not the raw index)
2258 +    if (0 != (vdma_engine->enabled_channels & (1U << params.input_channel_index)) ||
2259 +        0 != (vdma_engine->enabled_channels & (1U << params.output_channel_index))) {
2260 +        hailo_dev_err(&board->pDev->dev, "Trying to accept already enabled channels\n");
2261 +        return -EINVAL;
2262 +    }
2263 +
2264 +    if (!is_powerof2((size_t)input_descriptors_buffer->desc_list.desc_count) ||
2265 +        !is_powerof2((size_t)output_descriptors_buffer->desc_list.desc_count)) {
2266 +        hailo_dev_err(&board->pDev->dev, "Invalid desc list size\n");
2267 +        return -EINVAL;
2268 +    }
2269 +
2270 +    // configure and start input channel
2271 +    depth = ceil_log2(input_descriptors_buffer->desc_list.desc_count);
2272 +    // DMA Direction is only to get channel index - so
2273 +    err = hailo_vdma_start_channel(input_channel->host_regs, input_descriptors_buffer->dma_address, depth,
2274 +        board->vdma.hw->ddr_data_id);
2275 +    if (err < 0) {
2276 +        hailo_dev_err(&board->pDev->dev, "Error starting vdma input channel index %u\n", params.input_channel_index);
2277 +        return -EINVAL;
2278 +    }
2279 +
2280 +    // configure and start output channel
2281 +    depth = ceil_log2(output_descriptors_buffer->desc_list.desc_count);
2282 +    // DMA Direction is only to get channel index - so
2283 +    err = hailo_vdma_start_channel(output_channel->host_regs, output_descriptors_buffer->dma_address, depth,
2284 +        board->vdma.hw->ddr_data_id);
2285 +    if (err < 0) {
2286 +        hailo_dev_err(&board->pDev->dev, "Error starting vdma output channel index %u\n", params.output_channel_index);
2287 +        // Close input channel
2288 +        hailo_vdma_stop_channel(input_channel->host_regs);
2289 +        return -EINVAL;
2290 +    }
2291 +
2292 +    if (copy_to_user((void *)arg, &params, sizeof(params))) {
2293 +        hailo_dev_err(&board->pDev->dev, "copy_to_user fail\n");
2294 +        return -ENOMEM;
2295 +    }
2296 +
2297 +    return 0;
2298 +}
2299 +
2300 +long hailo_soc_close_ioctl(struct hailo_pcie_board *board, struct hailo_vdma_controller *controller, unsigned long arg)
2301 +{
2302 + struct hailo_soc_close_params params;
2303 + struct hailo_vdma_channel *input_channel = NULL;
2304 + struct hailo_vdma_channel *output_channel = NULL;
2305 + struct hailo_vdma_engine *vdma_engine = NULL;
2306 +
2307 + if (copy_from_user(&params, (void *)arg, sizeof(params))) {
2308 + hailo_dev_err(&board->pDev->dev, "copy_from_user fail\n");
2309 + return -ENOMEM;
2310 + }
2311 +
2312 + vdma_engine = &controller->vdma_engines[PCI_SOC_VDMA_ENGINE_INDEX];
2313 +
2314 + if (!hailo_check_channel_index(params.input_channel_index, controller->hw->src_channels_bitmask, true)) {
2315 + hailo_dev_err(&board->pDev->dev, "Invalid input channel index %u\n", params.input_channel_index);
2316 + return -EINVAL;
2317 + }
2318 +
2319 + if (!hailo_check_channel_index(params.output_channel_index, controller->hw->src_channels_bitmask, false)) {
2320 + hailo_dev_err(&board->pDev->dev, "Invalid output channel index %u\n", params.output_channel_index);
2321 + return -EINVAL;
2322 + }
2323 +
2324 + input_channel = &vdma_engine->channels[params.input_channel_index];
2325 + output_channel = &vdma_engine->channels[params.output_channel_index];
2326 +
2327 + // Close channels
2328 + hailo_vdma_stop_channel(input_channel->host_regs);
2329 + hailo_vdma_stop_channel(output_channel->host_regs);
2330 +
2331 + hailo_pcie_write_firmware_driver_shutdown(&board->pcie_resources);
2332 + return 0;
2333 +}
2334 \ No newline at end of file
2335 --- /dev/null
2336 +++ b/drivers/media/pci/hailo/src/pci_soc_ioctl.h
2337 @@ -0,0 +1,19 @@
2338 +// SPDX-License-Identifier: GPL-2.0
2339 +/**
2340 + * Copyright (c) 2019-2024 Hailo Technologies Ltd. All rights reserved.
2341 + **/
2342 +
2343 +#ifndef _HAILO_PCI_SOC_IOCTL_H_
2344 +#define _HAILO_PCI_SOC_IOCTL_H_
2345 +
2346 +#include "vdma/ioctl.h"
2347 +#include "pcie.h"
2348 +
2349 +
2350 +long hailo_soc_ioctl(struct hailo_pcie_board *board, struct hailo_vdma_file_context *context,
2351 + struct hailo_vdma_controller *controller, unsigned int cmd, unsigned long arg);
2352 +long hailo_soc_connect_ioctl(struct hailo_pcie_board *board, struct hailo_vdma_file_context *context,
2353 + struct hailo_vdma_controller *controller, unsigned long arg);
2354 +long hailo_soc_close_ioctl(struct hailo_pcie_board *board, struct hailo_vdma_controller *controller, unsigned long arg);
2355 +
2356 +#endif // _HAILO_PCI_SOC_IOCTL_H_
2357 \ No newline at end of file
2358 --- a/drivers/media/pci/hailo/src/pcie.c
2359 +++ b/drivers/media/pci/hailo/src/pcie.c
2360 @@ -20,7 +20,6 @@
2361
2362 #define KERNEL_CODE 1
2363
2364 -#include "hailo_pcie_version.h"
2365 #include "hailo_ioctl_common.h"
2366 #include "pcie.h"
2367 #include "fops.h"
2368 @@ -45,6 +44,7 @@ enum hailo_allocate_driver_buffer_driver
2369 static int force_desc_page_size = 0;
2370 static bool g_is_power_mode_enabled = true;
2371 static int force_allocation_from_driver = HAILO_NO_FORCE_BUFFER;
2372 +static bool force_hailo15_legacy_mode = false;
2373
2374 #define DEVICE_NODE_NAME "hailo"
2375 static int char_major = 0;
2376 @@ -322,7 +322,7 @@ static int hailo_write_config(struct hai
2377
2378 static bool wait_for_firmware_completion(struct completion *fw_load_completion)
2379 {
2380 - return (0 != wait_for_completion_timeout(fw_load_completion, FIRMWARE_WAIT_TIMEOUT_MS));
2381 + return (0 != wait_for_completion_timeout(fw_load_completion, msecs_to_jiffies(FIRMWARE_WAIT_TIMEOUT_MS)));
2382 }
2383
2384 static int hailo_load_firmware(struct hailo_pcie_resources *resources,
2385 @@ -330,6 +330,7 @@ static int hailo_load_firmware(struct ha
2386 {
2387 const struct firmware *firmware = NULL;
2388 int err = 0;
2389 + u32 boot_status = 0;
2390
2391 if (hailo_pcie_is_firmware_loaded(resources)) {
2392 hailo_dev_warn(dev, "Firmware was already loaded\n");
2393 @@ -368,7 +369,8 @@ static int hailo_load_firmware(struct ha
2394 release_firmware(firmware);
2395
2396 if (!wait_for_firmware_completion(fw_load_completion)) {
2397 - hailo_dev_err(dev, "Timeout waiting for firmware..\n");
2398 + boot_status = hailo_get_boot_status(resources);
2399 + hailo_dev_err(dev, "Timeout waiting for firmware file, boot status %u\n", boot_status);
2400 return -ETIMEDOUT;
2401 }
2402
2403 @@ -376,6 +378,55 @@ static int hailo_load_firmware(struct ha
2404 return 0;
2405 }
2406
2407 +static int hailo_load_firmware_batch(struct hailo_pcie_resources *resources,
2408 + struct device *dev, struct completion *fw_load_completion)
2409 +{
2410 + u32 boot_status = 0;
2411 + u32 pcie_finished = 1;
2412 + int err = 0;
2413 +
2414 + if (hailo_pcie_is_firmware_loaded(resources)) {
2415 + hailo_dev_warn(dev, "Firmware batch was already loaded\n");
2416 + return 0;
2417 + }
2418 +
2419 + init_completion(fw_load_completion);
2420 +
2421 + err = hailo_pcie_write_firmware_batch(dev, resources, FIRST_STAGE);
2422 + if (err < 0) {
2423 + hailo_dev_err(dev, "Failed writing firmware files. err %d\n", err);
2424 + return err;
2425 + }
2426 +
2427 + hailo_trigger_firmware_boot(resources);
2428 +
2429 + if (!wait_for_firmware_completion(fw_load_completion)) {
2430 + boot_status = hailo_get_boot_status(resources);
2431 + hailo_dev_err(dev, "Timeout waiting for firmware file, boot status %u\n", boot_status);
2432 + return -ETIMEDOUT;
2433 + }
2434 + reinit_completion(fw_load_completion);
2435 +
2436 + err = hailo_pcie_write_firmware_batch(dev, resources, SECOND_STAGE);
2437 + if (err < 0) {
2438 + hailo_dev_err(dev, "Failed writing firmware files. err %d\n", err);
2439 + return err;
2440 + }
2441 +
2442 + // TODO: HRT-13838 - Remove, move address to compat, make write_memory static
2443 + write_memory(resources, 0x84000000, (void*)&pcie_finished, sizeof(pcie_finished));
2444 +
2445 + if (!wait_for_firmware_completion(fw_load_completion)) {
2446 + boot_status = hailo_get_boot_status(resources);
2447 + hailo_dev_err(dev, "Timeout waiting for firmware file, boot status %u\n", boot_status);
2448 + return -ETIMEDOUT;
2449 + }
2450 +
2451 + hailo_dev_notice(dev, "Firmware Batch loaded successfully\n");
2452 +
2453 + return 0;
2454 +}
2455 +
2456 static int hailo_activate_board(struct hailo_pcie_board *board)
2457 {
2458 int err = 0;
2459 @@ -388,8 +439,21 @@ static int hailo_activate_board(struct h
2460 return err;
2461 }
2462
2463 - err = hailo_load_firmware(&board->pcie_resources, &board->pDev->dev,
2464 - &board->fw_loaded_completion);
2465 + switch (board->pcie_resources.board_type) {
2466 + case HAILO_BOARD_TYPE_HAILO10H:
2467 + err = hailo_load_firmware_batch(&board->pcie_resources, &board->pDev->dev,
2468 + &board->fw_loaded_completion);
2469 + break;
2470 + case HAILO_BOARD_TYPE_HAILO10H_LEGACY:
2471 + case HAILO_BOARD_TYPE_PLUTO:
2472 + case HAILO_BOARD_TYPE_HAILO8:
2473 + err = hailo_load_firmware(&board->pcie_resources, &board->pDev->dev,
2474 + &board->fw_loaded_completion);
2475 + break;
2476 + default:
2477 + hailo_err(board, "Invalid board type");
2478 + err = -EINVAL;
2479 + }
2480 if (err < 0) {
2481 hailo_err(board, "Firmware load failed\n");
2482 hailo_disable_interrupts(board);
2483 @@ -513,8 +577,23 @@ static int pcie_resources_init(struct pc
2484 goto failure_release_vdma_regs;
2485 }
2486
2487 +
2488 + // There is no HAILO15 as mercury through pcie unless it's legacy mode (H15 as accelerator) or HAILO-10H
2489 + if (HAILO_BOARD_TYPE_HAILO15 == board_type){
2490 + if (true == force_hailo15_legacy_mode) {
2491 + board_type = HAILO_BOARD_TYPE_HAILO10H_LEGACY;
2492 + } else {
2493 + board_type = HAILO_BOARD_TYPE_HAILO10H;
2494 + }
2495 + }
2496 +
2497 resources->board_type = board_type;
2498
2499 + err = hailo_set_device_type(resources);
2500 + if (err < 0) {
2501 + goto failure_release_fw_access;
2502 + }
2503 +
2504 if (!hailo_pcie_is_device_connected(resources)) {
2505 pci_err(pdev, "Probing: Failed reading device BARs, device may be disconnected\n");
2506 err = -ENODEV;
2507 @@ -676,6 +755,7 @@ static int hailo_pcie_probe(struct pci_d
2508
2509 pBoard->interrupts_enabled = false;
2510 init_completion(&pBoard->fw_loaded_completion);
2511 + init_completion(&pBoard->soc_connect_accepted);
2512
2513 sema_init(&pBoard->mutex, 1);
2514 atomic_set(&pBoard->ref_count, 0);
2515 @@ -1005,6 +1085,9 @@ MODULE_PARM_DESC(force_allocation_from_d
2516 module_param(force_desc_page_size, int, S_IRUGO);
2517 MODULE_PARM_DESC(force_desc_page_size, "Determines the maximum DMA descriptor page size (must be a power of 2)");
2518
2519 +module_param(force_hailo15_legacy_mode, bool, S_IRUGO);
2520 +MODULE_PARM_DESC(force_hailo15_legacy_mode, "Forces work with Hailo15 in legacy mode(relevant for emulators)");
2521 +
2522 MODULE_AUTHOR("Hailo Technologies Ltd.");
2523 MODULE_DESCRIPTION("Hailo PCIe driver");
2524 MODULE_LICENSE("GPL v2");
2525 --- a/drivers/media/pci/hailo/src/pcie.h
2526 +++ b/drivers/media/pci/hailo/src/pcie.h
2527 @@ -70,6 +70,8 @@ struct hailo_pcie_board {
2528 enum hailo_allocation_mode allocation_mode;
2529 struct completion fw_loaded_completion;
2530 bool interrupts_enabled;
2531 + // Only needed in accelerator type soc
2532 + struct completion soc_connect_accepted;
2533 };
2534
2535 bool power_mode_enabled(void);
2536 --- a/drivers/media/pci/hailo/src/sysfs.c
2537 +++ b/drivers/media/pci/hailo/src/sysfs.c
2538 @@ -26,9 +26,18 @@ static ssize_t device_id_show(struct dev
2539 }
2540 static DEVICE_ATTR_RO(device_id);
2541
2542 +static ssize_t accelerator_type_show(struct device *dev, struct device_attribute *_attr,
2543 + char *buf)
2544 +{
2545 + struct hailo_pcie_board *board = (struct hailo_pcie_board *)dev_get_drvdata(dev);
2546 + return sprintf(buf, "%d", board->pcie_resources.accelerator_type);
2547 +}
2548 +static DEVICE_ATTR_RO(accelerator_type);
2549 +
2550 static struct attribute *hailo_dev_attrs[] = {
2551 &dev_attr_board_location.attr,
2552 &dev_attr_device_id.attr,
2553 + &dev_attr_accelerator_type.attr,
2554 NULL
2555 };
2556
2557 --- a/drivers/media/pci/hailo/src/utils.c
2558 +++ b/drivers/media/pci/hailo/src/utils.c
2559 @@ -8,7 +8,6 @@
2560 #include <linux/module.h>
2561 #include <linux/pci.h>
2562
2563 -#include "hailo_pcie_version.h"
2564 #include "pcie.h"
2565 #include "utils.h"
2566 #include "utils/logs.h"
2567 --- /dev/null
2568 +++ b/drivers/media/pci/hailo/utils/integrated_nnc_utils.c
2569 @@ -0,0 +1,101 @@
2570 +// SPDX-License-Identifier: GPL-2.0
2571 +/**
2572 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2573 + **/
2574 +
2575 +#include "integrated_nnc_utils.h"
2576 +#include "utils/logs.h"
2577 +
2578 +#include <linux/uaccess.h>
2579 +#include <asm/io.h>
2580 +#include <linux/of_address.h>
2581 +#include <linux/cdev.h>
2582 +
2583 +int hailo_ioremap_resource(struct platform_device *pdev, struct hailo_resource *resource,
2584 + const char *name)
2585 +{
2586 + void __iomem *address;
2587 + struct resource *platform_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
2588 + if (NULL == platform_resource) {
2589 + return -ENOENT;
2590 + }
2591 +
2592 + address = devm_ioremap_resource(&pdev->dev, platform_resource);
2593 + if (IS_ERR(address)) {
2594 + return PTR_ERR(address);
2595 + }
2596 +
2597 + resource->address = (uintptr_t)address;
2598 + resource->size = resource_size(platform_resource);
2599 +
2600 + hailo_dev_dbg(&pdev->dev, "resource[%s]: remap %pr of %zx bytes to virtual start address %lx\n",
2601 + platform_resource->name, platform_resource, resource->size, (uintptr_t)address);
2602 +
2603 + return 0;
2604 +}
2605 +
2606 +// TODO: HRT-8475 - change to name instead of index
2607 +int hailo_ioremap_shmem(struct platform_device *pdev, int index, struct hailo_resource *resource)
2608 +{
2609 + int ret;
2610 + struct resource res;
2611 + struct device_node *shmem;
2612 + void __iomem * remap_ptr;
2613 +
2614 + shmem = of_parse_phandle(pdev->dev.of_node, "shmem", index);
2615 + ret = of_address_to_resource(shmem, 0, &res);
2616 + if (ret) {
2617 + hailo_dev_err(&pdev->dev, "hailo_ioremap_shmem, failed to get memory (index: %d)\n", index);
2618 + return ret;
2619 + }
2620 + of_node_put(shmem);
2621 +
2622 + remap_ptr = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
2623 + if (!remap_ptr) {
2624 + hailo_dev_err(&pdev->dev, "hailo_ioremap_shmem, failed to ioremap shmem (index: %d)\n", index);
2625 + return -EADDRNOTAVAIL;
2626 + }
2627 +
2628 + resource->address = (uintptr_t)remap_ptr;
2629 + resource->size = resource_size(&res);
2630 +
2631 + return 0;
2632 +}
2633 +
2634 +int direct_memory_transfer(struct platform_device *pdev, struct hailo_memory_transfer_params *params)
2635 +{
2636 + int err = -EINVAL;
2637 + void __iomem *mem = ioremap(params->address, params->count);
2638 + if (NULL == mem) {
2639 + hailo_dev_err(&pdev->dev, "Failed ioremap %llu %zu\n", params->address, params->count);
2640 + return -ENOMEM;
2641 + }
2642 +
2643 + switch (params->transfer_direction) {
2644 + case TRANSFER_READ:
2645 + memcpy_fromio(params->buffer, mem, params->count);
2646 + err = 0;
2647 + break;
2648 + case TRANSFER_WRITE:
2649 + memcpy_toio(mem, params->buffer, params->count);
2650 + err = 0;
2651 + break;
2652 + default:
2653 + hailo_dev_err(&pdev->dev, "Invalid transfer direction %d\n", (int)params->transfer_direction);
2654 + err = -EINVAL;
2655 + }
2656 +
2657 + iounmap(mem);
2658 + return err;
2659 +}
2660 +
2661 +int hailo_get_resource_physical_addr(struct platform_device *pdev, const char *name, u64 *address)
2662 +{
2663 + struct resource *platform_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
2664 + if (NULL == platform_resource) {
2665 + return -ENOENT;
2666 + }
2667 +
2668 + *address = (u64)(platform_resource->start);
2669 + return 0;
2670 +}
2671 \ No newline at end of file
2672 --- /dev/null
2673 +++ b/drivers/media/pci/hailo/utils/integrated_nnc_utils.h
2674 @@ -0,0 +1,30 @@
2675 +// SPDX-License-Identifier: GPL-2.0
2676 +/**
2677 + * Copyright (c) 2019-2022 Hailo Technologies Ltd. All rights reserved.
2678 + **/
2679 +
2680 +#ifndef _INTEGRATED_NNC_UTILS_H_
2681 +#define _INTEGRATED_NNC_UTILS_H_
2682 +
2683 +#include <linux/platform_device.h>
2684 +#include "hailo_resource.h"
2685 +
2686 +#define HAILO15_CORE_CONTROL_MAILBOX_INDEX (0)
2687 +#define HAILO15_CORE_NOTIFICATION_MAILBOX_INDEX (1)
2688 +#define HAILO15_CORE_DRIVER_DOWN_MAILBOX_INDEX (2)
2689 +
2690 +#define HAILO15_CORE_CONTROL_MAILBOX_TX_SHMEM_INDEX (0)
2691 +#define HAILO15_CORE_CONTROL_MAILBOX_RX_SHMEM_INDEX (1)
2692 +#define HAILO15_CORE_NOTIFICATION_MAILBOX_RX_SHMEM_INDEX (2)
2693 +
2694 +int hailo_ioremap_resource(struct platform_device *pdev, struct hailo_resource *resource,
2695 + const char *name);
2696 +
2697 +// TODO: HRT-8475 - change to name instead of index
2698 +int hailo_ioremap_shmem(struct platform_device *pdev, int index, struct hailo_resource *resource);
2699 +
2700 +int direct_memory_transfer(struct platform_device *pDev, struct hailo_memory_transfer_params *params);
2701 +
2702 +int hailo_get_resource_physical_addr(struct platform_device *pdev, const char *name, u64 *address);
2703 +
2704 +#endif /* _INTEGRATED_NNC_UTILS_H_ */
2705 --- a/drivers/media/pci/hailo/vdma/ioctl.c
2706 +++ b/drivers/media/pci/hailo/vdma/ioctl.c
2707 @@ -12,9 +12,9 @@
2708 #include <linux/uaccess.h>
2709
2710
2711 -long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
2712 +long hailo_vdma_enable_channels_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
2713 {
2714 - struct hailo_vdma_interrupts_enable_params input;
2715 + struct hailo_vdma_enable_channels_params input;
2716 struct hailo_vdma_engine *engine = NULL;
2717 u8 engine_index = 0;
2718 u32 channels_bitmap = 0;
2719 @@ -35,7 +35,7 @@ long hailo_vdma_interrupts_enable_ioctl(
2720
2721 for_each_vdma_engine(controller, engine, engine_index) {
2722 channels_bitmap = input.channels_bitmap_per_engine[engine_index];
2723 - hailo_vdma_engine_enable_channel_interrupts(engine, channels_bitmap,
2724 + hailo_vdma_engine_enable_channels(engine, channels_bitmap,
2725 input.enable_timestamps_measure);
2726 hailo_vdma_update_interrupts_mask(controller, engine_index);
2727 hailo_dev_info(controller->dev, "Enabled interrupts for engine %u, channels bitmap 0x%x\n",
2728 @@ -45,12 +45,13 @@ long hailo_vdma_interrupts_enable_ioctl(
2729 return 0;
2730 }
2731
2732 -long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
2733 +long hailo_vdma_disable_channels_ioctl(struct hailo_vdma_controller *controller, unsigned long arg)
2734 {
2735 - struct hailo_vdma_interrupts_disable_params input;
2736 + struct hailo_vdma_disable_channels_params input;
2737 struct hailo_vdma_engine *engine = NULL;
2738 u8 engine_index = 0;
2739 u32 channels_bitmap = 0;
2740 + unsigned long irq_saved_flags = 0;
2741
2742 if (copy_from_user(&input, (void*)arg, sizeof(input))) {
2743 hailo_dev_err(controller->dev, "copy_from_user fail\n");
2744 @@ -61,15 +62,21 @@ long hailo_vdma_interrupts_disable_ioctl
2745 for_each_vdma_engine(controller, engine, engine_index) {
2746 channels_bitmap = input.channels_bitmap_per_engine[engine_index];
2747 if (channels_bitmap != (channels_bitmap & engine->enabled_channels)) {
2748 - hailo_dev_err(controller->dev, "Trying to disable channels that were not enabled\n");
2749 - return -EINVAL;
2750 + hailo_dev_warn(controller->dev, "Trying to disable channels that were not enabled\n");
2751 }
2752 }
2753
2754 for_each_vdma_engine(controller, engine, engine_index) {
2755 channels_bitmap = input.channels_bitmap_per_engine[engine_index];
2756 - hailo_vdma_engine_interrupts_disable(controller, engine, engine_index,
2757 - channels_bitmap);
2758 + hailo_vdma_engine_disable_channels(engine, channels_bitmap);
2759 + hailo_vdma_update_interrupts_mask(controller, engine_index);
2760 +
2761 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
2762 + hailo_vdma_engine_clear_channel_interrupts(engine, channels_bitmap);
2763 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
2764 +
2765 + hailo_dev_info(controller->dev, "Disabled channels for engine %u, bitmap 0x%x\n",
2766 + engine_index, channels_bitmap);
2767 }
2768
2769 // Wake up threads waiting
2770 @@ -197,7 +204,7 @@ long hailo_vdma_buffer_map_ioctl(struct
2771 return -EFAULT;
2772 }
2773
2774 - hailo_dev_info(controller->dev, "address %px tgid %d size: %zu\n",
2775 + hailo_dev_info(controller->dev, "address %lx tgid %d size: %zu\n",
2776 buf_info.user_address, current->tgid, buf_info.size);
2777
2778 direction = get_dma_direction(buf_info.data_direction);
2779 @@ -209,10 +216,9 @@ long hailo_vdma_buffer_map_ioctl(struct
2780 low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, buf_info.allocated_buffer_handle);
2781
2782 mapped_buffer = hailo_vdma_buffer_map(controller->dev,
2783 - buf_info.user_address, buf_info.size, direction, low_memory_buffer);
2784 + buf_info.user_address, buf_info.size, direction, buf_info.buffer_type, low_memory_buffer);
2785 if (IS_ERR(mapped_buffer)) {
2786 - hailo_dev_err(controller->dev, "failed map buffer %px\n",
2787 - buf_info.user_address);
2788 + hailo_dev_err(controller->dev, "failed map buffer %lx\n", buf_info.user_address);
2789 return PTR_ERR(mapped_buffer);
2790 }
2791
2792 @@ -225,7 +231,7 @@ long hailo_vdma_buffer_map_ioctl(struct
2793 }
2794
2795 list_add(&mapped_buffer->mapped_user_buffer_list, &context->mapped_user_buffer_list);
2796 - hailo_dev_info(controller->dev, "buffer %px (handle %zu) is mapped\n",
2797 + hailo_dev_info(controller->dev, "buffer %lx (handle %zu) is mapped\n",
2798 buf_info.user_address, buf_info.mapped_handle);
2799 return 0;
2800 }
2801 @@ -374,10 +380,10 @@ long hailo_desc_list_release_ioctl(struc
2802 return 0;
2803 }
2804
2805 -long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
2806 +long hailo_desc_list_program_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
2807 unsigned long arg)
2808 {
2809 - struct hailo_desc_list_bind_vdma_buffer_params configure_info;
2810 + struct hailo_desc_list_program_params configure_info;
2811 struct hailo_vdma_buffer *mapped_buffer = NULL;
2812 struct hailo_descriptors_list_buffer *descriptors_buffer = NULL;
2813 struct hailo_vdma_mapped_transfer_buffer transfer_buffer = {0};
2814 @@ -410,7 +416,10 @@ long hailo_desc_list_bind_vdma_buffer(st
2815 &descriptors_buffer->desc_list,
2816 configure_info.starting_desc,
2817 &transfer_buffer,
2818 - configure_info.channel_index
2819 + configure_info.should_bind,
2820 + configure_info.channel_index,
2821 + configure_info.last_interrupts_domain,
2822 + configure_info.is_debug
2823 );
2824 }
2825
2826 @@ -683,11 +692,19 @@ long hailo_vdma_launch_transfer_ioctl(st
2827 params.is_debug
2828 );
2829 if (ret < 0) {
2830 - hailo_dev_err(controller->dev, "Failed launch transfer %d\n", ret);
2831 + params.launch_transfer_status = ret;
2832 + if (-ECONNRESET != ret) {
2833 + hailo_dev_err(controller->dev, "Failed launch transfer %d\n", ret);
2834 + }
2835 + // Still need to copy fail status back to userspace - success oriented
2836 + if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
2837 + hailo_dev_err(controller->dev, "copy_to_user fail\n");
2838 + }
2839 return ret;
2840 }
2841
2842 params.descs_programed = ret;
2843 + params.launch_transfer_status = 0;
2844
2845 if (copy_to_user((void __user*)arg, &params, sizeof(params))) {
2846 hailo_dev_err(controller->dev, "copy_to_user fail\n");
2847 --- a/drivers/media/pci/hailo/vdma/ioctl.h
2848 +++ b/drivers/media/pci/hailo/vdma/ioctl.h
2849 @@ -8,8 +8,8 @@
2850
2851 #include "vdma/vdma.h"
2852
2853 -long hailo_vdma_interrupts_enable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
2854 -long hailo_vdma_interrupts_disable_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
2855 +long hailo_vdma_enable_channels_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
2856 +long hailo_vdma_disable_channels_ioctl(struct hailo_vdma_controller *controller, unsigned long arg);
2857 long hailo_vdma_interrupts_wait_ioctl(struct hailo_vdma_controller *controller, unsigned long arg,
2858 struct semaphore *mutex, bool *should_up_board_mutex);
2859
2860 @@ -19,7 +19,7 @@ long hailo_vdma_buffer_sync_ioctl(struct
2861
2862 long hailo_desc_list_create_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
2863 long hailo_desc_list_release_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
2864 -long hailo_desc_list_bind_vdma_buffer(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
2865 +long hailo_desc_list_program_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
2866
2867 long hailo_vdma_low_memory_buffer_alloc_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
2868 long hailo_vdma_low_memory_buffer_free_ioctl(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller, unsigned long arg);
2869 --- a/drivers/media/pci/hailo/vdma/memory.c
2870 +++ b/drivers/media/pci/hailo/vdma/memory.c
2871 @@ -11,27 +11,107 @@
2872 #include <linux/slab.h>
2873 #include <linux/scatterlist.h>
2874 #include <linux/sched.h>
2875 +#include <linux/module.h>
2876
2877
2878 #define SGL_MAX_SEGMENT_SIZE (0x10000)
2879 // See linux/mm.h
2880 #define MMIO_AND_NO_PAGES_VMA_MASK (VM_IO | VM_PFNMAP)
2881
2882 -static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
2883 +static int map_mmio_address(uintptr_t user_address, u32 size, struct vm_area_struct *vma,
2884 struct sg_table *sgt);
2885 -static int prepare_sg_table(struct sg_table *sg_table, void __user* user_address, u32 size,
2886 +static int prepare_sg_table(struct sg_table *sg_table, uintptr_t user_address, u32 size,
2887 struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
2888 static void clear_sg_table(struct sg_table *sgt);
2889
2890 +#if LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 3, 0 )
2891 +
2892 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
2893 +// Import DMA_BUF namespace for needed kernels
2894 +MODULE_IMPORT_NS(DMA_BUF);
2895 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) */
2896 +
2897 +static int hailo_map_dmabuf(struct device *dev, int dmabuf_fd, enum dma_data_direction direction, struct sg_table *sgt,
2898 + struct hailo_dmabuf_info *dmabuf_info)
2899 +{
2900 + int ret = -EINVAL;
2901 + struct dma_buf *dmabuf = NULL;
2902 + struct dma_buf_attachment *dmabuf_attachment = NULL;
2903 + struct sg_table *res_sgt = NULL;
2904 +
2905 + dmabuf = dma_buf_get(dmabuf_fd);
2906 + if (IS_ERR(dmabuf)) {
2907 + dev_err(dev, "dma_buf_get failed, err=%ld\n", PTR_ERR(dmabuf));
2908 + ret = -EINVAL;
2909 + goto cleanup;
2910 + }
2911 +
2912 + dmabuf_attachment = dma_buf_attach(dmabuf, dev);
2913 + if (IS_ERR(dmabuf_attachment)) {
2914 + dev_err(dev, "dma_buf_attach failed, err=%ld\n", PTR_ERR(dmabuf_attachment));
2915 + ret = -EINVAL;
2916 + goto l_buf_get;
2917 + }
2918 +
2919 + res_sgt = dma_buf_map_attachment(dmabuf_attachment, direction);
2920 + if (IS_ERR(res_sgt)) {
2921 + dev_err(dev, "dma_buf_map_attachment failed, err=%ld\n", PTR_ERR(res_sgt));
2922 + goto l_buf_attach;
2923 + }
2924 +
2925 + *sgt = *res_sgt;
2926 +
2927 + dmabuf_info->dmabuf = dmabuf;
2928 + dmabuf_info->dmabuf_attachment = dmabuf_attachment;
2929 + dmabuf_info->dmabuf_sg_table = res_sgt;
2930 + return 0;
2931 +
2932 +l_buf_attach:
2933 + dma_buf_detach(dmabuf, dmabuf_attachment);
2934 +l_buf_get:
2935 + dma_buf_put(dmabuf);
2936 +cleanup:
2937 + return ret;
2938 +}
2939 +
2940 +static void hailo_unmap_dmabuf(struct hailo_vdma_buffer *vdma_buffer)
2941 +{
2942 + dma_buf_unmap_attachment(vdma_buffer->dmabuf_info.dmabuf_attachment, vdma_buffer->dmabuf_info.dmabuf_sg_table, vdma_buffer->data_direction);
2943 + dma_buf_detach(vdma_buffer->dmabuf_info.dmabuf, vdma_buffer->dmabuf_info.dmabuf_attachment);
2944 + dma_buf_put(vdma_buffer->dmabuf_info.dmabuf);
2945 +}
2946 +
2947 +#else /* LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 3, 0 ) */
2948 +
2949 +static int hailo_map_dmabuf(struct device *dev, int dmabuf_fd, enum dma_data_direction direction, struct sg_table *sgt,
2950 + struct hailo_dmabuf_info *dmabuf_info)
2951 +{
2952 + (void) dmabuf_fd;
2953 + (void) direction;
2954 + (void) sgt;
2955 + (void) dmabuf_info;
2956 + dev_err(dev, "dmabuf not supported in kernel versions lower than 3.3.0\n");
2957 + return -EINVAL;
2958 +}
2959 +
2960 +static void hailo_unmap_dmabuf(struct hailo_vdma_buffer *vdma_buffer)
2961 +{
2962 + dev_err(vdma_buffer->device, "dmabuf not supported in kernel versions lower than 3.3.0\n");
2963 + return;
2964 +}
2965 +
2966 +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION( 3, 3, 0 ) */
2967 +
2968 struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
2969 - void __user *user_address, size_t size, enum dma_data_direction direction,
2970 - struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
2971 + uintptr_t user_address, size_t size, enum dma_data_direction direction,
2972 + enum hailo_dma_buffer_type buffer_type, struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
2973 {
2974 int ret = -EINVAL;
2975 struct hailo_vdma_buffer *mapped_buffer = NULL;
2976 struct sg_table sgt = {0};
2977 struct vm_area_struct *vma = NULL;
2978 bool is_mmio = false;
2979 + struct hailo_dmabuf_info dmabuf_info = {0};
2980
2981 mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
2982 if (NULL == mapped_buffer) {
2983 @@ -40,17 +120,19 @@ struct hailo_vdma_buffer *hailo_vdma_buf
2984 goto cleanup;
2985 }
2986
2987 - if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING)) {
2988 - vma = find_vma(current->mm, (uintptr_t)user_address);
2989 + if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) && (HAILO_DMA_DMABUF_BUFFER != buffer_type)) {
2990 + vma = find_vma(current->mm, user_address);
2991 if (NULL == vma) {
2992 - dev_err(dev, "no vma for virt_addr/size = 0x%08lx/0x%08zx\n", (uintptr_t)user_address, size);
2993 + dev_err(dev, "no vma for virt_addr/size = 0x%08lx/0x%08zx\n", user_address, size);
2994 ret = -EFAULT;
2995 goto cleanup;
2996 }
2997 }
2998
2999 + // TODO: is MMIO DMA MAPPINGS STILL needed after dmabuf
3000 if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) &&
3001 - (MMIO_AND_NO_PAGES_VMA_MASK == (vma->vm_flags & MMIO_AND_NO_PAGES_VMA_MASK))) {
3002 + (MMIO_AND_NO_PAGES_VMA_MASK == (vma->vm_flags & MMIO_AND_NO_PAGES_VMA_MASK)) &&
3003 + (HAILO_DMA_DMABUF_BUFFER != buffer_type)) {
3004 // user_address represents memory mapped I/O and isn't backed by 'struct page' (only by pure pfn)
3005 if (NULL != low_mem_driver_allocated_buffer) {
3006 // low_mem_driver_allocated_buffer are backed by regular 'struct page' addresses, just in low memory
3007 @@ -66,6 +148,14 @@ struct hailo_vdma_buffer *hailo_vdma_buf
3008 }
3009
3010 is_mmio = true;
3011 +
3012 + } else if (HAILO_DMA_DMABUF_BUFFER == buffer_type) {
3013 + // Content user_address in case of dmabuf is fd - for now
3014 + ret = hailo_map_dmabuf(dev, user_address, direction, &sgt, &dmabuf_info);
3015 + if (ret < 0) {
3016 + dev_err(dev, "Failed mapping dmabuf\n");
3017 + goto cleanup;
3018 + }
3019 } else {
3020 // user_address is a standard 'struct page' backed memory address
3021 ret = prepare_sg_table(&sgt, user_address, size, low_mem_driver_allocated_buffer);
3022 @@ -88,6 +178,7 @@ struct hailo_vdma_buffer *hailo_vdma_buf
3023 mapped_buffer->data_direction = direction;
3024 mapped_buffer->sg_table = sgt;
3025 mapped_buffer->is_mmio = is_mmio;
3026 + mapped_buffer->dmabuf_info = dmabuf_info;
3027
3028 return mapped_buffer;
3029
3030 @@ -103,11 +194,16 @@ static void unmap_buffer(struct kref *kr
3031 {
3032 struct hailo_vdma_buffer *buf = container_of(kref, struct hailo_vdma_buffer, kref);
3033
3034 - if (!buf->is_mmio) {
3035 - dma_unmap_sg(buf->device, buf->sg_table.sgl, buf->sg_table.orig_nents, buf->data_direction);
3036 - }
3037 + // If dmabuf - unmap and detatch dmabuf
3038 + if (NULL != buf->dmabuf_info.dmabuf) {
3039 + hailo_unmap_dmabuf(buf);
3040 + } else {
3041 + if (!buf->is_mmio) {
3042 + dma_unmap_sg(buf->device, buf->sg_table.sgl, buf->sg_table.orig_nents, buf->data_direction);
3043 + }
3044
3045 - clear_sg_table(&buf->sg_table);
3046 + clear_sg_table(&buf->sg_table);
3047 + }
3048 kfree(buf);
3049 }
3050
3051 @@ -164,8 +260,9 @@ void hailo_vdma_buffer_sync(struct hailo
3052 struct hailo_vdma_buffer *mapped_buffer, enum hailo_vdma_buffer_sync_type sync_type,
3053 size_t offset, size_t size)
3054 {
3055 - if (IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) && mapped_buffer->is_mmio) {
3056 - // MMIO buffers don't need to be sync'd
3057 + if ((IS_ENABLED(HAILO_SUPPORT_MMIO_DMA_MAPPING) && mapped_buffer->is_mmio) ||
3058 + (NULL != mapped_buffer->dmabuf_info.dmabuf)) {
3059 + // MMIO buffers and dmabufs don't need to be sync'd
3060 return;
3061 }
3062
3063 @@ -404,7 +501,8 @@ void hailo_vdma_clear_continuous_buffer_
3064
3065 // Assumes the provided user_address belongs to the vma and that MMIO_AND_NO_PAGES_VMA_MASK bits are set under
3066 // vma->vm_flags. This is validated in hailo_vdma_buffer_map, and won't be checked here
3067 -static int map_mmio_address(void __user* user_address, u32 size, struct vm_area_struct *vma,
3068 +#if defined(HAILO_SUPPORT_MMIO_DMA_MAPPING)
3069 +static int map_mmio_address(uintptr_t user_address, u32 size, struct vm_area_struct *vma,
3070 struct sg_table *sgt)
3071 {
3072 int ret = -EINVAL;
3073 @@ -413,7 +511,7 @@ static int map_mmio_address(void __user*
3074 unsigned long next_pfn = 0;
3075 phys_addr_t phys_addr = 0;
3076 dma_addr_t mmio_dma_address = 0;
3077 - const uintptr_t virt_addr = (uintptr_t)user_address;
3078 + const uintptr_t virt_addr = user_address;
3079 const u32 vma_size = vma->vm_end - vma->vm_start + 1;
3080 const uintptr_t num_pages = PFN_UP(virt_addr + size) - PFN_DOWN(virt_addr);
3081
3082 @@ -462,8 +560,21 @@ static int map_mmio_address(void __user*
3083
3084 return 0;
3085 }
3086 +#else /* defined(HAILO_SUPPORT_MMIO_DMA_MAPPING) */
3087 +static int map_mmio_address(uintptr_t user_address, u32 size, struct vm_area_struct *vma,
3088 + struct sg_table *sgt)
3089 +{
3090 + (void) user_address;
3091 + (void) size;
3092 + (void) vma;
3093 + (void) sgt;
3094 + pr_err("MMIO DMA MAPPINGS are not supported in this kernel version\n");
3095 + return -EINVAL;
3096 +}
3097 +#endif /* defined(HAILO_SUPPORT_MMIO_DMA_MAPPING) */
3098 +
3099
3100 -static int prepare_sg_table(struct sg_table *sg_table, void __user *user_address, u32 size,
3101 +static int prepare_sg_table(struct sg_table *sg_table, uintptr_t user_address, u32 size,
3102 struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer)
3103 {
3104 int ret = -EINVAL;
3105 @@ -482,8 +593,7 @@ static int prepare_sg_table(struct sg_ta
3106 // Check whether mapping user allocated buffer or driver allocated low memory buffer
3107 if (NULL == low_mem_driver_allocated_buffer) {
3108 mmap_read_lock(current->mm);
3109 - pinned_pages = get_user_pages_compact((unsigned long)user_address,
3110 - npages, FOLL_WRITE | FOLL_FORCE, pages);
3111 + pinned_pages = get_user_pages_compact(user_address, npages, FOLL_WRITE | FOLL_FORCE, pages);
3112 mmap_read_unlock(current->mm);
3113
3114 if (pinned_pages < 0) {
3115 --- a/drivers/media/pci/hailo/vdma/memory.h
3116 +++ b/drivers/media/pci/hailo/vdma/memory.h
3117 @@ -11,8 +11,8 @@
3118
3119 #include "vdma/vdma.h"
3120
3121 -struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev,
3122 - void __user *user_address, size_t size, enum dma_data_direction direction,
3123 +struct hailo_vdma_buffer *hailo_vdma_buffer_map(struct device *dev, uintptr_t user_address, size_t size,
3124 + enum dma_data_direction direction, enum hailo_dma_buffer_type buffer_type,
3125 struct hailo_vdma_low_memory_buffer *low_mem_driver_allocated_buffer);
3126 void hailo_vdma_buffer_get(struct hailo_vdma_buffer *buf);
3127 void hailo_vdma_buffer_put(struct hailo_vdma_buffer *buf);
3128 --- a/drivers/media/pci/hailo/vdma/vdma.c
3129 +++ b/drivers/media/pci/hailo/vdma/vdma.c
3130 @@ -21,7 +21,7 @@
3131
3132
3133 static struct hailo_vdma_engine* init_vdma_engines(struct device *dev,
3134 - struct hailo_resource *channel_registers_per_engine, size_t engines_count)
3135 + struct hailo_resource *channel_registers_per_engine, size_t engines_count, u32 src_channels_bitmask)
3136 {
3137 struct hailo_vdma_engine *engines = NULL;
3138 u8 i = 0;
3139 @@ -33,7 +33,7 @@ static struct hailo_vdma_engine* init_vd
3140 }
3141
3142 for (i = 0; i < engines_count; i++) {
3143 - hailo_vdma_engine_init(&engines[i], i, &channel_registers_per_engine[i]);
3144 + hailo_vdma_engine_init(&engines[i], i, &channel_registers_per_engine[i], src_channels_bitmask);
3145 }
3146
3147 return engines;
3148 @@ -72,7 +72,8 @@ int hailo_vdma_controller_init(struct ha
3149 controller->dev = dev;
3150
3151 controller->vdma_engines_count = engines_count;
3152 - controller->vdma_engines = init_vdma_engines(dev, channel_registers_per_engine, engines_count);
3153 + controller->vdma_engines = init_vdma_engines(dev, channel_registers_per_engine, engines_count,
3154 + vdma_hw->src_channels_bitmask);
3155 if (IS_ERR(controller->vdma_engines)) {
3156 dev_err(dev, "Failed initialized vdma engines\n");
3157 return PTR_ERR(controller->vdma_engines);
3158 @@ -113,36 +114,27 @@ void hailo_vdma_update_interrupts_mask(s
3159 controller->ops->update_channel_interrupts(controller, engine_index, engine->enabled_channels);
3160 }
3161
3162 -void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
3163 - struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap)
3164 -{
3165 - unsigned long irq_saved_flags = 0;
3166 - // In case of FLR, the vdma registers will be NULL
3167 - const bool is_device_up = (NULL != controller->dev);
3168 -
3169 - hailo_vdma_engine_disable_channel_interrupts(engine, channels_bitmap);
3170 - if (is_device_up) {
3171 - hailo_vdma_update_interrupts_mask(controller, engine_index);
3172 - }
3173 -
3174 - spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
3175 - hailo_vdma_engine_clear_channel_interrupts(engine, channels_bitmap);
3176 - spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
3177 -
3178 - hailo_dev_info(controller->dev, "Disabled interrupts for engine %u, channels bitmap 0x%x\n",
3179 - engine_index, channels_bitmap);
3180 -}
3181 -
3182 void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
3183 struct hailo_vdma_controller *controller, struct file *filp)
3184 {
3185 size_t engine_index = 0;
3186 struct hailo_vdma_engine *engine = NULL;
3187 const u32 channels_bitmap = 0xFFFFFFFF; // disable all channel interrupts
3188 + unsigned long irq_saved_flags = 0;
3189 + // In case of FLR, the vdma registers will be NULL
3190 + const bool is_device_up = (NULL != controller->dev);
3191
3192 if (filp == controller->used_by_filp) {
3193 for_each_vdma_engine(controller, engine, engine_index) {
3194 - hailo_vdma_engine_interrupts_disable(controller, engine, engine_index, channels_bitmap);
3195 + hailo_vdma_engine_disable_channels(engine, channels_bitmap);
3196 +
3197 + if (is_device_up) {
3198 + hailo_vdma_update_interrupts_mask(controller, engine_index);
3199 + }
3200 +
3201 + spin_lock_irqsave(&controller->interrupts_lock, irq_saved_flags);
3202 + hailo_vdma_engine_clear_channel_interrupts(engine, channels_bitmap);
3203 + spin_unlock_irqrestore(&controller->interrupts_lock, irq_saved_flags);
3204 }
3205 }
3206
3207 @@ -178,10 +170,10 @@ long hailo_vdma_ioctl(struct hailo_vdma_
3208 unsigned int cmd, unsigned long arg, struct file *filp, struct semaphore *mutex, bool *should_up_board_mutex)
3209 {
3210 switch (cmd) {
3211 - case HAILO_VDMA_INTERRUPTS_ENABLE:
3212 - return hailo_vdma_interrupts_enable_ioctl(controller, arg);
3213 - case HAILO_VDMA_INTERRUPTS_DISABLE:
3214 - return hailo_vdma_interrupts_disable_ioctl(controller, arg);
3215 + case HAILO_VDMA_ENABLE_CHANNELS:
3216 + return hailo_vdma_enable_channels_ioctl(controller, arg);
3217 + case HAILO_VDMA_DISABLE_CHANNELS:
3218 + return hailo_vdma_disable_channels_ioctl(controller, arg);
3219 case HAILO_VDMA_INTERRUPTS_WAIT:
3220 return hailo_vdma_interrupts_wait_ioctl(controller, arg, mutex, should_up_board_mutex);
3221 case HAILO_VDMA_INTERRUPTS_READ_TIMESTAMPS:
3222 @@ -196,8 +188,8 @@ long hailo_vdma_ioctl(struct hailo_vdma_
3223 return hailo_desc_list_create_ioctl(context, controller, arg);
3224 case HAILO_DESC_LIST_RELEASE:
3225 return hailo_desc_list_release_ioctl(context, controller, arg);
3226 - case HAILO_DESC_LIST_BIND_VDMA_BUFFER:
3227 - return hailo_desc_list_bind_vdma_buffer(context, controller, arg);
3228 + case HAILO_DESC_LIST_PROGRAM:
3229 + return hailo_desc_list_program_ioctl(context, controller, arg);
3230 case HAILO_VDMA_LOW_MEMORY_BUFFER_ALLOC:
3231 return hailo_vdma_low_memory_buffer_alloc_ioctl(context, controller, arg);
3232 case HAILO_VDMA_LOW_MEMORY_BUFFER_FREE:
3233 @@ -216,28 +208,6 @@ long hailo_vdma_ioctl(struct hailo_vdma_
3234 }
3235 }
3236
3237 -static int desc_list_mmap(struct hailo_vdma_controller *controller,
3238 - struct hailo_descriptors_list_buffer *vdma_descriptors_buffer, struct vm_area_struct *vma)
3239 -{
3240 - int err = 0;
3241 - unsigned long vsize = vma->vm_end - vma->vm_start;
3242 -
3243 - if (vsize > vdma_descriptors_buffer->buffer_size) {
3244 - hailo_dev_err(controller->dev, "Requested size to map (%lx) is larger than the descriptor list size(%x)\n",
3245 - vsize, vdma_descriptors_buffer->buffer_size);
3246 - return -EINVAL;
3247 - }
3248 -
3249 - err = dma_mmap_coherent(controller->dev, vma, vdma_descriptors_buffer->kernel_address,
3250 - vdma_descriptors_buffer->dma_address, vsize);
3251 - if (err != 0) {
3252 - hailo_dev_err(controller->dev, " Failed mmap descriptors %d\n", err);
3253 - return err;
3254 - }
3255 -
3256 - return 0;
3257 -}
3258 -
3259 static int low_memory_buffer_mmap(struct hailo_vdma_controller *controller,
3260 struct hailo_vdma_low_memory_buffer *vdma_buffer, struct vm_area_struct *vma)
3261 {
3262 @@ -300,15 +270,11 @@ static int continuous_buffer_mmap(struct
3263 int hailo_vdma_mmap(struct hailo_vdma_file_context *context, struct hailo_vdma_controller *controller,
3264 struct vm_area_struct *vma, uintptr_t vdma_handle)
3265 {
3266 - struct hailo_descriptors_list_buffer *vdma_descriptors_buffer = NULL;
3267 struct hailo_vdma_low_memory_buffer *low_memory_buffer = NULL;
3268 struct hailo_vdma_continuous_buffer *continuous_buffer = NULL;
3269
3270 hailo_dev_info(controller->dev, "Map vdma_handle %llu\n", (u64)vdma_handle);
3271 - if (NULL != (vdma_descriptors_buffer = hailo_vdma_find_descriptors_buffer(context, vdma_handle))) {
3272 - return desc_list_mmap(controller, vdma_descriptors_buffer, vma);
3273 - }
3274 - else if (NULL != (low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, vdma_handle))) {
3275 + if (NULL != (low_memory_buffer = hailo_vdma_find_low_memory_buffer(context, vdma_handle))) {
3276 return low_memory_buffer_mmap(controller, low_memory_buffer, vma);
3277 }
3278 else if (NULL != (continuous_buffer = hailo_vdma_find_continuous_buffer(context, vdma_handle))) {
3279 --- a/drivers/media/pci/hailo/vdma/vdma.h
3280 +++ b/drivers/media/pci/hailo/vdma/vdma.h
3281 @@ -16,6 +16,8 @@
3282 #include <linux/dma-mapping.h>
3283 #include <linux/types.h>
3284 #include <linux/semaphore.h>
3285 +#include <linux/dma-buf.h>
3286 +#include <linux/version.h>
3287
3288 #define VDMA_CHANNEL_CONTROL_REG_OFFSET(channel_index, direction) (((direction) == DMA_TO_DEVICE) ? \
3289 (((channel_index) << 5) + 0x0) : (((channel_index) << 5) + 0x10))
3290 @@ -28,6 +30,22 @@
3291 ((u8*)((vdma_registers)->address) + VDMA_CHANNEL_NUM_PROC_OFFSET(channel_index, direction))
3292
3293
3294 +// dmabuf is supported from linux kernel version 3.3
3295 +#if LINUX_VERSION_CODE < KERNEL_VERSION( 3, 3, 0 )
3296 +// Make a dummy struct with one byte (the C standard does not allow empty structs) - so we don't have to ifdef everywhere
3297 +struct hailo_dmabuf_info {
3298 + uint8_t dummy;
3299 +};
3300 +#else
3301 +// dmabuf_sg_table is needed because in dma_buf_unmap_attachment() the sg_table's address has to match
3302 +// the one returned from dma_buf_map_attachment() - otherwise we would need to malloc each time
3303 +struct hailo_dmabuf_info {
3304 + struct dma_buf *dmabuf;
3305 + struct dma_buf_attachment *dmabuf_attachment;
3306 + struct sg_table *dmabuf_sg_table;
3307 +};
3308 +#endif // LINUX_VERSION_CODE < KERNEL_VERSION( 3, 3, 0 )
3309 +
3310 struct hailo_vdma_buffer {
3311 struct list_head mapped_user_buffer_list;
3312 size_t handle;
3313 @@ -35,7 +53,7 @@ struct hailo_vdma_buffer {
3314 struct kref kref;
3315 struct device *device;
3316
3317 - void __user *user_address;
3318 + uintptr_t user_address;
3319 u32 size;
3320 enum dma_data_direction data_direction;
3321 struct sg_table sg_table;
3322 @@ -44,7 +62,10 @@ struct hailo_vdma_buffer {
3323 // 'struct page' (only by pure pfn). On this case, accessing to the page,
3324 // or calling APIs that access the page (e.g. dma_sync_sg_for_cpu) is not
3325 // allowed.
3326 - bool is_mmio;
3327 + bool is_mmio;
3328 +
3329 +    // Relevant parameters that need to be saved in case of dmabuf - otherwise struct pointers will be NULL
3330 + struct hailo_dmabuf_info dmabuf_info;
3331 };
3332
3333 // Continuous buffer that holds a descriptor list.
3334 @@ -53,7 +74,7 @@ struct hailo_descriptors_list_buffer {
3335 uintptr_t handle;
3336 void *kernel_address;
3337 dma_addr_t dma_address;
3338 - u32 buffer_size;
3339 + u32 buffer_size;
3340 struct hailo_vdma_descriptors_list desc_list;
3341 };
3342
3343 @@ -120,9 +141,6 @@ int hailo_vdma_controller_init(struct ha
3344 void hailo_vdma_update_interrupts_mask(struct hailo_vdma_controller *controller,
3345 size_t engine_index);
3346
3347 -void hailo_vdma_engine_interrupts_disable(struct hailo_vdma_controller *controller,
3348 - struct hailo_vdma_engine *engine, u8 engine_index, u32 channels_bitmap);
3349 -
3350 void hailo_vdma_file_context_init(struct hailo_vdma_file_context *context);
3351 void hailo_vdma_file_context_finalize(struct hailo_vdma_file_context *context,
3352 struct hailo_vdma_controller *controller, struct file *filp);