#
detect-thread-ratio: 1.5
-# Cuda configuration.
-cuda:
- # The "mpm" profile. On not specifying any of these parameters, the engine's
- # internal default values are used, which are same as the ones specified in
- # in the default conf file.
- mpm:
- # The minimum length required to buffer data to the gpu.
- # Anything below this is MPM'ed on the CPU.
- # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
- # A value of 0 indicates there's no limit.
- data-buffer-size-min-limit: 0
- # The maximum length for data that we would buffer to the gpu.
- # Anything over this is MPM'ed on the CPU.
- # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
- data-buffer-size-max-limit: 1500
- # The ring buffer size used by the CudaBuffer API to buffer data.
- cudabuffer-buffer-size: 500mb
- # The max chunk size that can be sent to the gpu in a single go.
- gpu-transfer-size: 50mb
- # The timeout limit for batching of packets in microseconds.
- batching-timeout: 2000
- # The device to use for the mpm. Currently we don't support load balancing
- # on multiple gpus. In case you have multiple devices on your system, you
- # can specify the device to use, using this conf. By default we hold 0, to
- # specify the first device cuda sees. To find out device-id associated with
- # the card(s) on the system run "suricata --list-cuda-cards".
- device-id: 0
- # No of Cuda streams used for asynchronous processing. All values > 0 are valid.
- # For this option you need a device with Compute Capability > 1.0.
- cuda-streams: 2
-
# Select the multi pattern algorithm you want to use for scan/search
# in the engine.
#
size10386: 0
size16384: 0
+##
+## Hardware acceleration
+##
+
+# Cuda configuration.
+cuda:
+ # The "mpm" profile. On not specifying any of these parameters, the engine's
+ # internal default values are used, which are same as the ones specified in
+ # in the default conf file.
+ mpm:
+ # The minimum length required to buffer data to the gpu.
+ # Anything below this is MPM'ed on the CPU.
+ # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
+ # A value of 0 indicates there's no limit.
+ data-buffer-size-min-limit: 0
+ # The maximum length for data that we would buffer to the gpu.
+ # Anything over this is MPM'ed on the CPU.
+ # Can be specified in kb, mb, gb. Just a number indicates it's in bytes.
+ data-buffer-size-max-limit: 1500
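+ # A purely illustrative override (hypothetical values, not tuning advice):
+ # both limits accept the kb/mb/gb suffixes described above, e.g.
+ #   data-buffer-size-min-limit: 2kb
+ #   data-buffer-size-max-limit: 4kb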
+ # The ring buffer size used by the CudaBuffer API to buffer data.
+ cudabuffer-buffer-size: 500mb
+ # The max chunk size that can be sent to the gpu in a single go.
+ gpu-transfer-size: 50mb
+ # The timeout limit for batching of packets in microseconds.
+ batching-timeout: 2000
+ # The device to use for the mpm. Load balancing across multiple gpus is
+ # currently not supported. If you have multiple devices on your system, you
+ # can select which one to use with this option. The default is 0, i.e. the
+ # first device cuda sees. To find the device-id associated with the card(s)
+ # on the system, run "suricata --list-cuda-cards".
+ device-id: 0
+ # Number of Cuda streams used for asynchronous processing. All values > 0
+ # are valid. For this option you need a device with Compute Capability > 1.0.
+ cuda-streams: 2
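+ # A minimal usage sketch, assuming a second card listed by
+ # "suricata --list-cuda-cards" should be used and more streams are wanted
+ # (hypothetical values, not recommendations):
+ #   cuda:
+ #     mpm:
+ #       device-id: 1
+ #       cuda-streams: 4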
+
# Includes. Files included here will be handled as if they were
# inlined in this configuration file.