--- /dev/null
+From 3d71992b07c00e9f99ea83f055b74b348f00eb45 Mon Sep 17 00:00:00 2001
+From: Hal Feng <hal.feng@starfivetech.com>
+Date: Wed, 25 Dec 2024 10:16:27 +0800
+Subject: [PATCH 33/55] wifi: Add aic8800 wifi driver
+
+Add the aic8800 WiFi driver.
+Ported from tag JH7110_VF2_6.6_v5.13.2.
+
+Signed-off-by: Hal Feng <hal.feng@starfivetech.com>
+---
+ drivers/net/wireless/aic8800/Kconfig | 10 +
+ drivers/net/wireless/aic8800/Makefile | 71 +
+ .../net/wireless/aic8800/aic8800_fdrv/Kconfig | 36 +
+ .../wireless/aic8800/aic8800_fdrv/Makefile | 373 +
+ .../aic8800/aic8800_fdrv/aic_br_ext.c | 1569 +++
+ .../aic8800/aic8800_fdrv/aic_br_ext.h | 73 +
+ .../aic8800/aic8800_fdrv/aic_vendor.c | 879 ++
+ .../aic8800/aic8800_fdrv/aic_vendor.h | 346 +
+ .../aic8800_fdrv/aicwf_compat_8800d80.c | 66 +
+ .../aic8800_fdrv/aicwf_compat_8800d80.h | 6 +
+ .../aic8800_fdrv/aicwf_compat_8800dc.c | 2329 ++++
+ .../aic8800_fdrv/aicwf_compat_8800dc.h | 16 +
+ .../aic8800/aic8800_fdrv/aicwf_debug.h | 52 +
+ .../aic8800/aic8800_fdrv/aicwf_rx_prealloc.h | 27 +
+ .../aic8800/aic8800_fdrv/aicwf_sdio.c | 1251 +++
+ .../aic8800/aic8800_fdrv/aicwf_sdio.h | 99 +
+ .../aic8800/aic8800_fdrv/aicwf_txrxif.c | 1227 +++
+ .../aic8800/aic8800_fdrv/aicwf_txrxif.h | 291 +
+ .../wireless/aic8800/aic8800_fdrv/aicwf_usb.c | 2318 ++++
+ .../wireless/aic8800/aic8800_fdrv/aicwf_usb.h | 173 +
+ .../aic8800/aic8800_fdrv/aicwf_wext_linux.c | 1201 +++
+ .../aic8800/aic8800_fdrv/aicwf_wext_linux.h | 11 +
+ .../wireless/aic8800/aic8800_fdrv/hal_desc.h | 376 +
+ .../aic8800/aic8800_fdrv/ipc_compat.h | 25 +
+ .../wireless/aic8800/aic8800_fdrv/ipc_host.c | 771 ++
+ .../wireless/aic8800/aic8800_fdrv/ipc_host.h | 508 +
+ .../aic8800/aic8800_fdrv/ipc_shared.h | 802 ++
+ .../wireless/aic8800/aic8800_fdrv/lmac_mac.h | 588 +
+ .../wireless/aic8800/aic8800_fdrv/lmac_msg.h | 3023 ++++++
+ .../aic8800/aic8800_fdrv/lmac_types.h | 62 +
+ .../net/wireless/aic8800/aic8800_fdrv/md5.c | 161 +
+ .../net/wireless/aic8800/aic8800_fdrv/md5.h | 48 +
+ .../aic8800/aic8800_fdrv/reg_access.h | 161 +
+ .../aic8800/aic8800_fdrv/reg_ipc_app.h | 299 +
+ .../net/wireless/aic8800/aic8800_fdrv/regdb.c | 2873 +++++
+ .../aic8800/aic8800_fdrv/rwnx_bfmer.c | 105 +
+ .../aic8800/aic8800_fdrv/rwnx_bfmer.h | 100 +
+ .../aic8800/aic8800_fdrv/rwnx_cfgfile.c | 237 +
+ .../aic8800/aic8800_fdrv/rwnx_cfgfile.h | 35 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_cmds.c | 541 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_cmds.h | 120 +
+ .../aic8800/aic8800_fdrv/rwnx_compat.h | 428 +
+ .../aic8800/aic8800_fdrv/rwnx_debugfs.c | 2251 ++++
+ .../aic8800/aic8800_fdrv/rwnx_debugfs.h | 203 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_defs.h | 767 ++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_dini.c | 294 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_dini.h | 20 +
+ .../aic8800/aic8800_fdrv/rwnx_events.h | 1243 +++
+ .../aic8800/aic8800_fdrv/rwnx_fw_dump.c | 568 +
+ .../aic8800/aic8800_fdrv/rwnx_fw_trace.c | 47 +
+ .../aic8800/aic8800_fdrv/rwnx_fw_trace.h | 35 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_gki.c | 18 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_gki.h | 11 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_irqs.c | 67 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_irqs.h | 20 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_main.c | 9564 +++++++++++++++++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_main.h | 38 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_mesh.c | 42 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_mesh.h | 45 +
+ .../aic8800/aic8800_fdrv/rwnx_mod_params.c | 1755 +++
+ .../aic8800/aic8800_fdrv/rwnx_mod_params.h | 70 +
+ .../aic8800/aic8800_fdrv/rwnx_msg_rx.c | 1566 +++
+ .../aic8800/aic8800_fdrv/rwnx_msg_rx.h | 19 +
+ .../aic8800/aic8800_fdrv/rwnx_msg_tx.c | 3753 +++++++
+ .../aic8800/aic8800_fdrv/rwnx_msg_tx.h | 181 +
+ .../aic8800/aic8800_fdrv/rwnx_mu_group.c | 659 ++
+ .../aic8800/aic8800_fdrv/rwnx_mu_group.h | 179 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_pci.c | 94 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_pci.h | 17 +
+ .../aic8800/aic8800_fdrv/rwnx_platform.c | 2306 ++++
+ .../aic8800/aic8800_fdrv/rwnx_platform.h | 142 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_prof.h | 133 +
+ .../aic8800/aic8800_fdrv/rwnx_radar.c | 1644 +++
+ .../aic8800/aic8800_fdrv/rwnx_radar.h | 160 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_rx.c | 2219 ++++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_rx.h | 402 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_strs.c | 261 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_strs.h | 31 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tdls.c | 796 ++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tdls.h | 54 +
+ .../aic8800/aic8800_fdrv/rwnx_testmode.c | 226 +
+ .../aic8800/aic8800_fdrv/rwnx_testmode.h | 64 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tx.c | 2143 ++++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_tx.h | 198 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_txq.c | 1364 +++
+ .../wireless/aic8800/aic8800_fdrv/rwnx_txq.h | 397 +
+ .../aic8800/aic8800_fdrv/rwnx_utils.c | 39 +
+ .../aic8800/aic8800_fdrv/rwnx_utils.h | 142 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_v7.c | 192 +
+ .../wireless/aic8800/aic8800_fdrv/rwnx_v7.h | 20 +
+ .../aic8800/aic8800_fdrv/rwnx_version.h | 11 +
+ .../aic8800/aic8800_fdrv/rwnx_version_gen.h | 5 +
+ .../wireless/aic8800/aic8800_fdrv/sdio_host.c | 142 +
+ .../wireless/aic8800/aic8800_fdrv/sdio_host.h | 42 +
+ .../wireless/aic8800/aic8800_fdrv/usb_host.c | 154 +
+ .../wireless/aic8800/aic8800_fdrv/usb_host.h | 43 +
+ .../net/wireless/aic8800/aic_load_fw/Kconfig | 5 +
+ .../net/wireless/aic8800/aic_load_fw/Makefile | 105 +
+ .../aic8800/aic_load_fw/aic_bluetooth_main.c | 71 +
+ .../aic8800/aic_load_fw/aic_compat_8800d80.c | 331 +
+ .../aic8800/aic_load_fw/aic_compat_8800d80.h | 37 +
+ .../wireless/aic8800/aic_load_fw/aic_txrxif.c | 486 +
+ .../wireless/aic8800/aic_load_fw/aic_txrxif.h | 216 +
+ .../aic8800/aic_load_fw/aicbluetooth.c | 1125 ++
+ .../aic8800/aic_load_fw/aicbluetooth.h | 22 +
+ .../aic8800/aic_load_fw/aicbluetooth_cmds.c | 472 +
+ .../aic8800/aic_load_fw/aicbluetooth_cmds.h | 298 +
+ .../aic8800/aic_load_fw/aicwf_debug.h | 52 +
+ .../aic8800/aic_load_fw/aicwf_rx_prealloc.c | 120 +
+ .../aic8800/aic_load_fw/aicwf_rx_prealloc.h | 26 +
+ .../aic8800/aic_load_fw/aicwf_txq_prealloc.c | 61 +
+ .../aic8800/aic_load_fw/aicwf_txq_prealloc.h | 4 +
+ .../wireless/aic8800/aic_load_fw/aicwf_usb.c | 1716 +++
+ .../wireless/aic8800/aic_load_fw/aicwf_usb.h | 187 +
+ .../net/wireless/aic8800/aic_load_fw/md5.c | 161 +
+ .../net/wireless/aic8800/aic_load_fw/md5.h | 48 +
+ .../aic8800/aic_load_fw/rwnx_version_gen.h | 4 +
+ 117 files changed, 66090 insertions(+)
+ create mode 100644 drivers/net/wireless/aic8800/Kconfig
+ create mode 100644 drivers/net/wireless/aic8800/Makefile
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/Kconfig
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/Makefile
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_br_ext.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_br_ext.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_vendor.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aic_vendor.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800d80.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800d80.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800dc.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800dc.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_debug.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_rx_prealloc.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_sdio.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_sdio.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_txrxif.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_txrxif.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_usb.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_usb.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_wext_linux.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_wext_linux.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/hal_desc.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_compat.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_host.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_host.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/ipc_shared.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/lmac_mac.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/lmac_msg.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/lmac_types.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/md5.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/md5.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/reg_access.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/reg_ipc_app.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/regdb.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_bfmer.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_bfmer.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cfgfile.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cfgfile.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cmds.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cmds.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_compat.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_debugfs.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_debugfs.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_defs.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_dini.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_dini.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_events.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_dump.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_trace.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_trace.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_gki.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_gki.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_irqs.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_irqs.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_main.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_main.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mesh.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mesh.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mod_params.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mod_params.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_rx.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_rx.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_tx.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_tx.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mu_group.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mu_group.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_pci.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_pci.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_platform.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_platform.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_prof.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_radar.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_radar.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_rx.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_rx.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_strs.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_strs.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tdls.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tdls.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_testmode.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_testmode.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tx.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tx.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_utils.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_utils.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_v7.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_v7.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_version.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_version_gen.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/sdio_host.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/sdio_host.h
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/usb_host.c
+ create mode 100644 drivers/net/wireless/aic8800/aic8800_fdrv/usb_host.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/Kconfig
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/Makefile
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aic_bluetooth_main.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aic_compat_8800d80.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aic_compat_8800d80.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aic_txrxif.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aic_txrxif.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth_cmds.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth_cmds.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_debug.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_rx_prealloc.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_rx_prealloc.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_txq_prealloc.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_txq_prealloc.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_usb.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/aicwf_usb.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/md5.c
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/md5.h
+ create mode 100644 drivers/net/wireless/aic8800/aic_load_fw/rwnx_version_gen.h
+
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/Kconfig
+@@ -0,0 +1,10 @@
++config AIC_WLAN_SUPPORT
++ bool "AIC wireless Support"
++ default n
++ help
++ This is support for aic wireless chip.
++
++if AIC_WLAN_SUPPORT
++source "drivers/net/wireless/aic8800/aic8800_fdrv/Kconfig"
++source "drivers/net/wireless/aic8800/aic_load_fw/Kconfig"
++endif
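++
++# Illustrative .config fragment for enabling the driver (example values only,
++# not part of the original patch):
++#   CONFIG_AIC_WLAN_SUPPORT=y
++#   CONFIG_AIC8800_WLAN_SUPPORT=m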
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/Makefile
+@@ -0,0 +1,71 @@
++CONFIG_AIC_LOADFW_SUPPORT := m
++CONFIG_AIC8800_WLAN_SUPPORT := m
++
++
++obj-$(CONFIG_AIC_LOADFW_SUPPORT) += aic_load_fw/
++obj-$(CONFIG_AIC8800_WLAN_SUPPORT) += aic8800_fdrv/
++
++
++# Platform support list
++CONFIG_PLATFORM_ROCKCHIP ?= n
++CONFIG_PLATFORM_ALLWINNER ?= n
++CONFIG_PLATFORM_AMLOGIC ?= n
++CONFIG_PLATFORM_UBUNTU ?= y
++
++ifeq ($(CONFIG_PLATFORM_ROCKCHIP), y)
++#KDIR := /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/kernel
++#ARCH ?= arm
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
++KDIR := /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/kernel
++ARCH ?= arm
++CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/prebuilts/gcc/linux-x86/arm/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-
++#KDIR := /home/yaya/E/Rockchip/3399/rk3399-android-10/kernel
++#ARCH ?= arm64
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3399/rk3399-android-10/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++ccflags-y += -DANDROID_PLATFORM
++endif
++
++ifeq ($(CONFIG_PLATFORM_ALLWINNER), y)
++KDIR := /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/kernel/linux-4.9
++ARCH ?= arm64
++CROSS_COMPILE ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/out/gcc-linaro-5.3.1-2016.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++ccflags-y += -DANDROID_PLATFORM
++endif
++
++ifeq ($(CONFIG_PLATFORM_AMLOGIC), y)
++ccflags-y += -DANDROID_PLATFORM
++ARCH := arm
++CROSS_COMPILE := /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel-
++KDIR := /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/
++
++endif
++
++ifeq ($(CONFIG_PLATFORM_UBUNTU), y)
++KDIR := /lib/modules/$(shell uname -r)/build
++PWD := $(shell pwd)
++KVER := $(shell uname -r)
++MODDESTDIR := /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800
++ARCH ?= x86_64
++CROSS_COMPILE ?=
++endif
++
++
++all: modules
++modules:
++ make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++
++install:
++ mkdir -p $(MODDESTDIR)
++ install -p -m 644 aic_load_fw/aic_load_fw.ko $(MODDESTDIR)/
++ install -p -m 644 aic8800_fdrv/aic8800_fdrv.ko $(MODDESTDIR)/
++ /sbin/depmod -a ${KVER}
++
++uninstall:
++ rm -rfv $(MODDESTDIR)/aic_load_fw.ko
++ rm -rfv $(MODDESTDIR)/aic8800_fdrv.ko
++ /sbin/depmod -a ${KVER}
++
++clean:
++ cd aic_load_fw/;make clean;cd ..
++ cd aic8800_fdrv/;make clean;cd ..
++ rm -rf modules.order Module.symvers .tmp_versions/
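++
++# Example host-build usage (illustrative; assumes kernel headers for the
++# running kernel are installed and CONFIG_PLATFORM_UBUNTU stays at its default):
++#   make
++#   sudo make install
++#   sudo modprobe aic_load_fw && sudo modprobe aic8800_fdrv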
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/Kconfig
+@@ -0,0 +1,36 @@
++config AIC8800_WLAN_SUPPORT
++ tristate "AIC8800 wlan Support"
++ help
++ This is support for aic wifi driver.
++
++#choice
++# depends on AIC8800_WLAN_SUPPORT
++# prompt "Select 8800 support platform"
++#
++# config PLATFORM_ROCKCHIP
++# bool "PLATFORM_ROCKCHIP"
++# depends on AIC8800_WLAN_SUPPORT
++#
++# config PLATFORM_ALLWINNER
++# bool "PLATFORM_ALLWINNER"
++# depends on AIC8800_WLAN_SUPPORT
++#
++# config PLATFORM_AMLOGIC
++# bool "PLATFORM_AMLOGIC"
++# depends on AIC8800_WLAN_SUPPORT
++#
++# config PLATFORM_UBUNTU
++# bool "PLATFORM_UBUNTU"
++# depends on AIC8800_WLAN_SUPPORT
++#
++#endchoice
++#
++#config USE_5G
++# bool "AIC8800 force enable 5G"
++# ---help---
++# This parameter forces 5G to be enabled.
++#
++#config HE_FOR_OLD_KERNEL
++# bool "AIC8800 support AX for old kernel"
++# ---help---
++# This parameter enables AX (HE) support on old kernels.
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/Makefile
+@@ -0,0 +1,373 @@
++#EXTRA_CFLAGS += $(USER_EXTRA_CFLAGS)
++#EXTRA_CFLAGS += -Wno-implicit-fallthrough
++
++RWNX_VERS_NUM := 6.4.3.0
++
++CONFIG_AIC8800_WLAN_SUPPORT = m
++MODULE_NAME = aic8800_fdrv
++CONFIG_COUNTRY_CODE = "00"
++
++# Support of bootrom start
++CONFIG_START_FROM_BOOTROM = y
++
++# Support of pmic setting, available with the new bootrom version
++CONFIG_PMIC_SETTING ?= n
++
++# Select 8800DC/DW DCDC_VRF mode, check your board
++CONFIG_VRF_DCDC_MODE = y
++
++# ROM patch enabled option
++CONFIG_ROM_PATCH_EN ?= y
++
++#
++# WAITING FOR KCONFIG {
++#
++CONFIG_RWNX_SOFTMAC ?= n
++CONFIG_RWNX_FULLMAC ?= y
++CONFIG_RWNX_FHOST ?= n
++
++#
++# DEBUG OPTIONS
++CONFIG_RWNX_UM_HELPER_DFLT ?= "/dini/dini_bin/rwnx_umh.sh"
++CONFIG_AIC_FW_PATH = "/vendor/etc/firmware"
++export CONFIG_AIC_FW_PATH
++
++#
++# FW ARCH:
++CONFIG_RWNX_SDM ?= n
++CONFIG_RWNX_TL4 ?= n
++
++# IPC version
++CONFIG_RWNX_OLD_IPC ?= n
++
++# Support of P2P DebugFS for enabling/disabling NoA and OppPS
++CONFIG_RWNX_P2P_DEBUGFS ?= y
++
++#
++# } // WAITING FOR KCONFIG
++#
++
++# Enable A-MSDU support (need FW support)
++## Select this if FW is compiled with AMSDU support
++CONFIG_RWNX_SPLIT_TX_BUF ?= n
++## Select this to send AMSDU
++CONFIG_RWNX_AMSDUS_TX ?= n
++
++# Enable BFMER support (need FW support)
++CONFIG_RWNX_BFMER ?= n
++
++CONFIG_SDIO_SUPPORT = n
++CONFIG_USB_SUPPORT = y
++CONFIG_RX_REORDER ?= y
++CONFIG_ARP_OFFLOAD = y
++CONFIG_USE_5G ?= n
++CONFIG_RADAR_OR_IR_DETECT = n
++CONFIG_DOWNLOAD_FW = n
++CONFIG_RFTEST = y
++CONFIG_USB_BT = y
++CONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE = y
++CONFIG_WPA3_FOR_OLD_KERNEL ?= n
++CONFIG_HE_FOR_OLD_KERNEL ?= n
++CONFIG_VHT_FOR_OLD_KERNEL ?= n
++# CONFIG_COEX = n for BT_ONLY, CONFIG_COEX =y for combo and sw
++CONFIG_COEX = y
++CONFIG_ALIGN_8BYTES = n
++CONFIG_TXRX_THREAD_PRIO = n
++CONFIG_USB_ALIGN_DATA = n
++CONFIG_RX_TASKLET = n
++CONFIG_TX_TASKLET = n
++CONFIG_RX_NETIF_RECV_SKB = y
++CONFIG_BR_SUPPORT = n
++CONFIG_USB_MSG_OUT_EP = y
++CONFIG_USB_MSG_IN_EP = y
++
++# DCDW supports TX aggregation, D80 supports both RX and TX aggregation
++CONFIG_USB_RX_AGGR = n
++CONFIG_USB_TX_AGGR = n
++
++CONFIG_USB_NO_TRANS_DMA_MAP = n
++CONFIG_GPIO_WAKEUP = n
++CONFIG_CREATE_TRACE_POINTS = n
++CONFIG_SUPPORT_REALTIME_CHANGE_MAC = y
++CONFIG_USE_USB_ZERO_PACKET = y
++CONFIG_DEBUG_FS = n
++CONFIG_STA_SCAN_WHEN_P2P_WORKING = y
++CONFIG_SET_VENDOR_EXTENSION_IE = n
++CONFIG_VENDOR_GPIO = n
++CONFIG_FWLOG_EN = n
++CONFIG_FOR_IPCAM = n
++CONFIG_5M10M = n
++# Need to set fw path in BOARD_KERNEL_CMDLINE
++CONFIG_USE_FW_REQUEST = n
++CONFIG_USE_P2P0 = n
++CONFIG_ONE_TXQ = n
++CONFIG_PER_STA_FC = n
++CONFIG_PREALLOC_RX_SKB = n
++CONFIG_PREALLOC_TXQ = n
++CONFIG_USE_WIRELESS_EXT = n
++CONFIG_DPD = n
++CONFIG_GKI = n
++CONFIG_SCHED_SCAN = n
++
++# Support of MU-MIMO transmission (need FW support)
++ifeq ($(CONFIG_RWNX_BFMER), y)
++CONFIG_RWNX_MUMIMO_TX ?= n
++else
++CONFIG_RWNX_MUMIMO_TX = n
++endif
++
++# Enable handling of radar event
++CONFIG_RWNX_RADAR ?= y
++
++# Enable HW queue for Broadcast/Multicast traffic (need FW support)
++CONFIG_RWNX_BCMC ?= y
++
++# Enable Monitor+Data interface support (need FW support)
++CONFIG_RWNX_MON_DATA = n
++CONFIG_RWNX_MON_XMIT ?= n
++CONFIG_RWNX_MON_RXFILTER ?= n
++
++# extra DEBUG config
++CONFIG_RWNX_SW_PROFILING ?= n
++CONFIG_RWNX_DBG ?= y
++BR_NAME = br0
++
++obj-$(CONFIG_AIC8800_WLAN_SUPPORT) := $(MODULE_NAME).o
++$(MODULE_NAME)-y := \
++ rwnx_msg_tx.o \
++ rwnx_msg_rx.o \
++ rwnx_utils.o \
++ rwnx_cmds.o \
++ rwnx_irqs.o \
++ rwnx_cfgfile.o \
++ rwnx_strs.o \
++ rwnx_rx.o \
++ rwnx_tx.o \
++ rwnx_txq.o \
++ rwnx_main.o \
++ rwnx_mod_params.o \
++ rwnx_mesh.o \
++ rwnx_platform.o \
++ rwnx_pci.o \
++ rwnx_dini.o \
++ rwnx_v7.o \
++ ipc_host.o \
++ rwnx_tdls.o \
++ regdb.o \
++ md5.o \
++ aic_vendor.o \
++ aicwf_compat_8800dc.o \
++ aicwf_compat_8800d80.o \
++
++$(MODULE_NAME)-$(CONFIG_BR_SUPPORT) += aic_br_ext.o
++$(MODULE_NAME)-$(CONFIG_RWNX_RADAR) += rwnx_radar.o
++$(MODULE_NAME)-$(CONFIG_DEBUG_FS) += rwnx_debugfs.o
++$(MODULE_NAME)-$(CONFIG_DEBUG_FS) += rwnx_fw_trace.o
++$(MODULE_NAME)-$(CONFIG_NL80211_TESTMODE) += rwnx_testmode.o
++$(MODULE_NAME)-$(CONFIG_RWNX_BFMER) += rwnx_bfmer.o
++$(MODULE_NAME)-$(CONFIG_RWNX_MUMIMO_TX) += rwnx_mu_group.o
++$(MODULE_NAME)-$(CONFIG_SDIO_SUPPORT) += sdio_host.o
++$(MODULE_NAME)-$(CONFIG_SDIO_SUPPORT) += aicwf_txrxif.o
++$(MODULE_NAME)-$(CONFIG_SDIO_SUPPORT) += aicwf_sdio.o
++
++$(MODULE_NAME)-$(CONFIG_USB_SUPPORT) += usb_host.o
++$(MODULE_NAME)-$(CONFIG_USB_SUPPORT) += aicwf_txrxif.o
++$(MODULE_NAME)-$(CONFIG_USB_SUPPORT) += aicwf_usb.o
++$(MODULE_NAME)-$(CONFIG_USE_WIRELESS_EXT) += aicwf_wext_linux.o
++$(MODULE_NAME)-$(CONFIG_GKI) += rwnx_gki.o
++
++ccflags-$(CONFIG_DEBUG_FS) += -DCONFIG_RWNX_DEBUGFS
++ccflags-$(CONFIG_DEBUG_FS) += -DCONFIG_RWNX_UM_HELPER_DFLT=\"$(CONFIG_RWNX_UM_HELPER_DFLT)\"
++ccflags-$(CONFIG_RWNX_P2P_DEBUGFS) += -DCONFIG_RWNX_P2P_DEBUGFS
++
++# FW VARS
++ccflags-y += -DNX_VIRT_DEV_MAX=4
++
++#for 8800D and DCDW u01
++#ccflags-y += -DNX_REMOTE_STA_MAX=8
++
++#for 8800DCDW u02
++ccflags-y += -DNX_REMOTE_STA_MAX_FOR_OLD_IC=8
++ccflags-y += -DNX_REMOTE_STA_MAX=32
++
++ccflags-y += -DNX_MU_GROUP_MAX=62
++ccflags-y += -DNX_TXDESC_CNT=64
++ccflags-y += -DNX_TX_MAX_RATES=4
++ccflags-y += -DNX_CHAN_CTXT_CNT=3
++
++# FW ARCH:
++ccflags-$(CONFIG_RWNX_SDM) += -DCONFIG_RWNX_SDM
++ccflags-$(CONFIG_RWNX_TL4) += -DCONFIG_RWNX_TL4
++ccflags-$(CONFIG_RWNX_OLD_IPC) += -DCONFIG_RWNX_OLD_IPC
++ccflags-$(CONFIG_START_FROM_BOOTROM) += -DCONFIG_START_FROM_BOOTROM
++ccflags-$(CONFIG_PMIC_SETTING) += -DCONFIG_PMIC_SETTING
++ccflags-$(CONFIG_VRF_DCDC_MODE) += -DCONFIG_VRF_DCDC_MODE
++ccflags-$(CONFIG_ROM_PATCH_EN) += -DCONFIG_ROM_PATCH_EN
++ccflags-$(CONFIG_WPA3_FOR_OLD_KERNEL) += -DCONFIG_WPA3_FOR_OLD_KERNEL
++ccflags-$(CONFIG_HE_FOR_OLD_KERNEL) += -DCONFIG_HE_FOR_OLD_KERNEL
++ccflags-$(CONFIG_VHT_FOR_OLD_KERNEL) += -DCONFIG_VHT_FOR_OLD_KERNEL
++ccflags-$(CONFIG_COEX) += -DCONFIG_COEX
++
++ccflags-y += -DCONFIG_RWNX_FULLMAC
++ccflags-y += -I$(src)/.
++ccflags-$(CONFIG_RWNX_RADAR) += -DCONFIG_RWNX_RADAR
++ccflags-$(CONFIG_RWNX_MON_DATA) += -DCONFIG_RWNX_MON_DATA
++ccflags-$(CONFIG_RWNX_MON_XMIT) += -DCONFIG_RWNX_MON_XMIT
++ccflags-$(CONFIG_RWNX_MON_RXFILTER) += -DCONFIG_RWNX_MON_RXFILTER
++ccflags-$(CONFIG_RWNX_BFMER) += -DCONFIG_RWNX_BFMER
++ccflags-$(CONFIG_RWNX_SPLIT_TX_BUF) += -DCONFIG_RWNX_SPLIT_TX_BUF
++ifeq ($(CONFIG_RWNX_SPLIT_TX_BUF), y)
++ccflags-$(CONFIG_RWNX_AMSDUS_TX) += -DCONFIG_RWNX_AMSDUS_TX
++endif
++ccflags-$(CONFIG_RWNX_DBG) += -DCONFIG_RWNX_DBG
++ccflags-$(CONFIG_RWNX_SW_PROFILING) += -DCONFIG_RWNX_SW_PROFILING
++ccflags-$(CONFIG_RWNX_MUMIMO_TX) += -DCONFIG_RWNX_MUMIMO_TX
++ccflags-$(CONFIG_RFTEST) += -DCONFIG_RFTEST
++
++
++ifeq ($(CONFIG_SDIO_SUPPORT), y)
++ccflags-y += -DAICWF_SDIO_SUPPORT
++endif
++
++ifeq ($(CONFIG_USB_SUPPORT), y)
++ccflags-y += -DAICWF_USB_SUPPORT
++endif
++ifeq ($(CONFIG_BR_SUPPORT), y)
++ccflags-y += -DCONFIG_BR_SUPPORT
++ccflags-y += '-DCONFIG_BR_SUPPORT_BRNAME="'$(BR_NAME)'"'
++endif
++
++ifeq ($(CONFIG_RWNX_MUMIMO_TX), y)
++ccflags-y += -DCONFIG_USER_MAX=2
++else
++ccflags-y += -DCONFIG_USER_MAX=1
++endif
++
++ifeq ($(CONFIG_RWNX_BCMC), y)
++ccflags-y += -DNX_TXQ_CNT=5
++else
++ccflags-y += -DNX_TXQ_CNT=4
++endif
++
++# For old kernel (<=3.19)
++ifeq ($(shell test $(VERSION) -lt 4 -a "$(CONFIG_VENDOR_RWNX)" = y ; echo $$?),0)
++ccflags-y += -DCONFIG_VENDOR_RWNX_VHT_NO80
++endif
++
++ccflags-$(CONFIG_RX_REORDER) += -DAICWF_RX_REORDER
++ccflags-$(CONFIG_ARP_OFFLOAD) += -DAICWF_ARP_OFFLOAD
++ccflags-$(CONFIG_USE_5G) += -DUSE_5G
++ccflags-$(CONFIG_RADAR_OR_IR_DETECT) += -DRADAR_OR_IR_DETECT
++ccflags-$(CONFIG_DOWNLOAD_FW) += -DCONFIG_DOWNLOAD_FW
++ccflags-$(CONFIG_USB_BT) += -DCONFIG_USB_BT
++ccflags-$(CONFIG_ALIGN_8BYTES) += -DCONFIG_ALIGN_8BYTES
++ccflags-$(CONFIG_TXRX_THREAD_PRIO) += -DCONFIG_TXRX_THREAD_PRIO
++ccflags-$(CONFIG_USB_ALIGN_DATA) += -DCONFIG_USB_ALIGN_DATA
++ccflags-$(CONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE) += -DCONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE
++ccflags-$(CONFIG_VENDOR_GPIO) += -DCONFIG_VENDOR_GPIO
++ccflags-y += -DDEFAULT_COUNTRY_CODE=""\$(CONFIG_COUNTRY_CODE)"\"
++ccflags-$(CONFIG_RX_NETIF_RECV_SKB) += -DCONFIG_RX_NETIF_RECV_SKB
++ccflags-$(CONFIG_USB_MSG_OUT_EP) += -DCONFIG_USB_MSG_OUT_EP
++ccflags-$(CONFIG_USB_MSG_IN_EP) += -DCONFIG_USB_MSG_IN_EP
++ccflags-$(CONFIG_USB_RX_AGGR) += -DCONFIG_USB_RX_AGGR
++ccflags-$(CONFIG_USB_TX_AGGR) += -DCONFIG_USB_TX_AGGR
++ccflags-$(CONFIG_USB_NO_TRANS_DMA_MAP) += -DCONFIG_USB_NO_TRANS_DMA_MAP
++ccflags-$(CONFIG_GPIO_WAKEUP) += -DCONFIG_GPIO_WAKEUP
++ccflags-$(CONFIG_CREATE_TRACE_POINTS) += -DCREATE_TRACE_POINTS
++ccflags-$(CONFIG_RX_TASKLET) += -DCONFIG_RX_TASKLET
++ccflags-$(CONFIG_TX_TASKLET) += -DCONFIG_TX_TASKLET
++ccflags-$(CONFIG_USE_USB_ZERO_PACKET) += -DCONFIG_USE_USB_ZERO_PACKET
++ccflags-$(CONFIG_STA_SCAN_WHEN_P2P_WORKING) += -DCONFIG_STA_SCAN_WHEN_P2P_WORKING
++ccflags-$(CONFIG_SUPPORT_REALTIME_CHANGE_MAC) += -DCONFIG_SUPPORT_REALTIME_CHANGE_MAC
++ccflags-$(CONFIG_SET_VENDOR_EXTENSION_IE) += -DCONFIG_SET_VENDOR_EXTENSION_IE
++ccflags-$(CONFIG_FWLOG_EN) += -DCONFIG_FWLOG_EN
++ccflags-$(CONFIG_FOR_IPCAM) += -DCONFIG_FOR_IPCAM
++ccflags-$(CONFIG_5M10M) += -DCONFIG_5M10M
++ccflags-$(CONFIG_USE_FW_REQUEST) += -DCONFIG_USE_FW_REQUEST
++ccflags-$(CONFIG_USE_P2P0) += -DCONFIG_USE_P2P0
++ccflags-$(CONFIG_ONE_TXQ) += -DCONFIG_ONE_TXQ
++ccflags-$(CONFIG_PER_STA_FC) += -DCONFIG_PER_STA_FC
++ccflags-$(CONFIG_PREALLOC_RX_SKB) += -DCONFIG_PREALLOC_RX_SKB
++ccflags-$(CONFIG_PREALLOC_TXQ) += -DCONFIG_PREALLOC_TXQ
++ccflags-$(CONFIG_USE_WIRELESS_EXT) += -DCONFIG_USE_WIRELESS_EXT
++ccflags-$(CONFIG_DPD) += -DCONFIG_DPD
++ccflags-$(CONFIG_GKI) += -DCONFIG_GKI
++ccflags-$(CONFIG_SCHED_SCAN) += -DCONFIG_SCHED_SCAN
++
++# Platform support list
++CONFIG_PLATFORM_ROCKCHIP ?= y
++CONFIG_PLATFORM_ALLWINNER ?= n
++CONFIG_PLATFORM_AMLOGIC ?= n
++CONFIG_PLATFORM_UBUNTU ?= n
++
++ifeq ($(CONFIG_PLATFORM_ROCKCHIP), y)
++#KDIR ?= /home/yaya/E/Rockchip/3288/Android5/kernel/
++#ARCH ?= arm
++#CROSS_COMPILE ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/rk3288/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
++#KDIR ?= /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/kernel
++#ARCH ?= arm
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
++#KDIR ?= /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/kernel
++#ARCH ?= arm
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/prebuilts/gcc/linux-x86/arm/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-
++KDIR ?= /home/yaya/E/Rockchip/3566/firefly/Android11.0/Firefly-RK356X_Android11.0_git_20210824/RK356X_Android11.0/kernel
++ARCH ?= arm64
++CROSS_COMPILE ?= /home/yaya/E/Rockchip/3566/Android11/rk3566_rk3568_android11_oranth/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++#ARCH ?= arm
++#CROSS_COMPILE ?=/home/yaya/D/Workspace/CyberQuantum/JinHaoYue/rk3288/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
++#KDIR ?=/home/yaya/D/Workspace/CyberQuantum/JinHaoYue/rk3288/kernel
++#KDIR := /home/yaya/E/Rockchip/3566/Android/kernel
++#ARCH ?= arm64
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3566/Android/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++#KDIR ?= /home/yaya/E/Rockchip/3566/Android11/rk3566_rk3568_android11_oranth/kernel
++#ARCH ?= arm64
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3566/Android11/rk3566_rk3568_android11_oranth/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++#KDIR ?= /home/yaya/D/samba1/aiden/SDK/Rockchip/3588/Android12/RK3588_Android12.0/kernel-5.10
++#ARCH ?= arm64
++#CROSS_COMPILE ?= /home/yaya/D/samba1/aiden/SDK/Rockchip/3588/Android12/RK3588_Android12.0/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++ccflags-y += -DANDROID_PLATFORM
++ccflags-y += -DCONFIG_PLATFORM_ROCKCHIP
++endif
++
++ifeq ($(CONFIG_PLATFORM_ALLWINNER), y)
++KDIR ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/kernel/linux-4.9
++ARCH ?= arm64
++CROSS_COMPILE ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/out/gcc-linaro-5.3.1-2016.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++ccflags-y += -DANDROID_PLATFORM
++endif
++
++ifeq ($(CONFIG_PLATFORM_AMLOGIC), y)
++ccflags-y += -DANDROID_PLATFORM
++ARCH ?= arm
++CROSS_COMPILE ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel-
++KDIR ?= /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/
++
++endif
++
++ifeq ($(CONFIG_PLATFORM_UBUNTU), y)
++KDIR ?= /lib/modules/$(shell uname -r)/build
++#KDIR ?= ~/D/Workspace/CyberQuantum/Linux/linux-4.15/
++PWD ?= $(shell pwd)
++KVER ?= $(shell uname -r)
++MODDESTDIR ?= /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800
++ARCH ?= x86_64
++CROSS_COMPILE ?=
++endif
++
++
++all: modules
++modules:
++# make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) LLVM=1 LLVM_IAS=1 modules
++ make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++install:
++ mkdir -p $(MODDESTDIR)
++ install -p -m 644 $(MODULE_NAME).ko $(MODDESTDIR)
++ /sbin/depmod -a ${KVER}
++
++uninstall:
++ rm -rfv $(MODDESTDIR)/$(MODULE_NAME).ko
++ /sbin/depmod -a ${KVER}
++
++clean:
++ rm -rf *.o *.ko *.o.* *.mod.* modules.* Module.* .a* .o* .*.o.* *.mod .tmp* .cache.mk .modules.order.cmd .Module.symvers.cmd
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aic_br_ext.c
+@@ -0,0 +1,1569 @@
++/******************************************************************************
++ *
++ * Copyright(c) 2007 - 2017 Realtek Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ *****************************************************************************/
++#define _AIC_BR_EXT_C_
++
++#ifdef __KERNEL__
++ #include <linux/if_arp.h>
++ #include <net/ip.h>
++ #include <net/ipx.h>
++ #include <linux/atalk.h>
++ #include <linux/udp.h>
++ #include <linux/if_pppox.h>
++ #include "rwnx_defs.h"
++#endif
++
++#ifdef CL_IPV6_PASS
++ #ifdef __KERNEL__
++ #include <linux/ipv6.h>
++ #include <linux/icmpv6.h>
++ #include <net/ndisc.h>
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
++ #include <net/ip6_checksum.h>
++ #else
++ #include <net/checksum.h>
++ #endif
++ #endif
++#endif
++
++#ifdef CONFIG_BR_SUPPORT
++
++/* #define BR_SUPPORT_DEBUG */
++
++#define NAT25_IPV4 01
++#define NAT25_IPV6 02
++#define NAT25_IPX 03
++#define NAT25_APPLE 04
++#define NAT25_PPPOE 05
++
++#define RTL_RELAY_TAG_LEN (ETH_ALEN)
++#define TAG_HDR_LEN 4
++
++#define MAGIC_CODE 0x8186
++#define MAGIC_CODE_LEN 2
++#define WAIT_TIME_PPPOE 5 /* waiting time for pppoe server in sec */
++
++/*-----------------------------------------------------------------
++ How database records network address:
++ 0 1 2 3 4 5 6 7 8 9 10
++ |----|----|----|----|----|----|----|----|----|----|----|
++ IPv4 |type| | IP addr |
++ IPX |type| Net addr | Node addr |
++ IPX |type| Net addr |Sckt addr|
++ Apple |type| Network |node|
++ PPPoE |type| SID | AC MAC |
++-----------------------------------------------------------------*/
++
++
++/* Find a tag in pppoe frame and return the pointer */
++static __inline__ unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned short type)
++{
++ unsigned char *cur_ptr, *start_ptr;
++ unsigned short tagLen, tagType;
++
++ start_ptr = cur_ptr = (unsigned char *)ph->tag;
++ while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
++ /* prevent unaligned access */
++ tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
++ tagLen = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]);
++ if (tagType == type)
++ return cur_ptr;
++ cur_ptr = cur_ptr + TAG_HDR_LEN + tagLen;
++ }
++ return 0;
++}
++
++
++static __inline__ int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
++{
++ struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
++ int data_len;
++
++ data_len = tag->tag_len + TAG_HDR_LEN;
++ if (skb_tailroom(skb) < data_len) {
++ printk("skb_tailroom() failed in add SID tag!\n");
++ return -1;
++ }
++
++ skb_put(skb, data_len);
++ /* make room for the new tag */
++ memmove(((unsigned char *)ph->tag + data_len), (unsigned char *)ph->tag, ntohs(ph->length));
++ ph->length = htons(ntohs(ph->length) + data_len);
++ memcpy((unsigned char *)ph->tag, tag, data_len);
++ return data_len;
++}
++
++static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
++{
++ int tail_len;
++ unsigned long end, tail;
++
++ if ((src + len) > skb_tail_pointer(skb) || skb->len < len)
++ return -1;
++
++ tail = (unsigned long)skb_tail_pointer(skb);
++ end = (unsigned long)src + len;
++ if (tail < end)
++ return -1;
++
++ tail_len = (int)(tail - end);
++ if (tail_len > 0)
++ memmove(src, src + len, tail_len);
++
++ skb_trim(skb, skb->len - len);
++ return 0;
++}
++
++static __inline__ unsigned long __nat25_timeout(struct rwnx_vif *vif)
++{
++ unsigned long timeout;
++
++ timeout = jiffies - NAT25_AGEING_TIME * HZ;
++
++ return timeout;
++}
++
++
++static __inline__ int __nat25_has_expired(struct rwnx_vif *vif,
++ struct nat25_network_db_entry *fdb)
++{
++ if (time_before_eq(fdb->ageing_timer, __nat25_timeout(vif)))
++ return 1;
++
++ return 0;
++}
++
++
++static __inline__ void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr,
++ unsigned int *ipAddr)
++{
++ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
++
++ networkAddr[0] = NAT25_IPV4;
++ memcpy(networkAddr + 7, (unsigned char *)ipAddr, 4);
++}
++
++
++static __inline__ void __nat25_generate_ipx_network_addr_with_node(unsigned char *networkAddr,
++ unsigned int *ipxNetAddr, unsigned char *ipxNodeAddr)
++{
++ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
++
++ networkAddr[0] = NAT25_IPX;
++ memcpy(networkAddr + 1, (unsigned char *)ipxNetAddr, 4);
++ memcpy(networkAddr + 5, ipxNodeAddr, 6);
++}
++
++
++static __inline__ void __nat25_generate_ipx_network_addr_with_socket(unsigned char *networkAddr,
++ unsigned int *ipxNetAddr, unsigned short *ipxSocketAddr)
++{
++ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
++
++ networkAddr[0] = NAT25_IPX;
++ memcpy(networkAddr + 1, (unsigned char *)ipxNetAddr, 4);
++ memcpy(networkAddr + 5, (unsigned char *)ipxSocketAddr, 2);
++}
++
++
++static __inline__ void __nat25_generate_apple_network_addr(unsigned char *networkAddr,
++ unsigned short *network, unsigned char *node)
++{
++ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
++
++ networkAddr[0] = NAT25_APPLE;
++ memcpy(networkAddr + 1, (unsigned char *)network, 2);
++ networkAddr[3] = *node;
++}
++
++
++static __inline__ void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
++ unsigned char *ac_mac, unsigned short *sid)
++{
++ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
++
++ networkAddr[0] = NAT25_PPPOE;
++ memcpy(networkAddr + 1, (unsigned char *)sid, 2);
++ memcpy(networkAddr + 3, (unsigned char *)ac_mac, 6);
++}
++
++
++#ifdef CL_IPV6_PASS
++static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
++ unsigned int *ipAddr)
++{
++ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
++
++ networkAddr[0] = NAT25_IPV6;
++ memcpy(networkAddr + 1, (unsigned char *)ipAddr, 16);
++}
++
++
++static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
++{
++ while (len > 0) {
++ if (*data == tag && *(data + 1) == len8b && len >= len8b * 8)
++ return data + 2;
++
++ len -= (*(data + 1)) * 8;
++ data += (*(data + 1)) * 8;
++ }
++ return NULL;
++}
++
++
++static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char *replace_mac)
++{
++ struct icmp6hdr *icmphdr = (struct icmp6hdr *)data;
++ unsigned char *mac;
++
++ if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
++ if (len >= 8) {
++ mac = scan_tlv(&data[8], len - 8, 1, 1);
++ if (mac) {
++ printk("Router Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
++ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
++ memcpy(mac, replace_mac, 6);
++ return 1;
++ }
++ }
++ } else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
++ if (len >= 16) {
++ mac = scan_tlv(&data[16], len - 16, 1, 1);
++ if (mac) {
++ printk("Router Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
++ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
++ memcpy(mac, replace_mac, 6);
++ return 1;
++ }
++ }
++ } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
++ if (len >= 24) {
++ mac = scan_tlv(&data[24], len - 24, 1, 1);
++ if (mac) {
++ printk("Neighbor Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
++ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
++ memcpy(mac, replace_mac, 6);
++ return 1;
++ }
++ }
++ } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
++ if (len >= 24) {
++ mac = scan_tlv(&data[24], len - 24, 2, 1);
++ if (mac) {
++ printk("Neighbor Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
++ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
++ memcpy(mac, replace_mac, 6);
++ return 1;
++ }
++ }
++ } else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
++ if (len >= 40) {
++ mac = scan_tlv(&data[40], len - 40, 2, 1);
++ if (mac) {
++ printk("Redirect, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
++ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
++ memcpy(mac, replace_mac, 6);
++ return 1;
++ }
++ }
++ }
++ return 0;
++}
++
++#ifdef SUPPORT_RX_UNI2MCAST
++static void convert_ipv6_mac_to_mc(struct sk_buff *skb)
++{
++ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN);
++ unsigned char *dst_mac = skb->data;
++
++ /* dst_mac[0] = 0xff; */
++ /* dst_mac[1] = 0xff; */
++ /* modified by qinjunjie: IPv6 multicast MAC address is 33:33:xx:xx:xx:xx */
++ dst_mac[0] = 0x33;
++ dst_mac[1] = 0x33;
++ memcpy(&dst_mac[2], &iph->daddr.s6_addr32[3], 4);
++#if defined(__LINUX_2_6__)
++ /* modified by qinjunjie: warning, do not remove the next line */
++ skb->pkt_type = PACKET_MULTICAST;
++#endif
++}
++#endif /* SUPPORT_RX_UNI2MCAST */
++#endif /* CL_IPV6_PASS */
++
++
++static __inline__ int __nat25_network_hash(unsigned char *networkAddr)
++{
++ if (networkAddr[0] == NAT25_IPV4) {
++ unsigned long x;
++
++ x = networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10];
++
++ return x & (NAT25_HASH_SIZE - 1);
++ } else if (networkAddr[0] == NAT25_IPX) {
++ unsigned long x;
++
++ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^
++ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10];
++
++ return x & (NAT25_HASH_SIZE - 1);
++ } else if (networkAddr[0] == NAT25_APPLE) {
++ unsigned long x;
++
++ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3];
++
++ return x & (NAT25_HASH_SIZE - 1);
++ } else if (networkAddr[0] == NAT25_PPPOE) {
++ unsigned long x;
++
++ x = networkAddr[0] ^ networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8];
++
++ return x & (NAT25_HASH_SIZE - 1);
++ }
++#ifdef CL_IPV6_PASS
++ else if (networkAddr[0] == NAT25_IPV6) {
++ unsigned long x;
++
++ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^
++ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10] ^
++ networkAddr[11] ^ networkAddr[12] ^ networkAddr[13] ^ networkAddr[14] ^ networkAddr[15] ^
++ networkAddr[16];
++
++ return x & (NAT25_HASH_SIZE - 1);
++ }
++#endif
++ else {
++ unsigned long x = 0;
++ int i;
++
++ for (i = 0; i < MAX_NETWORK_ADDR_LEN; i++)
++ x ^= networkAddr[i];
++
++ return x & (NAT25_HASH_SIZE - 1);
++ }
++}
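++
++/* Note: the "& (NAT25_HASH_SIZE - 1)" masking above assumes NAT25_HASH_SIZE
++ * is a power of two, so the XOR of the address bytes maps onto a bucket index.
++ */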
++
++
++static __inline__ void __network_hash_link(struct rwnx_vif *vif,
++ struct nat25_network_db_entry *ent, int hash)
++{
++ /* Caller must _enter_critical_bh already! */
++ /* _irqL irqL; */
++ /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
++
++ ent->next_hash = vif->nethash[hash];
++ if (ent->next_hash != NULL)
++ ent->next_hash->pprev_hash = &ent->next_hash;
++ vif->nethash[hash] = ent;
++ ent->pprev_hash = &vif->nethash[hash];
++
++ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
++}
++
++
++static __inline__ void __network_hash_unlink(struct nat25_network_db_entry *ent)
++{
++ /* Caller must _enter_critical_bh already! */
++ /* _irqL irqL; */
++ /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
++
++ *(ent->pprev_hash) = ent->next_hash;
++ if (ent->next_hash != NULL)
++ ent->next_hash->pprev_hash = ent->pprev_hash;
++ ent->next_hash = NULL;
++ ent->pprev_hash = NULL;
++
++ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
++}
++
++
++static int __nat25_db_network_lookup_and_replace(struct rwnx_vif *vif,
++ struct sk_buff *skb, unsigned char *networkAddr)
++{
++ struct nat25_network_db_entry *db;
++ spin_lock_bh(&vif->br_ext_lock);
++
++ db = vif->nethash[__nat25_network_hash(networkAddr)];
++ while (db != NULL) {
++ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
++ if (!__nat25_has_expired(vif, db)) {
++ /* replace the destination mac address */
++ memcpy(skb->data, db->macAddr, ETH_ALEN);
++ atomic_inc(&db->use_count);
++
++#ifdef CL_IPV6_PASS
++ printk("NAT25: Lookup M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
++ "%02x%02x%02x%02x%02x%02x\n",
++ db->macAddr[0],
++ db->macAddr[1],
++ db->macAddr[2],
++ db->macAddr[3],
++ db->macAddr[4],
++ db->macAddr[5],
++ db->networkAddr[0],
++ db->networkAddr[1],
++ db->networkAddr[2],
++ db->networkAddr[3],
++ db->networkAddr[4],
++ db->networkAddr[5],
++ db->networkAddr[6],
++ db->networkAddr[7],
++ db->networkAddr[8],
++ db->networkAddr[9],
++ db->networkAddr[10],
++ db->networkAddr[11],
++ db->networkAddr[12],
++ db->networkAddr[13],
++ db->networkAddr[14],
++ db->networkAddr[15],
++ db->networkAddr[16]);
++#else
++ printk("NAT25: Lookup M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
++ db->macAddr[0],
++ db->macAddr[1],
++ db->macAddr[2],
++ db->macAddr[3],
++ db->macAddr[4],
++ db->macAddr[5],
++ db->networkAddr[0],
++ db->networkAddr[1],
++ db->networkAddr[2],
++ db->networkAddr[3],
++ db->networkAddr[4],
++ db->networkAddr[5],
++ db->networkAddr[6],
++ db->networkAddr[7],
++ db->networkAddr[8],
++ db->networkAddr[9],
++ db->networkAddr[10]);
++#endif
++ }
++ spin_unlock_bh(&vif->br_ext_lock);
++ return 1;
++ }
++
++ db = db->next_hash;
++ }
++
++ spin_unlock_bh(&vif->br_ext_lock);
++ return 0;
++}
++
++
++static void __nat25_db_network_insert(struct rwnx_vif *vif,
++ unsigned char *macAddr, unsigned char *networkAddr)
++{
++ struct nat25_network_db_entry *db;
++ int hash;
++ spin_lock_bh(&vif->br_ext_lock);
++
++ hash = __nat25_network_hash(networkAddr);
++ db = vif->nethash[hash];
++
++ while (db != NULL) {
++ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
++ memcpy(db->macAddr, macAddr, ETH_ALEN);
++ db->ageing_timer = jiffies;
++ spin_unlock_bh(&vif->br_ext_lock);
++ return;
++ }
++
++ db = db->next_hash;
++ }
++
++ db = (struct nat25_network_db_entry *)kmalloc(sizeof(*db), in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
++ if (db == NULL) {
++ spin_unlock_bh(&vif->br_ext_lock);
++ return;
++ }
++
++ memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
++ memcpy(db->macAddr, macAddr, ETH_ALEN);
++ atomic_set(&db->use_count, 1);
++ db->ageing_timer = jiffies;
++
++ __network_hash_link(vif, db, hash);
++
++ spin_unlock_bh(&vif->br_ext_lock);
++}
++
++
++static void __nat25_db_print(struct rwnx_vif *vif)
++{
++ spin_lock_bh(&vif->br_ext_lock);
++
++#ifdef BR_SUPPORT_DEBUG
++ static int counter = 0;
++ int i, j;
++ struct nat25_network_db_entry *db;
++
++ counter++;
++ if ((counter % 16) != 0)
++ return;
++
++ for (i = 0, j = 0; i < NAT25_HASH_SIZE; i++) {
++ db = vif->nethash[i];
++
++ while (db != NULL) {
++#ifdef CL_IPV6_PASS
++ printk("NAT25: DB(%d) H(%02d) C(%d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
++ "%02x%02x%02x%02x%02x%02x\n",
++ j,
++ i,
++ atomic_read(&db->use_count),
++ db->macAddr[0],
++ db->macAddr[1],
++ db->macAddr[2],
++ db->macAddr[3],
++ db->macAddr[4],
++ db->macAddr[5],
++ db->networkAddr[0],
++ db->networkAddr[1],
++ db->networkAddr[2],
++ db->networkAddr[3],
++ db->networkAddr[4],
++ db->networkAddr[5],
++ db->networkAddr[6],
++ db->networkAddr[7],
++ db->networkAddr[8],
++ db->networkAddr[9],
++ db->networkAddr[10],
++ db->networkAddr[11],
++ db->networkAddr[12],
++ db->networkAddr[13],
++ db->networkAddr[14],
++ db->networkAddr[15],
++ db->networkAddr[16]);
++#else
++ printk("NAT25: DB(%d) H(%02d) C(%d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
++ j,
++ i,
++ atomic_read(&db->use_count),
++ db->macAddr[0],
++ db->macAddr[1],
++ db->macAddr[2],
++ db->macAddr[3],
++ db->macAddr[4],
++ db->macAddr[5],
++ db->networkAddr[0],
++ db->networkAddr[1],
++ db->networkAddr[2],
++ db->networkAddr[3],
++ db->networkAddr[4],
++ db->networkAddr[5],
++ db->networkAddr[6],
++ db->networkAddr[7],
++ db->networkAddr[8],
++ db->networkAddr[9],
++ db->networkAddr[10]);
++#endif
++ j++;
++
++ db = db->next_hash;
++ }
++ }
++#endif
++
++ spin_unlock_bh(&vif->br_ext_lock);
++}
++
++
++
++
++/*
++ * NAT2.5 interface
++ */
++
++void nat25_db_cleanup(struct rwnx_vif *vif)
++{
++ int i;
++ spin_lock_bh(&vif->br_ext_lock);
++
++ for (i = 0; i < NAT25_HASH_SIZE; i++) {
++ struct nat25_network_db_entry *f;
++ f = vif->nethash[i];
++ while (f != NULL) {
++ struct nat25_network_db_entry *g;
++
++ g = f->next_hash;
++ if (vif->scdb_entry == f) {
++ memset(vif->scdb_mac, 0, ETH_ALEN);
++ memset(vif->scdb_ip, 0, 4);
++ vif->scdb_entry = NULL;
++ }
++ __network_hash_unlink(f);
++ kfree(f);
++
++ f = g;
++ }
++ }
++
++ spin_unlock_bh(&vif->br_ext_lock);
++}
++
++
++void nat25_db_expire(struct rwnx_vif *vif)
++{
++ int i;
++ spin_lock_bh(&vif->br_ext_lock);
++
++ /* if(!priv->ethBrExtInfo.nat25_disable) */
++ {
++ for (i = 0; i < NAT25_HASH_SIZE; i++) {
++ struct nat25_network_db_entry *f;
++ f = vif->nethash[i];
++
++ while (f != NULL) {
++ struct nat25_network_db_entry *g;
++ g = f->next_hash;
++
++ if (__nat25_has_expired(vif, f)) {
++ if (atomic_dec_and_test(&f->use_count)) {
++#ifdef BR_SUPPORT_DEBUG
++#ifdef CL_IPV6_PASS
++ panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
++ "%02x%02x%02x%02x%02x%02x\n",
++ i,
++ f->macAddr[0],
++ f->macAddr[1],
++ f->macAddr[2],
++ f->macAddr[3],
++ f->macAddr[4],
++ f->macAddr[5],
++ f->networkAddr[0],
++ f->networkAddr[1],
++ f->networkAddr[2],
++ f->networkAddr[3],
++ f->networkAddr[4],
++ f->networkAddr[5],
++ f->networkAddr[6],
++ f->networkAddr[7],
++ f->networkAddr[8],
++ f->networkAddr[9],
++ f->networkAddr[10],
++ f->networkAddr[11],
++ f->networkAddr[12],
++ f->networkAddr[13],
++ f->networkAddr[14],
++ f->networkAddr[15],
++ f->networkAddr[16]);
++#else
++
++ panic_printk("NAT25 Expire H(%02d) M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
++ i,
++ f->macAddr[0],
++ f->macAddr[1],
++ f->macAddr[2],
++ f->macAddr[3],
++ f->macAddr[4],
++ f->macAddr[5],
++ f->networkAddr[0],
++ f->networkAddr[1],
++ f->networkAddr[2],
++ f->networkAddr[3],
++ f->networkAddr[4],
++ f->networkAddr[5],
++ f->networkAddr[6],
++ f->networkAddr[7],
++ f->networkAddr[8],
++ f->networkAddr[9],
++ f->networkAddr[10]);
++#endif
++#endif
++ if (vif->scdb_entry == f) {
++ memset(vif->scdb_mac, 0, ETH_ALEN);
++ memset(vif->scdb_ip, 0, 4);
++ vif->scdb_entry = NULL;
++ }
++ __network_hash_unlink(f);
++ kfree(f);
++ }
++ }
++
++ f = g;
++ }
++ }
++ }
++
++ spin_unlock_bh(&vif->br_ext_lock);
++}
++
++
++#ifdef SUPPORT_TX_MCAST2UNI
++static int checkIPMcAndReplace(struct rwnx_vif *vif, struct sk_buff *skb, unsigned int *dst_ip)
++{
++ struct stat_info *pstat;
++ struct list_head *phead, *plist;
++ int i;
++
++ phead = &vif->asoc_list;
++ plist = phead->next;
++
++ while (plist != phead) {
++ pstat = list_entry(plist, struct stat_info, asoc_list);
++ plist = plist->next;
++
++ if (pstat->ipmc_num == 0)
++ continue;
++
++ for (i = 0; i < MAX_IP_MC_ENTRY; i++) {
++ if (pstat->ipmc[i].used && !memcmp(&pstat->ipmc[i].mcmac[3], ((unsigned char *)dst_ip) + 1, 3)) {
++ memcpy(skb->data, pstat->ipmc[i].mcmac, ETH_ALEN);
++ return 1;
++ }
++ }
++ }
++ return 0;
++}
++#endif
++
++int nat25_db_handle(struct rwnx_vif *vif, struct sk_buff *skb, int method)
++{
++ unsigned short protocol;
++ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
++
++ if (skb == NULL)
++ return -1;
++
++ if ((method <= NAT25_MIN) || (method >= NAT25_MAX))
++ return -1;
++
++ protocol = *((unsigned short *)(skb->data + 2 * ETH_ALEN));
++
++ /*---------------------------------------------------*/
++ /* Handle IP frame */
++ /*---------------------------------------------------*/
++ if (protocol == __constant_htons(ETH_P_IP)) {
++ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
++
++ if (((unsigned char *)(iph) + (iph->ihl << 2)) >= (skb->data + ETH_HLEN + skb->len)) {
++ printk("NAT25: malformed IP packet !\n");
++ return -1;
++ }
++
++ switch (method) {
++ case NAT25_CHECK:
++ return -1;
++
++ case NAT25_INSERT: {
++ /* some multicast packets have an all-zero source IP; other cases may be illegal */
++ /* in class A, B, C, a host address of all zeros or all ones is illegal */
++ if (iph->saddr == 0)
++ return 0;
++ printk("NAT25: Insert IP, SA=%08x, DA=%08x\n", iph->saddr, iph->daddr);
++ __nat25_generate_ipv4_network_addr(networkAddr, &iph->saddr);
++ /* record the source IP address and source MAC address in the db */
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++
++ __nat25_db_print(vif);
++ }
++ return 0;
++
++ case NAT25_LOOKUP: {
++ printk("NAT25: Lookup IP, SA=%08x, DA=%08x\n", iph->saddr, iph->daddr);
++#ifdef SUPPORT_TX_MCAST2UNI
++ if (vif->pshare->rf_ft_var.mc2u_disable ||
++ ((((OPMODE & (WIFI_STATION_STATE | WIFI_ASOC_STATE))
++ == (WIFI_STATION_STATE | WIFI_ASOC_STATE)) &&
++ !checkIPMcAndReplace(vif, skb, &iph->daddr)) ||
++ (OPMODE & WIFI_ADHOC_STATE)))
++#endif
++ {
++ __nat25_generate_ipv4_network_addr(networkAddr, &iph->daddr);
++
++ if (!__nat25_db_network_lookup_and_replace(vif, skb, networkAddr)) {
++ if (*((unsigned char *)&iph->daddr + 3) == 0xff) {
++ /* L2 is unicast but L3 is broadcast, make L2 broadcast as well */
++ printk("NAT25: Set DA as broadcast\n");
++ memset(skb->data, 0xff, ETH_ALEN);
++ } else {
++ /* forward unknown IP packets to the upper TCP/IP stack */
++ printk("NAT25: Replace DA with BR's MAC\n");
++ if ((*(u32 *)vif->br_mac) == 0 && (*(u16 *)(vif->br_mac + 4)) == 0) {
++ void netdev_br_init(struct net_device *netdev);
++ printk("Re-init netdev_br_init() due to br_mac==0!\n");
++ netdev_br_init(vif->ndev);
++ }
++ memcpy(skb->data, vif->br_mac, ETH_ALEN);
++ }
++ }
++ }
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /*---------------------------------------------------*/
++ /* Handle ARP frame */
++ /*---------------------------------------------------*/
++ else if (protocol == __constant_htons(ETH_P_ARP)) {
++ struct arphdr *arp = (struct arphdr *)(skb->data + ETH_HLEN);
++ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
++ unsigned int *sender, *target;
++
++ if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
++ printk("NAT25: arp protocol unknown (%4x)!\n", htons(arp->ar_pro));
++ return -1;
++ }
++
++ switch (method) {
++ case NAT25_CHECK:
++ return 0; /* skb_copy for all ARP frames */
++
++ case NAT25_INSERT: {
++ printk("NAT25: Insert ARP, MAC=%02x%02x%02x%02x%02x%02x\n", arp_ptr[0],
++ arp_ptr[1], arp_ptr[2], arp_ptr[3], arp_ptr[4], arp_ptr[5]);
++
++ /* change the ARP sender MAC address to the wlan STA address */
++ memcpy(arp_ptr, vif->ndev->dev_addr, ETH_ALEN);
++
++ arp_ptr += arp->ar_hln;
++ sender = (unsigned int *)arp_ptr;
++
++ __nat25_generate_ipv4_network_addr(networkAddr, sender);
++
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++
++ __nat25_db_print(vif);
++ }
++ return 0;
++
++ case NAT25_LOOKUP: {
++ printk("NAT25: Lookup ARP\n");
++
++ arp_ptr += arp->ar_hln;
++ sender = (unsigned int *)arp_ptr;
++ arp_ptr += (arp->ar_hln + arp->ar_pln);
++ target = (unsigned int *)arp_ptr;
++
++ __nat25_generate_ipv4_network_addr(networkAddr, target);
++
++ __nat25_db_network_lookup_and_replace(vif, skb, networkAddr);
++
++ /* change the ARP target MAC address to the lookup result */
++ arp_ptr = (unsigned char *)(arp + 1);
++ arp_ptr += (arp->ar_hln + arp->ar_pln);
++ memcpy(arp_ptr, skb->data, ETH_ALEN);
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /*---------------------------------------------------*/
++ /* Handle IPX and Apple Talk frame */
++ /*---------------------------------------------------*/
++ else if ((protocol == __constant_htons(ETH_P_IPX)) ||
++ (protocol == __constant_htons(ETH_P_ATALK)) ||
++ (protocol == __constant_htons(ETH_P_AARP))) {
++ unsigned char ipx_header[2] = {0xFF, 0xFF};
++ struct ipxhdr *ipx = NULL;
++ struct elapaarp *ea = NULL;
++ struct ddpehdr *ddp = NULL;
++ unsigned char *framePtr = skb->data + ETH_HLEN;
++
++ if (protocol == __constant_htons(ETH_P_IPX)) {
++ printk("NAT25: Protocol=IPX (Ethernet II)\n");
++ ipx = (struct ipxhdr *)framePtr;
++ } else { /* if(protocol <= __constant_htons(ETH_FRAME_LEN)) */
++ if (!memcmp(ipx_header, framePtr, 2)) {
++ printk("NAT25: Protocol=IPX (Ethernet 802.3)\n");
++ ipx = (struct ipxhdr *)framePtr;
++ } else {
++ unsigned char ipx_8022_type = 0xE0;
++ unsigned char snap_8022_type = 0xAA;
++
++ if (*framePtr == snap_8022_type) {
++ unsigned char ipx_snap_id[5] = {0x0, 0x0, 0x0, 0x81, 0x37}; /* IPX SNAP ID */
++ unsigned char aarp_snap_id[5] = {0x00, 0x00, 0x00, 0x80, 0xF3}; /* Apple Talk AARP SNAP ID */
++ unsigned char ddp_snap_id[5] = {0x08, 0x00, 0x07, 0x80, 0x9B}; /* Apple Talk DDP SNAP ID */
++
++ framePtr += 3; /* eliminate the 802.2 header */
++
++ if (!memcmp(ipx_snap_id, framePtr, 5)) {
++ framePtr += 5; /* eliminate the SNAP header */
++
++ printk("NAT25: Protocol=IPX (Ethernet SNAP)\n");
++ ipx = (struct ipxhdr *)framePtr;
++ } else if (!memcmp(aarp_snap_id, framePtr, 5)) {
++ framePtr += 5; /* eliminate the SNAP header */
++
++ ea = (struct elapaarp *)framePtr;
++ } else if (!memcmp(ddp_snap_id, framePtr, 5)) {
++ framePtr += 5; /* eliminate the SNAP header */
++
++ ddp = (struct ddpehdr *)framePtr;
++ } else {
++ printk("NAT25: Protocol=Ethernet SNAP %02x%02x%02x%02x%02x\n", framePtr[0],
++ framePtr[1], framePtr[2], framePtr[3], framePtr[4]);
++ return -1;
++ }
++ } else if (*framePtr == ipx_8022_type) {
++ framePtr += 3; /* eliminate the 802.2 header */
++
++ if (!memcmp(ipx_header, framePtr, 2)) {
++ printk("NAT25: Protocol=IPX (Ethernet 802.2)\n");
++ ipx = (struct ipxhdr *)framePtr;
++ } else
++ return -1;
++ }
++ }
++ }
++
++ /* IPX */
++ if (ipx != NULL) {
++ switch (method) {
++ case NAT25_CHECK:
++ if (!memcmp(skb->data + ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) {
++ printk("NAT25: Check IPX skb_copy\n");
++ return 0;
++ }
++ return -1;
++
++ case NAT25_INSERT: {
++ printk("NAT25: Insert IPX, Dest=%08x,%02x%02x%02x%02x%02x%02x,%04x Source=%08x,%02x%02x%02x%02x%02x%02x,%04x\n",
++ ipx->ipx_dest.net,
++ ipx->ipx_dest.node[0],
++ ipx->ipx_dest.node[1],
++ ipx->ipx_dest.node[2],
++ ipx->ipx_dest.node[3],
++ ipx->ipx_dest.node[4],
++ ipx->ipx_dest.node[5],
++ ipx->ipx_dest.sock,
++ ipx->ipx_source.net,
++ ipx->ipx_source.node[0],
++ ipx->ipx_source.node[1],
++ ipx->ipx_source.node[2],
++ ipx->ipx_source.node[3],
++ ipx->ipx_source.node[4],
++ ipx->ipx_source.node[5],
++ ipx->ipx_source.sock);
++
++ if (!memcmp(skb->data + ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) {
++ printk("NAT25: Use IPX Net, and Socket as network addr\n");
++
++ __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_source.net, &ipx->ipx_source.sock);
++
++ /* change IPX source node addr to wlan STA address */
++ memcpy(ipx->ipx_source.node, vif->ndev->dev_addr, ETH_ALEN);
++ } else
++ __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_source.net, ipx->ipx_source.node);
++
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++
++ __nat25_db_print(vif);
++ }
++ return 0;
++
++ case NAT25_LOOKUP: {
++ if (!memcmp(vif->ndev->dev_addr, ipx->ipx_dest.node, ETH_ALEN)) {
++ printk("NAT25: Lookup IPX, Modify Destination IPX Node addr\n");
++
++ __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_dest.net, &ipx->ipx_dest.sock);
++
++ __nat25_db_network_lookup_and_replace(vif, skb, networkAddr);
++
++ /* replace IPX destination node addr with Lookup destination MAC addr */
++ memcpy(ipx->ipx_dest.node, skb->data, ETH_ALEN);
++ } else {
++ __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_dest.net, ipx->ipx_dest.node);
++
++ __nat25_db_network_lookup_and_replace(vif, skb, networkAddr);
++ }
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /* AARP */
++ else if (ea != NULL) {
++ /* Sanity check fields. */
++ if (ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN) {
++ printk("NAT25: Appletalk AARP Sanity check fail!\n");
++ return -1;
++ }
++
++ switch (method) {
++ case NAT25_CHECK:
++ return 0;
++
++ case NAT25_INSERT: {
++ /* change the AARP source MAC address to the wlan STA address */
++ memcpy(ea->hw_src, vif->ndev->dev_addr, ETH_ALEN);
++
++ printk("NAT25: Insert AARP, Source=%d,%d Destination=%d,%d\n",
++ ea->pa_src_net,
++ ea->pa_src_node,
++ ea->pa_dst_net,
++ ea->pa_dst_node);
++
++ __nat25_generate_apple_network_addr(networkAddr, &ea->pa_src_net, &ea->pa_src_node);
++
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++
++ __nat25_db_print(vif);
++ }
++ return 0;
++
++ case NAT25_LOOKUP: {
++ printk("NAT25: Lookup AARP, Source=%d,%d Destination=%d,%d\n",
++ ea->pa_src_net,
++ ea->pa_src_node,
++ ea->pa_dst_net,
++ ea->pa_dst_node);
++
++ __nat25_generate_apple_network_addr(networkAddr, &ea->pa_dst_net, &ea->pa_dst_node);
++
++ __nat25_db_network_lookup_and_replace(vif, skb, networkAddr);
++
++ /* change the AARP destination MAC address to the lookup result */
++ memcpy(ea->hw_dst, skb->data, ETH_ALEN);
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /* DDP */
++ else if (ddp != NULL) {
++ switch (method) {
++ case NAT25_CHECK:
++ return -1;
++
++ case NAT25_INSERT: {
++ printk("NAT25: Insert DDP, Source=%d,%d Destination=%d,%d\n",
++ ddp->deh_snet,
++ ddp->deh_snode,
++ ddp->deh_dnet,
++ ddp->deh_dnode);
++
++ __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_snet, &ddp->deh_snode);
++
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++
++ __nat25_db_print(vif);
++ }
++ return 0;
++
++ case NAT25_LOOKUP: {
++ printk("NAT25: Lookup DDP, Source=%d,%d Destination=%d,%d\n",
++ ddp->deh_snet,
++ ddp->deh_snode,
++ ddp->deh_dnet,
++ ddp->deh_dnode);
++
++ __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_dnet, &ddp->deh_dnode);
++
++ __nat25_db_network_lookup_and_replace(vif, skb, networkAddr);
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ return -1;
++ }
++
++ /*---------------------------------------------------*/
++ /* Handle PPPoE frame */
++ /*---------------------------------------------------*/
++ else if ((protocol == __constant_htons(ETH_P_PPP_DISC)) ||
++ (protocol == __constant_htons(ETH_P_PPP_SES))) {
++ struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
++ unsigned short *pMagic;
++
++ switch (method) {
++ case NAT25_CHECK:
++ if (ph->sid == 0)
++ return 0;
++ return 1;
++
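++ /*
++ * Discovery phase (PADI/PADR): optionally insert a relay-SID tag that
++ * carries a magic code plus the client's MAC, so the matching PADO/PADS
++ * reply can be steered back to that client. Without the relay tag only
++ * a single in-progress PPPoE connection is tracked via pppoe_addr.
++ * Session phase: learn the (MAC, session id) pair into the database.
++ */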
++ case NAT25_INSERT:
++ if (ph->sid == 0) { /* Discovery phase according to tag */
++ if (ph->code == PADI_CODE || ph->code == PADR_CODE) {
++ if (vif->ethBrExtInfo.addPPPoETag) {
++ struct pppoe_tag *tag, *pOldTag;
++ unsigned char tag_buf[40];
++ int old_tag_len = 0;
++
++ tag = (struct pppoe_tag *)tag_buf;
++ pOldTag = (struct pppoe_tag *)__nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
++ if (pOldTag) { /* if SID existed, copy old value and delete it */
++ old_tag_len = ntohs(pOldTag->tag_len);
++ if (old_tag_len + TAG_HDR_LEN + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN > sizeof(tag_buf)) {
++ printk("SID tag length too long!\n");
++ return -1;
++ }
++
++ memcpy(tag->tag_data + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN,
++ pOldTag->tag_data, old_tag_len);
++
++ if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN + old_tag_len) < 0) {
++ printk("call skb_pull_and_merge() failed in PADI/R packet!\n");
++ return -1;
++ }
++ ph->length = htons(ntohs(ph->length) - TAG_HDR_LEN - old_tag_len);
++ }
++
++ tag->tag_type = PTT_RELAY_SID;
++ tag->tag_len = htons(MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN + old_tag_len);
++
++ /* insert the magic_code+client mac in relay tag */
++ pMagic = (unsigned short *)tag->tag_data;
++ *pMagic = htons(MAGIC_CODE);
++ memcpy(tag->tag_data + MAGIC_CODE_LEN, skb->data + ETH_ALEN, ETH_ALEN);
++
++ /* Add relay tag */
++ if (__nat25_add_pppoe_tag(skb, tag) < 0)
++ return -1;
++
++ printk("NAT25: Insert PPPoE, forward %s packet\n",
++ (ph->code == PADI_CODE ? "PADI" : "PADR"));
++ } else { /* not add relay tag */
++ if (vif->pppoe_connection_in_progress &&
++ memcmp(skb->data + ETH_ALEN, vif->pppoe_addr, ETH_ALEN)) {
++ printk("Discard PPPoE packet because another PPPoE connection is in progress!\n");
++ return -2;
++ }
++
++ if (vif->pppoe_connection_in_progress == 0)
++ memcpy(vif->pppoe_addr, skb->data + ETH_ALEN, ETH_ALEN);
++
++ vif->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
++ }
++ } else
++ return -1;
++ } else { /* session phase */
++ printk("NAT25: Insert PPPoE, insert session packet to %s\n", skb->dev->name);
++
++ __nat25_generate_pppoe_network_addr(networkAddr, skb->data, &(ph->sid));
++
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++
++ __nat25_db_print(vif);
++
++ if (!vif->ethBrExtInfo.addPPPoETag &&
++ vif->pppoe_connection_in_progress &&
++ !memcmp(skb->data + ETH_ALEN, vif->pppoe_addr, ETH_ALEN))
++ vif->pppoe_connection_in_progress = 0;
++ }
++ return 0;
++
++ case NAT25_LOOKUP:
++ if (ph->code == PADO_CODE || ph->code == PADS_CODE) {
++ if (vif->ethBrExtInfo.addPPPoETag) {
++ struct pppoe_tag *tag;
++ unsigned char *ptr;
++ unsigned short tagType, tagLen;
++ int offset = 0;
++
++ ptr = __nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
++ if (ptr == NULL) {
++ printk("Failed to find PTT_RELAY_SID in PADO!\n");
++ return -1;
++ }
++
++ tag = (struct pppoe_tag *)ptr;
++ tagType = (unsigned short)((ptr[0] << 8) + ptr[1]);
++ tagLen = (unsigned short)((ptr[2] << 8) + ptr[3]);
++
++ if ((tagType != ntohs(PTT_RELAY_SID)) || (tagLen < (MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN))) {
++ printk("Invalid PTT_RELAY_SID tag length [%d]!\n", tagLen);
++ return -1;
++ }
++
++ pMagic = (unsigned short *)tag->tag_data;
++ if (ntohs(*pMagic) != MAGIC_CODE) {
++ printk("Can't find MAGIC_CODE in %s packet!\n",
++ (ph->code == PADO_CODE ? "PADO" : "PADS"));
++ return -1;
++ }
++
++ memcpy(skb->data, tag->tag_data + MAGIC_CODE_LEN, ETH_ALEN);
++
++ if (tagLen > MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN)
++ offset = TAG_HDR_LEN;
++
++ if (skb_pull_and_merge(skb, ptr + offset, TAG_HDR_LEN + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN - offset) < 0) {
++ printk("call skb_pull_and_merge() failed in PADO packet!\n");
++ return -1;
++ }
++ ph->length = htons(ntohs(ph->length) - (TAG_HDR_LEN + MAGIC_CODE_LEN + RTL_RELAY_TAG_LEN - offset));
++ if (offset > 0)
++ tag->tag_len = htons(tagLen - MAGIC_CODE_LEN - RTL_RELAY_TAG_LEN);
++
++ printk("NAT25: Lookup PPPoE, forward %s Packet from %s\n",
++ (ph->code == PADO_CODE ? "PADO" : "PADS"), skb->dev->name);
++ } else { /* not add relay tag */
++ if (!vif->pppoe_connection_in_progress) {
++ printk("Discard PPPoE packet because no connection is in progress!\n");
++ return -1;
++ }
++ memcpy(skb->data, vif->pppoe_addr, ETH_ALEN);
++ vif->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
++ }
++ } else {
++ if (ph->sid != 0) {
++ printk("NAT25: Lookup PPPoE, lookup session packet from %s\n", skb->dev->name);
++ __nat25_generate_pppoe_network_addr(networkAddr, skb->data + ETH_ALEN, &(ph->sid));
++
++ __nat25_db_network_lookup_and_replace(vif, skb, networkAddr);
++
++ __nat25_db_print(vif);
++ } else
++ return -1;
++
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /*---------------------------------------------------*/
++ /* Handle EAP frame */
++ /*---------------------------------------------------*/
++ else if (protocol == __constant_htons(0x888e)) {
++ switch (method) {
++ case NAT25_CHECK:
++ return -1;
++
++ case NAT25_INSERT:
++ return 0;
++
++ case NAT25_LOOKUP:
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /*---------------------------------------------------*/
++ /* Handle C-Media proprietary frame */
++ /*---------------------------------------------------*/
++ else if ((protocol == __constant_htons(0xe2ae)) ||
++ (protocol == __constant_htons(0xe2af))) {
++ switch (method) {
++ case NAT25_CHECK:
++ return -1;
++
++ case NAT25_INSERT:
++ return 0;
++
++ case NAT25_LOOKUP:
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++
++ /*---------------------------------------------------*/
++ /* Handle IPV6 frame */
++ /*---------------------------------------------------*/
++#ifdef CL_IPV6_PASS
++ else if (protocol == __constant_htons(ETH_P_IPV6)) {
++ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN);
++
++ if (sizeof(*iph) >= (skb->len - ETH_HLEN)) {
++ printk("NAT25: malformed IPv6 packet !\n");
++ return -1;
++ }
++
++ switch (method) {
++ case NAT25_CHECK:
++ if (skb->data[0] & 1)
++ return 0;
++ return -1;
++
++ case NAT25_INSERT: {
++ printk("NAT25: Insert IP, SA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x,"
++ " DA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n",
++ iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3],
++ iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
++ iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
++ iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
++
++ if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
++ __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
++ __nat25_db_network_insert(vif, skb->data + ETH_ALEN, networkAddr);
++ __nat25_db_print(vif);
++
++ if (iph->nexthdr == IPPROTO_ICMPV6 &&
++ skb->len > (ETH_HLEN + sizeof(*iph) + 4)) {
++ if (update_nd_link_layer_addr(skb->data + ETH_HLEN + sizeof(*iph),
++ skb->len - ETH_HLEN - sizeof(*iph), vif->ndev->dev_addr)) {
++ struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
++ hdr->icmp6_cksum = 0;
++ hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
++ iph->payload_len,
++ IPPROTO_ICMPV6,
++ csum_partial((__u8 *)hdr, iph->payload_len, 0));
++ }
++ }
++ }
++ }
++ return 0;
++
++ case NAT25_LOOKUP:
++ printk("NAT25: Lookup IP, SA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x,"
++ " DA=%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n",
++ iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3],
++ iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
++ iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
++ iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
++
++
++ __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->daddr);
++ if (!__nat25_db_network_lookup_and_replace(vif, skb, networkAddr)) {
++#ifdef SUPPORT_RX_UNI2MCAST
++ if (iph->daddr.s6_addr[0] == 0xff)
++ convert_ipv6_mac_to_mc(skb);
++#endif
++ }
++ return 0;
++
++ default:
++ return -1;
++ }
++ }
++#endif /* CL_IPV6_PASS */
++
++ return -1;
++}
++
++
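++/*
++ * nat25_handle_frame - bridge-side entry point for a single frame.
++ *
++ * For unicast frames: temporarily strip an optional 802.1Q tag, rewrite
++ * the destination MAC either from the short-cut database (scdb) or via a
++ * full NAT25 lookup, then restore the VLAN tag. Returns -1 when the
++ * lookup fails, 0 otherwise.
++ */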
++int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb)
++{
++ //printk("%s : vif_type=%d \n",__func__,RWNX_VIF_TYPE(vif));
++#ifdef BR_SUPPORT_DEBUG
++ if ((!vif->ethBrExtInfo.nat25_disable) && (!(skb->data[0] & 1))) {
++ printk("NAT25: Input Frame: DA=%02x%02x%02x%02x%02x%02x SA=%02x%02x%02x%02x%02x%02x\n",
++ skb->data[0],
++ skb->data[1],
++ skb->data[2],
++ skb->data[3],
++ skb->data[4],
++ skb->data[5],
++ skb->data[6],
++ skb->data[7],
++ skb->data[8],
++ skb->data[9],
++ skb->data[10],
++ skb->data[11]);
++ }
++#endif
++
++ if (!(skb->data[0] & 1)) {
++ int is_vlan_tag = 0, i, retval = 0;
++ unsigned short vlan_hdr = 0;
++
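++ /*
++ * For 802.1Q-tagged frames, save the VLAN TCI and shift the two MAC
++ * addresses forward by 4 bytes so the NAT25 code operates on an
++ * untagged frame; the tag is pushed back below after the lookup.
++ */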
++ if (*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_8021Q)) {
++ is_vlan_tag = 1;
++ vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2));
++ for (i = 0; i < 6; i++)
++ *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2));
++ skb_pull(skb, 4);
++ }
++
++ if (!vif->ethBrExtInfo.nat25_disable) {
++ spin_lock_bh(&vif->br_ext_lock);
++ /*
++ * This function looks up the destination network address in
++ * the NAT2.5 database. A return value of -1 means that the
++ * corresponding network protocol is NOT supported.
++ */
++ if (!vif->ethBrExtInfo.nat25sc_disable &&
++ (*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)) &&
++ !memcmp(vif->scdb_ip, skb->data + ETH_HLEN + 16, 4)) {
++ memcpy(skb->data, vif->scdb_mac, ETH_ALEN);
++
++ spin_unlock_bh(&vif->br_ext_lock);
++ } else {
++ spin_unlock_bh(&vif->br_ext_lock);
++
++ retval = nat25_db_handle(vif, skb, NAT25_LOOKUP);
++ }
++ } else {
++ if (((*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)) &&
++ !memcmp(vif->br_ip, skb->data + ETH_HLEN + 16, 4)) ||
++ ((*((unsigned short *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_ARP)) &&
++ !memcmp(vif->br_ip, skb->data + ETH_HLEN + 24, 4))) {
++ /* for traffic to upper TCP/IP */
++ retval = nat25_db_handle(vif, skb, NAT25_LOOKUP);
++ }
++ }
++
++ if (is_vlan_tag) {
++ skb_push(skb, 4);
++ for (i = 0; i < 6; i++)
++ *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
++ *((unsigned short *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q);
++ *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr;
++ }
++
++ if (retval == -1) {
++ /* DEBUG_ERR("NAT25: Lookup fail!\n"); */
++ return -1;
++ }
++ }
++
++ return 0;
++}
++
++#if 0
++void mac_clone(_adapter *priv, unsigned char *addr)
++{
++ struct sockaddr sa;
++
++ memcpy(sa.sa_data, addr, ETH_ALEN);
++ RTW_INFO("MAC Clone: Addr=%02x%02x%02x%02x%02x%02x\n",
++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
++ rtl8192cd_set_hwaddr(priv->dev, &sa);
++}
++
++
++int mac_clone_handle_frame(_adapter *priv, struct sk_buff *skb)
++{
++ if (priv->ethBrExtInfo.macclone_enable && !priv->macclone_completed) {
++ if (!(skb->data[ETH_ALEN] & 1)) { /* only consider unicast source MAC addresses */
++ if (memcmp(skb->data + ETH_ALEN, GET_MY_HWADDR(priv), ETH_ALEN) &&
++ ((priv->dev->br_port) &&
++ memcmp(skb->data + ETH_ALEN, priv->br_mac, ETH_ALEN))) {
++ mac_clone(priv, skb->data + ETH_ALEN);
++ priv->macclone_completed = 1;
++ }
++ }
++ }
++
++ return 0;
++}
++#endif /* 0 */
++
++#define SERVER_PORT 67
++#define CLIENT_PORT 68
++#define DHCP_MAGIC 0x63825363
++#define BROADCAST_FLAG 0x8000
++
++struct dhcpMessage {
++ u_int8_t op;
++ u_int8_t htype;
++ u_int8_t hlen;
++ u_int8_t hops;
++ u_int32_t xid;
++ u_int16_t secs;
++ u_int16_t flags;
++ u_int32_t ciaddr;
++ u_int32_t yiaddr;
++ u_int32_t siaddr;
++ u_int32_t giaddr;
++ u_int8_t chaddr[16];
++ u_int8_t sname[64];
++ u_int8_t file[128];
++ u_int32_t cookie;
++ u_int8_t options[308]; /* 312 - cookie */
++};
++
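++/*
++ * dhcp_flag_bcast - force the broadcast flag in bridged DHCP requests.
++ *
++ * For UDP packets from the DHCP client port to the server port whose
++ * payload carries the DHCP magic cookie, set the BROADCAST flag so the
++ * server replies by broadcast (reachable behind the NAT25 bridge) and
++ * patch the UDP checksum incrementally for the changed flags field.
++ */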
++void dhcp_flag_bcast(struct rwnx_vif *vif, struct sk_buff *skb)
++{
++ if (skb == NULL)
++ return;
++ //print_hex_dump(KERN_ERR, "SKB DUMP: SKB->DATA== ", DUMP_PREFIX_NONE, 32, 1, skb->data, 64,false);
++ if (!vif->ethBrExtInfo.dhcp_bcst_disable) {
++ unsigned short protocol = *((unsigned short *)(skb->data + 2 * ETH_ALEN));
++ printk("%s protocol: %04x\n", __func__, protocol);
++
++ if (protocol == __constant_htons(ETH_P_IP)) { /* IP */
++ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
++
++ if (iph->protocol == IPPROTO_UDP) { /* UDP */
++ struct udphdr *udph = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
++
++ if ((udph->source == __constant_htons(CLIENT_PORT))
++ && (udph->dest == __constant_htons(SERVER_PORT))) { /* DHCP request */
++ struct dhcpMessage *dhcph =
++ (struct dhcpMessage *)((u8 *)udph + sizeof(struct udphdr));
++
++ if (dhcph->cookie == __constant_htonl(DHCP_MAGIC)) { /* match magic word */
++ if (!(dhcph->flags & htons(BROADCAST_FLAG))) { /* if not broadcast */
++ register int sum = 0;
++
++ printk("DHCP: change flag of DHCP request to broadcast.\n");
++
++ #if 1
++ /* or BROADCAST flag */
++ dhcph->flags |= htons(BROADCAST_FLAG);
++ /* recalculate checksum */
++ sum = ~(udph->check) & 0xffff;
++ sum += dhcph->flags;
++ while (sum >> 16)
++ sum = (sum & 0xffff) + (sum >> 16);
++ udph->check = ~sum;
++ #endif
++ }
++ }
++ }
++ }
++ }
++ }
++}
++
++
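++/*
++ * scdb_findEntry - short-cut database lookup by IPv4 address.
++ *
++ * Generates the NAT25 network address for @ipAddr, hashes it and walks
++ * the per-vif nethash chain; returns the matching entry or NULL.
++ */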
++void *scdb_findEntry(struct rwnx_vif *vif, unsigned char *macAddr,
++ unsigned char *ipAddr)
++{
++ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
++ struct nat25_network_db_entry *db;
++ int hash;
++
++ printk("%s()\n", __func__);
++ /* _irqL irqL; */
++ /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
++
++ __nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr);
++ hash = __nat25_network_hash(networkAddr);
++ db = vif->nethash[hash];
++ while (db != NULL) {
++ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
++ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
++ return (void *)db;
++ }
++
++ db = db->next_hash;
++ }
++
++ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
++ return NULL;
++}
++
++#endif /* CONFIG_BR_SUPPORT */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aic_br_ext.h
+@@ -0,0 +1,73 @@
++/******************************************************************************
++ *
++ * Copyright(c) 2007 - 2017 Realtek Corporation.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ *****************************************************************************/
++#ifndef _AIC_BR_EXT_H_
++#define _AIC_BR_EXT_H_
++
++#define CL_IPV6_PASS 1
++#define MACADDRLEN 6
++#define WLAN_ETHHDR_LEN 14
++
++#define NAT25_HASH_BITS 4
++#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS)
++#define NAT25_AGEING_TIME 300
++
++#define NDEV_FMT "%s"
++#define NDEV_ARG(ndev) ndev->name
++#define ADPT_FMT "%s"
++//#define ADPT_ARG(adapter) (adapter->pnetdev ? adapter->pnetdev->name : NULL)
++#define FUNC_NDEV_FMT "%s(%s)"
++#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
++#define FUNC_ADPT_FMT "%s(%s)"
++//#define FUNC_ADPT_ARG(adapter) __func__, (adapter->pnetdev ? adapter->pnetdev->name : NULL)
++#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
++#define MAC_ARG(x) ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
++
++
++#ifdef CL_IPV6_PASS
++ #define MAX_NETWORK_ADDR_LEN 17
++#else
++ #define MAX_NETWORK_ADDR_LEN 11
++#endif
++
++struct nat25_network_db_entry {
++ struct nat25_network_db_entry *next_hash;
++ struct nat25_network_db_entry **pprev_hash;
++ atomic_t use_count;
++ unsigned char macAddr[6];
++ unsigned long ageing_timer;
++ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
++};
++
++enum NAT25_METHOD {
++ NAT25_MIN,
++ NAT25_CHECK,
++ NAT25_INSERT,
++ NAT25_LOOKUP,
++ NAT25_PARSE,
++ NAT25_MAX
++};
++
++struct br_ext_info {
++ unsigned int nat25_disable;
++ unsigned int macclone_enable;
++ unsigned int dhcp_bcst_disable;
++ int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
++ unsigned char nat25_dmzMac[MACADDRLEN];
++ unsigned int nat25sc_disable;
++};
++
++void nat25_db_cleanup(struct rwnx_vif *vif);
++
++#endif /* _AIC_BR_EXT_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aic_vendor.c
+@@ -0,0 +1,879 @@
++#include "aic_vendor.h"
++#include "rwnx_defs.h"
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/rtnetlink.h>
++#include <net/netlink.h>
++#include "rwnx_version_gen.h"
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++
++static struct wifi_ring_buffer_status ring_buffer[] = {
++ {
++ .name = "aicwf_ring_buffer0",
++ .flags = 0,
++ .ring_id = 0,
++ .verbose_level = 0,
++ .written_bytes = 0,
++ .read_bytes = 0,
++ .written_records = 0,
++ },
++};
++
++static struct wlan_driver_wake_reason_cnt_t wake_reason_cnt = {
++ .total_cmd_event_wake = 10,
++};
++#endif
++
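++/*
++ * Build an Ethernet keep-alive frame by prepending the destination and
++ * source MAC plus the IPv4 EtherType to the supplied IP packet. Handing
++ * the frame to the firmware as a periodic raw 802.3 transmit is still a
++ * TODO, so the buffer is currently only built and freed.
++ */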
++int aic_dev_start_mkeep_alive(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u8 mkeep_alive_id, u8 *ip_pkt, u16 ip_pkt_len, u8 *src_mac, u8 *dst_mac, u32 period_msec)
++{
++ u8 *data, *pos;
++
++ data = kzalloc(ip_pkt_len + 14, GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ pos = data;
++ memcpy(pos, dst_mac, 6);
++ pos += 6;
++ memcpy(pos, src_mac, 6);
++ pos += 6;
++ /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
++ *(pos++) = 0x08;
++ *(pos++) = 0x00;
++
++ /* Mapping IP pkt */
++ memcpy(pos, ip_pkt, ip_pkt_len);
++ pos += ip_pkt_len;
++
++ //add send 802.3 pkt(raw data)
++ kfree(data);
++
++ return 0;
++}
++
++int aic_dev_stop_mkeep_alive(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif, u8 mkeep_alive_id)
++{
++ int res = -1;
++
++ /*
++ * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
++ * dongle shall reject a mkeep_alive request.
++ */
++ if (rwnx_vif->wdev.iftype != NL80211_IFTYPE_STATION)
++ return res;
++
++ printk("%s execution\n", __func__);
++
++ //add send stop keep alive
++ res = 0;
++ return res;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++static int aicwf_vendor_start_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ /* max size of IP packet for keep alive */
++ const int MKEEP_ALIVE_IP_PKT_MAX = 256;
++
++ int ret = 0, rem, type;
++ u8 mkeep_alive_id = 0;
++ u8 *ip_pkt = NULL;
++ u16 ip_pkt_len = 0;
++ u8 src_mac[6];
++ u8 dst_mac[6];
++ u32 period_msec = 0;
++ const struct nlattr *iter;
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++ gfp_t kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
++ printk("%s\n", __func__);
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case MKEEP_ALIVE_ATTRIBUTE_ID:
++ mkeep_alive_id = nla_get_u8(iter);
++ break;
++ case MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN:
++ ip_pkt_len = nla_get_u16(iter);
++ if (ip_pkt_len > MKEEP_ALIVE_IP_PKT_MAX) {
++ ret = -EINVAL;
++ goto exit;
++ }
++ break;
++ case MKEEP_ALIVE_ATTRIBUTE_IP_PKT:
++ if (!ip_pkt_len) {
++ ret = -EINVAL;
++ printk("ip packet length is 0\n");
++ goto exit;
++ }
++ ip_pkt = (u8 *)kzalloc(ip_pkt_len, kflags);
++ if (ip_pkt == NULL) {
++ ret = -ENOMEM;
++ printk("Failed to allocate mem for ip packet\n");
++ goto exit;
++ }
++ memcpy(ip_pkt, (u8 *)nla_data(iter), ip_pkt_len);
++ break;
++ case MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR:
++ memcpy(src_mac, nla_data(iter), 6);
++ break;
++ case MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR:
++ memcpy(dst_mac, nla_data(iter), 6);
++ break;
++ case MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC:
++ period_msec = nla_get_u32(iter);
++ break;
++ default:
++ pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ ret = -EINVAL;
++ goto exit;
++ }
++ }
++
++ if (ip_pkt == NULL) {
++ ret = -EINVAL;
++ printk("ip packet is NULL\n");
++ goto exit;
++ }
++
++ ret = aic_dev_start_mkeep_alive(rwnx_hw, rwnx_vif, mkeep_alive_id, ip_pkt, ip_pkt_len, src_mac,
++ dst_mac, period_msec);
++ if (ret < 0) {
++ printk("start_mkeep_alive failed, ret: %d\n", ret);
++ }
++
++exit:
++ if (ip_pkt) {
++ kfree(ip_pkt);
++ }
++
++ return ret;
++}
++
++static int aicwf_vendor_stop_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type;
++ u8 mkeep_alive_id = 0;
++ const struct nlattr *iter;
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++
++ printk("%s\n", __func__);
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case MKEEP_ALIVE_ATTRIBUTE_ID:
++ mkeep_alive_id = nla_get_u8(iter);
++ break;
++ default:
++ pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ ret = -EINVAL;
++ break;
++ }
++ }
++
++ ret = aic_dev_stop_mkeep_alive(rwnx_hw, rwnx_vif, mkeep_alive_id);
++ if (ret < 0) {
++ printk("stop_mkeep_alive failed, ret: %d\n", ret);
++ }
++
++ return ret;
++}
++
++static int aicwf_vendor_get_ver(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type;
++ const struct nlattr *iter;
++ int payload = 0;
++ char version[128];
++ int attr = -1;
++ struct sk_buff *reply;
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case LOGGER_ATTRIBUTE_DRIVER_VER:
++ memcpy(version, RWNX_VERS_BANNER, sizeof(RWNX_VERS_BANNER));
++ payload = strlen(version);
++ attr = LOGGER_ATTRIBUTE_DRIVER_VER;
++ break;
++ case LOGGER_ATTRIBUTE_FW_VER:
++ memcpy(version, wiphy->fw_version, sizeof(wiphy->fw_version));
++ payload = strlen(version);
++ attr = LOGGER_ATTRIBUTE_FW_VER;
++ break;
++ default:
++ AICWFDBG(LOGERROR, "%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ return -EINVAL;
++ }
++ }
++
++ if (attr < 0)
++ return -EINVAL;
++
++ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
++
++ if (!reply)
++ return -ENOMEM;
++
++ if (nla_put(reply, attr,
++ payload, version)) {
++ wiphy_err(wiphy, "%s put version error\n", __func__);
++ goto out_put_fail;
++ }
++
++ ret = cfg80211_vendor_cmd_reply(reply);
++ if (ret)
++ wiphy_err(wiphy, "%s reply cmd error\n", __func__);
++ return ret;
++
++out_put_fail:
++ kfree_skb(reply);
++ return -EMSGSIZE;
++}
++
++static int aicwf_vendor_subcmd_get_channel_list(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type;
++ const struct nlattr *iter;
++ struct sk_buff *reply;
++ int num_channels = 0;
++ int *channel_list = NULL;
++ int payload;
++ int i = 0;
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ];
++ struct ieee80211_supported_band *rwnx_band_5GHz = rwnx_hw->wiphy->bands[NL80211_BAND_5GHZ];
++
++ num_channels += rwnx_band_2GHz->n_channels;
++ num_channels += (rwnx_hw->band_5g_support) ? rwnx_band_5GHz->n_channels : 0;
++
++ channel_list = (int *)kzalloc(sizeof(int) * num_channels, GFP_KERNEL);
++ if (!channel_list)
++ return -ENOMEM;
++
++ for (i = 0; i < rwnx_band_2GHz->n_channels; i++)
++ channel_list[i] = rwnx_band_2GHz->channels[i].center_freq;
++
++ for (; rwnx_hw->band_5g_support && i < num_channels; i++)
++ channel_list[i] = rwnx_band_5GHz->channels[i - rwnx_band_2GHz->n_channels].center_freq;
++
++ payload = sizeof(num_channels) + sizeof(int) * num_channels + 4;
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case GSCAN_ATTRIBUTE_BAND:
++ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
++
++ if (!reply)
++ return -ENOMEM;
++
++ if (nla_put_u32(reply, GSCAN_ATTRIBUTE_NUM_CHANNELS, num_channels))
++ goto out_put_fail;
++
++ if (nla_put(reply, GSCAN_ATTRIBUTE_CHANNEL_LIST, sizeof(int) * num_channels, channel_list))
++ goto out_put_fail;
++
++ ret = cfg80211_vendor_cmd_reply(reply);
++ if (ret)
++ wiphy_err(wiphy, "%s reply cmd error\n", __func__);
++ break;
++ default:
++ pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ return -EINVAL;
++ }
++ }
++
++ kfree(channel_list);
++ return ret;
++
++out_put_fail:
++ kfree(channel_list);
++ kfree_skb(reply);
++ return -EMSGSIZE;
++}
++
++static int aicwf_vendor_subcmd_set_country_code(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type;
++ const struct nlattr *iter;
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case ANDR_WIFI_ATTRIBUTE_COUNTRY:
++ printk("%s(%d), ANDR_WIFI_ATTRIBUTE_COUNTRY: %s\n", __func__, __LINE__, (char *)nla_data(iter));
++ break;
++ default:
++ pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ return -EINVAL;
++ }
++ }
++
++ /* TODO
++ * Add handle in the future!
++ */
++
++ return ret;
++}
++
++static int aicwf_vendor_logger_trigger_memory_dump(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ /* TODO
++ * Add handle in the future!
++ */
++ return 0;
++}
++
++static int aicwf_vendor_subcmd_get_feature_set(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret;
++ struct sk_buff *reply;
++ uint32_t feature = 0, payload;
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++
++ payload = sizeof(feature);
++ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
++
++ if (!reply)
++ return -ENOMEM;
++
++ /* TODO
++ * Add handle in the future!
++ */
++ /*bit 1:Basic infrastructure mode*/
++ if (wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION))
++ feature |= WIFI_FEATURE_INFRA;
++
++ /*bit 2:Support for 5 GHz Band*/
++ if (rwnx_hw->band_5g_support)
++ feature |= WIFI_FEATURE_INFRA_5G;
++
++ /*bit3:HOTSPOT is a supplicant feature, enable it by default*/
++ feature |= WIFI_FEATURE_HOTSPOT;
++
++ /*bit 4:P2P*/
++ if ((wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) &&
++ (wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_GO)))
++ feature |= WIFI_FEATURE_P2P;
++
++ /*bit 5:soft AP feature supported*/
++ if (wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
++ feature |= WIFI_FEATURE_SOFT_AP;
++
++ /*bit 18:WiFi Logger*/
++ feature |= WIFI_FEATURE_LOGGER;
++
++ /*bit 21:WiFi mkeep_alive*/
++ feature |= WIFI_FEATURE_MKEEP_ALIVE;
++
++ if (nla_put_u32(reply, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, feature)) {
++ wiphy_err(wiphy, "%s put u32 error\n", __func__);
++ goto out_put_fail;
++ }
++
++ ret = cfg80211_vendor_cmd_reply(reply);
++ if (ret)
++ wiphy_err(wiphy, "%s reply cmd error\n", __func__);
++
++ return ret;
++
++out_put_fail:
++ kfree_skb(reply);
++ return -EMSGSIZE;
++}
++
++static int aicwf_vendor_logger_get_feature(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret;
++ struct sk_buff *reply;
++ uint32_t feature = 0, payload;
++
++ payload = sizeof(feature);
++ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
++
++ if (!reply)
++ return -ENOMEM;
++
++ feature |= WIFI_LOGGER_MEMORY_DUMP_SUPPORTED;
++ feature |= WIFI_LOGGER_CONNECT_EVENT_SUPPORTED;
++
++ /*vts will test wake reason state function*/
++ feature |= WIFI_LOGGER_WAKE_LOCK_SUPPORTED;
++
++ if (nla_put_u32(reply, ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET, feature)) {
++ wiphy_err(wiphy, "put skb u32 failed\n");
++ goto out_put_fail;
++ }
++
++ ret = cfg80211_vendor_cmd_reply(reply);
++ if (ret)
++ wiphy_err(wiphy, "reply cmd error\n");
++
++ return ret;
++
++out_put_fail:
++ kfree_skb(reply);
++ return -EMSGSIZE;
++}
++
++static int aicwf_vendor_logger_get_ring_status(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret;
++ struct sk_buff *reply;
++ uint32_t payload;
++ uint32_t ring_buffer_nums = sizeof(ring_buffer) / sizeof(ring_buffer[0]);
++
++ payload = sizeof(ring_buffer_nums) + sizeof(ring_buffer);
++ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
++
++ if (!reply)
++ return -ENOMEM;
++
++ if (nla_put_u32(reply, LOGGER_ATTRIBUTE_RING_NUM, ring_buffer_nums)) {
++ wiphy_err(wiphy, "put skb u32 failed\n");
++ goto out_put_fail;
++ }
++
++ if (nla_put(reply, LOGGER_ATTRIBUTE_RING_STATUS, sizeof(ring_buffer), ring_buffer)) {
++ wiphy_err(wiphy, "put skb failed\n");
++ goto out_put_fail;
++ }
++
++ ret = cfg80211_vendor_cmd_reply(reply);
++ if (ret)
++ wiphy_err(wiphy, "reply cmd error\n");
++
++ return ret;
++
++out_put_fail:
++ kfree_skb(reply);
++ return -EMSGSIZE;
++}
++
++static int aicwf_vendor_logger_start_logging(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type, intval, size, i;
++ const struct nlattr *iter;
++ struct wifi_ring_buffer_status rb;
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case LOGGER_ATTRIBUTE_LOG_LEVEL:
++ rb.verbose_level = nla_get_u32(iter);
++ break;
++ case LOGGER_ATTRIBUTE_RING_FLAGS:
++ rb.flags = nla_get_u32(iter);
++ break;
++ case LOGGER_ATTRIBUTE_LOG_TIME_INTVAL:
++ intval = nla_get_u32(iter);
++ break;
++ case LOGGER_ATTRIBUTE_LOG_MIN_DATA_SIZE:
++ size = nla_get_u32(iter);
++ break;
++ case LOGGER_ATTRIBUTE_RING_NAME:
++ strscpy(rb.name, nla_data(iter), sizeof(rb.name));
++ break;
++ default:
++ AICWFDBG(LOGERROR, "%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ return -EINVAL;
++ }
++ }
++
++ ret = -EINVAL;
++ for (i = 0; i < sizeof(ring_buffer) / sizeof(ring_buffer[0]); i++) {
++ if (strcmp(rb.name, ring_buffer[i].name) == 0) {
++ ret = 0;
++ break;
++ }
++ }
++
++ /* TODO
++ * Add handle in the future
++ */
++
++ return ret;
++}
++
++static int aicwf_vendor_logger_get_ring_data(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type, i;
++ const struct nlattr *iter;
++ struct wifi_ring_buffer_status rb;
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case LOGGER_ATTRIBUTE_RING_NAME:
++ strscpy(rb.name, nla_data(iter), sizeof(rb.name));
++ break;
++ default:
++ pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ return -EINVAL;
++ }
++ }
++
++ ret = -EINVAL;
++ for (i = 0; i < sizeof(ring_buffer) / sizeof(ring_buffer[0]); i++) {
++ if (strcmp(rb.name, ring_buffer[i].name) == 0) {
++ ret = 0;
++ break;
++ }
++ }
++
++ /* TODO
++ * Add handle in the future
++ */
++
++ return ret;
++}
++
++static int aicwf_vendor_logger_get_wake_reason_stats(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret;
++ struct sk_buff *reply;
++ uint32_t payload;
++
++ payload = sizeof(wake_reason_cnt.total_cmd_event_wake);
++ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
++
++ if (!reply)
++ return -ENOMEM;
++
++ /* TODO
++ * Add handle in the future
++ */
++ if (nla_put_u32(reply, WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT, wake_reason_cnt.total_cmd_event_wake))
++ goto out_put_fail;
++
++ ret = cfg80211_vendor_cmd_reply(reply);
++ if (ret)
++ wiphy_err(wiphy, "reply cmd error\n");
++
++ return ret;
++
++out_put_fail:
++ kfree_skb(reply);
++ return -EMSGSIZE;
++}
++
++static int aicwf_vendor_apf_subcmd_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ /* TODO
++ * Add handle in the future
++ */
++ return 0;
++}
++
++static int aicwf_vendor_sub_cmd_set_mac(struct wiphy *wiphy, struct wireless_dev *wdev,
++ const void *data, int len)
++{
++ int ret = 0, rem, type;
++ const struct nlattr *iter;
++ u8 mac[ETH_ALEN];
++
++ printk("%s enter\n", __func__);
++
++ nla_for_each_attr(iter, data, len, rem) {
++ type = nla_type(iter);
++ switch (type) {
++ case WIFI_VENDOR_ATTR_DRIVER_MAC_ADDR:
++ memcpy(mac, nla_data(iter), ETH_ALEN);
++ printk("%s, %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
++ break;
++ default:
++ pr_err("%s(%d), Unknown type: %d\n", __func__, __LINE__, type);
++ return -EINVAL;
++ }
++ }
++
++ /* TODO
++ * Add handle in the future
++ */
++
++ return ret;
++}
++#endif
++
++static const struct nla_policy
++aicwf_cfg80211_mkeep_alive_policy[MKEEP_ALIVE_ATTRIBUTE_MAX+1] = {
++ [0] = {.type = NLA_UNSPEC },
++ [MKEEP_ALIVE_ATTRIBUTE_ID] = { .type = NLA_U8 },
++ [MKEEP_ALIVE_ATTRIBUTE_IP_PKT] = { .type = NLA_BINARY },
++ [MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN] = { .type = NLA_U16 },
++ [MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR] = { .type = NLA_BINARY,
++ .len = ETH_ALEN },
++ [MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR] = { .type = NLA_BINARY,
++ .len = ETH_ALEN },
++ [MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC] = { .type = NLA_U32 },
++};
++
++static const struct nla_policy
++aicwf_cfg80211_logger_policy[LOGGER_ATTRIBUTE_MAX + 1] = {
++ [0] = {.type = NLA_UNSPEC },
++ [LOGGER_ATTRIBUTE_DRIVER_VER] = { .type = NLA_BINARY },
++ [LOGGER_ATTRIBUTE_FW_VER] = { .type = NLA_BINARY },
++ [LOGGER_ATTRIBUTE_LOG_LEVEL] = { .type = NLA_U32 },
++ [LOGGER_ATTRIBUTE_RING_FLAGS] = { .type = NLA_U32 },
++ [LOGGER_ATTRIBUTE_LOG_TIME_INTVAL] = { .type = NLA_U32 },
++ [LOGGER_ATTRIBUTE_LOG_MIN_DATA_SIZE] = { .type = NLA_U32 },
++ [LOGGER_ATTRIBUTE_RING_NAME] = { .type = NLA_STRING },
++};
++
++static const struct nla_policy
++aicwf_cfg80211_subcmd_policy[GSCAN_ATTRIBUTE_MAX + 1] = {
++ [0] = {.type = NLA_UNSPEC },
++ [GSCAN_ATTRIBUTE_BAND] = { .type = NLA_U32 },
++};
++
++static const struct nla_policy
++aicwf_cfg80211_andr_wifi_policy[ANDR_WIFI_ATTRIBUTE_MAX + 1] = {
++ [0] = {.type = NLA_UNSPEC },
++ [ANDR_WIFI_ATTRIBUTE_COUNTRY] = { .type = NLA_STRING },
++};
++
++static const struct nla_policy
++aicwf_cfg80211_subcmd_set_mac_policy[WIFI_VENDOR_ATTR_DRIVER_MAX + 1] = {
++ [0] = {.type = NLA_UNSPEC },
++ [WIFI_VENDOR_ATTR_DRIVER_MAC_ADDR] = { .type = NLA_BINARY, .len = ETH_ALEN },
++};
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++static int aicwf_dump_interface(struct wiphy *wiphy,
++ struct wireless_dev *wdev, struct sk_buff *skb,
++ const void *data, int data_len,
++ unsigned long *storage)
++{
++ return 0;
++}
++
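++/*
++ * Vendor command table registered with cfg80211: Google/Android (and one
++ * BRCM set-MAC) subcommands covering keep-alive offload, logger/version
++ * queries, channel list, country code, feature sets, wake-reason stats
++ * and APF capabilities.
++ */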
++const struct wiphy_vendor_command aicwf_vendor_cmd[] = {
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_start_mkeep_alive,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_mkeep_alive_policy,
++ .maxattr = MKEEP_ALIVE_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_stop_mkeep_alive,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_mkeep_alive_policy,
++ .maxattr = MKEEP_ALIVE_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_GET_VER
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_get_ver,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_logger_policy,
++ .maxattr = LOGGER_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = GSCAN_SUBCMD_GET_CHANNEL_LIST
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_subcmd_get_channel_list,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_subcmd_policy,
++ .maxattr = GSCAN_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = WIFI_SUBCMD_SET_COUNTRY_CODE
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_subcmd_set_country_code,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_andr_wifi_policy,
++ .maxattr = ANDR_WIFI_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_TRIGGER_MEM_DUMP
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_logger_trigger_memory_dump,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = VENDOR_CMD_RAW_DATA,
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = WIFI_SUBCMD_GET_FEATURE_SET
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_subcmd_get_feature_set,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = VENDOR_CMD_RAW_DATA,
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_GET_FEATURE
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_logger_get_feature,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = VENDOR_CMD_RAW_DATA,
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_GET_RING_STATUS
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_logger_get_ring_status,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = VENDOR_CMD_RAW_DATA,
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_START_LOGGING
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_logger_start_logging,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_logger_policy,
++ .maxattr = LOGGER_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_GET_RING_DATA
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_logger_get_ring_data,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_logger_policy,
++ .maxattr = LOGGER_ATTRIBUTE_MAX
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = LOGGER_GET_WAKE_REASON_STATS
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_logger_get_wake_reason_stats,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = VENDOR_CMD_RAW_DATA,
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = APF_SUBCMD_GET_CAPABILITIES
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
++ .doit = aicwf_vendor_apf_subcmd_get_capabilities,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = VENDOR_CMD_RAW_DATA,
++#endif
++ },
++ {
++ {
++ .vendor_id = GOOGLE_OUI,
++ .subcmd = VENDOR_NL80211_SUBCMD_SET_MAC
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING,
++ .doit = aicwf_vendor_sub_cmd_set_mac,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_subcmd_set_mac_policy,
++ .maxattr = WIFI_VENDOR_ATTR_DRIVER_MAX,
++#endif
++ },
++ {
++ {
++ .vendor_id = BRCM_OUI,
++ .subcmd = VENDOR_NL80211_SUBCMD_SET_MAC
++ },
++ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_RUNNING,
++ .doit = aicwf_vendor_sub_cmd_set_mac,
++ .dumpit = aicwf_dump_interface,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
++ .policy = aicwf_cfg80211_subcmd_set_mac_policy,
++ .maxattr = WIFI_VENDOR_ATTR_DRIVER_MAX,
++#endif
++ },
++};
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++static const struct nl80211_vendor_cmd_info aicwf_vendor_events[] = {
++};
++#endif
++
++int aicwf_vendor_init(struct wiphy *wiphy)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ wiphy->vendor_commands = aicwf_vendor_cmd;
++ wiphy->n_vendor_commands = ARRAY_SIZE(aicwf_vendor_cmd);
++ wiphy->vendor_events = aicwf_vendor_events;
++ wiphy->n_vendor_events = ARRAY_SIZE(aicwf_vendor_events);
++#endif
++ return 0;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aic_vendor.h
+@@ -0,0 +1,346 @@
++#ifndef _AIC_VENDOR_H
++#define _AIC_VENDOR_H
++
++#include <linux/types.h>
++
++#define GOOGLE_OUI 0x001A11
++#define BRCM_OUI 0x001018
++
++typedef enum {
++ START_MKEEP_ALIVE,
++ STOP_MKEEP_ALIVE,
++} GetCmdType;
++
++typedef enum {
++ /* don't use 0 as a valid subcommand */
++ VENDOR_NL80211_SUBCMD_UNSPECIFIED,
++
++ /* define all vendor startup commands between 0x0 and 0x0FFF */
++ VENDOR_NL80211_SUBCMD_RANGE_START = 0x0001,
++ VENDOR_NL80211_SUBCMD_RANGE_END = 0x0FFF,
++
++ /* define all GScan related commands between 0x1000 and 0x10FF */
++ ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START = 0x1000,
++ ANDROID_NL80211_SUBCMD_GSCAN_RANGE_END = 0x10FF,
++
++ /* define all NearbyDiscovery related commands between 0x1100 and 0x11FF */
++ ANDROID_NL80211_SUBCMD_NBD_RANGE_START = 0x1100,
++ ANDROID_NL80211_SUBCMD_NBD_RANGE_END = 0x11FF,
++
++ /* define all RTT related commands between 0x1100 and 0x11FF */
++ ANDROID_NL80211_SUBCMD_RTT_RANGE_START = 0x1100,
++ ANDROID_NL80211_SUBCMD_RTT_RANGE_END = 0x11FF,
++
++ ANDROID_NL80211_SUBCMD_LSTATS_RANGE_START = 0x1200,
++ ANDROID_NL80211_SUBCMD_LSTATS_RANGE_END = 0x12FF,
++
++ /* define all Logger related commands between 0x1400 and 0x14FF */
++ ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START = 0x1400,
++ ANDROID_NL80211_SUBCMD_DEBUG_RANGE_END = 0x14FF,
++
++ /* define all wifi offload related commands between 0x1600 and 0x16FF */
++ ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START = 0x1600,
++ ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_END = 0x16FF,
++
++ /* define all NAN related commands between 0x1700 and 0x17FF */
++ ANDROID_NL80211_SUBCMD_NAN_RANGE_START = 0x1700,
++ ANDROID_NL80211_SUBCMD_NAN_RANGE_END = 0x17FF,
++
++ /* define all Android Packet Filter related commands between 0x1800 and 0x18FF */
++ ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START = 0x1800,
++ ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_END = 0x18FF,
++
++ /* This is reserved for future usage */
++
++} ANDROID_VENDOR_SUB_COMMAND;
++
++typedef enum {
++ WIFI_OFFLOAD_SUBCMD_START_MKEEP_ALIVE = ANDROID_NL80211_SUBCMD_WIFI_OFFLOAD_RANGE_START,
++ WIFI_OFFLOAD_SUBCMD_STOP_MKEEP_ALIVE,
++} WIFI_OFFLOAD_SUB_COMMAND;
++
++
++enum mkeep_alive_attributes {
++ MKEEP_ALIVE_ATTRIBUTE_ID = 0x1,
++ MKEEP_ALIVE_ATTRIBUTE_IP_PKT,
++ MKEEP_ALIVE_ATTRIBUTE_IP_PKT_LEN,
++ MKEEP_ALIVE_ATTRIBUTE_SRC_MAC_ADDR,
++ MKEEP_ALIVE_ATTRIBUTE_DST_MAC_ADDR,
++ MKEEP_ALIVE_ATTRIBUTE_PERIOD_MSEC,
++ MKEEP_ALIVE_ATTRIBUTE_AFTER_LAST,
++ MKEEP_ALIVE_ATTRIBUTE_MAX = MKEEP_ALIVE_ATTRIBUTE_AFTER_LAST - 1
++};
++
++enum debug_sub_command {
++ LOGGER_START_LOGGING = ANDROID_NL80211_SUBCMD_DEBUG_RANGE_START,
++ LOGGER_TRIGGER_MEM_DUMP,
++ LOGGER_GET_MEM_DUMP,
++ LOGGER_GET_VER,
++ LOGGER_GET_RING_STATUS,
++ LOGGER_GET_RING_DATA,
++ LOGGER_GET_FEATURE,
++ LOGGER_RESET_LOGGING,
++ LOGGER_TRIGGER_DRIVER_MEM_DUMP,
++ LOGGER_GET_DRIVER_MEM_DUMP,
++ LOGGER_START_PKT_FATE_MONITORING,
++ LOGGER_GET_TX_PKT_FATES,
++ LOGGER_GET_RX_PKT_FATES,
++ LOGGER_GET_WAKE_REASON_STATS,
++ LOGGER_DEBUG_GET_DUMP,
++ LOGGER_FILE_DUMP_DONE_IND,
++ LOGGER_SET_HAL_START,
++ LOGGER_HAL_STOP,
++ LOGGER_SET_HAL_PID,
++};
++
++enum logger_attributes {
++ LOGGER_ATTRIBUTE_INVALID = 0,
++ LOGGER_ATTRIBUTE_DRIVER_VER,
++ LOGGER_ATTRIBUTE_FW_VER,
++ LOGGER_ATTRIBUTE_RING_ID,
++ LOGGER_ATTRIBUTE_RING_NAME,
++ LOGGER_ATTRIBUTE_RING_FLAGS,
++ LOGGER_ATTRIBUTE_LOG_LEVEL,
++ LOGGER_ATTRIBUTE_LOG_TIME_INTVAL,
++ LOGGER_ATTRIBUTE_LOG_MIN_DATA_SIZE,
++ LOGGER_ATTRIBUTE_FW_DUMP_LEN,
++ LOGGER_ATTRIBUTE_FW_DUMP_DATA,
++ // LOGGER_ATTRIBUTE_FW_ERR_CODE,
++ LOGGER_ATTRIBUTE_RING_DATA,
++ LOGGER_ATTRIBUTE_RING_STATUS,
++ LOGGER_ATTRIBUTE_RING_NUM,
++ LOGGER_ATTRIBUTE_DRIVER_DUMP_LEN,
++ LOGGER_ATTRIBUTE_DRIVER_DUMP_DATA,
++ LOGGER_ATTRIBUTE_PKT_FATE_NUM,
++ LOGGER_ATTRIBUTE_PKT_FATE_DATA,
++ LOGGER_ATTRIBUTE_AFTER_LAST,
++ LOGGER_ATTRIBUTE_MAX = LOGGER_ATTRIBUTE_AFTER_LAST - 1,
++};
++
++enum wifi_sub_command {
++ GSCAN_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_GSCAN_RANGE_START,
++ GSCAN_SUBCMD_SET_CONFIG, /* 0x1001 */
++ GSCAN_SUBCMD_SET_SCAN_CONFIG, /* 0x1002 */
++ GSCAN_SUBCMD_ENABLE_GSCAN, /* 0x1003 */
++ GSCAN_SUBCMD_GET_SCAN_RESULTS, /* 0x1004 */
++ GSCAN_SUBCMD_SCAN_RESULTS, /* 0x1005 */
++ GSCAN_SUBCMD_SET_HOTLIST, /* 0x1006 */
++ GSCAN_SUBCMD_SET_SIGNIFICANT_CHANGE_CONFIG, /* 0x1007 */
++ GSCAN_SUBCMD_ENABLE_FULL_SCAN_RESULTS, /* 0x1008 */
++ GSCAN_SUBCMD_GET_CHANNEL_LIST, /* 0x1009 */
++ WIFI_SUBCMD_GET_FEATURE_SET, /* 0x100A */
++ WIFI_SUBCMD_GET_FEATURE_SET_MATRIX, /* 0x100B */
++ WIFI_SUBCMD_SET_PNO_RANDOM_MAC_OUI, /* 0x100C */
++ WIFI_SUBCMD_NODFS_SET, /* 0x100D */
++ WIFI_SUBCMD_SET_COUNTRY_CODE, /* 0x100E */
++ /* Add more sub commands here */
++ GSCAN_SUBCMD_SET_EPNO_SSID, /* 0x100F */
++ WIFI_SUBCMD_SET_SSID_WHITE_LIST, /* 0x1010 */
++ WIFI_SUBCMD_SET_ROAM_PARAMS, /* 0x1011 */
++ WIFI_SUBCMD_ENABLE_LAZY_ROAM, /* 0x1012 */
++ WIFI_SUBCMD_SET_BSSID_PREF, /* 0x1013 */
++ WIFI_SUBCMD_SET_BSSID_BLACKLIST, /* 0x1014 */
++ GSCAN_SUBCMD_ANQPO_CONFIG, /* 0x1015 */
++ WIFI_SUBCMD_SET_RSSI_MONITOR, /* 0x1016 */
++ WIFI_SUBCMD_CONFIG_ND_OFFLOAD, /* 0x1017 */
++ /* Add more sub commands here */
++ GSCAN_SUBCMD_MAX,
++ APF_SUBCMD_GET_CAPABILITIES = ANDROID_NL80211_SUBCMD_PKT_FILTER_RANGE_START,
++ APF_SUBCMD_SET_FILTER,
++};
++
++enum gscan_attributes {
++ GSCAN_ATTRIBUTE_NUM_BUCKETS = 10,
++ GSCAN_ATTRIBUTE_BASE_PERIOD,
++ GSCAN_ATTRIBUTE_BUCKETS_BAND,
++ GSCAN_ATTRIBUTE_BUCKET_ID,
++ GSCAN_ATTRIBUTE_BUCKET_PERIOD,
++ GSCAN_ATTRIBUTE_BUCKET_NUM_CHANNELS,
++ GSCAN_ATTRIBUTE_BUCKET_CHANNELS,
++ GSCAN_ATTRIBUTE_NUM_AP_PER_SCAN,
++ GSCAN_ATTRIBUTE_REPORT_THRESHOLD,
++ GSCAN_ATTRIBUTE_NUM_SCANS_TO_CACHE,
++ GSCAN_ATTRIBUTE_BAND = GSCAN_ATTRIBUTE_BUCKETS_BAND,
++
++ GSCAN_ATTRIBUTE_ENABLE_FEATURE = 20,
++ GSCAN_ATTRIBUTE_SCAN_RESULTS_COMPLETE, /* indicates no more results */
++ GSCAN_ATTRIBUTE_FLUSH_FEATURE, /* Flush all the configs */
++ GSCAN_ENABLE_FULL_SCAN_RESULTS,
++ GSCAN_ATTRIBUTE_REPORT_EVENTS,
++
++ /* remaining reserved for additional attributes */
++ GSCAN_ATTRIBUTE_NUM_OF_RESULTS = 30,
++ GSCAN_ATTRIBUTE_FLUSH_RESULTS,
++ GSCAN_ATTRIBUTE_SCAN_RESULTS, /* flat array of wifi_scan_result */
++ GSCAN_ATTRIBUTE_SCAN_ID, /* indicates scan number */
++ GSCAN_ATTRIBUTE_SCAN_FLAGS, /* indicates if scan was aborted */
++ GSCAN_ATTRIBUTE_AP_FLAGS, /* flags on significant change event */
++ GSCAN_ATTRIBUTE_NUM_CHANNELS,
++ GSCAN_ATTRIBUTE_CHANNEL_LIST,
++ GSCAN_ATTRIBUTE_CH_BUCKET_BITMASK,
++
++ GSCAN_ATTRIBUTE_AFTER_LAST,
++ GSCAN_ATTRIBUTE_MAX = GSCAN_ATTRIBUTE_AFTER_LAST - 1,
++};
++
++enum andr_wifi_attributes {
++ ANDR_WIFI_ATTRIBUTE_NUM_FEATURE_SET,
++ ANDR_WIFI_ATTRIBUTE_FEATURE_SET,
++ ANDR_WIFI_ATTRIBUTE_PNO_RANDOM_MAC_OUI,
++ ANDR_WIFI_ATTRIBUTE_NODFS_SET,
++ ANDR_WIFI_ATTRIBUTE_COUNTRY,
++ ANDR_WIFI_ATTRIBUTE_ND_OFFLOAD_VALUE,
++ // Add more attribute here
++ ANDR_WIFI_ATTRIBUTE_AFTER_LAST,
++ ANDR_WIFI_ATTRIBUTE_MAX = ANDR_WIFI_ATTRIBUTE_AFTER_LAST - 1,
++};
++
++enum wifi_support_feature {
++ /* Feature enums */
++ WIFI_FEATURE_INFRA = 0x0001, /* Basic infrastructure mode */
++ WIFI_FEATURE_INFRA_5G = 0x0002, /* Support for 5 GHz Band */
++ WIFI_FEATURE_HOTSPOT = 0x0004, /* Support for GAS/ANQP */
++ WIFI_FEATURE_P2P = 0x0008, /* Wifi-Direct */
++ WIFI_FEATURE_SOFT_AP = 0x0010, /* Soft AP */
++ WIFI_FEATURE_GSCAN = 0x0020, /* Google-Scan APIs */
++ WIFI_FEATURE_NAN = 0x0040, /* Neighbor Awareness Networking */
++ WIFI_FEATURE_D2D_RTT = 0x0080, /* Device-to-device RTT */
++ WIFI_FEATURE_D2AP_RTT = 0x0100, /* Device-to-AP RTT */
++ WIFI_FEATURE_BATCH_SCAN = 0x0200, /* Batched Scan (legacy) */
++ WIFI_FEATURE_PNO = 0x0400, /* Preferred network offload */
++ WIFI_FEATURE_ADDITIONAL_STA = 0x0800, /* Support for two STAs */
++ WIFI_FEATURE_TDLS = 0x1000, /* Tunnel directed link setup */
++ WIFI_FEATURE_TDLS_OFFCHANNEL = 0x2000, /* Support for TDLS off channel */
++ WIFI_FEATURE_EPR = 0x4000, /* Enhanced power reporting */
++ WIFI_FEATURE_AP_STA = 0x8000, /* Support for AP STA Concurrency */
++ WIFI_FEATURE_LINK_LAYER_STATS = 0x10000, /* Support for Linkstats */
++ WIFI_FEATURE_LOGGER = 0x20000, /* WiFi Logger */
++ WIFI_FEATURE_HAL_EPNO = 0x40000, /* WiFi PNO enhanced */
++ WIFI_FEATURE_RSSI_MONITOR = 0x80000, /* RSSI Monitor */
++ WIFI_FEATURE_MKEEP_ALIVE = 0x100000, /* WiFi mkeep_alive */
++ WIFI_FEATURE_CONFIG_NDO = 0x200000, /* ND offload configure */
++ WIFI_FEATURE_TX_TRANSMIT_POWER = 0x400000, /* Capture Tx transmit power levels */
++ WIFI_FEATURE_CONTROL_ROAMING = 0x800000, /* Enable/Disable firmware roaming */
++ WIFI_FEATURE_IE_WHITELIST = 0x1000000, /* Support Probe IE white listing */
++ WIFI_FEATURE_SCAN_RAND = 0x2000000, /* Support MAC & Probe Sequence Number randomization */
++ WIFI_FEATURE_INVALID = 0xFFFFFFFF, /* Invalid Feature */
++};
++
++enum wifi_logger_feature {
++ WIFI_LOGGER_MEMORY_DUMP_SUPPORTED = (1 << (0)), // Memory dump of FW
++ WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)), // PKT status
++ WIFI_LOGGER_CONNECT_EVENT_SUPPORTED = (1 << (2)), // Connectivity event
++ WIFI_LOGGER_POWER_EVENT_SUPPORTED = (1 << (3)), // POWER of Driver
++ WIFI_LOGGER_WAKE_LOCK_SUPPORTED = (1 << (4)), // WAKE LOCK of Driver
++ WIFI_LOGGER_VERBOSE_SUPPORTED = (1 << (5)), // verbose log of FW
++ WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED = (1 << (6)), // monitor the health of FW
++ WIFI_LOGGER_DRIVER_DUMP_SUPPORTED = (1 << (7)), // dumps driver state
++ WIFI_LOGGER_PACKET_FATE_SUPPORTED = (1 << (8)), // tracks connection packets' fate
++};
++
++enum wake_stats_attributes {
++ WAKE_STAT_ATTRIBUTE_TOTAL_CMD_EVENT,
++ WAKE_STAT_ATTRIBUTE_CMD_EVENT_WAKE,
++ WAKE_STAT_ATTRIBUTE_CMD_EVENT_COUNT,
++ WAKE_STAT_ATTRIBUTE_CMD_COUNT_USED,
++ WAKE_STAT_ATTRIBUTE_TOTAL_DRIVER_FW,
++ WAKE_STAT_ATTRIBUTE_DRIVER_FW_WAKE,
++ WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT,
++ WAKE_STAT_ATTRIBUTE_DRIVER_FW_COUNT_USED,
++ WAKE_STAT_ATTRIBUTE_TOTAL_RX_DATA_WAKE,
++ WAKE_STAT_ATTRIBUTE_RX_UNICAST_COUNT,
++ WAKE_STAT_ATTRIBUTE_RX_MULTICAST_COUNT,
++ WAKE_STAT_ATTRIBUTE_RX_BROADCAST_COUNT,
++ WAKE_STAT_ATTRIBUTE_RX_ICMP_PKT,
++ WAKE_STAT_ATTRIBUTE_RX_ICMP6_PKT,
++ WAKE_STAT_ATTRIBUTE_RX_ICMP6_RA,
++ WAKE_STAT_ATTRIBUTE_RX_ICMP6_NA,
++ WAKE_STAT_ATTRIBUTE_RX_ICMP6_NS,
++ WAKE_STAT_ATTRIBUTE_IPV4_RX_MULTICAST_ADD_CNT,
++ WAKE_STAT_ATTRIBUTE_IPV6_RX_MULTICAST_ADD_CNT,
++ WAKE_STAT_ATTRIBUTE_OTHER__RX_MULTICAST_ADD_CNT,
++ WAKE_STAT_ATTRIBUTE_RX_MULTICAST_PKT_INFO,
++ WAKE_STAT_ATTRIBUTE_AFTER_LAST,
++ WAKE_STAT_ATTRIBUTE_MAX = WAKE_STAT_ATTRIBUTE_AFTER_LAST - 1,
++};
++
++enum vendor_nl80211_subcmd {
++ /* copied from wpa_supplicant brcm definitions */
++ VENDOR_NL80211_SUBCMD_UNSPEC = 0,
++ VENDOR_NL80211_SUBCMD_SET_PMK = 4,
++ VENDOR_NL80211_SUBCMD_SET_MAC = 6,
++ VENDOR_NL80211_SCMD_ACS = 9,
++ VENDOR_NL80211_SCMD_MAX = 10,
++};
++
++enum nl80211_vendor_subcmd_attributes {
++ WIFI_VENDOR_ATTR_DRIVER_CMD = 0,
++ WIFI_VENDOR_ATTR_DRIVER_KEY_PMK = 1,
++ WIFI_VENDOR_ATTR_DRIVER_MAC_ADDR = 3,
++ WIFI_VENDOR_ATTR_DRIVER_AFTER_LAST = 5,
++ WIFI_VENDOR_ATTR_DRIVER_MAX =
++ WIFI_VENDOR_ATTR_DRIVER_AFTER_LAST - 1,
++};
++
++typedef int wifi_ring_buffer_id;
++
++struct wifi_ring_buffer_status {
++ u8 name[32];
++ u32 flags;
++ wifi_ring_buffer_id ring_id;
++ u32 ring_buffer_byte_size;
++ u32 verbose_level;
++ u32 written_bytes;
++ u32 read_bytes;
++ u32 written_records;
++};
++
++struct rx_data_cnt_details_t {
++ int rx_unicast_cnt; /*Total rx unicast packet which woke up host */
++ int rx_multicast_cnt; /*Total rx multicast packet which woke up host */
++ int rx_broadcast_cnt; /*Total rx broadcast packet which woke up host */
++};
++
++struct rx_wake_pkt_type_classification_t {
++ int icmp_pkt; /*wake icmp packet count */
++ int icmp6_pkt; /*wake icmp6 packet count */
++ int icmp6_ra; /*wake icmp6 RA packet count */
++ int icmp6_na; /*wake icmp6 NA packet count */
++ int icmp6_ns; /*wake icmp6 NS packet count */
++ //ToDo: Any more interesting classification to add?
++};
++
++struct rx_multicast_cnt_t{
++ int ipv4_rx_multicast_addr_cnt; /*Rx wake packet was ipv4 multicast */
++ int ipv6_rx_multicast_addr_cnt; /*Rx wake packet was ipv6 multicast */
++ int other_rx_multicast_addr_cnt;/*Rx wake packet was non-ipv4 and non-ipv6*/
++};
++
++struct wlan_driver_wake_reason_cnt_t {
++ int total_cmd_event_wake; /* Total count of cmd event wakes */
++ int *cmd_event_wake_cnt; /* Individual wake count array, each index a reason */
++ int cmd_event_wake_cnt_sz; /* Max number of cmd event wake reasons */
++ int cmd_event_wake_cnt_used; /* Number of cmd event wake reasons specific to the driver */
++
++ int total_driver_fw_local_wake; /* Total count of drive/fw wakes, for local reasons */
++ int *driver_fw_local_wake_cnt; /* Individual wake count array, each index a reason */
++ int driver_fw_local_wake_cnt_sz; /* Max number of local driver/fw wake reasons */
++ int driver_fw_local_wake_cnt_used; /* Number of local driver/fw wake reasons specific to the driver */
++
++ int total_rx_data_wake; /* total data rx packets, that woke up host */
++ struct rx_data_cnt_details_t rx_wake_details;
++ struct rx_wake_pkt_type_classification_t rx_wake_pkt_classification_info;
++ struct rx_multicast_cnt_t rx_multicast_wake_pkt_info;
++};
++
++typedef struct wl_mkeep_alive_pkt {
++ u16 version; /* Version for mkeep_alive */
++ u16 length; /* length of fixed parameters in the structure */
++ u32 period_msec; /* high bit on means immediate send */
++ u16 len_bytes;
++ u8 keep_alive_id; /* 0 - 3 for N = 4 */
++ u8 data[1];
++} wl_mkeep_alive_pkt_t;
++
++#endif /* _AIC_VENDOR_H */
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800d80.c
+@@ -0,0 +1,66 @@
++#include "rwnx_main.h"
++#include "rwnx_msg_tx.h"
++#include "reg_access.h"
++
++#define FW_USERCONFIG_NAME_8800D80 "aic_userconfig_8800d80.txt"
++
++extern char aic_fw_path[200];
++
++int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw,
++ u32** buffer, const char *filename);
++void rwnx_plat_userconfig_parsing(char *buffer, int size);
++void rwnx_release_firmware_common(u32** buffer);
++
++
++int aicwf_set_rf_config_8800d80(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm)
++{
++ int ret = 0;
++
++ if ((ret = rwnx_send_txpwr_lvl_v3_req(rwnx_hw))) {
++ return -1;
++ }
++ if ((ret = rwnx_send_txpwr_ofst_req(rwnx_hw))) {
++ return -1;
++ }
++ if (testmode == 0) {
++ if ((ret = rwnx_send_rf_calib_req(rwnx_hw, cfm))) {
++ return -1;
++ }
++ }
++ return 0;
++}
++
++
++int rwnx_plat_userconfig_load_8800d80(struct rwnx_hw *rwnx_hw){
++ int size;
++ u32 *dst=NULL;
++ char *filename = FW_USERCONFIG_NAME_8800D80;
++
++#ifndef ANDROID_PLATFORM
++ /* Append the chip subdirectory; sprintf() with aic_fw_path as both source
++ * and destination is undefined behaviour, so use strlcat() instead. */
++ strlcat(aic_fw_path, "/aic8800D80", sizeof(aic_fw_path));
++#endif
++
++ AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename);
++
++ /* load file */
++ size = rwnx_request_firmware_common(rwnx_hw, &dst, filename);
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ dst = NULL;
++ return 0;
++ }
++
++ /* Copy the file on the Embedded side */
++ AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size);
++
++ rwnx_plat_userconfig_parsing((char *)dst, size);
++
++ rwnx_release_firmware_common(&dst);
++
++ AICWFDBG(LOGINFO, "userconfig download complete\n\n");
++ return 0;
++
++}
++
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800d80.h
+@@ -0,0 +1,6 @@
++#include <linux/types.h>
++
++int rwnx_plat_userconfig_load_8800d80(struct rwnx_hw *rwnx_hw);
++int aicwf_set_rf_config_8800d80(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm);
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800dc.c
+@@ -0,0 +1,2329 @@
++#include "rwnx_main.h"
++#include "rwnx_msg_tx.h"
++#include "reg_access.h"
++#include "rwnx_platform.h"
++#include "aicwf_compat_8800dc.h"
++
++#define RWNX_MAC_FW_RF_BASE_NAME_8800DC "lmacfw_rf_8800dc.bin"
++
++#ifdef CONFIG_FOR_IPCAM
++#define RWNX_MAC_PATCH_BASE_NAME_8800DC "fmacfw_patch_8800dc_ipc"
++#else
++#define RWNX_MAC_PATCH_BASE_NAME_8800DC "fmacfw_patch_8800dc"
++#endif
++#define RWNX_MAC_PATCH_NAME2_8800DC RWNX_MAC_PATCH_BASE_NAME_8800DC".bin"
++#define RWNX_MAC_PATCH_NAME2_8800DC_U02 RWNX_MAC_PATCH_BASE_NAME_8800DC"_u02.bin"
++
++#define RWNX_MAC_CALIB_BASE_NAME_8800DC "fmacfw_calib_8800dc"
++#define RWNX_MAC_CALIB_NAME_8800DC_U02 RWNX_MAC_CALIB_BASE_NAME_8800DC"_u02.bin"
++
++#ifdef CONFIG_FOR_IPCAM
++#define RWNX_MAC_PATCH_TABLE_NAME_8800DC "fmacfw_patch_tbl_8800dc_ipc"
++#else
++#define RWNX_MAC_PATCH_TABLE_NAME_8800DC "fmacfw_patch_tbl_8800dc"
++#endif
++#define RWNX_MAC_PATCH_TABLE_8800DC RWNX_MAC_PATCH_TABLE_NAME_8800DC ".bin"
++#define RWNX_MAC_PATCH_TABLE_8800DC_U02 RWNX_MAC_PATCH_TABLE_NAME_8800DC "_u02.bin"
++
++
++#define RWNX_MAC_RF_PATCH_BASE_NAME_8800DC "fmacfw_rf_patch_8800dc"
++#define RWNX_MAC_RF_PATCH_NAME_8800DC RWNX_MAC_RF_PATCH_BASE_NAME_8800DC".bin"
++#define FW_USERCONFIG_NAME_8800DC "aic_userconfig_8800dc.txt"
++#define FW_USERCONFIG_NAME_8800DW "aic_userconfig_8800dw.txt"
++
++
++int rwnx_plat_bin_fw_upload_2(struct rwnx_hw *rwnx_hw, u32 fw_addr,
++ char *filename);
++int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw,
++ u32** buffer, const char *filename);
++void rwnx_plat_userconfig_parsing(char *buffer, int size);
++void rwnx_release_firmware_common(u32** buffer);
++
++typedef u32 (*array2_tbl_t)[2];
++
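++/*
++ * Masked system-config writes applied by system_config_8800dc(): each row is
++ * { register address, bit mask, value }, and an all-zero address terminates
++ * the table.
++ */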
++u32 syscfg_tbl_masked_8800dc[][3] = {
++ //#ifdef CONFIG_PMIC_SETTING
++ #if defined(CONFIG_VRF_DCDC_MODE)
++ {0x7000216C, (0x3 << 2), (0x1 << 2)}, // pmic_pmu_init
++ {0x700021BC, (0x3 << 2), (0x1 << 2)},
++ {0x70002118, ((0x7 << 4) | (0x1 << 7)), ((0x2 << 4) | (0x1 << 7))},
++ {0x70002104, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))},
++ {0x7000210C, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))},
++ {0x70002170, (0xF << 0), (0x1 << 0)},
++ {0x70002190, (0x3F << 0), (24 << 0)},
++ {0x700021CC, ((0x7 << 4) | (0x1 << 7)), ((0x0 << 4) | (0x0 << 7))},
++ {0x700010A0, (0x1 << 11), (0x1 << 11)},
++ {0x70001034, ((0x1 << 20) | (0x7 << 26)), ((0x0 << 20) | (0x2 << 26))},
++ {0x70001038, (0x1 << 8), (0x1 << 8)},
++ {0x70001094, (0x3 << 2), (0x0 << 2)},
++ {0x700021D0, ((0x1 << 5) | (0x1 << 6)), ((0x1 << 5) | (0x1 << 6))},
++ {0x70001000, ((0x1 << 0) | (0x1 << 20) | (0x1 << 22)),
++ ((0x1 << 0) | (0x1 << 20) | (0x0 << 22))},
++ {0x70001028, (0xf << 2), (0x1 << 2)},
++ #else
++ {0x7000216C, (0x3 << 2), (0x1 << 2)}, // pmic_pmu_init
++ {0x700021BC, (0x3 << 2), (0x1 << 2)},
++ {0x70002118, ((0x7 << 4) | (0x1 << 7)), ((0x2 << 4) | (0x1 << 7))},
++ {0x70002104, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))},
++ {0x7000210C, ((0x3F << 0) | (0x1 << 6)), ((0x2 << 0) | (0x1 << 6))},
++ {0x70002170, (0xF << 0), (0x1 << 0)},
++ {0x70002190, (0x3F << 0), (24 << 0)},
++ {0x700021CC, ((0x7 << 4) | (0x1 << 7)), ((0x0 << 4) | (0x0 << 7))},
++ {0x700010A0, (0x1 << 11), (0x1 << 11)},
++ {0x70001034, ((0x1 << 20) | (0x7 << 26)), ((0x0 << 20) | (0x2 << 26))},
++ {0x70001038, (0x1 << 8), (0x1 << 8)},
++ {0x70001094, (0x3 << 2), (0x0 << 2)},
++ {0x700021D0, ((0x1 << 5) | (0x1 << 6)), ((0x1 << 5) | (0x1 << 6))},
++ {0x70001000, ((0x1 << 0) | (0x1 << 20) | (0x1 << 22)),
++ ((0x0 << 0) | (0x1 << 20) | (0x0 << 22))},
++ #endif
++ //#endif /* CONFIG_PMIC_SETTING */
++ {0x00000000, 0x00000000, 0x00000000}, // last one
++};
++
++u32 syscfg_tbl_masked_8800dc_u01[][3] = {
++ //#ifdef CONFIG_PMIC_SETTING
++ {0x70001000, (0x1 << 16), (0x1 << 16)}, // for low temperature
++ {0x70001028, (0x1 << 6), (0x1 << 6)},
++ {0x70001000, (0x1 << 16), (0x0 << 16)},
++ //#endif /* CONFIG_PMIC_SETTING */
++};
++
++
++u32 syscfg_tbl_8800dc[][2] = {
++ {0x40500010, 0x00000004},
++ {0x40500010, 0x00000006},//160m clk
++};
++
++
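++/*
++ * Wifi-setting patch entries: { offset, value } pairs written relative to the
++ * wifisetting config base read back from the chip in aicwf_patch_config_8800dc().
++ */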
++u32 patch_tbl_wifisetting[][2] =
++{
++ #if !defined(CONFIG_FPGA_VERIFICATION)
++ {0x0090, 0x0013FC00}, //rx_ringbuf_start2
++ #endif
++#ifdef CONFIG_USB_TX_AGGR
++ {0x0100, 0x03021714}, //usb fc params(rx msg fc recover, rx msg fc trigger, wifi fc recover, wifi fc trigger)
++ {0x0120, 0x140A0100}, //usb agg tx params(total cnt, aggr cnt, out en, global out nak)
++#endif //CONFIG_USB_TX_AGGR
++ {0x00b0, 0xAD180100},
++};
++
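++/*
++ * ROM jump-table overrides used only for chip_sub_id 0: { table index, target
++ * address }, written to jump_tbl_addr + index * 4 by aicwf_patch_config_8800dc().
++ */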
++u32 jump_tbl[][2] =
++{
++ {296, 0x180001},
++ {137, 0x180011},
++ {303, 0x1810f9},
++ {168, 0x18186d},
++ {308, 0x181bbd},
++ {288, 0x1820c1},
++};
++
++
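++/*
++ * LDPC configuration RAM image, uploaded in 512-byte chunks to the address
++ * read from cfg_base + 0x8 by aicwf_patch_config_8800dc().
++ */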
++uint32_t ldpc_cfg_ram[] = {
++#if 0//def CONFIG_FPGA_VERIFICATION
++ 0x00363638,
++ 0x1DF8F834,
++ 0x1DF8F834,
++ 0x1DF8F834,
++ 0x1DF8F834,
++ 0x002F2F31,
++ 0x1DF8F82C,
++ 0x1DF8F82C,
++ 0x1DF8F82C,
++ 0x1DF8F82C,
++ 0x00363639,
++ 0x1AA5F834,
++ 0x1AA5F834,
++ 0x1ADEF834,
++ 0x1ADEF834,
++ 0x003A3A3E,
++ 0x1578F436,
++ 0x1578F436,
++ 0x1578F436,
++ 0x15B6F436,
++ 0x003B3B40,
++ 0x1DF8F838,
++ 0x1DF8F838,
++ 0x1DF8F838,
++ 0x1DF8F838,
++ 0x003B3B41,
++ 0x1DC4F838,
++ 0x1DC4F838,
++ 0x1DF8F838,
++ 0x1DF8F838,
++ 0x003B3B40,
++ 0x1781F838,
++ 0x1781F838,
++ 0x1781F838,
++ 0x17C4F838,
++ 0x003B3B40,
++ 0x0E81F838,
++ 0x0E81F838,
++ 0x0E81F838,
++ 0x0E82F838,
++ 0x003F3F43,
++ 0x1A92F83D,
++ 0x1A92F83E,
++ 0x1A92F83D,
++ 0x1ADDF83D,
++ 0x00272729,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F843,
++ 0x1DF8F843,
++ 0x00272729,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F842,
++ 0x1DF8F842,
++ 0x00262628,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x00252528,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x00262628,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x00242427,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x00232326,
++ 0x1DF8F821,
++ 0x1DF8F820,
++ 0x1DF8F820,
++ 0x1DF8F820,
++ 0x00262628,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x00242427,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x001F1F21,
++ 0x1DF8F81D,
++ 0x1DF8F81D,
++ 0x1DF8F81D,
++ 0x1DF8F81D,
++ 0x00262643,
++ 0x1DF8F822,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x1DF8F821,
++ 0x0018182B,
++ 0x1DF8F816,
++ 0x1DBDF815,
++ 0x1DF8F815,
++ 0x1DF8F815,
++ 0x0018182A,
++ 0x1195F836,
++ 0x1195F815,
++ 0x1195F815,
++ 0x1196F815,
++ 0x0028282C,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x0027272C,
++ 0x1DF8F824,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x1DF8F823,
++ 0x0082824A,
++ 0x1ADFF841,
++ 0x1ADDF822,
++ 0x1ADEF822,
++ 0x1ADFF822,
++ 0x003E3E40,
++ 0x09D1F81D,
++ 0x095BF81D,
++ 0x095BF81D,
++ 0x095BF81D,
++ 0x0029292D,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x0028282C,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x0029292D,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x0028282E,
++ 0x1DF8F825,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x0026262C,
++ 0x1DF8F823,
++ 0x1DF8F822,
++ 0x1DF8F822,
++ 0x1DF8F822,
++ 0x0028282D,
++ 0x1DF8F825,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x00282852,
++ 0x1DF8F827,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x1DF8F824,
++ 0x0029294E,
++ 0x1DF8F823,
++ 0x1DF8F822,
++ 0x1DF8F822,
++ 0x1DF8F822,
++ 0x00212143,
++ 0x1DF8F821,
++ 0x1DECF81D,
++ 0x1DF4F81D,
++ 0x1DF8F81D,
++ 0x0086864D,
++ 0x1CF0F844,
++ 0x1CEDF823,
++ 0x1CEFF822,
++ 0x1CF0F822,
++ 0x0047474D,
++ 0x1BE8F823,
++ 0x1BE8F823,
++ 0x1BE9F822,
++ 0x1BEAF822,
++ 0x0018182F,
++ 0x14B0F83C,
++ 0x14B0F814,
++ 0x14B0F814,
++ 0x14B0F814,
++ 0x00404040,
++ 0x0AE1F81E,
++ 0x0A61F81D,
++ 0x0A61F81D,
++ 0x0A61F81D,
++ 0x002C2C40,
++ 0x09555526,
++ 0x09555512,
++ 0x09555513,
++ 0x09555512,
++ 0x00181840,
++ 0x06333329,
++ 0x06333314,
++ 0x06333314,
++ 0x06333314,
++ 0x002B2B2F,
++ 0x1DF8F828,
++ 0x1DF8F828,
++ 0x1DF8F828,
++ 0x1DF8F828,
++ 0x002B2B32,
++ 0x1DF8F829,
++ 0x1DF8F828,
++ 0x1DF8F828,
++ 0x1DF8F828,
++ 0x002A2A2F,
++ 0x1DF8F827,
++ 0x1DF8F827,
++ 0x1DF8F827,
++ 0x1DF8F827,
++ 0x002A2A57,
++ 0x1DF8F82B,
++ 0x1DF8F827,
++ 0x1DF8F827,
++ 0x1DF8F827,
++ 0x00919152,
++ 0x1DF8F84B,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x004C4C51,
++ 0x1DF8F826,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x1DF8F825,
++ 0x00444440,
++ 0x0CF8F820,
++ 0x0C6EF81F,
++ 0x0C6EF81F,
++ 0x0C6EF81F,
++ 0x00424240,
++ 0x0D75753E,
++ 0x0D75751E,
++ 0x0D75751E,
++ 0x0D75751E,
++ 0x00191940,
++ 0x0539392E,
++ 0x05393914,
++ 0x05393914,
++ 0x05393914,
++ 0x002F2F32,
++ 0x1AA5F82C,
++ 0x1AA5F82C,
++ 0x1ADEF82C,
++ 0x1ADEF82C,
++ 0x002F2F40,
++ 0x0C6EDE2C,
++ 0x0C6EDE2C,
++ 0x0C6EDE2C,
++ 0x0C6EDE2C,
++ 0x00323240,
++ 0x053BB62E,
++ 0x053BB62E,
++ 0x053BB62E,
++ 0x053BB62E,
++ 0x00333339,
++ 0x1DC4F82F,
++ 0x1DC4F82F,
++ 0x1DF8F82F,
++ 0x1DF8F82F,
++ 0x00333340,
++ 0x0E81F82F,
++ 0x0E81F82F,
++ 0x0E81F82F,
++ 0x0E82F82F,
++ 0x00333340,
++ 0x063FC42F,
++ 0x063FC42F,
++ 0x063FC42F,
++ 0x063FC42F,
++ 0x00404040,
++ 0x063FC42F,
++ 0x063FC42F,
++ 0x063FC42F,
++ 0x063FC42F,
++ 0x00363640,
++ 0x0747DD33,
++ 0x0747DD33,
++ 0x0747DD33,
++ 0x0747DD33,
++ 0x00404040,
++ 0x0747DD33,
++ 0x0747DD33,
++ 0x0747DD33,
++ 0x0747DD33,
++ 0x00292940,
++ 0x07484825,
++ 0x07484812,
++ 0x07484812,
++ 0x07484812,
++ 0x00404040,
++ 0x07343428,
++ 0x07343414,
++ 0x07343414,
++ 0x07343414,
++ 0x00404040,
++ 0x0538382A,
++ 0x05383814,
++ 0x05383814,
++ 0x05383814,
++ 0x00404040,
++ 0x05292914,
++ 0x05292909,
++ 0x05292909,
++ 0x05292909,
++ 0x000B0B40,
++ 0x02111108,
++ 0x0211110E,
++ 0x02111108,
++ 0x02111108,
++ 0x00404040,
++ 0x063E3E2E,
++ 0x063E3E15,
++ 0x063E3E14,
++ 0x063E3E14,
++ 0x00404040,
++ 0x062E2E14,
++ 0x062E2E09,
++ 0x062E2E09,
++ 0x062E2E09,
++ 0x000B0B40,
++ 0x02131308,
++ 0x0213130F,
++ 0x02131308,
++ 0x02131308
++#else
++ 0x00767679,
++ 0x1DF8F870,
++ 0x1DF8F870,
++ 0x1DF8F870,
++ 0x1DF8F870,
++ 0x006E6E72,
++ 0x1DF8F869,
++ 0x1DF8F869,
++ 0x1DF8F869,
++ 0x1DF8F869,
++ 0x0076767B,
++ 0x1DF8F870,
++ 0x1DF8F870,
++ 0x1DF8F870,
++ 0x1DF8F870,
++ 0x007E7E85,
++ 0x1DF4F876,
++ 0x1DF4F876,
++ 0x1DF4F876,
++ 0x1DF8F876,
++ 0x0081818A,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x0081818D,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x0081818A,
++ 0x1DF8F87B,
++ 0x1DF8F87C,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x007E7E40,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x1DF8F87B,
++ 0x008B8B92,
++ 0x1DF8F887,
++ 0x1DF8F889,
++ 0x1DF8F887,
++ 0x1DF8F887,
++ 0x00515155,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F889,
++ 0x1DF8F889,
++ 0x00515154,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F888,
++ 0x1DF8F888,
++ 0x004F4F53,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x004F4F53,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x004F4F53,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x004E4E53,
++ 0x1DF8F849,
++ 0x1DF8F848,
++ 0x1DF8F848,
++ 0x1DF8F848,
++ 0x004D4D52,
++ 0x1DF8F847,
++ 0x1DF8F847,
++ 0x1DF8F847,
++ 0x1DF8F847,
++ 0x004F4F55,
++ 0x1DF8F84B,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x004E4E53,
++ 0x1DF8F849,
++ 0x1DF8F848,
++ 0x1DF8F848,
++ 0x1DF8F848,
++ 0x0049494D,
++ 0x1DF8F844,
++ 0x1DF8F844,
++ 0x1DF8F844,
++ 0x1DF8F844,
++ 0x0051518F,
++ 0x1DF8F849,
++ 0x1DF8F848,
++ 0x1DF8F848,
++ 0x1DF8F848,
++ 0x00424277,
++ 0x1DF8F83F,
++ 0x1DF8F83C,
++ 0x1DF8F83C,
++ 0x1DF8F83C,
++ 0x00424275,
++ 0x1DF8F89E,
++ 0x1DF8F83C,
++ 0x1DF8F83C,
++ 0x1DF8F83C,
++ 0x0055555C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x0053535C,
++ 0x1DF8F84C,
++ 0x1DF8F84B,
++ 0x1DF8F84B,
++ 0x1DF8F84B,
++ 0x00F8F89E,
++ 0x1DF8F88C,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x00898940,
++ 0x18F8F846,
++ 0x18CFF845,
++ 0x18CFF844,
++ 0x18CFF844,
++ 0x0056565F,
++ 0x1DF8F84F,
++ 0x1DF8F84F,
++ 0x1DF8F84F,
++ 0x1DF8F84F,
++ 0x0055555E,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x0056565F,
++ 0x1DF8F84F,
++ 0x1DF8F84F,
++ 0x1DF8F84F,
++ 0x1DF8F84F,
++ 0x00555561,
++ 0x1DF8F850,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x0053535F,
++ 0x1DF8F84D,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x0055555F,
++ 0x1DF8F84F,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x005555AA,
++ 0x1DF8F854,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x1DF8F84E,
++ 0x005959A6,
++ 0x1DF8F84D,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x004F4F9B,
++ 0x1DF8F84E,
++ 0x1DF8F846,
++ 0x1DF8F846,
++ 0x1DF8F846,
++ 0x00F8F8A5,
++ 0x1DF8F894,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x009898A4,
++ 0x1DF8F84D,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x00464686,
++ 0x1DF8F8B3,
++ 0x1DF8F83D,
++ 0x1DF8F83D,
++ 0x1DF8F83D,
++ 0x008E8E40,
++ 0x1AF8F848,
++ 0x1ADFF848,
++ 0x1ADFF846,
++ 0x1ADFF846,
++ 0x007F7F40,
++ 0x18D2D275,
++ 0x18D2D23A,
++ 0x18D2D23A,
++ 0x18D2D239,
++ 0x00454540,
++ 0x0F868664,
++ 0x0F86863E,
++ 0x0F86863D,
++ 0x0F86863D,
++ 0x005C5C64,
++ 0x1DF8F856,
++ 0x1DF8F855,
++ 0x1DF8F855,
++ 0x1DF8F855,
++ 0x005B5B68,
++ 0x1DF8F858,
++ 0x1DF8F855,
++ 0x1DF8F855,
++ 0x1DF8F855,
++ 0x005A5A64,
++ 0x1DF8F855,
++ 0x1DF8F854,
++ 0x1DF8F854,
++ 0x1DF8F854,
++ 0x005A5AB5,
++ 0x1DF8F85B,
++ 0x1DF8F855,
++ 0x1DF8F854,
++ 0x1DF8F854,
++ 0x00F8F8B0,
++ 0x1DF8F8A3,
++ 0x1DF8F852,
++ 0x1DF8F852,
++ 0x1DF8F852,
++ 0x00A4A4AE,
++ 0x1DF8F854,
++ 0x1DF8F852,
++ 0x1DF8F852,
++ 0x1DF8F852,
++ 0x009A9A40,
++ 0x1DF8F84E,
++ 0x1DF8F84D,
++ 0x1DF8F84C,
++ 0x1DF8F84C,
++ 0x009C9C40,
++ 0x1DF8F895,
++ 0x1DF8F849,
++ 0x1DF8F84A,
++ 0x1DF8F84A,
++ 0x00494940,
++ 0x1197976F,
++ 0x11979742,
++ 0x11979741,
++ 0x11979741,
++ 0x006E6E74,
++ 0x1DF8F869,
++ 0x1DF8F869,
++ 0x1DF8F869,
++ 0x1DF8F869,
++ 0x006E6E40,
++ 0x1ADEF869,
++ 0x1ADEF869,
++ 0x1ADEF869,
++ 0x1ADEF869,
++ 0x00757540,
++ 0x0D78F86E,
++ 0x0D78F86E,
++ 0x0D78F86E,
++ 0x0D79F86E,
++ 0x00787885,
++ 0x1DF8F873,
++ 0x1DF8F873,
++ 0x1DF8F873,
++ 0x1DF8F873,
++ 0x00787840,
++ 0x1DF8F873,
++ 0x1DF8F873,
++ 0x1DF8F873,
++ 0x1DF8F873,
++ 0x00787840,
++ 0x0E81F873,
++ 0x0E81F873,
++ 0x0E81F873,
++ 0x0E82F873,
++ 0x00404040,
++ 0x0E82F873,
++ 0x0E82F873,
++ 0x0E82F873,
++ 0x0E82F873,
++ 0x00818140,
++ 0x1092F87E,
++ 0x1092F87E,
++ 0x1092F87E,
++ 0x1092F87E,
++ 0x00404040,
++ 0x1092F87E,
++ 0x1092F87E,
++ 0x1092F87E,
++ 0x1092F87E,
++ 0x00737340,
++ 0x14B2B26B,
++ 0x14B2B235,
++ 0x14B2B235,
++ 0x14B2B235,
++ 0x00404040,
++ 0x0E828260,
++ 0x0E82823D,
++ 0x0E82823C,
++ 0x0E82823C,
++ 0x00404040,
++ 0x0F8B8B66,
++ 0x0F8B8B3F,
++ 0x0F8B8B3D,
++ 0x0F8B8B3D,
++ 0x00404040,
++ 0x0B68683D,
++ 0x0B68681E,
++ 0x0B68681E,
++ 0x0B68681E,
++ 0x00222240,
++ 0x06434318,
++ 0x06434329,
++ 0x06434318,
++ 0x06434318,
++ 0x00404040,
++ 0x129D9D72,
++ 0x129D9D43,
++ 0x129D9D41,
++ 0x129D9D41,
++ 0x00404040,
++ 0x0D757542,
++ 0x0D757520,
++ 0x0D757520,
++ 0x0D757520,
++ 0x00232340,
++ 0x084C4C19,
++ 0x084C4C2C,
++ 0x084C4C19,
++ 0x084C4C19
++#endif
++};
++
++
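++/*
++ * AGC configuration RAM image, uploaded in 512-byte chunks to the address
++ * read from cfg_base + 0xc by aicwf_patch_config_8800dc().
++ */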
++uint32_t agc_cfg_ram[] = {
++ 0x20000000,
++ 0x0400000E,
++ 0x3000200E,
++ 0x5B000000,
++ 0x0400004B,
++ 0x3000008E,
++ 0x32000000,
++ 0x0400007B,
++ 0x40000000,
++ 0xF8000026,
++ 0x04000011,
++ 0x4819008E,
++ 0x9C000020,
++ 0x08000191,
++ 0x38008000,
++ 0x0A000000,
++ 0x08104411,
++ 0x38018000,
++ 0x0C004641,
++ 0x08D00014,
++ 0x30000000,
++ 0x01000000,
++ 0x04000017,
++ 0x30000000,
++ 0x3C000000,
++ 0x0400001A,
++ 0x38020000,
++ 0x40000001,
++ 0x0800001D,
++ 0x3808008E,
++ 0x14000050,
++ 0x08000020,
++ 0x4000008E,
++ 0xA400007B,
++ 0x00000101,
++ 0x3000339F,
++ 0x41000700,
++ 0x04104420,
++ 0x90000000,
++ 0x49000000,
++ 0xF00E842F,
++ 0xEC0E842C,
++ 0xEC0E842C,
++ 0x04000032,
++ 0x30000000,
++ 0x48000101,
++ 0x04000032,
++ 0x30000000,
++ 0x48000202,
++ 0x04000032,
++ 0x30000000,
++ 0x46000000,
++ 0x04000011,
++ 0x58010006,
++ 0x3D040472,
++ 0xDC204439,
++ 0x081DD4D2,
++ 0x480A0006,
++ 0xDC2044DC,
++ 0x081DD43C,
++ 0x38050004,
++ 0x0EF1F1C3,
++ 0x342044DC,
++ 0x30000000,
++ 0x01000000,
++ 0x04000042,
++ 0x30000000,
++ 0x33000000,
++ 0x04104445,
++ 0x38008000,
++ 0x2200109C,
++ 0x08104448,
++ 0x38008000,
++ 0x23D4509C,
++ 0x08104417,
++ 0x9000A000,
++ 0x32000000,
++ 0x18000063,
++ 0x14000060,
++ 0x1C000051,
++ 0x10000057,
++ 0x38028000,
++ 0x0C000001,
++ 0x08D04466,
++ 0x3000200F,
++ 0x00000000,
++ 0x00000000,
++ 0x38030000,
++ 0x0C002601,
++ 0x08D0445A,
++ 0x30000000,
++ 0x3D020230,
++ 0x0400005D,
++ 0x30000000,
++ 0x3E000100,
++ 0x04000066,
++ 0x38028000,
++ 0x0C001601,
++ 0x34204466,
++ 0x38028000,
++ 0x0C000A01,
++ 0x34204466,
++ 0x38008004,
++ 0xFF000000,
++ 0x0800007B,
++ 0x3800802F,
++ 0x26000000,
++ 0x0800006C,
++ 0x380404AF,
++ 0x1F191010,
++ 0x0800006F,
++ 0x20000CAF,
++ 0x04000071,
++ 0x60000CAF,
++ 0x18700079,
++ 0x14000077,
++ 0x10000075,
++ 0x28140CAF,
++ 0x09B00084,
++ 0x280A0CAF,
++ 0x09B00084,
++ 0x28060CAF,
++ 0x09B00084,
++ 0x28048086,
++ 0x0800007D,
++ 0x38000086,
++ 0x22800000,
++ 0x04000080,
++ 0x30000000,
++ 0x0EF1F101,
++ 0x36004883,
++ 0x28020000,
++ 0x08000085,
++ 0x3802008E,
++ 0x3D040431,
++ 0x08000088,
++ 0x3805008E,
++ 0x1F241821,
++ 0x0800008B,
++ 0x3000008E,
++ 0xA0163021,
++ 0x0400008E,
++ 0x3000008E,
++ 0x0EF10012,
++ 0x34000091,
++ 0x300000CC,
++ 0x50000000,
++ 0x04000094,
++ 0x380095FE,
++ 0x32010000,
++ 0x04000097,
++ 0x50001FFE,
++ 0x5A010000,
++ 0x6DC9989B,
++ 0xFC19D4B9,
++ 0x30000186,
++ 0x3D840373,
++ 0x0400009E,
++ 0x3000008E,
++ 0x0A000000,
++ 0x040000A1,
++ 0x3000008E,
++ 0x22C00000,
++ 0x040000A4,
++ 0x9000028E,
++ 0x32010001,
++ 0x8E4000AA,
++ 0xC80000B0,
++ 0x00000000,
++ 0x00000000,
++ 0x3000008E,
++ 0x32010001,
++ 0x040000CB,
++ 0x3000008E,
++ 0x29000000,
++ 0x94045011,
++ 0x300019B6,
++ 0x32010000,
++ 0x040000B3,
++ 0x300019B6,
++ 0x3D040431,
++ 0x040000B6,
++ 0x300019B6,
++ 0x22800000,
++ 0x04000097,
++ 0x30000186,
++ 0x3D840473,
++ 0x040000BC,
++ 0x3000008E,
++ 0x29030000,
++ 0x040000BF,
++ 0x9AEE028E,
++ 0x32010100,
++ 0x7C0000C5,
++ 0xCC0000B0,
++ 0x080000B0,
++ 0x00000000,
++ 0x3000008E,
++ 0x32010100,
++ 0x040000C8,
++ 0x3000028E,
++ 0x29000000,
++ 0x94045011,
++ 0x5000038E,
++ 0x29000000,
++ 0x94045011,
++ 0xC0000035,
++ 0x38010006,
++ 0x3D040472,
++ 0x080000D2,
++ 0x30000004,
++ 0x0EF1F141,
++ 0x340000D5,
++ 0x28040004,
++ 0x080000D7,
++ 0x2808000E,
++ 0x080000D9,
++ 0x3000018E,
++ 0x0EF10052,
++ 0x340000DC,
++ 0x3000038E,
++ 0x29000000,
++ 0x94045011,
++ 0x38020000,
++ 0x32000000,
++ 0x080000E2,
++ 0x60000000,
++ 0xD80000E6,
++ 0xD40000E9,
++ 0x040000EC,
++ 0x30000000,
++ 0x0EF1F121,
++ 0x360048EF,
++ 0x30000000,
++ 0x0C002421,
++ 0x360048EF,
++ 0x30000000,
++ 0x0C000021,
++ 0x360048EF,
++ 0x28020000,
++ 0x0800007B,
++ 0x50001EFE,
++ 0x5A010000,
++ 0x6DC998F5,
++ 0xFC19D4F8,
++ 0x3000028E,
++ 0x32000040,
++ 0x040000FB,
++ 0x3AEE028E,
++ 0x32000080,
++ 0x040000FB,
++ 0x30000000,
++ 0x0EF1F101,
++ 0x360048FE,
++ 0x28020000,
++ 0x08000100,
++ 0x3802008E,
++ 0x3D040431,
++ 0x08000103,
++ 0x3805008E,
++ 0x1F241821,
++ 0x08000106,
++ 0x3000008E,
++ 0xA0163021,
++ 0x04000109,
++ 0x3000008E,
++ 0x0EF10012,
++ 0x3400010C,
++ 0x300014F6,
++ 0x32010000,
++ 0x04000114,
++ 0x20000000,
++ 0x04000111,
++ 0x300000EC,
++ 0x50000000,
++ 0x040000F1,
++ 0x300014F6,
++ 0x32030000,
++ 0x04000117,
++ 0x30001086,
++ 0x3D840473,
++ 0x0400011A,
++ 0x5000108E,
++ 0x22C00000,
++ 0x8E47C0CB,
++ 0xCB30011E,
++ 0x300019B6,
++ 0x32040000,
++ 0x04000121,
++ 0x300019B6,
++ 0x3D040431,
++ 0x04000124,
++ 0x300019B6,
++ 0x22800000,
++ 0x04000111,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x30000186,
++ 0x3D840473,
++ 0x0400012D,
++ 0x5000038E,
++ 0x29000000,
++ 0x94045011,
++ 0xC0000131,
++ 0x380C800E,
++ 0xFF000000,
++ 0x08000134,
++ 0x30000004,
++ 0x0FF1F103,
++ 0x34000137,
++ 0x28020000,
++ 0x08000139,
++ 0x3000038E,
++ 0x29000000,
++ 0x94045011,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x58010006,
++ 0x3D040472,
++ 0xDC204543,
++ 0x081DD4D2,
++ 0x480A0006,
++ 0xDC2044DC,
++ 0x081DD546,
++ 0x38050004,
++ 0x0EF1F141,
++ 0x342044DC,
++ 0x2802800E,
++ 0x080000DC,
++ 0x48000035,
++ 0x0400014A,
++ 0x7896638F,
++ 0x4100000F,
++ 0x8C00014F,
++ 0x080450C4,
++ 0x90104574,
++ 0x88C8620F,
++ 0xC000015A,
++ 0x90104574,
++ 0x08104554,
++ 0x94104557,
++ 0x3000628F,
++ 0x29000000,
++ 0x9404517A,
++ 0x3000638F,
++ 0x29000000,
++ 0x0410457A,
++ 0x3800E005,
++ 0x3D010131,
++ 0x0810455D,
++ 0xA832600F,
++ 0x90104574,
++ 0x08000154,
++ 0x94104557,
++ 0xC6104567,
++ 0xC4185563,
++ 0x5802E00F,
++ 0x0FEEEA07,
++ 0x80000174,
++ 0x3420456B,
++ 0x5802E00F,
++ 0x0EEEEA07,
++ 0x80000174,
++ 0x3420456B,
++ 0x30004000,
++ 0x33000001,
++ 0x0400016E,
++ 0x38034005,
++ 0x3D030373,
++ 0x08000171,
++ 0x30006007,
++ 0x33000000,
++ 0x04000174,
++ 0x3000608F,
++ 0x29000000,
++ 0x94045177,
++ 0x4000608F,
++ 0xA010457D,
++ 0x0410457A,
++ 0x3000608F,
++ 0x64000101,
++ 0x04104411,
++ 0x3000608F,
++ 0x64000101,
++ 0x04104580,
++ 0x3000618F,
++ 0x42000001,
++ 0x04000183,
++ 0x38028000,
++ 0x32000000,
++ 0x08104586,
++ 0x280A618F,
++ 0x08000188,
++ 0x480A618F,
++ 0xBC00018B,
++ 0x0800018E,
++ 0x3000618F,
++ 0x34000001,
++ 0x04000005,
++ 0x3000618F,
++ 0x34000000,
++ 0x04000008,
++ 0x3000008F,
++ 0x0EEAED0F,
++ 0x36000194,
++ 0x38038000,
++ 0x34000000,
++ 0x08000197,
++ 0x38028005,
++ 0x29010002,
++ 0x0800019A,
++ 0x3000028F,
++ 0x2200209C,
++ 0x0400019D,
++ 0x3000028F,
++ 0x23D4509C,
++ 0x040001A0,
++ 0x2814028F,
++ 0x080001A2,
++ 0x3000028F,
++ 0x43010201,
++ 0x040001A5,
++ 0x3000128F,
++ 0x32000100,
++ 0x040001A8,
++ 0x5AEE138F,
++ 0x4100000F,
++ 0x7C0001AC,
++ 0x080000F9,
++ 0x592C138F,
++ 0x29000000,
++ 0x8C0001B0,
++ 0x080000F9,
++ 0x2000138F,
++ 0x94045011,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000,
++ 0x00000000
++};
++
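++/*
++ * TX gain map (96 words), uploaded as one block to the address read from
++ * cfg_base + 0x10. In the non-FPGA table below the 96 entries are grouped as
++ * three blocks of 32 words (11b, high and low channel groups, see markers).
++ */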
++uint32_t txgain_map[96] = {
++#ifdef CONFIG_FPGA_VERIFICATION
++ 0x20c0c971,
++ 0x20c0c980,
++ 0x20c0c992,
++ 0x20c0c9a6,
++ 0x20c0c9bf,
++ 0x20c0caa5,
++ 0x20c0cabd,
++ 0x20c0cba0,
++ 0x20c0cbb6,
++ 0x20c0cbea,
++ 0x20c0ccc5,
++ 0x20c0cdac,
++ 0x20c0cdd0,
++ 0x20c0ceb2,
++ 0x20c0ceff,
++ 0x20c0cfff,
++ 0x20c0c922,
++ 0x20c0c922,
++ 0x20c0c922,
++ 0x20c0c922,
++ 0x20c0c922,
++ 0x20c0c922,
++ 0x20c0c922,
++ 0x20c0c927,
++ 0x20c0c92c,
++ 0x20c0c931,
++ 0x20c0c937,
++ 0x20c0c93f,
++ 0x20c0c946,
++ 0x20c0c94f,
++ 0x20c0c959,
++ 0x20c0c964,
++ 0x20c0cbee,
++ 0x20c0cce0,
++ 0x20c0ccff,
++ 0x20c0cde2,
++ 0x20c0cdfe,
++ 0x20c0cede,
++ 0x20c0cefc,
++ 0x20c0cfd9,
++ 0x20c0cff8,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c98c,
++ 0x20c0ca79,
++ 0x20c0ca89,
++ 0x20c0cb74,
++ 0x20c0cb84,
++ 0x20c0cb94,
++ 0x20c0cba8,
++ 0x20c0cbbb,
++ 0x20c0cbd2,
++ 0x20c0cbee,
++ 0x20c0cce0,
++ 0x20c0ccff,
++ 0x20c0cde2,
++ 0x20c0cdfe,
++ 0x20c0cede,
++ 0x20c0cefc,
++ 0x20c0cfd9,
++ 0x20c0cff8,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0cfff,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c97c,
++ 0x20c0c98c,
++ 0x20c0ca79,
++ 0x20c0ca89,
++ 0x20c0cb74,
++ 0x20c0cb84,
++ 0x20c0cb94,
++ 0x20c0cba8,
++ 0x20c0cbbb,
++ 0x20c0cbd2,
++#else
++ //11b
++ 0x00ffd780,
++ 0x00ffd872,
++ 0x00ffd880,
++ 0x00ffd972,
++ 0x00ffd980,
++ 0x00ffda75,
++ 0x00ffda86,
++ 0x00ffdb77,
++ 0x00ffdb86,
++ 0x00ffdc78,
++ 0x00ffdc89,
++ 0x00ffdd79,
++ 0x00ffdd89,
++ 0x00ffde83,
++ 0x00ffdf79,
++ 0x00ffdf8b,
++ 0x00ffd072,
++ 0x00ffd072,
++ 0x00ffd080,
++ 0x00ffd172,
++ 0x00ffd180,
++ 0x00ffd272,
++ 0x00ffd280,
++ 0x00ffd36d,
++ 0x00ffd379,
++ 0x00ffd46d,
++ 0x00ffd479,
++ 0x00ffd572,
++ 0x00ffd580,
++ 0x00ffd672,
++ 0x00ffd680,
++ 0x00ffd772,
++ //high
++ 0x00ffc87d,
++ 0x00ffc88b,
++ 0x00ffc979,
++ 0x00ffc989,
++ 0x00ffca7d,
++ 0x00ffca88,
++ 0x00ffcc5e,
++ 0x00ffcc69,
++ 0x00ffcc78,
++ 0x00ffcc85,
++ 0x00ffcd70,
++ 0x00ffcd80,
++ 0x00ffce70,
++ 0x00ffce80,
++ 0x00ffcf7d,
++ 0x00ffcf90,
++ 0x00ffc080,
++ 0x00ffc090,
++ 0x00ffc180,
++ 0x00ffc190,
++ 0x00ffc27b,
++ 0x00ffc28b,
++ 0x00ffc37b,
++ 0x00ffc390,
++ 0x00ffc485,
++ 0x00ffc495,
++ 0x00ffc579,
++ 0x00ffc589,
++ 0x00ffc679,
++ 0x00ffc689,
++ 0x00ffc780,
++ 0x00ffc790,
++ //low
++ 0x00ffc87d,
++ 0x00ffc88b,
++ 0x00ffc979,
++ 0x00ffc989,
++ 0x00ffca7d,
++ 0x00ffca88,
++ 0x00ffcc5e,
++ 0x00ffcc69,
++ 0x00ffcc78,
++ 0x00ffcc85,
++ 0x00ffcd70,
++ 0x00ffcd80,
++ 0x00ffce70,
++ 0x00ffce80,
++ 0x00ffce93,
++ 0x00ffcf90,
++ 0x00ffc080,
++ 0x00ffc090,
++ 0x00ffc180,
++ 0x00ffc190,
++ 0x00ffc27b,
++ 0x00ffc28b,
++ 0x00ffc37b,
++ 0x00ffc390,
++ 0x00ffc485,
++ 0x00ffc495,
++ 0x00ffc579,
++ 0x00ffc589,
++ 0x00ffc679,
++ 0x00ffc689,
++ 0x00ffc780,
++ 0x00ffc790,
++#endif
++};
++
++u32 patch_tbl_func[][2] =
++{
++ {0x00110054, 0x0018186D}, // same as jump_tbl idx 168
++ {0x0011005C, 0x0018186D}, // same as jump_tbl idx 168
++};
++
++
++u32 patch_tbl_rf_func[][2] =
++{
++ {0x00110bf0, 0x00180001},
++};
++
++
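++/*
++ * 2.4 GHz TX gain tables for 8800DC/DW: two u32 words per gain index, 16
++ * indexes per table, pushed to firmware via rwnx_send_rf_config_req() in
++ * aicwf_set_rf_config_8800dc() (128 bytes per table).
++ */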
++u32 wifi_txgain_table_24g_8800dcdw[32] =
++{
++ 0xA4B22189, //index 0
++ 0x00007825,
++ 0xA4B2214B, //index 1
++ 0x00007825,
++ 0xA4B2214F, //index 2
++ 0x00007825,
++ 0xA4B221D5, //index 3
++ 0x00007825,
++ 0xA4B221DC, //index 4
++ 0x00007825,
++ 0xA4B221E5, //index 5
++ 0x00007825,
++ 0xAC9221E5, //index 6
++ 0x00006825,
++ 0xAC9221EF, //index 7
++ 0x00006825,
++ 0xBC9221EE, //index 8
++ 0x00006825,
++ 0xBC9221FF, //index 9
++ 0x00006825,
++ 0xBC9221FF, //index 10
++ 0x00004025,
++ 0xB792203F, //index 11
++ 0x00004026,
++ 0xDC92203F, //index 12
++ 0x00004025,
++ 0xE692203F, //index 13
++ 0x00004025,
++ 0xFF92203F, //index 14
++ 0x00004035,
++ 0xFFFE203F, //index 15
++ 0x00004832
++};
++
++u32 wifi_txgain_table_24g_1_8800dcdw[32] =
++{
++ 0x090E2011, //index 0
++ 0x00004001,
++ 0x090E2015, //index 1
++ 0x00004001,
++ 0x090E201B, //index 2
++ 0x00004001,
++ 0x110E2018, //index 3
++ 0x00004001,
++ 0x110E201E, //index 4
++ 0x00004001,
++ 0x110E2023, //index 5
++ 0x00004001,
++ 0x190E2021, //index 6
++ 0x00004001,
++ 0x190E202B, //index 7
++ 0x00004001,
++ 0x210E202B, //index 8
++ 0x00004001,
++ 0x230E2027, //index 9
++ 0x00004001,
++ 0x230E2031, //index 10
++ 0x00004001,
++ 0x240E2039, //index 11
++ 0x00004001,
++ 0x260E2039, //index 12
++ 0x00004001,
++ 0x2E0E203F, //index 13
++ 0x00004001,
++ 0x368E203F, //index 14
++ 0x00004001,
++ 0x3EF2203F, //index 15
++ 0x00004001
++};
++
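++/*
++ * 2.4 GHz RX gain tables (20 MHz and 40 MHz): four u32 words per gain index,
++ * 16 indexes each, pushed via rwnx_send_rf_config_req() (256 bytes per table).
++ */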
++u32 wifi_rxgain_table_24g_20m_8800dcdw[64] = {
++ 0x82f282d1,//index 0
++ 0x9591a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x42f282d1,//index 1
++ 0x95923524,
++ 0x80808419,
++ 0x000000f0,
++ 0x22f282d1,//index 2
++ 0x9592c724,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282d1,//index 3
++ 0x9591a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x06f282d1,//index 4
++ 0x9591a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x0ef29ad1,//index 5
++ 0x9591a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x0ef29ad3,//index 6
++ 0x95923524,
++ 0x80808419,
++ 0x000000f0,
++ 0x0ef29ad7,//index 7
++ 0x9595a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282d2,//index 8
++ 0x95951124,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282f4,//index 9
++ 0x95951124,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282e6,//index 10
++ 0x9595a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282e6,//index 11
++ 0x9599a324,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282e6,//index 12
++ 0x959da324,
++ 0x80808419,
++ 0x000000f0,
++ 0x02f282e6,//index 13
++ 0x959f5924,
++ 0x80808419,
++ 0x000000f0,
++ 0x06f282e6,//index 14
++ 0x959f5924,
++ 0x80808419,
++ 0x000000f0,
++ 0x0ef29ae6,//index 15
++ 0x959f592c,//////0x959f5924, //loft [35:34]=3
++ 0x80808419,
++ 0x000000f0
++};
++
++u32 wifi_rxgain_table_24g_40m_8800dcdw[64] = {
++ 0x83428151,//index 0
++ 0x9631a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x43428151,//index 1
++ 0x96323528,
++ 0x80808419,
++ 0x000000f0,
++ 0x23428151,//index 2
++ 0x9632c728,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428151,//index 3
++ 0x9631a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x07429951,//index 4
++ 0x9631a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x0f42d151,//index 5
++ 0x9631a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x0f42d153,//index 6
++ 0x96323528,
++ 0x80808419,
++ 0x000000f0,
++ 0x0f42d157,//index 7
++ 0x9635a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428152,//index 8
++ 0x96351128,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428174,//index 9
++ 0x96351128,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428166,//index 10
++ 0x9635a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428166,//index 11
++ 0x9639a328,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428166,//index 12
++ 0x963da328,
++ 0x80808419,
++ 0x000000f0,
++ 0x03428166,//index 13
++ 0x963f5928,
++ 0x80808419,
++ 0x000000f0,
++ 0x07429966,//index 14
++ 0x963f5928,
++ 0x80808419,
++ 0x000000f0,
++ 0x0f42d166,//index 15
++ 0x963f5928,
++ 0x80808419,
++ 0x000000f0
++};
++
++//adap test
++u32 adaptivity_patch_tbl[][2] = {
++ {0x000C, 0x0000320A}, //linkloss_thd
++ {0x009C, 0x00000000}, //ac_param_conf
++ {0x0128, 0xF6140001}, //tx_adaptivity_en
++};
++//adap test
++
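++/*
++ * Load a patch-table binary and apply it: the first word holds the describe
++ * base address, followed by a 124-byte description string, then { address,
++ * value } pairs (from byte offset 128 onwards) written one by one to the chip.
++ */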
++int aicwf_patch_table_load(struct rwnx_hw *rwnx_hw, char *filename)
++{
++ int err = 0;
++ unsigned int i = 0, size;
++ u32 *dst = NULL;
++ u8 *describe;
++ u32 fmacfw_patch_tbl_8800dc_u02_describe_size = 124;
++ u32 fmacfw_patch_tbl_8800dc_u02_describe_base;//read from patch_tbl
++
++ /* Copy the file on the Embedded side */
++ AICWFDBG(LOGINFO, "### Upload %s \n", filename);
++
++ size = rwnx_request_firmware_common(rwnx_hw, &dst, filename);
++ if (!dst) {
++ AICWFDBG(LOGERROR, "No such file or directory\n");
++ return -1;
++ }
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ rwnx_release_firmware_common(&dst);
++ return -1;
++ }
++
++ AICWFDBG(LOGINFO, "tbl size = %d \n",size);
++
++ fmacfw_patch_tbl_8800dc_u02_describe_base = dst[0];
++ AICWFDBG(LOGINFO, "FMACFW_PATCH_TBL_8800DC_U02_DESCRIBE_BASE = %x \n",fmacfw_patch_tbl_8800dc_u02_describe_base);
++
++ if (!err && (i < size)) {
++ err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fmacfw_patch_tbl_8800dc_u02_describe_base, fmacfw_patch_tbl_8800dc_u02_describe_size + 4, dst);
++ if(err){
++ printk("write describe information fail \n");
++ }
++
++ /* Allocate one extra byte so the description prints as a NUL-terminated string. */
++ describe = kzalloc(fmacfw_patch_tbl_8800dc_u02_describe_size + 1, GFP_KERNEL);
++ if (describe) {
++ memcpy(describe, &dst[1], fmacfw_patch_tbl_8800dc_u02_describe_size);
++ AICWFDBG(LOGINFO, "%s", describe);
++ kfree(describe);
++ describe = NULL;
++ }
++ }
++
++ if (!err && (i < size)) {
++ for (i = (128 / 4); i < (size / 4); i += 2) {
++ AICWFDBG(LOGDEBUG, "patch_tbl: %x %x\n", dst[i], dst[i+1]);
++ err = rwnx_send_dbg_mem_write_req(rwnx_hw, dst[i], dst[i+1]);
++ }
++ if (err) {
++ /* i may have run past the end of the table here, so do not print dst[i] */
++ AICWFDBG(LOGERROR, "patch_tbl upload fail, err:%d\r\n", err);
++ }
++ }
++
++ if (dst) {
++ rwnx_release_firmware_common(&dst);
++ }
++
++ return err;
++
++}
++
++//adap test
++extern int get_adap_test(void);
++//adap test
++
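++/*
++ * Apply the RAM patch set for 8800DC. The firmware publishes a pointer table
++ * at cfg_base (0x10164): +0x0 wifisetting, +0x4 jump table (chip_sub_id 0
++ * only), +0x8 LDPC, +0xc AGC, +0x10 TX gain, +0x14 misc RAM. Each config
++ * block is then written to the address read back from that table.
++ */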
++void aicwf_patch_config_8800dc(struct rwnx_hw *rwnx_hw)
++{
++ #ifdef CONFIG_ROM_PATCH_EN
++ int ret = 0;
++ int cnt = 0;
++
++//adap test
++ int adap_test = 0;
++ int adap_patch_num = 0;
++
++ adap_test = get_adap_test();
++//adap test
++
++ if (testmode == 0) {
++ const u32 cfg_base = 0x10164;
++ struct dbg_mem_read_cfm cfm;
++ int i;
++ u32 wifisetting_cfg_addr;
++ u32 ldpc_cfg_addr;
++ u32 agc_cfg_addr;
++ u32 txgain_cfg_addr;
++ u32 jump_tbl_addr = 0;
++
++ u32 patch_tbl_wifisetting_num = sizeof(patch_tbl_wifisetting)/sizeof(u32)/2;
++ u32 ldpc_cfg_size = sizeof(ldpc_cfg_ram);
++ u32 agc_cfg_size = sizeof(agc_cfg_ram);
++ u32 txgain_cfg_size = sizeof(txgain_map);
++ u32 jump_tbl_size = 0;
++ u32 patch_tbl_func_num = 0;
++
++ array2_tbl_t jump_tbl_base = NULL;
++ array2_tbl_t patch_tbl_func_base = NULL;
++
++ if (chip_sub_id == 0) {
++ jump_tbl_base = jump_tbl;
++ jump_tbl_size = sizeof(jump_tbl)/2;
++ patch_tbl_func_base = patch_tbl_func;
++ patch_tbl_func_num = sizeof(patch_tbl_func)/sizeof(u32)/2;
++ }
++
++ //struct dbg_mem_read_cfm cfm;
++ //int i;
++
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base, &cfm))) {
++ AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base, ret);
++ }
++ wifisetting_cfg_addr = cfm.memdata;
++
++ if(chip_sub_id == 0){
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 4, &cfm))) {
++ AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 4, ret);
++ }
++ jump_tbl_addr = cfm.memdata;
++ }
++
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 8, &cfm))) {
++ AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 8, ret);
++ }
++ ldpc_cfg_addr = cfm.memdata;
++
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0xc, &cfm))) {
++ AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 0xc, ret);
++ }
++ agc_cfg_addr = cfm.memdata;
++
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x10, &cfm))) {
++ AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", cfg_base + 0x10, ret);
++ }
++ txgain_cfg_addr = cfm.memdata;
++
++ AICWFDBG(LOGINFO, "wifisetting_cfg_addr=%x, ldpc_cfg_addr=%x, agc_cfg_addr=%x, txgain_cfg_addr=%x\n", wifisetting_cfg_addr, ldpc_cfg_addr, agc_cfg_addr, txgain_cfg_addr);
++
++ for (cnt = 0; cnt < patch_tbl_wifisetting_num; cnt++) {
++ if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, wifisetting_cfg_addr + patch_tbl_wifisetting[cnt][0], patch_tbl_wifisetting[cnt][1]))) {
++ AICWFDBG(LOGERROR, "wifisetting %x write fail\n", patch_tbl_wifisetting[cnt][0]);
++ }
++ }
++
++//adap test
++ if(adap_test){
++ adap_patch_num = sizeof(adaptivity_patch_tbl)/sizeof(u32)/2;
++ for(cnt = 0; cnt < adap_patch_num; cnt++)
++ {
++ if((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, wifisetting_cfg_addr + adaptivity_patch_tbl[cnt][0], adaptivity_patch_tbl[cnt][1]))) {
++ AICWFDBG(LOGERROR, "%x write fail\n", wifisetting_cfg_addr + adaptivity_patch_tbl[cnt][0]);
++ }
++ }
++ }
++//adap test
++
++ if (ldpc_cfg_size > 512) {// > 0.5KB data
++ for (i = 0; i < (ldpc_cfg_size - 512); i += 512) {//each time write 0.5KB
++ ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ldpc_cfg_addr + i, 512, ldpc_cfg_ram + i / 4);
++ if (ret) {
++ AICWFDBG(LOGERROR, "ldpc upload fail: %x, err:%d\r\n", ldpc_cfg_addr + i, ret);
++ break;
++ }
++ }
++ }
++
++ if (!ret && (i < ldpc_cfg_size)) {// < 0.5KB data
++ ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, ldpc_cfg_addr + i, ldpc_cfg_size - i, ldpc_cfg_ram + i / 4);
++ if (ret) {
++ AICWFDBG(LOGERROR, "ldpc upload fail: %x, err:%d\r\n", ldpc_cfg_addr + i, ret);
++ }
++ }
++
++ if (agc_cfg_size > 512) {// > 0.5KB data
++ for (i = 0; i < (agc_cfg_size - 512); i += 512) {//each time write 0.5KB
++ ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, agc_cfg_addr + i, 512, agc_cfg_ram + i / 4);
++ if (ret) {
++ AICWFDBG(LOGERROR, "agc upload fail: %x, err:%d\r\n", agc_cfg_addr + i, ret);
++ break;
++ }
++ }
++ }
++
++ if (!ret && (i < agc_cfg_size)) {// < 0.5KB data
++ ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, agc_cfg_addr + i, agc_cfg_size - i, agc_cfg_ram + i / 4);
++ if (ret) {
++ AICWFDBG(LOGERROR, "agc upload fail: %x, err:%d\r\n", agc_cfg_addr + i, ret);
++ }
++ }
++
++ #if !defined(CONFIG_FPGA_VERIFICATION)
++ ret = rwnx_send_dbg_mem_block_write_req(rwnx_hw, txgain_cfg_addr, txgain_cfg_size, txgain_map);
++ if (ret) {
++ AICWFDBG(LOGERROR, "txgain upload fail: %x, err:%d\r\n", txgain_cfg_addr, ret);
++ }
++
++ if(chip_sub_id == 0 ){
++ for (cnt = 0; cnt < jump_tbl_size/4; cnt+=1) {
++ AICWFDBG(LOGDEBUG, "%x = %x\n", jump_tbl_base[cnt][0]*4+jump_tbl_addr, jump_tbl_base[cnt][1]);
++ if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, jump_tbl_base[cnt][0]*4+jump_tbl_addr, jump_tbl_base[cnt][1]))) {
++ AICWFDBG(LOGERROR, "%x write fail\n", jump_tbl_addr+8*cnt);
++ }
++ }
++ for (cnt = 0; cnt < patch_tbl_func_num; cnt++) {
++ if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, patch_tbl_func_base[cnt][0], patch_tbl_func_base[cnt][1]))) {
++ AICWFDBG(LOGERROR, "patch_tbl_func %x write fail\n", patch_tbl_func_base[cnt][0]);
++ }
++ }
++ }
++ else{
++ ret = aicwf_patch_table_load(rwnx_hw, RWNX_MAC_PATCH_TABLE_8800DC_U02);
++ if(ret){
++ printk("patch_tbl upload fail: err:%d\r\n", ret);
++ }
++#ifdef CONFIG_FOR_IPCAM
++ if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, 0x00111944, 0x00000101))) {
++ AICWFDBG(LOGERROR, "patch_tbl_func %x write fail\n", patch_tbl_func_base[cnt][0]);
++ }
++#endif
++ }
++
++ #endif
++ } else {
++ if (chip_sub_id == 0) {
++ u32 patch_tbl_rf_func_num = sizeof(patch_tbl_rf_func)/sizeof(u32)/2;
++ for (cnt = 0; cnt < patch_tbl_rf_func_num; cnt++) {
++ if ((ret = rwnx_send_dbg_mem_write_req(rwnx_hw, patch_tbl_rf_func[cnt][0], patch_tbl_rf_func[cnt][1]))) {
++ AICWFDBG(LOGERROR, "patch_tbl_rf_func %x write fail\n", patch_tbl_rf_func[cnt][0]);
++ }
++ }
++ }
++ }
++ #endif
++}
++
++
++
++int aicwf_set_rf_config_8800dc(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm){
++ int ret = 0;
++
++ if ((ret = rwnx_send_txpwr_lvl_req(rwnx_hw))) {
++ return -1;
++ }
++
++ if ((ret = rwnx_send_txpwr_ofst_req(rwnx_hw))) {
++ return -1;
++ }
++
++
++ if (testmode == FW_NORMAL_MODE) {
++ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 1, (u8_l *)wifi_txgain_table_24g_8800dcdw, 128)))
++ return -1;
++
++ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 16, 1, (u8_l *)wifi_txgain_table_24g_1_8800dcdw, 128)))
++ return -1;
++
++ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 0, 0, (u8_l *)wifi_rxgain_table_24g_20m_8800dcdw, 256)))
++ return -1;
++
++ if ((ret = rwnx_send_rf_config_req(rwnx_hw, 32, 0, (u8_l *)wifi_rxgain_table_24g_40m_8800dcdw, 256)))
++ return -1;
++
++ if ((ret = rwnx_send_rf_calib_req(rwnx_hw, cfm))) {
++ return -1;
++ }
++ } else if (testmode == FW_RFTEST_MODE) {
++#ifdef CONFIG_DPD
++ if (is_file_exist(FW_DPDRESULT_NAME_8800DC) == 1) {
++ AICWFDBG(LOGINFO, "%s load dpd bin\n", __func__);
++ ret = aicwf_dpd_result_load_8800dc(rwnx_hw);
++ if (ret) {
++ AICWFDBG(LOGINFO, "load dpd bin fail: %d\n", ret);
++ return ret;
++ }
++ ret = rwnx_send_rf_calib_req(rwnx_hw, cfm);
++ if (ret) {
++ AICWFDBG(LOGINFO, "rf calib req fail: %d\n", ret);
++ return ret;
++ }
++ }
++#endif
++ }
++
++ return 0;
++}
++
++extern char aic_fw_path[200];
++
++int aicwf_plat_patch_load_8800dc(struct rwnx_hw *rwnx_hw){
++ int ret = 0;
++
++ if (testmode == 0 || testmode == 4) {
++#if !defined(CONFIG_FPGA_VERIFICATION)
++ if (chip_sub_id == 0) {
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_PATCH_ADDR, RWNX_MAC_PATCH_NAME2_8800DC);
++ } else if (chip_sub_id == 1) {
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_PATCH_ADDR, RWNX_MAC_PATCH_NAME2_8800DC_U02);
++ } else {
++ printk("unsupported id: %d\n", chip_sub_id);
++ }
++#endif
++ } else {
++ if (chip_sub_id == 0) {
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_PATCH_ADDR, RWNX_MAC_RF_PATCH_NAME_8800DC);
++ }
++ if (!ret) {
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, RAM_LMAC_FW_ADDR, RWNX_MAC_FW_RF_BASE_NAME_8800DC);
++ }
++ }
++
++ return ret;
++}
++
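++/*
++ * Zero-initialise the 12-byte RF misc RAM block whose address is published at
++ * cfg_base + 0x14.
++ */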
++int aicwf_misc_ram_init_8800dc(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++ const uint32_t cfg_base = 0x10164;
++ struct dbg_mem_read_cfm cfm;
++ uint32_t misc_ram_addr;
++ uint32_t misc_ram_size = 12;
++ int i;
++ // init misc ram
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret);
++ return ret;
++ }
++ misc_ram_addr = cfm.memdata;
++ AICWFDBG(LOGERROR, "misc_ram_addr=%x\n", misc_ram_addr);
++ for (i = 0; i < (misc_ram_size / 4); i++) {
++ ret = rwnx_send_dbg_mem_write_req(rwnx_hw, misc_ram_addr + i * 4, 0);
++ if (ret) {
++ AICWFDBG(LOGERROR, "rf misc ram[0x%x] wr fail: %d\n", misc_ram_addr + i * 4, ret);
++ return ret;
++ }
++ }
++ return ret;
++}
++
++#ifdef CONFIG_DPD
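++/*
++ * Run DPD calibration: load the patch and calibration firmware, start it with
++ * rwnx_send_dbg_start_app_req(), then read DPD_RESULT_SIZE_8800DC bytes of
++ * results back from the misc RAM area into dpd_res.
++ */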
++int aicwf_dpd_calib_8800dc(struct rwnx_hw *rwnx_hw, uint32_t *dpd_res)
++{
++ int ret = 0;
++ uint32_t fw_addr, boot_type;
++ ret = aicwf_plat_patch_load_8800dc(rwnx_hw);
++ if (ret) {
++ AICWFDBG(LOGINFO, "load patch bin fail: %d\n", ret);
++ return ret;
++ }
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, ROM_FMAC_CALIB_ADDR, RWNX_MAC_CALIB_NAME_8800DC_U02);
++ if (ret) {
++ AICWFDBG(LOGINFO, "load calib bin fail: %d\n", ret);
++ return ret;
++ }
++ /* fw start */
++ fw_addr = 0x00130009;
++ boot_type = HOST_START_APP_FNCALL;
++ AICWFDBG(LOGINFO, "Start app: %08x, %d\n", fw_addr, boot_type);
++ ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, boot_type);
++ if (ret) {
++ AICWFDBG(LOGINFO, "start app fail: %d\n", ret);
++ return ret;
++ }
++ { // read dpd res
++ const uint32_t cfg_base = 0x10164;
++ struct dbg_mem_read_cfm cfm;
++ uint32_t misc_ram_addr;
++ uint32_t misc_ram_size = DPD_RESULT_SIZE_8800DC;
++ int i;
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret);
++ return ret;
++ }
++ misc_ram_addr = cfm.memdata;
++ for (i = 0; i < (misc_ram_size / 4); i++) {
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, misc_ram_addr + i * 4, &cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", misc_ram_addr + i * 4, ret);
++ return ret;
++ }
++ dpd_res[i] = cfm.memdata;
++ }
++ }
++ return ret;
++}
++
++int aicwf_dpd_result_load_8800dc(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++ uint32_t cfg_base = 0x10164;
++ struct dbg_mem_read_cfm cfm;
++ uint32_t misc_ram_addr;
++ if (testmode == FW_RFTEST_MODE) {
++ cfg_base = RAM_LMAC_FW_ADDR + 0x0164;
++ }
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, cfg_base + 0x14, &cfm))) {
++ AICWFDBG(LOGERROR, "rf misc ram[0x%x] rd fail: %d\n", cfg_base + 0x14, ret);
++ return ret;
++ }
++ misc_ram_addr = cfm.memdata;
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw, misc_ram_addr, FW_DPDRESULT_NAME_8800DC);
++ if (ret) {
++ AICWFDBG(LOGINFO, "load calib bin fail: %d\n", ret);
++ return ret;
++ }
++ return ret;
++}
++
++#define FW_PATH_MAX_LEN 200
++extern char aic_fw_path[FW_PATH_MAX_LEN];
++
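++/*
++ * Persist a DPD calibration result buffer as
++ * <aic_fw_path>/FW_DPDRESULT_NAME_8800DC so that aicwf_dpd_result_load_8800dc()
++ * can reload it instead of recalibrating.
++ */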
++int aicwf_dpd_result_write_8800dc(void *buf, int buf_len)
++{
++ AICWFDBG(LOGINFO, "%s\n", __func__);
++ int sum = 0, len = 0;
++ char *path = NULL;
++ struct file *fp = NULL;
++ loff_t pos = 0;
++ mm_segment_t fs;
++
++ path = __getname();
++ if (!path) {
++ AICWFDBG(LOGINFO, "get path fail\n");
++ return -1;
++ }
++
++ len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s", aic_fw_path, FW_DPDRESULT_NAME_8800DC);
++ //AICWFDBG(LOGINFO, "%s\n", path);
++
++ fp = filp_open(path, O_RDWR | O_APPEND | O_CREAT, 0644);
++ if (IS_ERR(fp)) {
++ AICWFDBG(LOGINFO, "fp open fial\n");
++ __putname(path);
++ fp = NULL;
++ return -1;
++ }
++
++ fs = get_fs();
++ set_fs(KERNEL_DS);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
++ sum = kernel_write(fp, buf, buf_len, &pos);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ sum = kernel_write(fp, (char *)buf, buf_len, pos);
++#else
++ sum = vfs_write(fp, (char *)buf, buf_len, &pos);
++#endif
++
++ set_fs(fs);
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++
++ return 0;
++}
++#endif
++
++int rwnx_plat_userconfig_load_8800dc(struct rwnx_hw *rwnx_hw){
++ int size;
++ u32 *dst=NULL;
++ char *filename = FW_USERCONFIG_NAME_8800DC;
++
++ AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename);
++
++ /* load file */
++ size = rwnx_request_firmware_common(rwnx_hw, &dst, filename);
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ dst = NULL;
++ return 0;
++ }
++
++ /* Copy the file on the Embedded side */
++ AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size);
++
++ rwnx_plat_userconfig_parsing((char *)dst, size);
++
++ rwnx_release_firmware_common(&dst);
++
++ AICWFDBG(LOGINFO, "userconfig download complete\n\n");
++ return 0;
++
++}
++
++int rwnx_plat_userconfig_load_8800dw(struct rwnx_hw *rwnx_hw){
++ int size;
++ u32 *dst=NULL;
++ char *filename = FW_USERCONFIG_NAME_8800DW;
++
++ AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename);
++
++ /* load file */
++ size = rwnx_request_firmware_common(rwnx_hw, &dst, filename);
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ dst = NULL;
++ return 0;
++ }
++
++ /* Copy the file on the Embedded side */
++ AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size);
++
++ rwnx_plat_userconfig_parsing((char *)dst, size);
++
++ rwnx_release_firmware_common(&dst);
++
++ AICWFDBG(LOGINFO, "userconfig download complete\n\n");
++ return 0;
++
++}
++
++
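++/*
++ * Early system configuration: read the chip ID registers to set chip_id,
++ * chip_mcu_id and chip_sub_id, then apply syscfg_tbl_8800dc plus the masked
++ * write tables (with the u01-specific entries when chip_sub_id == 0).
++ */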
++void system_config_8800dc(struct rwnx_hw *rwnx_hw){
++ int syscfg_num;
++ int ret, cnt;
++ const u32 mem_addr = 0x40500000;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x rd fail: %d\n", mem_addr, ret);
++ return;
++ }
++ chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16);
++ //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ if (((rd_mem_addr_cfm.memdata >> 25) & 0x01UL) == 0x00UL) {
++ chip_mcu_id = 1;
++ }
++
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x00000020, &rd_mem_addr_cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "[0x00000020] rd fail: %d\n", ret);
++ return;
++ }
++ chip_sub_id = (u8)(rd_mem_addr_cfm.memdata);
++ //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ AICWFDBG(LOGINFO, "chip_id=%x, chip_sub_id=%x\n", chip_id, chip_sub_id);
++
++
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x40500010, &rd_mem_addr_cfm);
++ printk("[0x40500010]=%x\n", rd_mem_addr_cfm.memdata);
++ if (ret) {
++ printk("[0x40500010] rd fail: %d\n", ret);
++ return;
++ }
++
++ syscfg_num = sizeof(syscfg_tbl_8800dc) / sizeof(u32) / 2;
++
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl_8800dc[cnt][0], syscfg_tbl_8800dc[cnt][1]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl_8800dc[cnt][0], ret);
++ return;
++ }
++ }
++
++ syscfg_num = sizeof(syscfg_tbl_masked_8800dc) / sizeof(u32) / 3;
++
++
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ if (syscfg_tbl_masked_8800dc[cnt][0] == 0x00000000) {
++ break;
++ } else if (syscfg_tbl_masked_8800dc[cnt][0] == 0x70001000) {
++ if (chip_mcu_id == 0) {
++ syscfg_tbl_masked_8800dc[cnt][1] |= ((0x1 << 8) | (0x1 << 15)); // mask
++ syscfg_tbl_masked_8800dc[cnt][2] |= ((0x1 << 8) | (0x1 << 15));
++ }
++ }
++
++ ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw,
++ syscfg_tbl_masked_8800dc[cnt][0], syscfg_tbl_masked_8800dc[cnt][1], syscfg_tbl_masked_8800dc[cnt][2]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x mask write fail: %d\n", syscfg_tbl_masked_8800dc[cnt][0], ret);
++ return;
++ }
++ }
++
++ if (chip_sub_id == 0) {
++ syscfg_num = sizeof(syscfg_tbl_masked_8800dc_u01) / sizeof(u32) / 3;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw,
++ syscfg_tbl_masked_8800dc_u01[cnt][0], syscfg_tbl_masked_8800dc_u01[cnt][1], syscfg_tbl_masked_8800dc_u01[cnt][2]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x mask write fail: %d\n", syscfg_tbl_masked_8800dc_u01[cnt][0], ret);
++ return;
++ }
++ }
++ }
++
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_compat_8800dc.h
+@@ -0,0 +1,16 @@
++#include <linux/types.h>
++
++#define DPD_RESULT_SIZE_8800DC 1880
++int aicwf_patch_table_load(struct rwnx_hw *rwnx_hw, char *filename);
++void aicwf_patch_config_8800dc(struct rwnx_hw *rwnx_hw);
++int aicwf_set_rf_config_8800dc(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm);
++int aicwf_misc_ram_init_8800dc(struct rwnx_hw *rwnx_hw);
++#ifdef CONFIG_DPD
++int aicwf_dpd_calib_8800dc(struct rwnx_hw *rwnx_hw, uint32_t *dpd_res);
++int aicwf_dpd_result_load_8800dc(struct rwnx_hw *rwnx_hw);
++int aicwf_dpd_result_write_8800dc(void *buf, int buf_len);
++#endif
++int aicwf_plat_patch_load_8800dc(struct rwnx_hw *rwnx_hw);
++int rwnx_plat_userconfig_load_8800dc(struct rwnx_hw *rwnx_hw);
++int rwnx_plat_userconfig_load_8800dw(struct rwnx_hw *rwnx_hw);
++void system_config_8800dc(struct rwnx_hw *rwnx_hw);
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_debug.h
+@@ -0,0 +1,52 @@
++
++
++#define RWNX_FN_ENTRY_STR ">>> %s()\n", __func__
++
++
++
++/* message levels */
++#define LOGERROR 0x0001
++#define LOGINFO 0x0002
++#define LOGTRACE 0x0004
++#define LOGDEBUG 0x0008
++#define LOGDATA 0x0010
++
++extern int aicwf_dbg_level;
++void rwnx_data_dump(char* tag, void* data, unsigned long len);
++
++#define AICWF_LOG "AICWFDBG("
++
++#define AICWFDBG(level, args, arg...) \
++do { \
++ if (aicwf_dbg_level & level) { \
++ printk(AICWF_LOG#level")\t" args, ##arg); \
++ } \
++} while (0)
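++/* Typical usage: AICWFDBG(LOGINFO, "firmware size=%d\n", size); */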
++
++#define RWNX_DBG(fmt, ...) \
++do { \
++ if (aicwf_dbg_level & LOGTRACE) { \
++ printk(AICWF_LOG"LOGTRACE)\t"fmt , ##__VA_ARGS__); \
++ } \
++} while (0)
++
++
++
++#if 0
++#define RWNX_DBG(fmt, ...) \
++ do { \
++ if (aicwf_dbg_level & LOGTRACE) { \
++ printk(AICWF_LOG"LOGTRACE"")\t" fmt, ##__VA_ARGS__); \
++ } \
++ } while (0)
++#define AICWFDBG(args, level) \
++do { \
++ if (aicwf_dbg_level & level) { \
++ printk(AICWF_LOG"(%s)\t" ,#level); \
++ printf args; \
++ } \
++} while (0)
++#endif
++
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_rx_prealloc.h
+@@ -0,0 +1,27 @@
++
++#ifndef _AICWF_RX_PREALLOC_H_
++#define _AICWF_RX_PREALLOC_H_
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++
++struct rx_buff {
++ struct list_head queue;
++ unsigned char *data;
++ u32 len;
++ uint8_t *start;
++ uint8_t *end;
++ uint8_t *read;
++};
++
++struct aicwf_rx_buff_list {
++ struct list_head rxbuff_list;
++ atomic_t rxbuff_list_len;
++};
++
++extern struct rx_buff *aicwf_prealloc_rxbuff_alloc(spinlock_t *lock);
++extern void aicwf_prealloc_rxbuff_free(struct rx_buff *rxbuff, spinlock_t *lock);
++extern int aicwf_prealloc_init(void);
++extern void aicwf_prealloc_exit(void);
++extern int aicwf_rxbuff_size_get(void);
++#endif /* CONFIG_PREALLOC_RX_SKB */
++#endif /* _AICWF_RX_PREALLOC_H_ */
+\ No newline at end of file
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_sdio.c
+@@ -0,0 +1,1251 @@
++/**
++ * aicwf_sdio.c
++ *
++ * SDIO bus interface implementation
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++#include <linux/interrupt.h>
++#include <linux/completion.h>
++#include <linux/mmc/sdio.h>
++#include <linux/mmc/sdio_ids.h>
++#include <linux/mmc/sdio_func.h>
++#include <linux/mmc/card.h>
++#include <linux/mmc/host.h>
++#include <linux/semaphore.h>
++#include <linux/debugfs.h>
++#include <linux/kthread.h>
++#include "aicwf_txrxif.h"
++#include "aicwf_sdio.h"
++#include "sdio_host.h"
++#include "rwnx_defs.h"
++#include "rwnx_platform.h"
++#ifdef CONFIG_INGENIC_T20
++#include "mach/jzmmc.h"
++#endif /* CONFIG_INGENIC_T20 */
++extern uint8_t scanning;
++
++#ifdef CONFIG_PLATFORM_ALLWINNER
++void platform_wifi_power_off(void);
++#endif
++
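++/* Single-byte register accessors; the SDIO host is claimed around each transfer. */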
++int aicwf_sdio_readb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 *val)
++{
++ int ret;
++ sdio_claim_host(sdiodev->func);
++ *val = sdio_readb(sdiodev->func, regaddr, &ret);
++ sdio_release_host(sdiodev->func);
++ return ret;
++}
++
++int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val)
++{
++ int ret;
++ sdio_claim_host(sdiodev->func);
++ sdio_writeb(sdiodev->func, val, regaddr, &ret);
++ sdio_release_host(sdiodev->func);
++ return ret;
++}
++
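++/* Poll the flow-control register until the firmware reports free buffer credits; returns the credit count, or a non-positive value after FLOW_CTRL_RETRY_COUNT attempts. */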
++int aicwf_sdio_flow_ctrl(struct aic_sdio_dev *sdiodev)
++{
++ int ret = -1;
++ u8 fc_reg = 0;
++ u32 count = 0;
++
++ while (true) {
++ ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_FLOW_CTRL_REG, &fc_reg);
++ if (ret) {
++ return -1;
++ }
++
++ if ((fc_reg & SDIOWIFI_FLOWCTRL_MASK_REG) != 0) {
++ ret = fc_reg & SDIOWIFI_FLOWCTRL_MASK_REG;
++ return ret;
++ } else {
++ if (count >= FLOW_CTRL_RETRY_COUNT) {
++ ret = -fc_reg;
++ break;
++ }
++ count++;
++ if (count < 30)
++ udelay(200);
++ else if(count < 40)
++ mdelay(1);
++ else
++ mdelay(10);
++ }
++ }
++
++ return ret;
++}
++
++int aicwf_sdio_send_pkt(struct aic_sdio_dev *sdiodev, u8 *buf, uint count)
++{
++ int ret = 0;
++
++ sdio_claim_host(sdiodev->func);
++ ret = sdio_writesb(sdiodev->func, 7, buf, count);
++ sdio_release_host(sdiodev->func);
++
++ return ret;
++}
++
++int aicwf_sdio_recv_pkt(struct aic_sdio_dev *sdiodev, struct sk_buff *skbbuf,
++ u32 size)
++{
++ int ret;
++
++ if ((!skbbuf) || (!size)) {
++ return -EINVAL;
++ }
++
++ sdio_claim_host(sdiodev->func);
++ ret = sdio_readsb(sdiodev->func, skbbuf->data, 8, size);
++ sdio_release_host(sdiodev->func);
++
++ if (ret < 0) {
++ return ret;
++ }
++ skbbuf->len = size;
++
++ return ret;
++}
++
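++/* Probe: only SDIO function 1 is accepted; allocate the bus/sdiodev state, init the function and bus, then signal that the host interface is ready. */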
++static int aicwf_sdio_probe(struct sdio_func *func,
++ const struct sdio_device_id *id)
++{
++ struct mmc_host *host;
++ struct aic_sdio_dev *sdiodev;
++ struct aicwf_bus *bus_if;
++ int err = -ENODEV;
++
++ sdio_dbg("%s:%d\n", __func__, func->num);
++ host = func->card->host;
++ if(func->num != 1) {
++ return err;
++ }
++
++ bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_KERNEL);
++ if (!bus_if) {
++ sdio_err("alloc bus fail\n");
++ return -ENOMEM;
++ }
++
++ sdiodev = kzalloc(sizeof(struct aic_sdio_dev), GFP_KERNEL);
++ if (!sdiodev) {
++ sdio_err("alloc sdiodev fail\n");
++ kfree(bus_if);
++ return -ENOMEM;
++ }
++
++ sdiodev->func = func;
++ sdiodev->bus_if = bus_if;
++ bus_if->bus_priv.sdio = sdiodev;
++ dev_set_drvdata(&func->dev, bus_if);
++ sdiodev->dev = &func->dev;
++ err = aicwf_sdio_func_init(sdiodev);
++ if (err < 0) {
++ sdio_err("sdio func init fail\n");
++ goto fail;
++ }
++
++ aicwf_sdio_bus_init(sdiodev);
++ host->caps |= MMC_CAP_NONREMOVABLE;
++ aicwf_rwnx_sdio_platform_init(sdiodev);
++ aicwf_hostif_ready();
++
++ return 0;
++fail:
++ aicwf_sdio_func_deinit(sdiodev);
++ dev_set_drvdata(&func->dev, NULL);
++ kfree(sdiodev);
++ kfree(bus_if);
++ return err;
++}
++
++static void aicwf_sdio_remove(struct sdio_func *func)
++{
++ struct mmc_host *host;
++ struct aicwf_bus *bus_if = NULL;
++ struct aic_sdio_dev *sdiodev = NULL;
++
++ sdio_dbg("%s\n", __func__);
++ host = func->card->host;
++ host->caps &= ~MMC_CAP_NONREMOVABLE;
++ bus_if = dev_get_drvdata(&func->dev);
++ if (!bus_if) {
++ return;
++ }
++
++ sdiodev = bus_if->bus_priv.sdio;
++ if (!sdiodev) {
++ return;
++ }
++ sdiodev->bus_if->state = BUS_DOWN_ST;
++ aicwf_sdio_release(sdiodev);
++ aicwf_sdio_func_deinit(sdiodev);
++ dev_set_drvdata(&sdiodev->func->dev, NULL);
++ kfree(sdiodev);
++ kfree(bus_if);
++ sdio_dbg("%s done\n", __func__);
++#ifdef CONFIG_PLATFORM_ALLWINNER
++ platform_wifi_power_off();
++#endif
++}
++
++static int aicwf_sdio_suspend(struct device *dev)
++{
++ int ret = 0;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ mmc_pm_flag_t sdio_flags;
++ struct rwnx_vif *rwnx_vif, *tmp;
++
++ sdio_dbg("%s\n", __func__);
++ list_for_each_entry_safe(rwnx_vif, tmp, &sdiodev->rwnx_hw->vifs, list) {
++ if(rwnx_vif->ndev)
++ netif_device_detach(rwnx_vif->ndev);
++ }
++
++ sdio_flags = sdio_get_host_pm_caps(sdiodev->func);
++ if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
++ return -EINVAL;
++ }
++ ret = sdio_set_host_pm_flags(sdiodev->func, MMC_PM_KEEP_POWER);
++ if (ret) {
++ return ret;
++ }
++
++ while (sdiodev->state == SDIO_ACTIVE_ST) {
++ if (down_interruptible(&sdiodev->tx_priv->txctl_sema)){
++ continue;
++ }
++ aicwf_sdio_pwr_stctl(sdiodev, SDIO_SLEEP_ST);
++ up(&sdiodev->tx_priv->txctl_sema);
++ break;
++ }
++
++ return 0;
++}
++
++static int aicwf_sdio_resume(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ struct rwnx_vif *rwnx_vif, *tmp;
++
++ sdio_dbg("%s\n", __func__);
++ list_for_each_entry_safe(rwnx_vif, tmp, &sdiodev->rwnx_hw->vifs, list) {
++ if(rwnx_vif->ndev)
++ netif_device_attach(rwnx_vif->ndev);
++ }
++
++ return 0;
++}
++
++static const struct sdio_device_id aicwf_sdmmc_ids[] = {
++ {SDIO_DEVICE_CLASS(SDIO_CLASS_WLAN)},
++ { },
++};
++
++MODULE_DEVICE_TABLE(sdio, aicwf_sdmmc_ids);
++
++static const struct dev_pm_ops aicwf_sdio_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(aicwf_sdio_suspend, aicwf_sdio_resume)
++};
++
++static struct sdio_driver aicwf_sdio_driver = {
++ .probe = aicwf_sdio_probe,
++ .remove = aicwf_sdio_remove,
++ .name = AICWF_SDIO_NAME,
++ .id_table = aicwf_sdmmc_ids,
++ .drv = {
++ .pm = &aicwf_sdio_pm_ops,
++ },
++};
++
++#ifdef CONFIG_NANOPI_M4
++ extern int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq);
++ extern unsigned aic_max_freqs;
++ extern struct mmc_host* aic_host_drv;
++ extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
++ extern void mmc_release_host(struct mmc_host *host);
++#endif
++#ifdef CONFIG_PLATFORM_ALLWINNER
++extern void sunxi_mmc_rescan_card(unsigned ids);
++extern void sunxi_wlan_set_power(int on);
++extern int sunxi_wlan_get_bus_index(void);
++
++int platform_wifi_power_on(void)
++{
++ int ret=0;
++ int wlan_bus_index=sunxi_wlan_get_bus_index();
++ if(wlan_bus_index < 0)
++ return wlan_bus_index;
++
++ sunxi_wlan_set_power(1);
++ mdelay(1000);
++ sunxi_mmc_rescan_card(wlan_bus_index);
++
++ printk("platform_wifi_power_on");
++
++ return ret;
++}
++
++void platform_wifi_power_off(void)
++{
++ int wlan_bus_index = sunxi_wlan_get_bus_index();
++ if(wlan_bus_index < 0) {
++ printk("no wlan_bus_index\n");
++ return ;
++ }
++ printk("power_off\n");
++ sunxi_wlan_set_power(0);
++ mdelay(100);
++ //sunxi_mmc_rescan_card(wlan_bus_index);
++
++ printk("platform_wifi_power_off");
++}
++#endif
++void aicwf_sdio_register(void)
++{
++#ifdef CONFIG_PLATFORM_NANOPI
++ extern_wifi_set_enable(0);
++ mdelay(200);
++ extern_wifi_set_enable(1);
++ mdelay(200);
++ sdio_reinit();
++#endif /*CONFIG_PLATFORM_NANOPI*/
++
++#ifdef CONFIG_INGENIC_T20
++ jzmmc_manual_detect(1, 1);
++#endif /* CONFIG_INGENIC_T20 */
++
++#ifdef CONFIG_NANOPI_M4
++ if(aic_host_drv->card == NULL){
++ __mmc_claim_host(aic_host_drv,NULL);
++ printk("aic: >>>mmc_rescan_try_freq\n");
++ mmc_rescan_try_freq(aic_host_drv,aic_max_freqs);
++ mmc_release_host(aic_host_drv);
++ }
++#endif
++#ifdef CONFIG_PLATFORM_ALLWINNER
++ platform_wifi_power_on();
++#endif
++ if (sdio_register_driver(&aicwf_sdio_driver)) {
++
++ } else {
++ //may add mmc_rescan here
++ }
++}
++
++void aicwf_sdio_exit(void)
++{
++ if(g_rwnx_plat && g_rwnx_plat->enabled)
++ rwnx_platform_deinit(g_rwnx_plat->sdiodev->rwnx_hw);
++
++ sdio_unregister_driver(&aicwf_sdio_driver);
++
++#ifdef CONFIG_PLATFORM_NANOPI
++ extern_wifi_set_enable(0);
++#endif /*CONFIG_PLATFORM_NANOPI*/
++ kfree(g_rwnx_plat);
++}
++
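++/* Wake the chip from sleep: write the wakeup register and poll bit 4 of SDIOWIFI_SLEEP_REG with bounded retries before marking the bus active. */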
++int aicwf_sdio_wakeup(struct aic_sdio_dev *sdiodev)
++{
++ int ret = 0;
++ int read_retry;
++ int write_retry = 20;
++
++ if (sdiodev->state == SDIO_SLEEP_ST) {
++ down(&sdiodev->pwrctl_wakeup_sema);
++ if (sdiodev->rwnx_hw->vif_started) {
++ if(sdiodev->state == SDIO_ACTIVE_ST) {
++ up(&sdiodev->pwrctl_wakeup_sema);
++ return 0;
++ }
++ sdio_dbg("w\n");
++ while(write_retry) {
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_WAKEUP_REG, 1);
++ if (ret) {
++ txrx_err("sdio wakeup fail\n");
++ ret = -1;
++ } else {
++ read_retry=10;
++ while (read_retry) {
++ u8 val;
++ ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_SLEEP_REG, &val);
++ if (ret==0 && val&0x10) {
++ break;
++ }
++ read_retry--;
++ udelay(200);
++ }
++ if(read_retry != 0)
++ break;
++ }
++ sdio_dbg("write retry: %d \n",write_retry);
++ write_retry--;
++ udelay(100);
++ }
++ }
++
++ sdiodev->state = SDIO_ACTIVE_ST;
++ aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration);
++ up(&sdiodev->pwrctl_wakeup_sema);
++ }
++ return ret;
++}
++
++extern u8 dhcped;
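++/* Allow sleep only when no AP/P2P role is active, no scan is running and DHCP has completed; otherwise keep the power-control timer armed. */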
++int aicwf_sdio_sleep_allow(struct aic_sdio_dev *sdiodev)
++{
++ int ret = 0;
++ struct aicwf_bus *bus_if = sdiodev->bus_if;
++ struct rwnx_hw *rwnx_hw = sdiodev->rwnx_hw;
++ struct rwnx_vif *rwnx_vif, *tmp;
++
++ if (bus_if->state == BUS_DOWN_ST) {
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_SLEEP_REG, 0x10);
++ if (ret) {
++ sdio_err("Write sleep fail!\n");
++ }
++ aicwf_sdio_pwrctl_timer(sdiodev, 0);
++ return ret;
++ }
++
++ list_for_each_entry_safe(rwnx_vif, tmp, &rwnx_hw->vifs, list) {
++ if((rwnx_hw->avail_idx_map & BIT(rwnx_vif->drv_vif_index)) == 0) {
++ switch(RWNX_VIF_TYPE(rwnx_vif)) {
++ case NL80211_IFTYPE_P2P_CLIENT:
++ rwnx_hw->is_p2p_alive = 1;
++ return ret;
++ case NL80211_IFTYPE_AP:
++ return ret;
++ case NL80211_IFTYPE_P2P_GO:
++ rwnx_hw->is_p2p_alive = 1;
++ return ret;
++ default:
++ break;
++ }
++ }
++ }
++
++ sdio_info("sleep: %d, %d\n", sdiodev->state, scanning);
++ if (sdiodev->state == SDIO_ACTIVE_ST && !scanning && !rwnx_hw->is_p2p_alive \
++ && !rwnx_hw->is_p2p_connected && dhcped) {
++ down(&sdiodev->pwrctl_wakeup_sema);
++ if (rwnx_hw->vif_started) {
++ sdio_dbg("s\n");
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_SLEEP_REG, 0x10);
++ if (ret)
++ sdio_err("Write sleep fail!\n");
++ }
++ sdiodev->state = SDIO_SLEEP_ST;
++ aicwf_sdio_pwrctl_timer(sdiodev, 0);
++ up(&sdiodev->pwrctl_wakeup_sema);
++ }
++ else{
++ aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration);
++ }
++
++ return ret;
++}
++
++int aicwf_sdio_pwr_stctl(struct aic_sdio_dev *sdiodev, uint target)
++{
++ int ret = 0;
++
++ if (sdiodev->bus_if->state == BUS_DOWN_ST) {
++ return -1;
++ }
++
++ if (sdiodev->state == target) {
++ if (target == SDIO_ACTIVE_ST) {
++ aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration);
++ }
++ return ret;
++ }
++
++ switch (target) {
++ case SDIO_ACTIVE_ST:
++ aicwf_sdio_wakeup(sdiodev);
++ break;
++ case SDIO_SLEEP_ST:
++ aicwf_sdio_sleep_allow(sdiodev);
++ break;
++ }
++
++ return ret;
++}
++
++int aicwf_sdio_txpkt(struct aic_sdio_dev *sdiodev, struct sk_buff *pkt)
++{
++ int ret = 0;
++ u8 *frame;
++ u32 len = 0;
++ struct aicwf_bus *bus_if = dev_get_drvdata(sdiodev->dev);
++
++ if (bus_if->state == BUS_DOWN_ST) {
++ sdio_dbg("tx bus is down!\n");
++ return -EINVAL;
++ }
++
++ frame = (u8 *) (pkt->data);
++ len = pkt->len;
++ len = (len + SDIOWIFI_FUNC_BLOCKSIZE - 1) / SDIOWIFI_FUNC_BLOCKSIZE * SDIOWIFI_FUNC_BLOCKSIZE;
++ ret = aicwf_sdio_send_pkt(sdiodev, pkt->data, len);
++ if (ret)
++ sdio_err("aicwf_sdio_send_pkt fail%d\n", ret);
++
++ return ret;
++}
++
++static int aicwf_sdio_intr_get_len_bytemode(struct aic_sdio_dev *sdiodev, u8 *byte_len)
++{
++ int ret = 0;
++
++ if (!byte_len)
++ return -EBADE;
++
++ if (sdiodev->bus_if->state == BUS_DOWN_ST) {
++ *byte_len = 0;
++ } else {
++ ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_BYTEMODE_LEN_REG, byte_len);
++ sdiodev->rx_priv->data_len = (*byte_len)*4;
++ }
++
++ return ret;
++}
++
++static void aicwf_sdio_bus_stop(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ int ret;
++
++ aicwf_sdio_pwrctl_timer(sdiodev, 0);
++ if(timer_pending(&sdiodev->rwnx_hw->p2p_alive_timer)){
++ ret = del_timer(&sdiodev->rwnx_hw->p2p_alive_timer);}
++ sdio_dbg("%s\n",__func__);
++ if (sdiodev->pwrctl_tsk) {
++ complete(&sdiodev->pwrctrl_trgg);
++ kthread_stop(sdiodev->pwrctl_tsk);
++ sdiodev->pwrctl_tsk = NULL;
++ }
++
++ sdio_dbg("%s:pwrctl stopped\n",__func__);
++
++ bus_if->state = BUS_DOWN_ST;
++ ret = down_interruptible(&sdiodev->tx_priv->txctl_sema);
++ if (ret)
++ sdio_err("down txctl_sema fail\n");
++
++ aicwf_sdio_pwr_stctl(sdiodev, SDIO_SLEEP_ST);
++ if (!ret)
++ up(&sdiodev->tx_priv->txctl_sema);
++ aicwf_frame_queue_flush(&sdiodev->tx_priv->txq);
++ sdio_dbg("exit %s\n",__func__);
++}
++
++struct sk_buff *aicwf_sdio_readframes(struct aic_sdio_dev *sdiodev)
++{
++ int ret = 0;
++ u32 size = 0;
++ struct sk_buff *skb = NULL;
++ struct aicwf_bus *bus_if = dev_get_drvdata(sdiodev->dev);
++
++ if (bus_if->state == BUS_DOWN_ST) {
++ sdio_dbg("bus down\n");
++ return NULL;
++ }
++
++ size = sdiodev->rx_priv->data_len;
++ skb = __dev_alloc_skb(size, GFP_KERNEL);
++ if (!skb) {
++ return NULL;
++ }
++
++ ret = aicwf_sdio_recv_pkt(sdiodev, skb, size);
++ if (ret) {
++ dev_kfree_skb(skb);
++ skb = NULL;
++ }
++
++ return skb;
++}
++
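++/* Send the pending command buffer: pad to TX_ALIGNMENT and block size, wait for flow-control credits, then push it as one block transfer. */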
++static int aicwf_sdio_tx_msg(struct aic_sdio_dev *sdiodev)
++{
++ int err = 0;
++ u16 len;
++ u8 *payload = sdiodev->tx_priv->cmd_buf;
++ u16 payload_len = sdiodev->tx_priv->cmd_len;
++ u8 adjust_str[4] = {0, 0, 0, 0};
++ int adjust_len = 0;
++ int buffer_cnt = 0;
++ u8 retry = 0;
++
++ len = payload_len;
++ if ((len % TX_ALIGNMENT) != 0) {
++ adjust_len = roundup(len, TX_ALIGNMENT);
++ memcpy(payload+payload_len, adjust_str, (adjust_len - len));
++ payload_len += (adjust_len - len);
++ }
++ len = payload_len;
++
++ //link tail is necessary
++ if ((len % SDIOWIFI_FUNC_BLOCKSIZE) != 0) {
++ memset(payload+payload_len, 0, TAIL_LEN);
++ payload_len += TAIL_LEN;
++ len = (payload_len/SDIOWIFI_FUNC_BLOCKSIZE + 1) * SDIOWIFI_FUNC_BLOCKSIZE;
++ } else{
++ len = payload_len;
++ }
++ buffer_cnt = aicwf_sdio_flow_ctrl(sdiodev);
++ while ((buffer_cnt <= 0 || (buffer_cnt > 0 && len > (buffer_cnt * BUFFER_SIZE))) && retry < 5) {
++ retry++;
++ buffer_cnt = aicwf_sdio_flow_ctrl(sdiodev);
++ }
++
++ down(&sdiodev->tx_priv->cmd_txsema);
++ if (buffer_cnt > 0 && len <= (buffer_cnt * BUFFER_SIZE)) {
++ err = aicwf_sdio_send_pkt(sdiodev, payload, len);
++ if (err) {
++ sdio_err("aicwf_sdio_send_pkt fail%d\n", err);
++ }
++ } else {
++ sdio_err("tx msg fc retry fail\n");
++ up(&sdiodev->tx_priv->cmd_txsema);
++ return -1;
++ }
++
++ sdiodev->tx_priv->cmd_txstate = false;
++ if (!err)
++ sdiodev->tx_priv->cmd_tx_succ= true;
++ else
++ sdiodev->tx_priv->cmd_tx_succ= false;
++
++ up(&sdiodev->tx_priv->cmd_txsema);
++
++ return err;
++}
++
++static void aicwf_sdio_tx_process(struct aic_sdio_dev *sdiodev)
++{
++ int err = 0;
++
++ if (sdiodev->bus_if->state == BUS_DOWN_ST) {
++ sdio_err("Bus is down\n");
++ return;
++ }
++
++ aicwf_sdio_pwr_stctl(sdiodev, SDIO_ACTIVE_ST);
++
++ //config
++ sdio_info("send cmd\n");
++ if (sdiodev->tx_priv->cmd_txstate) {
++ if (down_interruptible(&sdiodev->tx_priv->txctl_sema)) {
++ txrx_err("txctl down bus->txctl_sema fail\n");
++ return;
++ }
++ if (sdiodev->state != SDIO_ACTIVE_ST) {
++ txrx_err("state err\n");
++ up(&sdiodev->tx_priv->txctl_sema);
++ txrx_err("txctl up bus->txctl_sema fail\n");
++ return;
++ }
++
++ err = aicwf_sdio_tx_msg(sdiodev);
++ up(&sdiodev->tx_priv->txctl_sema);
++ if (waitqueue_active(&sdiodev->tx_priv->cmd_txdone_wait))
++ wake_up(&sdiodev->tx_priv->cmd_txdone_wait);
++ }
++
++ //data
++ sdio_info("send data\n");
++ if (down_interruptible(&sdiodev->tx_priv->txctl_sema)) {
++ txrx_err("txdata down bus->txctl_sema\n");
++ return;
++ }
++
++ if (sdiodev->state != SDIO_ACTIVE_ST) {
++ txrx_err("sdio state err\n");
++ up(&sdiodev->tx_priv->txctl_sema);
++ return;
++ }
++
++ sdiodev->tx_priv->fw_avail_bufcnt = aicwf_sdio_flow_ctrl(sdiodev);
++ while (!aicwf_is_framequeue_empty(&sdiodev->tx_priv->txq)) {
++ aicwf_sdio_send(sdiodev->tx_priv);
++ if(sdiodev->tx_priv->cmd_txstate)
++ break;
++ }
++
++ up(&sdiodev->tx_priv->txctl_sema);
++}
++
++static int aicwf_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt)
++{
++ uint prio;
++ int ret = -EBADE;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++
++ prio = (pkt->priority & 0x7);
++ spin_lock_bh(&sdiodev->tx_priv->txqlock);
++ if (!aicwf_frame_enq(sdiodev->dev, &sdiodev->tx_priv->txq, pkt, prio)) {
++ aicwf_dev_skb_free(pkt);
++ spin_unlock_bh(&sdiodev->tx_priv->txqlock);
++ return -ENOSR;
++ } else {
++ ret = 0;
++ }
++
++ if (bus_if->state != BUS_UP_ST) {
++ sdio_err("bus_if stopped\n");
++ spin_unlock_bh(&sdiodev->tx_priv->txqlock);
++ return -1;
++ }
++
++ atomic_inc(&sdiodev->tx_priv->tx_pktcnt);
++ spin_unlock_bh(&sdiodev->tx_priv->txqlock);
++ complete(&bus_if->bustx_trgg);
++
++ return ret;
++}
++
++static int aicwf_sdio_bus_txmsg(struct device *dev, u8 *msg, uint msglen)
++{
++// int ret = -1;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++
++ down(&sdiodev->tx_priv->cmd_txsema);
++ sdiodev->tx_priv->cmd_txstate = true;
++ sdiodev->tx_priv->cmd_tx_succ = false;
++ sdiodev->tx_priv->cmd_buf = msg;
++ sdiodev->tx_priv->cmd_len = msglen;
++ up(&sdiodev->tx_priv->cmd_txsema);
++
++ if (bus_if->state != BUS_UP_ST) {
++ sdio_err("bus has stop\n");
++ return -1;
++ }
++
++ complete(&bus_if->bustx_trgg);
++ #if 0
++ if (sdiodev->tx_priv->cmd_txstate) {
++ int timeout = msecs_to_jiffies(CMD_TX_TIMEOUT);
++ ret = wait_event_interruptible_timeout(sdiodev->tx_priv->cmd_txdone_wait, \
++ !(sdiodev->tx_priv->cmd_txstate), timeout);
++ }
++
++ if (!sdiodev->tx_priv->cmd_txstate && sdiodev->tx_priv->cmd_tx_succ) {
++ ret = 0;
++ } else {
++ sdio_err("send faild:%d, %d,%x\n", sdiodev->tx_priv->cmd_txstate, sdiodev->tx_priv->cmd_tx_succ, ret);
++ ret = -EIO;
++ }
++
++ return ret;
++ #endif
++ return 0;
++}
++
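++/* Aggregate queued tx packets and flush them to the firmware according to the flow-control credits in fw_avail_bufcnt. */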
++int aicwf_sdio_send(struct aicwf_tx_priv *tx_priv)
++{
++ struct sk_buff *pkt;
++ struct aic_sdio_dev *sdiodev = tx_priv->sdiodev;
++ u32 aggr_len = 0;
++ int retry_times = 0;
++ int max_retry_times = 5;
++
++ aggr_len = (tx_priv->tail - tx_priv->head);
++ if(((atomic_read(&tx_priv->aggr_count) == 0) && (aggr_len != 0))
++ || ((atomic_read(&tx_priv->aggr_count) != 0) && (aggr_len == 0))) {
++ if (aggr_len > 0)
++ aicwf_sdio_aggrbuf_reset(tx_priv);
++ goto done;
++ }
++
++ if (tx_priv->fw_avail_bufcnt <= 0) { //flow control failed
++ tx_priv->fw_avail_bufcnt = aicwf_sdio_flow_ctrl(sdiodev);
++ while(tx_priv->fw_avail_bufcnt <=0 && retry_times < max_retry_times) {
++ retry_times++;
++ tx_priv->fw_avail_bufcnt = aicwf_sdio_flow_ctrl(sdiodev);
++ }
++ if(tx_priv->fw_avail_bufcnt <= 0) {
++ sdio_err("fc retry %d fail\n", tx_priv->fw_avail_bufcnt);
++ goto done;
++ }
++ }
++
++ if (atomic_read(&tx_priv->aggr_count) == tx_priv->fw_avail_bufcnt) {
++ if (atomic_read(&tx_priv->aggr_count) > 0) {
++ tx_priv->fw_avail_bufcnt -= atomic_read(&tx_priv->aggr_count);
++ aicwf_sdio_aggr_send(tx_priv); //send and check the next pkt;
++ }
++ } else {
++ spin_lock_bh(&sdiodev->tx_priv->txqlock);
++ pkt = aicwf_frame_dequeue(&sdiodev->tx_priv->txq);
++ if (pkt == NULL) {
++ sdio_err("txq no pkt\n");
++ spin_unlock_bh(&sdiodev->tx_priv->txqlock);
++ goto done;
++ }
++ atomic_dec(&sdiodev->tx_priv->tx_pktcnt);
++ spin_unlock_bh(&sdiodev->tx_priv->txqlock);
++
++ if(tx_priv==NULL || tx_priv->tail==NULL || pkt==NULL)
++ txrx_err("null error\n");
++ if (aicwf_sdio_aggr(tx_priv, pkt)) {
++ aicwf_sdio_aggrbuf_reset(tx_priv);
++ sdio_err("add aggr pkts failed!\n");
++ goto done;
++ }
++
++ //when aggr finish or there is cmd to send, just send this aggr pkt to fw
++ if ((int)atomic_read(&sdiodev->tx_priv->tx_pktcnt) == 0 || sdiodev->tx_priv->cmd_txstate) { //no more pkt send it!
++ tx_priv->fw_avail_bufcnt -= atomic_read(&tx_priv->aggr_count);
++ aicwf_sdio_aggr_send(tx_priv);
++ } else
++ goto done;
++ }
++
++done:
++ return 0;
++}
++
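++/* Append one packet to the aggregation buffer: 4-byte SDIO header (length/type), host tx descriptor, then the payload, padded to TX_ALIGNMENT. */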
++int aicwf_sdio_aggr(struct aicwf_tx_priv *tx_priv, struct sk_buff *pkt)
++{
++ struct rwnx_txhdr *txhdr = (struct rwnx_txhdr *)pkt->data;
++ u8 *start_ptr = tx_priv->tail;
++ u8 sdio_header[4];
++ u8 adjust_str[4] = {0, 0, 0, 0};
++ u32 curr_len = 0;
++ int allign_len = 0;
++
++ sdio_header[0] =((pkt->len - sizeof(struct rwnx_txhdr) + sizeof(struct txdesc_api)) & 0xff);
++ sdio_header[1] =(((pkt->len - sizeof(struct rwnx_txhdr) + sizeof(struct txdesc_api)) >> 8)&0x0f);
++ sdio_header[2] = 0x01; //data
++ sdio_header[3] = 0; //reserved
++
++ memcpy(tx_priv->tail, (u8 *)&sdio_header, sizeof(sdio_header));
++ tx_priv->tail += sizeof(sdio_header);
++ //payload
++ memcpy(tx_priv->tail, (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api));
++ tx_priv->tail += sizeof(struct txdesc_api); //hostdesc
++ memcpy(tx_priv->tail, (u8 *)((u8 *)txhdr + txhdr->sw_hdr->headroom), pkt->len-txhdr->sw_hdr->headroom);
++ tx_priv->tail += (pkt->len - txhdr->sw_hdr->headroom);
++
++ //word alignment
++ curr_len = tx_priv->tail - tx_priv->head;
++ if (curr_len & (TX_ALIGNMENT - 1)) {
++ allign_len = roundup(curr_len, TX_ALIGNMENT)-curr_len;
++ memcpy(tx_priv->tail, adjust_str, allign_len);
++ tx_priv->tail += allign_len;
++ }
++
++ start_ptr[0] = ((tx_priv->tail - start_ptr - 4) & 0xff);
++ start_ptr[1] = (((tx_priv->tail - start_ptr - 4)>>8) & 0x0f);
++ tx_priv->aggr_buf->dev = pkt->dev;
++
++ if(!txhdr->sw_hdr->need_cfm) {
++ skb_pull(pkt, txhdr->sw_hdr->headroom); /* pull the header before sw_hdr is freed */
++ kmem_cache_free(txhdr->sw_hdr->rwnx_vif->rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr);
++ consume_skb(pkt);
++ }
++
++ atomic_inc(&tx_priv->aggr_count);
++ return 0;
++}
++
++void aicwf_sdio_aggr_send(struct aicwf_tx_priv *tx_priv)
++{
++ struct sk_buff *tx_buf = tx_priv->aggr_buf;
++ int ret = 0;
++ int curr_len = 0;
++
++ //link tail is necessary
++ curr_len = tx_priv->tail - tx_priv->head;
++ if ((curr_len % TXPKT_BLOCKSIZE) != 0) {
++ memset(tx_priv->tail, 0, TAIL_LEN);
++ tx_priv->tail += TAIL_LEN;
++ }
++
++ tx_buf->len = tx_priv->tail - tx_priv->head;
++ ret = aicwf_sdio_txpkt(tx_priv->sdiodev, tx_buf);
++ if (ret < 0) {
++ sdio_err("fail to send aggr pkt!\n");
++ }
++
++ aicwf_sdio_aggrbuf_reset(tx_priv);
++}
++
++void aicwf_sdio_aggrbuf_reset(struct aicwf_tx_priv *tx_priv)
++{
++ struct sk_buff *aggr_buf = tx_priv->aggr_buf;
++
++ tx_priv->tail = tx_priv->head;
++ aggr_buf->len = 0;
++ atomic_set(&tx_priv->aggr_count, 0);
++}
++
++static int aicwf_sdio_bus_start(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ int ret = 0;
++
++#if 1
++ sdio_claim_host(sdiodev->func);
++ sdio_claim_irq(sdiodev->func, aicwf_sdio_hal_irqhandler);
++#else
++ //since we have func2 we don't register irq handler
++ sdio_claim_irq(sdiodev->func, NULL);
++ sdiodev->func->irq_handler = (sdio_irq_handler_t *)aicwf_sdio_hal_irqhandler;
++#endif
++ //enable sdio interrupt
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_INTR_CONFIG_REG, 0x07);
++ sdio_release_host(sdiodev->func);
++
++ if (ret != 0)
++ sdio_err("intr register failed:%d\n", ret);
++
++ bus_if->state = BUS_UP_ST;
++
++ return ret;
++}
++
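++/* Bus tx thread: wait on bustx_trgg and process queued commands/data until the bus goes down. */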
++int sdio_bustx_thread(void *data)
++{
++ struct aicwf_bus *bus = (struct aicwf_bus *) data;
++ struct aic_sdio_dev *sdiodev = bus->bus_priv.sdio;
++
++ while (1) {
++ if(kthread_should_stop()) {
++ sdio_err("sdio bustx thread stop\n");
++ break;
++ }
++ if (!wait_for_completion_interruptible(&bus->bustx_trgg)) {
++ if(sdiodev->bus_if->state == BUS_DOWN_ST)
++ break;
++
++ if((int)(atomic_read(&sdiodev->tx_priv->tx_pktcnt) > 0) || (sdiodev->tx_priv->cmd_txstate == true))
++ aicwf_sdio_tx_process(sdiodev);
++ }
++ }
++
++ return 0;
++}
++
++int sdio_busrx_thread(void *data)
++{
++ struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
++ struct aicwf_bus *bus_if = rx_priv->sdiodev->bus_if;
++
++ while (1) {
++ if(kthread_should_stop()) {
++ sdio_err("sdio busrx thread stop\n");
++ break;
++ }
++ if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) {
++ if(bus_if->state == BUS_DOWN_ST)
++ break;
++ aicwf_process_rxframes(rx_priv);
++ }
++ }
++
++ return 0;
++}
++
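++/* Power-control thread: when there is no pending tx/rx work and no command, request sleep; otherwise re-arm the activity timer. */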
++static int aicwf_sdio_pwrctl_thread(void *data)
++{
++ struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *) data;
++
++ while (1) {
++ if(kthread_should_stop()) {
++ sdio_err("sdio pwrctl thread stop\n");
++ break;
++ }
++ if (!wait_for_completion_interruptible(&sdiodev->pwrctrl_trgg)) {
++ if(sdiodev->bus_if->state == BUS_DOWN_ST)
++ break;
++ if (sdiodev->state == SDIO_ACTIVE_ST) {
++ if((int)(atomic_read(&sdiodev->tx_priv->tx_pktcnt) <= 0) && (sdiodev->tx_priv->cmd_txstate == false) && \
++ atomic_read(&sdiodev->rx_priv->rx_cnt)==0)
++ aicwf_sdio_pwr_stctl(sdiodev, SDIO_SLEEP_ST);
++ else
++ aicwf_sdio_pwrctl_timer(sdiodev, sdiodev->active_duration);
++ }
++ } else {
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++static void aicwf_sdio_bus_pwrctl(ulong data)
++#else
++static void aicwf_sdio_bus_pwrctl(struct timer_list *t)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++ struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *) data;
++#else
++ struct aic_sdio_dev *sdiodev = from_timer(sdiodev, t, timer);
++#endif
++
++ if (sdiodev->bus_if->state == BUS_DOWN_ST) {
++ sdio_err("bus down\n");
++ return;
++ }
++
++ if (sdiodev->pwrctl_tsk){
++ complete(&sdiodev->pwrctrl_trgg);
++ }
++}
++
++
++static void aicwf_sdio_enq_rxpkt(struct aic_sdio_dev *sdiodev, struct sk_buff *pkt)
++{
++ struct aicwf_rx_priv* rx_priv = sdiodev->rx_priv;
++ unsigned long flags = 0;
++
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if (!aicwf_rxframe_enqueue(sdiodev->dev, &rx_priv->rxq, pkt)) {
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ aicwf_dev_skb_free(pkt);
++ return;
++ }
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++
++ atomic_inc(&rx_priv->rx_cnt);
++}
++
++#define SDIO_OTHER_INTERRUPT (0x1ul << 7)
++
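++/* SDIO interrupt handler: read the pending block count, pull one aggregated frame into the rx queue and wake the busrx thread. */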
++void aicwf_sdio_hal_irqhandler(struct sdio_func *func)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(&func->dev);
++ struct aic_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ u8 intstatus = 0;
++ u8 byte_len = 0;
++ struct sk_buff *pkt = NULL;
++ int ret;
++
++ if (!bus_if || bus_if->state == BUS_DOWN_ST) {
++ sdio_err("bus err\n");
++ return;
++ }
++
++ ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_BLOCK_CNT_REG, &intstatus);
++ while(ret || (intstatus & SDIO_OTHER_INTERRUPT)) {
++ sdio_err("ret=%d, intstatus=%x\r\n",ret, intstatus);
++ ret = aicwf_sdio_readb(sdiodev, SDIOWIFI_BLOCK_CNT_REG, &intstatus);
++ }
++ sdiodev->rx_priv->data_len = intstatus * SDIOWIFI_FUNC_BLOCKSIZE;
++
++ if (intstatus > 0) {
++ if(intstatus < 64) {
++ pkt = aicwf_sdio_readframes(sdiodev);
++ } else {
++ aicwf_sdio_intr_get_len_bytemode(sdiodev, &byte_len);//byte_len must<= 128
++ sdio_info("byte mode len=%d\r\n", byte_len);
++ pkt = aicwf_sdio_readframes(sdiodev);
++ }
++ } else {
++ #ifndef CONFIG_PLATFORM_ALLWINNER
++ sdio_err("Interrupt but no data\n");
++ #endif
++ }
++
++ if (pkt)
++ aicwf_sdio_enq_rxpkt(sdiodev, pkt);
++
++ complete(&bus_if->busrx_trgg);
++}
++
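++/* Re-arm the power-control timer for 'duration' ms, or cancel it when duration is 0. */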
++void aicwf_sdio_pwrctl_timer(struct aic_sdio_dev *sdiodev, uint duration)
++{
++ uint timeout;
++
++ if (sdiodev->bus_if->state == BUS_DOWN_ST && duration)
++ return;
++
++ spin_lock_bh(&sdiodev->pwrctl_lock);
++ if (!duration) {
++ if (timer_pending(&sdiodev->timer))
++ del_timer_sync(&sdiodev->timer);
++ } else {
++ sdiodev->active_duration = duration;
++ timeout = msecs_to_jiffies(sdiodev->active_duration);
++ mod_timer(&sdiodev->timer, jiffies + timeout);
++ }
++ spin_unlock_bh(&sdiodev->pwrctl_lock);
++}
++
++static struct aicwf_bus_ops aicwf_sdio_bus_ops = {
++ .stop = aicwf_sdio_bus_stop,
++ .start = aicwf_sdio_bus_start,
++ .txdata = aicwf_sdio_bus_txdata,
++ .txmsg = aicwf_sdio_bus_txmsg,
++};
++
++void aicwf_sdio_release(struct aic_sdio_dev *sdiodev)
++{
++ struct aicwf_bus *bus_if;
++ struct aicwf_rx_priv* rx_priv = NULL;
++ int ret;
++ sdio_dbg("%s\n", __func__);
++
++ bus_if = dev_get_drvdata(sdiodev->dev);
++ bus_if->state = BUS_DOWN_ST;
++
++ sdio_claim_host(sdiodev->func);
++ //disable sdio interrupt
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_INTR_CONFIG_REG, 0x0);
++ if (ret < 0) {
++ sdio_err("reg:%d write failed!\n", SDIOWIFI_INTR_CONFIG_REG);
++ }
++ sdio_release_irq(sdiodev->func);
++ sdio_release_host(sdiodev->func);
++
++ if (sdiodev->dev)
++ aicwf_bus_deinit(sdiodev->dev);
++
++ aicwf_tx_deinit(sdiodev->tx_priv);
++ rx_priv = sdiodev->rx_priv;
++ if(rx_priv != NULL)
++ aicwf_rx_deinit(rx_priv);
++ rwnx_cmd_mgr_deinit(&sdiodev->cmd_mgr);
++ sdio_dbg("exit %s\n", __func__);
++}
++
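++/* Function init: set the 512-byte block size, enable the function, raise the bus clock to 60 MHz and disable byte mode. */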
++int aicwf_sdio_func_init(struct aic_sdio_dev *sdiodev)
++{
++ struct mmc_host *host;
++ u8 block_bit0 = 0x1;
++ u8 byte_mode_disable = 0x1;//1: no byte mode
++ int ret = 0;
++ host = sdiodev->func->card->host;
++
++ sdio_claim_host(sdiodev->func);
++ ret = sdio_set_block_size(sdiodev->func, SDIOWIFI_FUNC_BLOCKSIZE);
++ if (ret < 0) {
++ sdio_err("set blocksize fail %d\n", ret);
++ sdio_release_host(sdiodev->func);
++ return ret;
++ }
++ ret = sdio_enable_func(sdiodev->func);
++ if (ret < 0) {
++ sdio_release_host(sdiodev->func);
++ sdio_err("enable func fail %d.\n", ret);
++ }
++
++ host->ios.clock = 60000000;
++ host->ops->set_ios(host, &host->ios);
++ sdio_release_host(sdiodev->func);
++ sdio_dbg("Set SDIO Clock %d MHz\n",host->ios.clock/1000000);
++
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_REGISTER_BLOCK, block_bit0);
++ if (ret < 0) {
++ sdio_err("reg:%d write failed!\n", SDIOWIFI_REGISTER_BLOCK);
++ return ret;
++ }
++
++ //1: no byte mode
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_BYTEMODE_ENABLE_REG, byte_mode_disable);
++ if (ret < 0) {
++ sdio_err("reg:%d write failed!\n", SDIOWIFI_BYTEMODE_ENABLE_REG);
++ return ret;
++ }
++
++ return ret;
++}
++
++
++void aicwf_sdio_func_deinit(struct aic_sdio_dev *sdiodev)
++{
++ sdio_claim_host(sdiodev->func);
++ sdio_disable_func(sdiodev->func);
++ sdio_release_host(sdiodev->func);
++}
++
++void *aicwf_sdio_bus_init(struct aic_sdio_dev *sdiodev)
++{
++ int ret;
++ struct aicwf_bus *bus_if;
++ struct aicwf_rx_priv *rx_priv;
++ struct aicwf_tx_priv *tx_priv;
++
++ spin_lock_init(&sdiodev->pwrctl_lock);
++ sema_init(&sdiodev->pwrctl_wakeup_sema, 1);
++
++ bus_if = sdiodev->bus_if;
++ bus_if->dev = sdiodev->dev;
++ bus_if->ops = &aicwf_sdio_bus_ops;
++ bus_if->state = BUS_DOWN_ST;
++ sdiodev->state = SDIO_SLEEP_ST;
++ sdiodev->active_duration = SDIOWIFI_PWR_CTRL_INTERVAL;
++
++ rx_priv = aicwf_rx_init(sdiodev);
++ if(!rx_priv) {
++ sdio_err("rx init fail\n");
++ goto fail;
++ }
++ sdiodev->rx_priv = rx_priv;
++
++ tx_priv = aicwf_tx_init(sdiodev);
++ if(!tx_priv) {
++ sdio_err("tx init fail\n");
++ goto fail;
++ }
++ sdiodev->tx_priv = tx_priv;
++ aicwf_frame_queue_init(&tx_priv->txq, 8, TXQLEN);
++ spin_lock_init(&tx_priv->txqlock);
++ sema_init(&tx_priv->txctl_sema,1);
++ sema_init(&tx_priv->cmd_txsema, 1);
++ init_waitqueue_head(&tx_priv->cmd_txdone_wait);
++ atomic_set(&tx_priv->tx_pktcnt, 0);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++ init_timer(&sdiodev->timer);
++ sdiodev->timer.data = (ulong) sdiodev;
++ sdiodev->timer.function = aicwf_sdio_bus_pwrctl;
++#else
++ timer_setup(&sdiodev->timer, aicwf_sdio_bus_pwrctl, 0);
++#endif
++ init_completion(&sdiodev->pwrctrl_trgg);
++#ifdef AICWF_SDIO_SUPPORT
++ sdiodev->pwrctl_tsk = kthread_run(aicwf_sdio_pwrctl_thread, sdiodev, "aicwf_pwrctl");
++#endif
++ if (IS_ERR(sdiodev->pwrctl_tsk)) {
++ sdiodev->pwrctl_tsk = NULL;
++ }
++
++ ret = aicwf_bus_init(0, sdiodev->dev);
++ if (ret < 0) {
++ sdio_err("bus init fail\n");
++ goto fail;
++ }
++
++ ret = aicwf_bus_start(bus_if);
++ if (ret != 0) {
++ sdio_err("bus start fail\n");
++ goto fail;
++ }
++
++ return sdiodev;
++
++fail:
++ aicwf_sdio_release(sdiodev);
++ return NULL;
++}
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_sdio.h
+@@ -0,0 +1,99 @@
++/**
++ * aicwf_sdio.h
++ *
++ * SDIO function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#ifndef _AICWF_SDMMC_H_
++#define _AICWF_SDMMC_H_
++
++#ifdef AICWF_SDIO_SUPPORT
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/ieee80211.h>
++#include <linux/semaphore.h>
++#include "rwnx_cmds.h"
++#define AICWF_SDIO_NAME "aicwf_sdio"
++#define SDIOWIFI_FUNC_BLOCKSIZE 512
++
++#define SDIO_VENDOR_ID_AIC 0x8800
++#define SDIO_DEVICE_ID_AIC 0x0001
++#define SDIOWIFI_BYTEMODE_LEN_REG 0x02
++#define SDIOWIFI_INTR_CONFIG_REG 0x04
++#define SDIOWIFI_SLEEP_REG 0x05
++#define SDIOWIFI_WAKEUP_REG 0x09
++#define SDIOWIFI_FLOW_CTRL_REG 0x0A
++#define SDIOWIFI_REGISTER_BLOCK 0x0B
++#define SDIOWIFI_BYTEMODE_ENABLE_REG 0x11
++#define SDIOWIFI_BLOCK_CNT_REG 0x12
++#define SDIOWIFI_FLOWCTRL_MASK_REG 0x7F
++
++#define SDIOWIFI_PWR_CTRL_INTERVAL 30
++#define FLOW_CTRL_RETRY_COUNT 50
++#define BUFFER_SIZE 1536
++#define TAIL_LEN 4
++#define TXQLEN (2048*4)
++
++#define SDIO_SLEEP_ST 0
++#define SDIO_ACTIVE_ST 1
++
++typedef enum {
++ SDIO_TYPE_DATA = 0X00,
++ SDIO_TYPE_CFG = 0X10,
++ SDIO_TYPE_CFG_CMD_RSP = 0X11,
++ SDIO_TYPE_CFG_DATA_CFM = 0X12
++} sdio_type;
++
++struct rwnx_hw;
++
++
++struct aic_sdio_dev {
++ struct rwnx_hw *rwnx_hw;
++ struct sdio_func *func;
++ struct device *dev;
++ struct aicwf_bus *bus_if;
++ struct rwnx_cmd_mgr cmd_mgr;
++
++ struct aicwf_rx_priv *rx_priv;
++ struct aicwf_tx_priv *tx_priv;
++ u32 state;
++
++ //for sdio pwr ctrl
++ struct timer_list timer;
++ uint active_duration;
++ struct completion pwrctrl_trgg;
++ struct task_struct *pwrctl_tsk;
++ spinlock_t pwrctl_lock;
++ struct semaphore pwrctl_wakeup_sema;
++};
++int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val);
++void aicwf_sdio_hal_irqhandler(struct sdio_func *func);
++void aicwf_sdio_pwrctl_timer(struct aic_sdio_dev *sdiodev, uint duration);
++int aicwf_sdio_pwr_stctl(struct aic_sdio_dev *sdiodev, uint target);
++int aicwf_sdio_func_init(struct aic_sdio_dev *sdiodev);
++void aicwf_sdio_func_deinit(struct aic_sdio_dev *sdiodev);
++int aicwf_sdio_flow_ctrl(struct aic_sdio_dev *sdiodev);
++int aicwf_sdio_recv_pkt(struct aic_sdio_dev *sdiodev, struct sk_buff *skbbuf, u32 size);
++int aicwf_sdio_send_pkt(struct aic_sdio_dev *sdiodev, u8 *buf, uint count);
++void *aicwf_sdio_bus_init(struct aic_sdio_dev *sdiodev);
++void aicwf_sdio_release(struct aic_sdio_dev *sdiodev);
++void aicwf_sdio_exit(void);
++void aicwf_sdio_register(void);
++int aicwf_sdio_txpkt(struct aic_sdio_dev *sdiodev, struct sk_buff *pkt);
++int sdio_bustx_thread(void *data);
++int sdio_busrx_thread(void *data);
++int aicwf_sdio_aggr(struct aicwf_tx_priv *tx_priv, struct sk_buff *pkt);
++int aicwf_sdio_send(struct aicwf_tx_priv *tx_priv);
++void aicwf_sdio_aggr_send(struct aicwf_tx_priv *tx_priv);
++void aicwf_sdio_aggrbuf_reset(struct aicwf_tx_priv* tx_priv);
++extern void aicwf_hostif_ready(void);
++#ifdef CONFIG_PLATFORM_NANOPI
++extern void extern_wifi_set_enable(int is_on);
++extern void sdio_reinit(void);
++#endif /*CONFIG_PLATFORM_NANOPI*/
++
++#endif /* AICWF_SDIO_SUPPORT */
++
++#endif /*_AICWF_SDMMC_H_*/
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_txrxif.c
+@@ -0,0 +1,1227 @@
++/**
++ * aicwf_txrxif.c
++ *
++ * bus tx/rx interface implementation
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#include <linux/kthread.h>
++#include <linux/netdevice.h>
++#include <linux/printk.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/completion.h>
++#include <linux/semaphore.h>
++#include <linux/debugfs.h>
++#include <linux/atomic.h>
++#include <linux/vmalloc.h>
++#include "lmac_msg.h"
++#include "aicwf_txrxif.h"
++#include "rwnx_platform.h"
++#include "rwnx_defs.h"
++#include "rwnx_msg_rx.h"
++#include "rwnx_rx.h"
++#include "aicwf_rx_prealloc.h"
++#ifdef AICWF_SDIO_SUPPORT
++#include "sdio_host.h"
++#endif
++
++#ifdef CONFIG_RX_TASKLET
++int aicwf_tasklet_rxframes(struct aicwf_rx_priv *rx_priv);
++#endif
++#ifdef CONFIG_TX_TASKLET
++void aicwf_tasklet_tx_process(struct aic_usb_dev *usb_dev);
++#endif
++extern bool aicwf_usb_rx_aggr;
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++void aicwf_rxframe_queue_init_2(struct rx_frame_queue *pq, int max_len)
++{
++ //int prio;
++
++ memset(pq, 0, offsetof(struct rx_frame_queue, queuelist) + (sizeof(struct list_head)));
++ pq->qmax = (u16)max_len;
++ INIT_LIST_HEAD(&pq->queuelist);
++#if 0
++ memset(pq, 0, offsetof(struct rx_frame_queue, queuelist) + (sizeof(struct list_head) * num_prio));
++ pq->num_prio = (u16)num_prio;
++ pq->qmax = (u16)max_len;
++
++ for (prio = 0; prio < num_prio; prio++) {
++ INIT_LIST_HEAD(&pq->queuelist[prio]);
++ }
++#endif
++}
++
++//extern struct aic_sdio_dev *g_sdiodev;
++void rxbuff_queue_flush(struct aicwf_rx_priv* rx_priv)
++{
++
++ //int prio;
++ struct rx_frame_queue *pq = &rx_priv->rxq;
++ struct list_head *pos;
++ struct list_head *n;
++ struct list_head *head;
++ struct rx_buff *tempbuf = NULL;
++
++ head = &pq->queuelist;
++ list_for_each_safe(pos, n, head) {
++ tempbuf = list_entry(pos, struct rx_buff, queue);
++ list_del_init(&tempbuf->queue);
++#if 0
++ rxbuff_free(tempbuf);
++#else
++ aicwf_prealloc_rxbuff_free(tempbuf, &rx_priv->rxbuff_lock);
++#endif
++ pq->qcnt--;
++ }
++}
++#endif
++
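++/* Allocate the shared command buffer, init the tx/rx completions and start the bus tx/rx threads for the active bus type. */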
++int aicwf_bus_init(uint bus_hdrlen, struct device *dev)
++{
++ int ret = 0;
++ struct aicwf_bus *bus_if;
++
++ if (!dev) {
++ AICWFDBG(LOGERROR, "device not found\n");
++ return -1;
++ }
++ bus_if = dev_get_drvdata(dev);
++ #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ bus_if->cmd_buf = usb_alloc_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #else
++ bus_if->cmd_buf = usb_buffer_alloc(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #endif
++ #else
++ bus_if->cmd_buf = kzalloc(CMD_BUF_MAX, GFP_KERNEL);
++ #endif
++ if(!bus_if->cmd_buf) {
++ ret = -ENOMEM;
++ AICWFDBG(LOGERROR, "proto_attach failed\n");
++ goto fail;
++ }
++ memset(bus_if->cmd_buf, '\0', CMD_BUF_MAX);
++
++ init_completion(&bus_if->bustx_trgg);
++ init_completion(&bus_if->busrx_trgg);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(bus_if->bus_priv.usb->chipid != PRODUCT_ID_AIC8801 &&
++ bus_if->bus_priv.usb->chipid != PRODUCT_ID_AIC8800D81){
++ init_completion(&bus_if->msg_busrx_trgg);
++ }
++#endif
++
++#ifdef AICWF_SDIO_SUPPORT
++ bus_if->bustx_thread = kthread_run(sdio_bustx_thread, (void *)bus_if, "aicwf_bustx_thread");
++ bus_if->busrx_thread = kthread_run(sdio_busrx_thread, (void *)bus_if->bus_priv.sdio->rx_priv, "aicwf_busrx_thread");
++#endif
++#ifdef AICWF_USB_SUPPORT
++ bus_if->busrx_thread = kthread_run(usb_busrx_thread, (void *)bus_if->bus_priv.usb->rx_priv, "aicwf_busrx_thread");
++ bus_if->bustx_thread = kthread_run(usb_bustx_thread, (void *)bus_if, "aicwf_bustx_thread");
++
++#if 1
++ //waiting for rx/tx thread init finish
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0)
++ while(bus_if->busrx_thread->__state != TASK_INTERRUPTIBLE ||
++ bus_if->bustx_thread->__state != TASK_INTERRUPTIBLE)
++#else
++ while(bus_if->busrx_thread->state != TASK_INTERRUPTIBLE ||
++ bus_if->bustx_thread->state != TASK_INTERRUPTIBLE)
++#endif
++ {
++ AICWFDBG(LOGINFO, "%s waiting for rx/tx thread init finish \r\n", __func__);
++ msleep(100);
++ }
++ //waiting for rx/tx thread Initialization finish
++#endif
++
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(bus_if->bus_priv.usb->chipid != PRODUCT_ID_AIC8801 &&
++ bus_if->bus_priv.usb->chipid != PRODUCT_ID_AIC8800D81){
++ bus_if->msg_busrx_thread = kthread_run(usb_msg_busrx_thread, (void *)bus_if->bus_priv.usb->rx_priv, "aicwf_msg_busrx_thread");
++ }
++#endif
++
++#ifdef CONFIG_RX_TASKLET//AIDEN tasklet
++ AICWFDBG(LOGINFO, "%s use tasklet for rx \r\n", __func__);
++ tasklet_init(&((bus_if->bus_priv.usb)->recv_tasklet),
++ (void(*)(unsigned long))aicwf_tasklet_rxframes,
++ (unsigned long)(bus_if->bus_priv.usb)->rx_priv);
++#endif
++
++#ifdef CONFIG_TX_TASKLET//AIDEN tasklet
++ AICWFDBG(LOGINFO, "%s use tasklet for tx \r\n", __func__);
++ tasklet_init(&((bus_if->bus_priv.usb)->xmit_tasklet),
++ (void(*)(unsigned long))aicwf_tasklet_tx_process,
++ (unsigned long)(bus_if->bus_priv.usb));
++#endif
++
++
++#endif
++
++ if (IS_ERR(bus_if->bustx_thread)) {
++ bus_if->bustx_thread = NULL;
++ AICWFDBG(LOGERROR, "aicwf_bustx_thread run fail\n");
++ goto fail;
++ }
++
++ if (IS_ERR(bus_if->busrx_thread)) {
++ bus_if->busrx_thread = NULL;
++ AICWFDBG(LOGERROR, "aicwf_bustx_thread run fail\n");
++ goto fail;
++ }
++
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(bus_if->bus_priv.usb->chipid != PRODUCT_ID_AIC8801 &&
++ bus_if->bus_priv.usb->chipid != PRODUCT_ID_AIC8800D81){
++ if (IS_ERR(bus_if->msg_busrx_thread)) {
++ bus_if->msg_busrx_thread = NULL;
++ txrx_err("aicwf_msg_busrx_thread run fail\n");
++ goto fail;
++ }
++ }
++#endif
++
++
++ return ret;
++fail:
++ aicwf_bus_deinit(dev);
++
++ return ret;
++}
++
++void aicwf_usb_cancel_all_urbs(struct aic_usb_dev *usb_dev);
++
++void aicwf_bus_deinit(struct device *dev)
++{
++ struct aicwf_bus *bus_if;
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usb;
++#endif
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev;
++#endif
++
++ if (!dev) {
++ txrx_err("device not found\n");
++ return;
++ }
++ AICWFDBG(LOGINFO, "%s Enter\r\n", __func__);
++
++ bus_if = dev_get_drvdata(dev);
++ aicwf_bus_stop(bus_if);
++
++#ifdef AICWF_USB_SUPPORT
++ usb = bus_if->bus_priv.usb;
++ aicwf_usb_cancel_all_urbs(usb);//AIDEN
++
++ if(g_rwnx_plat && g_rwnx_plat->enabled){
++ rwnx_platform_deinit(usb->rwnx_hw);
++ }else{
++ AICWFDBG(LOGINFO, "%s g_rwnx_plat->enabled is false\r\n", __func__);
++ }
++
++#endif
++#ifdef AICWF_SDIO_SUPPORT
++ sdiodev =bus_if->bus_priv.sdio;
++ if(g_rwnx_plat && g_rwnx_plat->enabled)
++ rwnx_platform_deinit(sdiodev->rwnx_hw);
++#endif
++
++ if (bus_if->cmd_buf) {
++ #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ usb_free_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #else
++ usb_buffer_free(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #endif
++ #else
++ kfree(bus_if->cmd_buf);
++ #endif
++ bus_if->cmd_buf = NULL;
++ }
++
++ if (bus_if->bustx_thread) {
++ AICWFDBG(LOGINFO, "%s stop to bustx_thread!! \r\n", __func__);
++
++
++ complete_all(&bus_if->bustx_trgg);
++ kthread_stop(bus_if->bustx_thread);
++ bus_if->bustx_thread = NULL;
++ }
++
++#ifdef CONFIG_TX_TASKLET//AIDEN tasklet
++ tasklet_kill(&usb->xmit_tasklet);
++#endif
++
++ AICWFDBG(LOGINFO, "%s Exit \n", __func__);
++}
++
++void aicwf_frame_tx(void *dev, struct sk_buff *skb)
++{
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *)dev;
++ aicwf_bus_txdata(sdiodev->bus_if, skb);
++#else
++ struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev;
++ struct rwnx_txhdr *txhdr = NULL;
++ unsigned long flags;
++
++ if (!usbdev->state) {
++ txrx_err("down\n");
++ spin_lock_irqsave(&usbdev->tx_flow_lock, flags);
++ if(!usbdev->tbusy)
++ aicwf_usb_tx_flowctrl(usbdev->rwnx_hw, true);
++ spin_unlock_irqrestore(&usbdev->tx_flow_lock, flags);
++ txhdr = (struct rwnx_txhdr *)skb->data;
++ kmem_cache_free(usbdev->rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr);
++ dev_kfree_skb(skb);
++ return;
++ }
++ aicwf_bus_txdata(usbdev->bus_if, skb);
++#endif
++}
++
++struct aicwf_tx_priv* aicwf_tx_init(void *arg)
++{
++ struct aicwf_tx_priv* tx_priv;
++
++ tx_priv = kzalloc(sizeof(struct aicwf_tx_priv), GFP_KERNEL);
++ if (!tx_priv)
++ return NULL;
++
++#ifdef AICWF_SDIO_SUPPORT
++ tx_priv->sdiodev = (struct aic_sdio_dev *)arg;
++#else
++ tx_priv->usbdev = (struct aic_usb_dev *)arg;
++#endif
++
++ atomic_set(&tx_priv->aggr_count, 0);
++ tx_priv->aggr_buf = dev_alloc_skb(MAX_AGGR_TXPKT_LEN);
++ if(!tx_priv->aggr_buf) {
++ txrx_err("Alloc bus->txdata_buf failed!\n");
++ kfree(tx_priv);
++ return NULL;
++ }
++ tx_priv->head = tx_priv->aggr_buf->data;
++ tx_priv->tail = tx_priv->aggr_buf->data;
++
++ return tx_priv;
++}
++
++void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv)
++{
++ if (tx_priv && tx_priv->aggr_buf)
++ dev_kfree_skb(tx_priv->aggr_buf);
++
++ kfree(tx_priv);
++}
++
++static bool aicwf_another_ptk(struct sk_buff *skb)
++{
++ u8 *data;
++ u16 aggr_len = 0;
++
++ if(skb->data == NULL || skb->len == 0) {
++ return false;
++ }
++ data = skb->data;
++ aggr_len = (*skb->data | (*(skb->data + 1) << 8));
++ if(aggr_len == 0) {
++ return false;
++ }
++
++ return true;
++}
++
++#if 0//AIDEN
++void rwnx_frame_parser(char* tag, char* data, unsigned long len);
++#endif
++
++#ifdef CONFIG_RX_TASKLET
++int aicwf_tasklet_rxframes(struct aicwf_rx_priv *rx_priv)
++{
++
++ int ret = 0;
++ unsigned long flags = 0;
++ struct sk_buff *skb = NULL; /* Packet for event or data frames */
++ u16 pkt_len = 0;
++ //struct sk_buff *skb_inblock = NULL;
++ u16 aggr_len = 0, adjust_len = 0;
++ u8 *data = NULL;
++ u8_l *msg = NULL;
++
++ while (1) {
++ //aicwf_usb_rx_submit_all_urb_(rx_priv->usbdev);
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if(aicwf_is_framequeue_empty(&rx_priv->rxq)) {
++ usb_info("no more rxdata\n");
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ break;
++ }
++ skb = aicwf_frame_dequeue(&rx_priv->rxq);
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ if (skb == NULL) {
++ txrx_err("skb_error\r\n");
++ break;
++ }
++ data = skb->data;
++ pkt_len = (*skb->data | (*(skb->data + 1) << 8));
++ //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]);
++ if (pkt_len > 1600) {
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ continue;
++ }
++
++ if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data
++#if 0
++ aggr_len = pkt_len + RX_HWHRD_LEN;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_ATOMIC);//8 is for ccmp mic or wep icv
++ if(skb_inblock == NULL){
++ txrx_err("no more space! skip!\n");
++ skb_pull(skb, adjust_len);
++ continue;
++ }
++
++ skb_put(skb_inblock, aggr_len);
++ memcpy(skb_inblock->data, data, aggr_len);
++#if 0//AIDEN
++ rwnx_frame_parser((char*)__func__, skb_inblock->data + 60, aggr_len - 60);
++#endif
++#endif
++ rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb, (void *)rx_priv);
++ ///TODO: here need to add rx data process
++ //skb_pull(skb, adjust_len);
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ msg = kmalloc(aggr_len+4, GFP_ATOMIC);
++ if(msg == NULL){
++ txrx_err("no more space for msg!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++ memcpy(msg, data, aggr_len + 4);
++ if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP)
++ rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4));
++
++ if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM)
++ aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4));
++ skb_pull(skb, adjust_len+4);
++ kfree(msg);
++ dev_kfree_skb(skb);
++ }
++
++ //dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ }
++
++ return ret;
++ }
++#endif
++
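++/* Rx worker: drain the rx queue and dispatch each embedded frame either as data (rwnx_rxdataind_aicwf) or as a config message/confirmation. */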
++int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv)
++{
++#ifdef AICWF_SDIO_SUPPORT
++ int ret = 0;
++ unsigned long flags = 0;
++ struct sk_buff *skb = NULL;
++ u16 pkt_len = 0;
++ struct sk_buff *skb_inblock = NULL;
++ u16 aggr_len = 0, adjust_len = 0;
++ u8 *data = NULL;
++ u8_l *msg = NULL;
++
++ while (1) {
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if(aicwf_is_framequeue_empty(&rx_priv->rxq)) {
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ break;
++ }
++ skb = aicwf_frame_dequeue(&rx_priv->rxq);
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ if (skb == NULL) {
++ txrx_err("skb_error\r\n");
++ break;
++ }
++ while(aicwf_another_ptk(skb)) {
++ data = skb->data;
++ pkt_len = (*skb->data | (*(skb->data + 1) << 8));
++
++ if((skb->data[2] & SDIO_TYPE_CFG) != SDIO_TYPE_CFG) { // type : data
++ aggr_len = pkt_len + RX_HWHRD_LEN;
++
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);
++ if(skb_inblock == NULL){
++ txrx_err("no more space! skip\n");
++ skb_pull(skb, adjust_len);
++ continue;
++ }
++
++ skb_put(skb_inblock, aggr_len);
++ memcpy(skb_inblock->data, data, aggr_len);
++ rwnx_rxdataind_aicwf(rx_priv->sdiodev->rwnx_hw, skb_inblock, (void *)rx_priv);
++ skb_pull(skb, adjust_len);
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ msg = kmalloc(aggr_len+4, GFP_KERNEL);
++ if(msg == NULL){
++ txrx_err("no more space for msg!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++
++ memcpy(msg, data, aggr_len + 4);
++ if((*(msg + 2) & 0x7f) == SDIO_TYPE_CFG_CMD_RSP)
++ rwnx_rx_handle_msg(rx_priv->sdiodev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4));
++
++ if((*(msg + 2) & 0x7f) == SDIO_TYPE_CFG_DATA_CFM)
++ aicwf_sdio_host_tx_cfm_handler(&(rx_priv->sdiodev->rwnx_hw->sdio_env), (u32 *)(msg + 4));
++ skb_pull(skb, adjust_len+4);
++ kfree(msg);
++ }
++ }
++
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ }
++
++ aicwf_sdio_pwr_stctl(rx_priv->sdiodev, SDIO_ACTIVE_ST);
++
++ return ret;
++#else //AICWF_USB_SUPPORT
++ int ret = 0;
++ unsigned long flags = 0;
++#ifndef CONFIG_PREALLOC_RX_SKB
++ struct sk_buff *skb = NULL;/* Packet for event or data frames */
++#endif
++ u16 pkt_len = 0;
++ u16 aggr_len = 0, adjust_len = 0;
++ u8 *data = NULL;
++ u8_l *msg = NULL;
++//#ifdef CONFIG_USB_RX_AGGR
++ struct sk_buff *skb_inblock = NULL;
++ u8 cnt = 0 ;
++//#endif
++#ifdef CONFIG_PREALLOC_RX_SKB
++ struct rx_buff *buffer = NULL;
++ while (1) {
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if (!rx_priv->rxq.qcnt) {
++ usb_info("no more rxdata\n");
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ break;
++ }
++ buffer = rxbuff_dequeue(&rx_priv->rxq);
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++
++ if (buffer == NULL) {
++ txrx_err("skb_error\r\n");
++ ASSERT_ERR(1);
++ break;
++ }
++ data = buffer->data;
++ pkt_len = (*data | (*(data + 1) << 8));
++ //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]);
++ if (pkt_len > 1600) {
++ aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock);
++ atomic_dec(&rx_priv->rx_cnt);
++ continue;
++ }
++
++ if((data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data
++ skb_inblock = __dev_alloc_skb(pkt_len + RX_HWHRD_LEN + CCMP_OR_WEP_INFO, GFP_KERNEL);
++ if (skb_inblock == NULL) {
++ txrx_err("no more space! skip\n");
++ aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock);
++ atomic_dec(&rx_priv->rx_cnt);
++ continue;
++ }
++
++ skb_put(skb_inblock, pkt_len + RX_HWHRD_LEN);
++ memcpy(skb_inblock->data, data, pkt_len + RX_HWHRD_LEN);
++ rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv);
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ msg = kmalloc(aggr_len+4, GFP_KERNEL);
++ if(msg == NULL){
++ txrx_err("no more space for msg!\n");
++ aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock);
++ return -EBADE;
++ }
++ memcpy(msg, data, aggr_len + 4);
++
++ if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST))
++ rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4));
++
++ if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM)
++ aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4));
++
++ if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT)
++ rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len);
++
++ kfree(msg);
++ }
++
++ aicwf_prealloc_rxbuff_free(buffer, &rx_priv->rxbuff_lock);
++ atomic_dec(&rx_priv->rx_cnt);
++ }
++
++#else
++ if(aicwf_usb_rx_aggr){
++ while (1) {
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if(aicwf_is_framequeue_empty(&rx_priv->rxq)) {
++ usb_info("no more rxdata\n");
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ break;
++ }
++ skb = aicwf_frame_dequeue(&rx_priv->rxq);
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++
++ if (skb == NULL) {
++ txrx_err("skb_error\r\n");
++ ASSERT_ERR(1);
++ break;
++ }
++ while(aicwf_another_ptk(skb)) {
++ cnt++;
++ if (cnt > 30) {
++ //printk("%s err, break %d\n", __func__, cnt);
++ //break;
++ }
++ data = skb->data;
++ pkt_len = (*skb->data | (*(skb->data + 1) << 8));
++ //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]);
++ if (pkt_len > 1600) {
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ continue;
++ }
++
++ if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data
++ aggr_len = pkt_len + RX_HWHRD_LEN;
++ adjust_len = aggr_len;
++ skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);//8 is for ccmp mic or wep icv
++ if(skb_inblock == NULL){
++ txrx_err("no more space! skip!\n");
++ skb_pull(skb, adjust_len);
++ continue;
++ }
++
++ skb_put(skb_inblock, aggr_len);
++ memcpy(skb_inblock->data, data, aggr_len);
++ rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv);
++
++ ///TODO: here need to add rx data process
++
++ skb_pull(skb, adjust_len);
++
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ msg = kmalloc(aggr_len+4, GFP_KERNEL);
++ if(msg == NULL){
++ txrx_err("no more space for msg!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++ memcpy(msg, data, aggr_len + 4);
++
++ if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST))
++ rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4));
++
++ if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM)
++ aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4));
++
++ if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT)
++ rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len);
++
++
++ skb_pull(skb, adjust_len+4);
++ kfree(msg);
++
++ }
++ }
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ }
++ }else{
++ while (1) {
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if(aicwf_is_framequeue_empty(&rx_priv->rxq)) {
++ usb_info("no more rxdata\n");
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ break;
++ }
++ skb = aicwf_frame_dequeue(&rx_priv->rxq);
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++
++ if (skb == NULL) {
++ txrx_err("skb_error\r\n");
++ ASSERT_ERR(1);
++ break;
++ }
++ data = skb->data;
++ pkt_len = (*skb->data | (*(skb->data + 1) << 8));
++ //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]);
++ if (pkt_len > 1600) {
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ continue;
++ }
++
++ if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data
++ rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb, (void *)rx_priv);
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ msg = kmalloc(aggr_len+4, GFP_KERNEL);
++ if(msg == NULL){
++ txrx_err("no more space for msg!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++ memcpy(msg, data, aggr_len + 4);
++
++ if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST))
++ rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4));
++
++ if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM)
++ aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4));
++
++ if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT)
++ rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len);
++
++ skb_pull(skb, adjust_len+4);
++ kfree(msg);
++ dev_kfree_skb(skb);
++ }
++ atomic_dec(&rx_priv->rx_cnt);
++ }
++ }
++#endif
++
++ return ret;
++#endif //AICWF_SDIO_SUPPORT
++}
++
++
++#ifdef CONFIG_USB_MSG_IN_EP
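++/* Drain rx_priv->msg_rxq (filled by the dedicated message IN endpoint) and
++ * dispatch each entry: data frames are copied into a fresh skb and handed to
++ * rwnx_rxdataind_aicwf(), while config frames are parsed as command
++ * responses, TX confirmations or firmware print messages. */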
++int aicwf_process_msg_rxframes(struct aicwf_rx_priv *rx_priv)
++{
++ int ret = 0;
++ unsigned long flags = 0;
++ struct sk_buff *skb = NULL; /* Packet for event or data frames */
++ u16 pkt_len = 0;
++ struct sk_buff *skb_inblock = NULL;
++ u16 aggr_len = 0, adjust_len = 0;
++ u8 *data = NULL;
++ u8_l *msg = NULL;
++
++ while (1) {
++ spin_lock_irqsave(&rx_priv->msg_rxqlock, flags);
++ if(aicwf_is_framequeue_empty(&rx_priv->msg_rxq)) {
++ usb_info("no more rxmsg\n");
++ spin_unlock_irqrestore(&rx_priv->msg_rxqlock,flags);
++ break;
++ }
++ skb = aicwf_frame_dequeue(&rx_priv->msg_rxq);
++ spin_unlock_irqrestore(&rx_priv->msg_rxqlock, flags);
++ if (skb == NULL) {
++ txrx_err("skb_error\r\n");
++ break;
++ }
++ data = skb->data;
++ pkt_len = (*skb->data | (*(skb->data + 1) << 8));
++ //printk("p:%d, s:%d , %x\n", pkt_len, skb->len, data[2]);
++#if 0 //amsdu > 1600
++ if (pkt_len > 1600) {
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ continue;
++ }
++#endif
++
++ if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data
++ aggr_len = pkt_len + RX_HWHRD_LEN;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);//8 is for ccmp mic or wep icv
++ if(skb_inblock == NULL){
++ txrx_err("no more space! skip!\n");
++ skb_pull(skb, adjust_len);
++ continue;
++ }
++
++ skb_put(skb_inblock, aggr_len);
++ memcpy(skb_inblock->data, data, aggr_len);
++#if 0//AIDEN
++ rwnx_frame_parser((char*)__func__, skb_inblock->data + 60, aggr_len - 60);
++#endif
++ rwnx_rxdataind_aicwf(rx_priv->usbdev->rwnx_hw, skb_inblock, (void *)rx_priv);
++ //TODO: add rx data processing here
++
++ skb_pull(skb, adjust_len);
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ msg = kmalloc(aggr_len+4, GFP_KERNEL);
++ if(msg == NULL){
++ txrx_err("no more space for msg!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++ memcpy(msg, data, aggr_len + 4);
++
++ if(((*(msg + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP) && (rx_priv->usbdev->bus_if->state != (int)USB_DOWN_ST))
++ rwnx_rx_handle_msg(rx_priv->usbdev->rwnx_hw, (struct ipc_e2a_msg *)(msg + 4));
++
++ if((*(msg + 2) & 0x7f) == USB_TYPE_CFG_DATA_CFM)
++ aicwf_usb_host_tx_cfm_handler(&(rx_priv->usbdev->rwnx_hw->usb_env), (u32 *)(msg + 4));
++
++ if ((*(msg + 2) & 0x7f) == USB_TYPE_CFG_PRINT)
++ rwnx_rx_handle_print(rx_priv->usbdev->rwnx_hw, msg + 4, aggr_len);
++
++
++ skb_pull(skb, adjust_len+4);
++ kfree(msg);
++ }
++
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->msg_rx_cnt);
++ }
++
++ return ret;
++}
++#endif
++
++
++#ifdef AICWF_RX_REORDER
++static struct recv_msdu *aicwf_rxframe_queue_init(struct list_head *q, int qsize)
++{
++ int i;
++ struct recv_msdu *req, *reqs;
++
++ reqs = vmalloc(qsize*sizeof(struct recv_msdu));
++ if (reqs == NULL)
++ return NULL;
++
++ req = reqs;
++ for (i = 0; i < qsize; i++) {
++ INIT_LIST_HEAD(&req->rxframe_list);
++ list_add(&req->rxframe_list, q);
++ req->len = 0;
++ req++;
++ }
++
++ return reqs;
++}
++#endif
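++/* Allocate and initialise the per-bus RX context: the RX frame queue and its
++ * lock, the optional message RX queue (CONFIG_USB_MSG_IN_EP), and, when RX
++ * reordering is enabled, the free pool of recv_msdu descriptors plus the
++ * per-station reorder list. Returns NULL on allocation failure. */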
++struct aicwf_rx_priv *aicwf_rx_init(void *arg)
++{
++ struct aicwf_rx_priv* rx_priv;
++ rx_priv = kzalloc(sizeof(struct aicwf_rx_priv), GFP_KERNEL);
++ if (!rx_priv)
++ return NULL;
++
++#ifdef AICWF_SDIO_SUPPORT
++ rx_priv->sdiodev = (struct aic_sdio_dev *)arg;
++#else
++ rx_priv->usbdev = (struct aic_usb_dev *)arg;
++#endif
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++ aicwf_rxframe_queue_init_2(&rx_priv->rxq, MAX_RXQLEN);
++#else
++ aicwf_frame_queue_init(&rx_priv->rxq, 1, MAX_RXQLEN);
++#endif
++ spin_lock_init(&rx_priv->rxqlock);
++#ifdef CONFIG_PREALLOC_RX_SKB
++ spin_lock_init(&rx_priv->rxbuff_lock);
++ //aicwf_prealloc_init();
++#endif
++ atomic_set(&rx_priv->rx_cnt, 0);
++
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(rx_priv->usbdev->chipid != PRODUCT_ID_AIC8801 &&
++ rx_priv->usbdev->chipid != PRODUCT_ID_AIC8800D81){
++ aicwf_frame_queue_init(&rx_priv->msg_rxq, 1, MAX_RXQLEN);
++ spin_lock_init(&rx_priv->msg_rxqlock);
++ atomic_set(&rx_priv->msg_rx_cnt, 0);
++ }
++#endif
++
++
++#ifdef AICWF_RX_REORDER
++ INIT_LIST_HEAD(&rx_priv->rxframes_freequeue);
++ spin_lock_init(&rx_priv->freeq_lock);
++ rx_priv->recv_frames = aicwf_rxframe_queue_init(&rx_priv->rxframes_freequeue, MAX_REORD_RXFRAME);
++ if (!rx_priv->recv_frames) {
++ txrx_err("no enough buffer for free recv frame queue!\n");
++ kfree(rx_priv);
++ return NULL;
++ }
++ spin_lock_init(&rx_priv->stas_reord_lock);
++ INIT_LIST_HEAD(&rx_priv->stas_reord_list);
++#endif
++
++ return rx_priv;
++}
++
++#ifdef AICWF_RX_REORDER
++static void aicwf_recvframe_queue_deinit(struct list_head *q)
++{
++ struct recv_msdu *req, *next;
++
++ list_for_each_entry_safe(req, next, q, rxframe_list) {
++ list_del_init(&req->rxframe_list);
++ }
++}
++#endif
++void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv)
++{
++#ifdef AICWF_RX_REORDER
++ struct reord_ctrl_info *reord_info, *tmp;
++
++ AICWFDBG(LOGINFO, "%s Enter\n", __func__);
++
++ list_for_each_entry_safe(reord_info, tmp,
++ &rx_priv->stas_reord_list, list) {
++ reord_deinit_sta(rx_priv, reord_info);
++ }
++
++#endif
++ AICWFDBG(LOGINFO, "stio rx thread\n");
++#ifdef AICWF_SDIO_SUPPORT
++ if (rx_priv->sdiodev->bus_if->busrx_thread) {
++ complete_all(&rx_priv->sdiodev->bus_if->busrx_trgg);
++ kthread_stop(rx_priv->sdiodev->bus_if->busrx_thread);
++ rx_priv->sdiodev->bus_if->busrx_thread = NULL;
++ }
++#endif
++#ifdef AICWF_USB_SUPPORT
++
++#ifdef CONFIG_RX_TASKLET//AIDEN tasklet
++ tasklet_kill(&rx_priv->usbdev->recv_tasklet);
++#endif
++ if (rx_priv->usbdev->bus_if->busrx_thread) {
++ complete_all(&rx_priv->usbdev->bus_if->busrx_trgg);
++ kthread_stop(rx_priv->usbdev->bus_if->busrx_thread);
++ rx_priv->usbdev->bus_if->busrx_thread = NULL;
++ }
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(rx_priv->usbdev->chipid != PRODUCT_ID_AIC8801 &&
++ rx_priv->usbdev->chipid != PRODUCT_ID_AIC8800D81){
++ if (rx_priv->usbdev->bus_if->msg_busrx_thread) {
++ complete(&rx_priv->usbdev->bus_if->msg_busrx_trgg);
++ kthread_stop(rx_priv->usbdev->bus_if->msg_busrx_thread);
++ rx_priv->usbdev->bus_if->msg_busrx_thread = NULL;
++ }
++ }
++#endif
++
++#endif
++
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(rx_priv->usbdev->chipid != PRODUCT_ID_AIC8801 &&
++ rx_priv->usbdev->chipid != PRODUCT_ID_AIC8800D81){
++ aicwf_frame_queue_flush(&rx_priv->msg_rxq);
++ }
++#endif
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++ rxbuff_queue_flush(rx_priv);
++#else
++ aicwf_frame_queue_flush(&rx_priv->rxq);
++#endif
++
++#ifdef AICWF_RX_REORDER
++ aicwf_recvframe_queue_deinit(&rx_priv->rxframes_freequeue);
++ if (rx_priv->recv_frames)
++ vfree(rx_priv->recv_frames);
++#endif
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++ //aicwf_prealloc_exit();
++#endif
++ kfree(rx_priv);
++ AICWFDBG(LOGINFO, "%s Exit\n", __func__);
++
++}
++
++bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt)
++{
++ return aicwf_frame_enq(dev, q, pkt, 0);
++}
++
++
++void aicwf_dev_skb_free(struct sk_buff *skb)
++{
++ if (!skb)
++ return;
++
++ dev_kfree_skb_any(skb);
++}
++
++static struct sk_buff *aicwf_frame_queue_penq(struct frame_queue *pq, int prio, struct sk_buff *p)
++{
++ struct sk_buff_head *q;
++
++ if (pq->queuelist[prio].qlen >= pq->qmax)
++ return NULL;
++
++ q = &pq->queuelist[prio];
++ __skb_queue_tail(q, p);
++ pq->qcnt++;
++ if (pq->hi_prio < prio)
++ pq->hi_prio = (u16)prio;
++
++ return p;
++}
++
++void aicwf_frame_queue_flush(struct frame_queue *pq)
++{
++ int prio;
++ struct sk_buff_head *q;
++ struct sk_buff *p, *next;
++
++ for (prio = 0; prio < pq->num_prio; prio++)
++ {
++ q = &pq->queuelist[prio];
++ skb_queue_walk_safe(q, p, next) {
++ skb_unlink(p, q);
++ aicwf_dev_skb_free(p);
++ pq->qcnt--;
++ }
++ }
++}
++
++void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len)
++{
++ int prio;
++
++ memset(pq, 0, offsetof(struct frame_queue, queuelist) + (sizeof(struct sk_buff_head) * num_prio));
++ pq->num_prio = (u16)num_prio;
++ pq->qmax = (u16)max_len;
++
++ for (prio = 0; prio < num_prio; prio++) {
++ skb_queue_head_init(&pq->queuelist[prio]);
++ }
++}
++
++struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out)
++{
++ int prio;
++
++ if (pq->qcnt == 0)
++ return NULL;
++
++ for (prio = 0; prio < pq->hi_prio; prio++)
++ if (!skb_queue_empty(&pq->queuelist[prio]))
++ break;
++
++ if (prio_out)
++ *prio_out = prio;
++
++ return skb_peek_tail(&pq->queuelist[prio]);
++}
++
++bool aicwf_is_framequeue_empty(struct frame_queue *pq)
++{
++ int prio, len = 0;
++
++ for (prio = 0; prio <= pq->hi_prio; prio++)
++ len += pq->queuelist[prio].qlen;
++
++ if(len > 0)
++ return false;
++ else
++ return true;
++}
++
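++/* Dequeue from the highest non-empty priority queue; hi_prio is lowered as
++ * the higher-priority queues drain. Returns NULL when all queues are empty. */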
++struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq)
++{
++ struct sk_buff_head *q;
++ struct sk_buff *p;
++ int prio;
++
++ if (pq->qcnt == 0)
++ return NULL;
++
++ while ((prio = pq->hi_prio) > 0 && skb_queue_empty(&pq->queuelist[prio]))
++ pq->hi_prio--;
++
++ q = &pq->queuelist[prio];
++ p = __skb_dequeue(q);
++ if (p == NULL)
++ return NULL;
++
++ pq->qcnt--;
++
++ return p;
++}
++
++static struct sk_buff *aicwf_skb_dequeue_tail(struct frame_queue *pq, int prio)
++{
++ struct sk_buff_head *q = &pq->queuelist[prio];
++ struct sk_buff *p = skb_dequeue_tail(q);
++
++ if (!p)
++ return NULL;
++
++ pq->qcnt--;
++
++ return p;
++}
++
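++/* Enqueue pkt at the given priority. If the per-priority queue is already
++ * full the packet is rejected; if only the overall limit is hit, the tail
++ * frame of the lowest-priority non-empty queue is dropped and the packet is
++ * queued there instead, unless only higher-priority traffic is pending. */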
++bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio)
++{
++ struct sk_buff *p = NULL;
++ int prio_modified = -1;
++
++ if (q->queuelist[prio].qlen < q->qmax && q->qcnt < q->qmax) {
++ aicwf_frame_queue_penq(q, prio, pkt);
++ return true;
++ }
++ if (q->queuelist[prio].qlen >= q->qmax) {
++ prio_modified = prio;
++ } else if (q->qcnt >= q->qmax) {
++ p = aicwf_frame_queue_peek_tail(q, &prio_modified);
++ if (prio_modified > prio)
++ return false;
++ }
++
++ if (prio_modified >= 0) {
++ if (prio_modified == prio)
++ return false;
++
++ p = aicwf_skb_dequeue_tail(q, prio_modified);
++ aicwf_dev_skb_free(p);
++
++ p = aicwf_frame_queue_penq(q, prio_modified, pkt);
++ if (p == NULL)
++ txrx_err("failed\n");
++ }
++
++ return p != NULL;
++}
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++void rxbuff_free(struct rx_buff *rxbuff)
++{
++ kfree(rxbuff->data);
++ kfree(rxbuff);
++}
++
++struct rx_buff *rxbuff_queue_penq(struct rx_frame_queue *pq, struct rx_buff *p)
++{
++
++ struct list_head *q;
++ if (pq->qcnt >= pq->qmax)
++ return NULL;
++
++ q = &pq->queuelist;
++ list_add_tail(&p->queue,q);
++
++ pq->qcnt++;
++
++ return p;
++}
++
++struct rx_buff *rxbuff_dequeue(struct rx_frame_queue *pq)
++{
++ struct rx_buff *p = NULL;
++
++ if (pq->qcnt == 0) {
++ printk("%s %d, rxq is empty\n", __func__, __LINE__);
++ return NULL;
++ }
++
++ if(list_empty(&pq->queuelist)) {
++ printk("%s %d, rxq is empty\n", __func__, __LINE__);
++ return NULL;
++ } else {
++ p = list_first_entry(&pq->queuelist, struct rx_buff, queue);
++ list_del_init(&p->queue);
++ pq->qcnt--;
++ }
++
++ return p;
++}
++
++bool aicwf_rxbuff_enqueue(struct device *dev, struct rx_frame_queue *rxq, struct rx_buff *pkt)
++{
++// struct rx_buff *p = NULL;
++
++ if ((rxq == NULL) || (pkt == NULL)) {
++ printk("%s %d, rxq or pkt is NULL\n", __func__, __LINE__);
++ return false;
++ }
++
++ if (rxq->qcnt < rxq->qmax) {
++ if (rxbuff_queue_penq(rxq, pkt)) {
++ return true;
++ } else {
++ printk("%s %d, rxbuff enqueue fail\n", __func__, __LINE__);
++ return false;
++ }
++ } else {
++ printk("%s %d, rxq or pkt is full\n", __func__, __LINE__);
++ return false;
++ }
++}
++#endif
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_txrxif.h
+@@ -0,0 +1,291 @@
++/**
++ * aicwf_txrxif.h
++ *
++ * bus function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#ifndef _AICWF_TXRXIF_H_
++#define _AICWF_TXRXIF_H_
++
++#include <linux/skbuff.h>
++#include <linux/sched.h>
++#include "ipc_shared.h"
++#include "aicwf_rx_prealloc.h"
++#ifdef AICWF_SDIO_SUPPORT
++#include "aicwf_sdio.h"
++#else
++#include "aicwf_usb.h"
++#endif
++
++#define CMD_BUF_MAX 1536
++#define DATA_BUF_MAX 2048
++#define TXPKT_BLOCKSIZE 512
++#define MAX_AGGR_TXPKT_LEN (1536*64)
++#define CMD_TX_TIMEOUT 5000
++#define TX_ALIGNMENT 4
++
++#ifdef CONFIG_USB_TX_AGGR
++#define MAX_USB_AGGR_TXPKT_LEN (1536*15)
++#endif
++
++#define RX_HWHRD_LEN 60 //58->60, word aligned
++#define CCMP_OR_WEP_INFO 8
++#define MAX_RXQLEN 2000
++#define RX_ALIGNMENT 4
++
++#define DEBUG_ERROR_LEVEL 0
++#define DEBUG_DEBUG_LEVEL 1
++#define DEBUG_INFO_LEVEL 2
++
++#define DBG_LEVEL DEBUG_DEBUG_LEVEL
++
++#define txrx_err(fmt, ...) pr_err("txrx_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
++#define sdio_err(fmt, ...) pr_err("sdio_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
++#define usb_err(fmt, ...) pr_err("usb_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
++#if DBG_LEVEL >= DEBUG_DEBUG_LEVEL
++#define txrx_dbg(fmt, ...) printk("txrx: " fmt, ##__VA_ARGS__)
++#define sdio_dbg(fmt, ...) printk("aicsdio: " fmt, ##__VA_ARGS__)
++#define usb_dbg(fmt, ...) printk("aicusb: " fmt, ##__VA_ARGS__)
++#else
++#define txrx_dbg(fmt, ...)
++#define sdio_dbg(fmt, ...)
++#define usb_dbg(fmt, ...)
++#endif
++#if DBG_LEVEL >= DEBUG_INFO_LEVEL
++#define txrx_info(fmt, ...) printk("txrx: " fmt, ##__VA_ARGS__)
++#define sdio_info(fmt, ...) printk("aicsdio: " fmt, ##__VA_ARGS__)
++#define usb_info(fmt, ...) printk("aicusb: " fmt, ##__VA_ARGS__)
++#else
++#define txrx_info(fmt, ...)
++#define sdio_info(fmt, ...)
++#define usb_info(fmt, ...)
++#endif
++
++enum aicwf_bus_state {
++ BUS_DOWN_ST,
++ BUS_UP_ST
++};
++
++struct aicwf_bus_ops {
++ int (*start) (struct device * dev);
++ void (*stop) (struct device * dev);
++ int (*txdata) (struct device * dev, struct sk_buff * skb);
++ int (*txmsg) (struct device * dev, u8 * msg, uint len);
++};
++
++struct frame_queue {
++ u16 num_prio;
++ u16 hi_prio;
++ u16 qmax; /* max number of queued frames */
++ u16 qcnt;
++ struct sk_buff_head queuelist[8];
++};
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++struct rx_frame_queue {
++ u16 qmax; /* max number of queued frames */
++ u16 qcnt;
++ struct list_head queuelist;
++};
++#endif
++
++struct aicwf_bus {
++ union {
++ struct aic_sdio_dev *sdio;
++ struct aic_usb_dev *usb;
++ } bus_priv;
++ struct device *dev;
++ struct aicwf_bus_ops *ops;
++ enum aicwf_bus_state state;
++ u8 *cmd_buf;
++ struct completion bustx_trgg;
++ struct completion busrx_trgg;
++#ifdef CONFIG_USB_MSG_IN_EP
++ struct completion msg_busrx_trgg;
++#endif
++
++ struct task_struct *bustx_thread;
++ struct task_struct *busrx_thread;
++#ifdef CONFIG_USB_MSG_IN_EP
++ struct task_struct *msg_busrx_thread;
++#endif
++
++};
++
++struct aicwf_tx_priv {
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev;
++ int fw_avail_bufcnt;
++ //for cmd tx
++ u8 *cmd_buf;
++ uint cmd_len;
++ bool cmd_txstate;
++ bool cmd_tx_succ;
++ struct semaphore cmd_txsema;
++ wait_queue_head_t cmd_txdone_wait;
++ //for data tx
++ atomic_t tx_pktcnt;
++
++ struct frame_queue txq;
++ spinlock_t txqlock;
++ struct semaphore txctl_sema;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev;
++#ifdef CONFIG_USB_TX_AGGR
++ int fw_avail_bufcnt;
++
++ //for data tx
++ atomic_t tx_pktcnt;
++
++ struct frame_queue txq;
++ spinlock_t txqlock;
++ spinlock_t txdlock;
++#endif
++#endif//AICWF_USB_SUPPORT
++ struct sk_buff *aggr_buf;
++ atomic_t aggr_count;
++ u8 *head;
++ u8 *tail;
++};
++
++
++#ifdef AICWF_RX_REORDER
++#define MAX_REORD_RXFRAME 250
++#define REORDER_UPDATE_TIME 500//50
++#define AICWF_REORDER_WINSIZE 64
++//SN_LESS(a, b): true if a is less than b, taking 12-bit sequence number wrap-around into account
++#define SN_LESS(a, b) (((a-b)&0x800)!=0)
++#define SN_EQUAL(a, b) (a == b)
++
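++/* Per-TID RX reorder control: ind_sn tracks the start of the reorder window,
++ * wsize_b the window size, reord_list the out-of-order frames buffered until
++ * the missing frame arrives or reord_timer/reord_timer_work flushes them. */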
++struct reord_ctrl {
++ struct aicwf_rx_priv *rx_priv;
++ u8 enable;
++ u16 ind_sn;
++ u8 wsize_b;
++ spinlock_t reord_list_lock;
++ struct list_head reord_list;
++ struct timer_list reord_timer;
++ struct work_struct reord_timer_work;
++};
++
++struct reord_ctrl_info {
++ u8 mac_addr[6];
++ struct reord_ctrl preorder_ctrl[8];
++ struct list_head list;
++};
++
++struct recv_msdu {
++ struct sk_buff *pkt;
++ u8 tid;
++ u8 forward;
++ u16 seq_num;
++ uint len;
++ u8 *rx_data;
++ //entry in the per-TID pending reorder list
++ struct list_head reord_pending_list;
++ //entry in the global free list: dequeued when a frame arrives from the bus, re-queued once it is handed to the network stack
++ struct list_head rxframe_list;
++ struct reord_ctrl *preorder_ctrl;
++};
++#endif
++
++struct aicwf_rx_priv {
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev;
++#endif
++
++ void *rwnx_vif;
++ atomic_t rx_cnt;
++ u32 data_len;
++ spinlock_t rxqlock;
++#ifdef CONFIG_PREALLOC_RX_SKB
++ struct rx_frame_queue rxq;
++#else
++ struct frame_queue rxq;
++#endif
++
++#ifdef CONFIG_USB_MSG_IN_EP
++ atomic_t msg_rx_cnt;
++ spinlock_t msg_rxqlock;
++ struct frame_queue msg_rxq;
++#endif
++
++
++#ifdef AICWF_RX_REORDER
++ spinlock_t freeq_lock;
++ struct list_head rxframes_freequeue;
++ struct list_head stas_reord_list;
++ spinlock_t stas_reord_lock;
++ struct recv_msdu *recv_frames;
++#endif
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++ spinlock_t rxbuff_lock;
++#endif
++
++};
++
++static inline int aicwf_bus_start(struct aicwf_bus *bus)
++{
++ return bus->ops->start(bus->dev);
++}
++
++static inline void aicwf_bus_stop(struct aicwf_bus *bus)
++{
++ bus->ops->stop(bus->dev);
++}
++
++static inline int aicwf_bus_txdata(struct aicwf_bus *bus, struct sk_buff *skb)
++{
++ return bus->ops->txdata(bus->dev, skb);
++}
++
++static inline int aicwf_bus_txmsg(struct aicwf_bus *bus, u8 *msg, uint len)
++{
++ return bus->ops->txmsg(bus->dev, msg, len);
++}
++
++static inline void aicwf_sched_timeout(u32 millisec)
++{
++ ulong timeout = 0, expires = 0;
++ expires = jiffies + msecs_to_jiffies(millisec);
++ timeout = millisec;
++
++ while (timeout) {
++ timeout = schedule_timeout(timeout);
++ if (time_after(jiffies, expires))
++ break;
++ }
++}
++
++int aicwf_bus_init(uint bus_hdrlen, struct device *dev);
++void aicwf_bus_deinit(struct device *dev);
++void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv);
++void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv);
++struct aicwf_tx_priv* aicwf_tx_init(void *arg);
++struct aicwf_rx_priv* aicwf_rx_init(void *arg);
++void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len);
++void aicwf_frame_queue_flush(struct frame_queue *pq);
++bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio);
++bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt);
++bool aicwf_is_framequeue_empty(struct frame_queue *pq);
++void aicwf_frame_tx(void *dev, struct sk_buff *skb);
++void aicwf_dev_skb_free(struct sk_buff *skb);
++struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq);
++struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out);
++#ifdef CONFIG_PREALLOC_RX_SKB
++void rxbuff_queue_flush(struct aicwf_rx_priv* rx_priv);
++void aicwf_rxframe_queue_init_2(struct rx_frame_queue *pq, int max_len);
++void rxbuff_free(struct rx_buff *rxbuff);
++struct rx_buff *rxbuff_dequeue(struct rx_frame_queue *pq);
++bool aicwf_rxbuff_enqueue(struct device *dev, struct rx_frame_queue *rxq, struct rx_buff *pkt);
++extern struct aicwf_rx_buff_list aic_rx_buff_list;
++#endif
++
++#endif /* _AICWF_TXRXIF_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_usb.c
+@@ -0,0 +1,2318 @@
++/**
++ * aicwf_usb.c
++ *
++ * USB bus interface implementation
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#include <linux/usb.h>
++#include <linux/kthread.h>
++#include "aicwf_txrxif.h"
++#include "aicwf_usb.h"
++#include "rwnx_tx.h"
++#include "rwnx_defs.h"
++#include "usb_host.h"
++#include "rwnx_platform.h"
++
++#ifdef CONFIG_GPIO_WAKEUP
++#ifdef CONFIG_PLATFORM_ROCKCHIP
++#include <linux/rfkill-wlan.h>
++#endif
++static int wakeup_enable;
++static u32 hostwake_irq_num;
++atomic_t irq_count;
++spinlock_t irq_lock;
++#endif
++
++#include <linux/semaphore.h>
++extern struct semaphore aicwf_deinit_sem;
++extern atomic_t aicwf_deinit_atomic;
++#define SEM_TIMOUT 2000
++
++
++#ifdef CONFIG_TXRX_THREAD_PRIO
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
++#include "uapi/linux/sched/types.h"
++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
++#include "linux/sched/types.h"
++#else
++#include "linux/sched/rt.h"
++#endif
++
++int bustx_thread_prio = 1;
++module_param(bustx_thread_prio, int, 0);
++int busrx_thread_prio = 1;
++module_param(busrx_thread_prio, int, 0);
++#endif
++
++#ifdef CONFIG_USB_RX_AGGR
++bool aicwf_usb_rx_aggr = true;
++#else
++bool aicwf_usb_rx_aggr = false;
++#endif
++atomic_t rx_urb_cnt;
++
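++/* Global TX flow control: stop (state == true) or wake the TX queues of every
++ * vif that is up; called when the free TX buffer count crosses the
++ * AICWF_USB_TX_LOW_WATER / AICWF_USB_TX_HIGH_WATER marks. */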
++void aicwf_usb_tx_flowctrl(struct rwnx_hw *rwnx_hw, bool state)
++{
++ struct rwnx_vif *rwnx_vif;
++
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (!rwnx_vif || !rwnx_vif->ndev || !rwnx_vif->up)
++ continue;
++ if (state)
++ netif_tx_stop_all_queues(rwnx_vif->ndev);//netif_stop_queue(rwnx_vif->ndev);
++ else
++ netif_tx_wake_all_queues(rwnx_vif->ndev);//netif_wake_queue(rwnx_vif->ndev);
++ }
++}
++
++static struct aicwf_usb_buf *aicwf_usb_tx_dequeue(struct aic_usb_dev *usb_dev,
++ struct list_head *q, int *counter, spinlock_t *qlock)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf;
++
++ spin_lock_irqsave(qlock, flags);
++ if (list_empty(q)) {
++ usb_buf = NULL;
++ } else {
++ usb_buf = list_first_entry(q, struct aicwf_usb_buf, list);
++ list_del_init(&usb_buf->list);
++ if (counter)
++ (*counter)--;
++ }
++ spin_unlock_irqrestore(qlock, flags);
++ return usb_buf;
++}
++
++static void aicwf_usb_tx_queue(struct aic_usb_dev *usb_dev,
++ struct list_head *q, struct aicwf_usb_buf *usb_buf, int *counter,
++ spinlock_t *qlock)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(qlock, flags);
++ list_add_tail(&usb_buf->list, q);
++ (*counter)++;
++ spin_unlock_irqrestore(qlock, flags);
++}
++
++static struct aicwf_usb_buf *aicwf_usb_rx_buf_get(struct aic_usb_dev *usb_dev)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf;
++
++ spin_lock_irqsave(&usb_dev->rx_free_lock, flags);
++ if (list_empty(&usb_dev->rx_free_list)) {
++ usb_buf = NULL;
++ } else {
++ usb_buf = list_first_entry(&usb_dev->rx_free_list, struct aicwf_usb_buf, list);
++ list_del_init(&usb_buf->list);
++ }
++ spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags);
++ return usb_buf;
++}
++
++static void aicwf_usb_rx_buf_put(struct aic_usb_dev *usb_dev, struct aicwf_usb_buf *usb_buf)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&usb_dev->rx_free_lock, flags);
++ list_add_tail(&usb_buf->list, &usb_dev->rx_free_list);
++ spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags);
++}
++
++#ifdef CONFIG_USB_MSG_IN_EP
++static struct aicwf_usb_buf *aicwf_usb_msg_rx_buf_get(struct aic_usb_dev *usb_dev)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf;
++
++ spin_lock_irqsave(&usb_dev->msg_rx_free_lock, flags);
++ if (list_empty(&usb_dev->msg_rx_free_list)) {
++ usb_buf = NULL;
++ } else {
++ usb_buf = list_first_entry(&usb_dev->msg_rx_free_list, struct aicwf_usb_buf, list);
++ list_del_init(&usb_buf->list);
++ }
++ spin_unlock_irqrestore(&usb_dev->msg_rx_free_lock, flags);
++ return usb_buf;
++}
++
++static void aicwf_usb_msg_rx_buf_put(struct aic_usb_dev *usb_dev, struct aicwf_usb_buf *usb_buf)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&usb_dev->msg_rx_free_lock, flags);
++ list_add_tail(&usb_buf->list, &usb_dev->msg_rx_free_list);
++ spin_unlock_irqrestore(&usb_dev->msg_rx_free_lock, flags);
++}
++#endif
++
++void rwnx_stop_sta_all_queues(struct rwnx_sta *sta, struct rwnx_hw *rwnx_hw)
++{
++ u8 tid;
++ struct rwnx_txq *txq;
++ for(tid=0; tid<8; tid++) {
++ txq = rwnx_txq_sta_get(sta, tid, rwnx_hw);
++ netif_stop_subqueue(txq->ndev, txq->ndev_idx);
++ }
++}
++
++void rwnx_wake_sta_all_queues(struct rwnx_sta *sta, struct rwnx_hw *rwnx_hw)
++{
++ u8 tid;
++ struct rwnx_txq *txq;
++ for(tid=0; tid<8; tid++) {
++ txq = rwnx_txq_sta_get(sta, tid, rwnx_hw);
++ netif_wake_subqueue(txq->ndev, txq->ndev_idx);
++ }
++}
++
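++/* TX-complete hook for per-STA flow control (CONFIG_PER_STA_FC): decrement the
++ * station's pending counter and, in AP mode, wake its queues again once the
++ * count drops below AICWF_USB_FC_PERSTA_LOW_WATER. */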
++static void usb_txc_sta_flowctrl(struct aicwf_usb_buf *usb_buf, struct aic_usb_dev *usb_dev)
++{
++#ifdef CONFIG_PER_STA_FC
++ unsigned long flags;
++ struct rwnx_sta *sta;
++ struct txdesc_api *hostdesc;
++ u8 sta_idx;
++ if(usb_buf->cfm)
++ hostdesc = (struct txdesc_api *)((u8 *)usb_buf->skb + 4);
++ else
++ hostdesc = (struct txdesc_api *)((u8 *)usb_buf->skb->data + 4);
++ //printk("txcpl: sta %d\n", hostdesc->host.staid);
++ sta_idx = hostdesc->host.staid;
++ if(sta_idx < NX_REMOTE_STA_MAX && !(hostdesc->host.flags & TXU_CNTRL_MGMT)) {
++ struct rwnx_vif *vif = NULL;
++ sta = &usb_dev->rwnx_hw->sta_table[sta_idx];
++ vif = usb_dev->rwnx_hw->vif_table[sta->vif_idx];
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ atomic_dec(&usb_dev->rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt);
++ //printk("sta:%d, pending:%d, flowctrl=%d\n", sta->sta_idx, sta->tx_pending_cnt, sta->flowctrl);
++ if(RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP) {
++ if(atomic_read(&usb_dev->rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt) < AICWF_USB_FC_PERSTA_LOW_WATER &&
++ usb_dev->rwnx_hw->sta_flowctrl[sta_idx].flowctrl) {
++ //AICWFDBG(LOGDEBUG, "sta 0x%x:0x%x, %d pending %d, wake\n", sta->mac_addr[4], sta->mac_addr[5], sta->sta_idx, atomic_read(&usb_dev->rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt));
++ if(!usb_dev->tbusy)
++ rwnx_wake_sta_all_queues(sta, usb_dev->rwnx_hw);
++ usb_dev->rwnx_hw->sta_flowctrl[sta_idx].flowctrl = 0;
++ }
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++ }
++#endif
++}
++
++static void aicwf_usb_tx_complete(struct urb *urb)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context;
++ struct aic_usb_dev *usb_dev = usb_buf->usbdev;
++ #ifndef CONFIG_USB_TX_AGGR
++ struct sk_buff *skb;
++ #endif
++
++ usb_txc_sta_flowctrl(usb_buf, usb_dev);
++
++#ifdef CONFIG_USB_ALIGN_DATA
++ if(usb_buf->usb_align_data) {
++ kfree(usb_buf->usb_align_data);
++ }
++#endif
++#ifndef CONFIG_USB_TX_AGGR
++ if (usb_buf->cfm == false) {
++ skb = usb_buf->skb;
++ dev_kfree_skb_any(skb);
++ }
++ #if !defined CONFIG_USB_NO_TRANS_DMA_MAP
++ else {
++ u8 *buf;
++ buf = (u8 *)usb_buf->skb;
++ kfree(buf);
++ }
++ #endif
++ usb_buf->skb = NULL;
++#else
++ AICWFDBG(LOGDEBUG,"tx com %d\n", usb_buf->aggr_cnt);
++ usb_buf->aggr_cnt = 0;
++#endif//CONFIG_USB_TX_AGGR
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ if (usb_dev->tx_free_count > AICWF_USB_TX_HIGH_WATER) {
++ if (usb_dev->tbusy) {
++ usb_dev->tbusy = false;
++ aicwf_usb_tx_flowctrl(usb_dev->rwnx_hw, false);
++ }
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++}
++
++void aicwf_usb_rx_submit_all_urb_(struct aic_usb_dev *usb_dev);
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++static void aicwf_usb_rx_complete(struct urb *urb)
++{
++ struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context;
++ struct aic_usb_dev *usb_dev = usb_buf->usbdev;
++ struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv;
++ struct rx_buff *rx_buff = NULL;
++ unsigned long flags = 0;
++
++ rx_buff = usb_buf->rx_buff;
++ usb_buf->rx_buff = NULL;
++
++ atomic_dec(&rx_urb_cnt);
++ if(atomic_read(&rx_urb_cnt) < 10){
++ AICWFDBG(LOGDEBUG, "%s %d \r\n", __func__, atomic_read(&rx_urb_cnt));
++ //printk("%s %d \r\n", __func__, atomic_read(&rx_urb_cnt));
++ }
++
++ if(!usb_dev->rwnx_hw){
++ aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ AICWFDBG(LOGERROR, "usb_dev->rwnx_hw is not ready \r\n");
++ return;
++ }
++
++ if (urb->actual_length > urb->transfer_buffer_length) {
++ aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ return;
++ }
++
++ if (urb->status != 0 || !urb->actual_length) {
++ aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ if(urb->status < 0){
++ AICWFDBG(LOGDEBUG, "%s urb->status:%d \r\n", __func__, urb->status);
++
++ if(g_rwnx_plat->wait_disconnect_cb == false){
++ g_rwnx_plat->wait_disconnect_cb = true;
++ if(atomic_read(&aicwf_deinit_atomic) > 0){
++ atomic_set(&aicwf_deinit_atomic, 0);
++ AICWFDBG(LOGERROR, "%s in_interrupt:%d in_softirq:%d in_atomic:%d\r\n", __func__, (int)in_interrupt(), (int)in_softirq(), (int)in_atomic());
++ down(&aicwf_deinit_sem);
++ AICWFDBG(LOGINFO, "%s need to wait for disconnect callback \r\n", __func__);
++ }else{
++ g_rwnx_plat->wait_disconnect_cb = false;
++ }
++ }
++
++ return;
++ }else{
++ //schedule_work(&usb_dev->rx_urb_work);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ return;
++ }
++ }
++
++ if (usb_dev->state == USB_UP_ST) {
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++
++ if(!aicwf_rxbuff_enqueue(usb_dev->dev, &rx_priv->rxq, rx_buff)){
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ usb_err("rx_priv->rxq is over flow!!!\n");
++ aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ return;
++ }
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ atomic_inc(&rx_priv->rx_cnt);
++
++ if(atomic_read(&rx_priv->rx_cnt) == 1){
++ complete(&rx_priv->usbdev->bus_if->busrx_trgg);
++ }
++
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ } else {
++ aicwf_prealloc_rxbuff_free(rx_buff, &rx_priv->rxbuff_lock);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ }
++}
++
++#else
++static void aicwf_usb_rx_complete(struct urb *urb)
++{
++ struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context;
++ struct aic_usb_dev *usb_dev = usb_buf->usbdev;
++ struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv;
++ struct sk_buff *skb = NULL;
++ unsigned long flags = 0;
++
++ skb = usb_buf->skb;
++ usb_buf->skb = NULL;
++
++ atomic_dec(&rx_urb_cnt);
++ if(atomic_read(&rx_urb_cnt) < 10){
++ AICWFDBG(LOGDEBUG, "%s %d \r\n", __func__, atomic_read(&rx_urb_cnt));
++ //printk("%s %d \r\n", __func__, atomic_read(&rx_urb_cnt));
++ }
++
++ if(!usb_dev->rwnx_hw){
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ AICWFDBG(LOGERROR, "usb_dev->rwnx_hw is not ready \r\n");
++ return;
++ }
++
++ if (urb->actual_length > urb->transfer_buffer_length) {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ return;
++ }
++
++ if (urb->status != 0 || !urb->actual_length) {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ if(urb->status < 0){
++ AICWFDBG(LOGDEBUG, "%s urb->status:%d \r\n", __func__, urb->status);
++
++ if(g_rwnx_plat->wait_disconnect_cb == false){
++ g_rwnx_plat->wait_disconnect_cb = true;
++ if(atomic_read(&aicwf_deinit_atomic) > 0){
++ atomic_set(&aicwf_deinit_atomic, 0);
++ AICWFDBG(LOGERROR, "%s in_interrupt:%d in_softirq:%d in_atomic:%d\r\n", __func__, (int)in_interrupt(), (int)in_softirq(), (int)in_atomic());
++ down(&aicwf_deinit_sem);
++ AICWFDBG(LOGINFO, "%s need to wait for disconnect callback \r\n", __func__);
++ }else{
++ g_rwnx_plat->wait_disconnect_cb = false;
++ }
++ }
++
++ return;
++ }else{
++ //schedule_work(&usb_dev->rx_urb_work);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ return;
++ }
++ }
++
++ if ((urb->actual_length > 1600 * 30) && (aicwf_usb_rx_aggr)) {
++ printk("r%d\n", urb->actual_length);
++ }
++
++ if (usb_dev->state == USB_UP_ST) {
++
++ skb_put(skb, urb->actual_length);
++
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if (aicwf_usb_rx_aggr) {
++ skb->len = urb->actual_length;
++ }
++ if(!aicwf_rxframe_enqueue(usb_dev->dev, &rx_priv->rxq, skb)){
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ usb_err("rx_priv->rxq is over flow!!!\n");
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ return;
++ }
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ atomic_inc(&rx_priv->rx_cnt);
++
++#ifndef CONFIG_RX_TASKLET
++ //if(!rx_priv->rx_thread_working && (atomic_read(&rx_priv->rx_cnt)>0)){
++ if(atomic_read(&rx_priv->rx_cnt) == 1){
++ complete(&rx_priv->usbdev->bus_if->busrx_trgg);
++ }
++#else
++ tasklet_schedule(&rx_priv->usbdev->recv_tasklet);
++#endif
++
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_rx_submit_all_urb_(usb_dev);
++ //schedule_work(&usb_dev->rx_urb_work);
++ } else {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ }
++}
++#endif
++
++#ifdef CONFIG_USB_MSG_IN_EP
++void aicwf_usb_msg_rx_submit_all_urb_(struct aic_usb_dev *usb_dev);
++
++static void aicwf_usb_msg_rx_complete(struct urb *urb)
++{
++ struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context;
++ struct aic_usb_dev *usb_dev = usb_buf->usbdev;
++ struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv;
++ struct sk_buff *skb = NULL;
++ unsigned long flags = 0;
++
++ skb = usb_buf->skb;
++ usb_buf->skb = NULL;
++
++ if (urb->actual_length > urb->transfer_buffer_length) {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_msg_rx_submit_all_urb_(usb_dev);
++ //schedule_work(&usb_dev->msg_rx_urb_work);
++ return;
++ }
++
++ if (urb->status != 0 || !urb->actual_length) {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++
++ if(urb->status < 0){
++ AICWFDBG(LOGDEBUG, "%s urb->status:%d \r\n", __func__, urb->status);
++ return;
++ }else{
++ aicwf_usb_msg_rx_submit_all_urb_(usb_dev);
++ //schedule_work(&usb_dev->msg_rx_urb_work);
++ return;
++ }
++ }
++
++ if (usb_dev->state == USB_UP_ST) {
++ skb_put(skb, urb->actual_length);
++
++ spin_lock_irqsave(&rx_priv->msg_rxqlock, flags);
++ if(!aicwf_rxframe_enqueue(usb_dev->dev, &rx_priv->msg_rxq, skb)){
++ spin_unlock_irqrestore(&rx_priv->msg_rxqlock, flags);
++ usb_err("rx_priv->rxq is over flow!!!\n");
++ aicwf_dev_skb_free(skb);
++ return;
++ }
++ spin_unlock_irqrestore(&rx_priv->msg_rxqlock, flags);
++ atomic_inc(&rx_priv->msg_rx_cnt);
++ complete(&rx_priv->usbdev->bus_if->msg_busrx_trgg);
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++ aicwf_usb_msg_rx_submit_all_urb_(usb_dev);
++ //schedule_work(&usb_dev->msg_rx_urb_work);
++ } else {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++ }
++}
++#endif
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++//extern int aic_rxbuff_size;
++static int aicwf_usb_submit_rx_urb(struct aic_usb_dev *usb_dev,
++ struct aicwf_usb_buf *usb_buf)
++{
++ int ret;
++ struct rx_buff *rx_buff;
++
++ if (!usb_buf || !usb_dev)
++ return -1;
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\r\n");
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++ rx_buff = aicwf_prealloc_rxbuff_alloc(&usb_dev->rx_priv->rxbuff_lock);
++ if (rx_buff == NULL) {
++ AICWFDBG(LOGERROR, "failed to alloc rxbuff\r\n");
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++ rx_buff->len = 0;
++ rx_buff->start = rx_buff->data;
++ rx_buff->read = rx_buff->start;
++ rx_buff->end = rx_buff->data + aicwf_rxbuff_size_get();
++
++ usb_buf->rx_buff = rx_buff;
++
++ usb_fill_bulk_urb(usb_buf->urb,
++ usb_dev->udev,
++ usb_dev->bulk_in_pipe,
++ rx_buff->data, aicwf_rxbuff_size_get(), aicwf_usb_rx_complete, usb_buf);
++
++ usb_buf->usbdev = usb_dev;
++
++ usb_anchor_urb(usb_buf->urb, &usb_dev->rx_submitted);
++ ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC);
++ if (ret) {
++ usb_err("usb submit rx urb fail:%d\n", ret);
++ usb_unanchor_urb(usb_buf->urb);
++ aicwf_prealloc_rxbuff_free(rx_buff, &usb_dev->rx_priv->rxbuff_lock);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++
++ msleep(100);
++ }else{
++ atomic_inc(&rx_urb_cnt);
++ }
++ return 0;
++}
++
++#else
++static int aicwf_usb_submit_rx_urb(struct aic_usb_dev *usb_dev,
++ struct aicwf_usb_buf *usb_buf)
++{
++ struct sk_buff *skb;
++ int ret;
++
++ if (!usb_buf || !usb_dev)
++ return -1;
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\n");
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++
++ if(aicwf_usb_rx_aggr){
++ skb = __dev_alloc_skb(AICWF_USB_AGGR_MAX_PKT_SIZE, GFP_ATOMIC/*GFP_KERNEL*/);
++ } else {
++ skb = __dev_alloc_skb(AICWF_USB_MAX_PKT_SIZE, GFP_ATOMIC/*GFP_KERNEL*/);
++ }
++ if (!skb) {
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++
++ usb_buf->skb = skb;
++
++ usb_fill_bulk_urb(usb_buf->urb,
++ usb_dev->udev,
++ usb_dev->bulk_in_pipe,
++ skb->data, skb_tailroom(skb), aicwf_usb_rx_complete, usb_buf);
++
++ usb_buf->usbdev = usb_dev;
++
++ usb_anchor_urb(usb_buf->urb, &usb_dev->rx_submitted);
++ ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC);
++ if (ret) {
++ usb_err("usb submit rx urb fail:%d\n", ret);
++ usb_unanchor_urb(usb_buf->urb);
++ aicwf_dev_skb_free(usb_buf->skb);
++ usb_buf->skb = NULL;
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++
++ msleep(100);
++ }else{
++ atomic_inc(&rx_urb_cnt);
++ }
++ return 0;
++}
++#endif
++
++static void aicwf_usb_rx_submit_all_urb(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++// int i = 0;
++
++ if (usb_dev->state != USB_UP_ST) {
++ AICWFDBG(LOGERROR, "bus is not up=%d\n", usb_dev->state);
++ return;
++ }
++
++ while((usb_buf = aicwf_usb_rx_buf_get(usb_dev)) != NULL) {
++ if (aicwf_usb_submit_rx_urb(usb_dev, usb_buf)) {
++ AICWFDBG(LOGERROR, "usb rx refill fail\n");
++ if (usb_dev->state != USB_UP_ST)
++ return;
++ }
++ }
++}
++
++#ifdef CONFIG_USB_MSG_IN_EP
++static int aicwf_usb_submit_msg_rx_urb(struct aic_usb_dev *usb_dev,
++ struct aicwf_usb_buf *usb_buf)
++{
++ struct sk_buff *skb;
++ int ret;
++
++ if (!usb_buf || !usb_dev)
++ return -1;
++
++ if (usb_dev->state != USB_UP_ST) {
++ AICWFDBG(LOGERROR, "usb state is not up!\n");
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++
++ skb = __dev_alloc_skb(AICWF_USB_MAX_PKT_SIZE, GFP_ATOMIC);
++ if (!skb) {
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++
++ usb_buf->skb = skb;
++
++ usb_fill_bulk_urb(usb_buf->urb,
++ usb_dev->udev,
++ usb_dev->msg_in_pipe,
++ skb->data, skb_tailroom(skb), aicwf_usb_msg_rx_complete, usb_buf);
++
++ usb_buf->usbdev = usb_dev;
++
++ usb_anchor_urb(usb_buf->urb, &usb_dev->msg_rx_submitted);
++ ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC);
++ if (ret) {
++ AICWFDBG(LOGERROR, "usb submit msg rx urb fail:%d\n", ret);
++ usb_unanchor_urb(usb_buf->urb);
++ aicwf_dev_skb_free(usb_buf->skb);
++ usb_buf->skb = NULL;
++ aicwf_usb_msg_rx_buf_put(usb_dev, usb_buf);
++
++ msleep(100);
++ }
++ return 0;
++}
++
++
++static void aicwf_usb_msg_rx_submit_all_urb(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++
++ if (usb_dev->state != USB_UP_ST) {
++ AICWFDBG(LOGERROR, "bus is not up=%d\n", usb_dev->state);
++ return;
++ }
++
++ while((usb_buf = aicwf_usb_msg_rx_buf_get(usb_dev)) != NULL) {
++ if (aicwf_usb_submit_msg_rx_urb(usb_dev, usb_buf)) {
++ AICWFDBG(LOGERROR, "usb msg rx refill fail\n");
++ if (usb_dev->state != USB_UP_ST)
++ return;
++ }
++ }
++}
++#endif
++
++#ifdef CONFIG_USB_MSG_IN_EP
++void aicwf_usb_msg_rx_submit_all_urb_(struct aic_usb_dev *usb_dev){
++ aicwf_usb_msg_rx_submit_all_urb(usb_dev);
++}
++#endif
++
++void aicwf_usb_rx_submit_all_urb_(struct aic_usb_dev *usb_dev){
++ aicwf_usb_rx_submit_all_urb(usb_dev);
++}
++
++
++static void aicwf_usb_rx_prepare(struct aic_usb_dev *usb_dev)
++{
++ aicwf_usb_rx_submit_all_urb(usb_dev);
++}
++
++#ifdef CONFIG_USB_MSG_IN_EP
++static void aicwf_usb_msg_rx_prepare(struct aic_usb_dev *usb_dev)
++{
++ aicwf_usb_msg_rx_submit_all_urb(usb_dev);
++}
++#endif
++
++
++static void aicwf_usb_tx_prepare(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++
++ while(!list_empty(&usb_dev->tx_post_list)){
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ #ifndef CONFIG_USB_TX_AGGR
++ if(usb_buf->skb) {
++ dev_kfree_skb(usb_buf->skb);
++ usb_buf->skb = NULL;
++ }
++ #endif
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++ }
++}
++
++#ifdef CONFIG_USB_TX_AGGR
++int aicwf_usb_send_pkt(struct aic_usb_dev *usb_dev, u8 *buf, uint buf_len)
++{
++ int ret = 0;
++ struct aicwf_usb_buf *usb_buf;
++ unsigned long flags;
++ bool need_cfm = false;
++
++ if (usb_dev->state != USB_UP_ST) {
++ AICWFDBG(LOGERROR, "usb state is not up!\n");
++ return -EIO;
++ }
++
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_free_list,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++ if (!usb_buf) {
++ AICWFDBG(LOGERROR, "free:%d, post:%d\n", usb_dev->tx_free_count, usb_dev->tx_post_count);
++ ret = -ENOMEM;
++ goto flow_ctrl;
++ }
++
++ usb_buf->skb = (struct sk_buff *)buf;
++ usb_buf->usbdev = usb_dev;
++ if (need_cfm)
++ usb_buf->cfm = true;
++ else
++ usb_buf->cfm = false;
++ AICWFDBG(LOGERROR, "%s len %d\n", __func__, buf_len);
++ print_hex_dump(KERN_ERR, "buf ", DUMP_PREFIX_NONE, 16, 1, &buf[0], 32, false);
++ usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe,
++ buf, buf_len, aicwf_usb_tx_complete, usb_buf);
++ usb_buf->urb->transfer_flags |= URB_ZERO_PACKET;
++
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ ret = 0;
++
++ flow_ctrl:
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ if (usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER) {
++ usb_dev->tbusy = true;
++ aicwf_usb_tx_flowctrl(usb_dev->rwnx_hw, true);
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++
++ return ret;
++}
++
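++/* Append one packet to the TX aggregation buffer: an 8-byte USB header (the
++ * packet length repeated three times, type byte 0x01 for data), followed by
++ * the txdesc_api host descriptor and the 802.11 payload, padded up to
++ * TX_ALIGNMENT. The skb is consumed here unless a confirmation is required. */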
++int aicwf_usb_aggr(struct aicwf_tx_priv *tx_priv, struct sk_buff *pkt)
++{
++ struct rwnx_txhdr *txhdr = (struct rwnx_txhdr *)pkt->data;
++ u8 usb_header[8];
++ u8 adjust_str[4] = {0, 0, 0, 0};
++ u32 curr_len = 0;
++ int allign_len = 0;
++ u32 data_len = (pkt->len - sizeof(struct rwnx_txhdr) + sizeof(struct txdesc_api)) + 4;
++
++ usb_header[0] =(data_len & 0xff);
++ usb_header[1] =((data_len >> 8)&0x0f);
++ usb_header[2] =(data_len & 0xff);
++ usb_header[3] =((data_len >> 8)&0x0f);
++
++ usb_header[4] =(data_len & 0xff);
++ usb_header[5] =((data_len >> 8)&0x0f);
++ usb_header[6] = 0x01; //data
++ usb_header[7] = 0; //reserved
++
++ memcpy(tx_priv->tail, (u8 *)&usb_header, sizeof(usb_header));
++ tx_priv->tail += sizeof(usb_header);
++ //payload
++ memcpy(tx_priv->tail, (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api));
++ tx_priv->tail += sizeof(struct txdesc_api); //hostdesc
++ memcpy(tx_priv->tail, (u8 *)((u8 *)txhdr + txhdr->sw_hdr->headroom), pkt->len-txhdr->sw_hdr->headroom);
++ tx_priv->tail += (pkt->len - txhdr->sw_hdr->headroom);
++
++ //word alignment
++ curr_len = tx_priv->tail - tx_priv->head;
++ if (curr_len & (TX_ALIGNMENT - 1)) {
++ allign_len = roundup(curr_len, TX_ALIGNMENT)-curr_len;
++ memcpy(tx_priv->tail, adjust_str, allign_len);
++ tx_priv->tail += allign_len;
++ }
++
++ tx_priv->aggr_buf->dev = pkt->dev;
++
++ if(!txhdr->sw_hdr->need_cfm) {
++ kmem_cache_free(txhdr->sw_hdr->rwnx_vif->rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr);
++ skb_pull(pkt, txhdr->sw_hdr->headroom);
++ consume_skb(pkt);
++ }
++
++ atomic_inc(&tx_priv->aggr_count);
++ return 0;
++}
++
++int aicwf_usb_send(struct aicwf_tx_priv *tx_priv)
++{
++ struct sk_buff *pkt;
++ struct sk_buff *tx_buf;
++ struct aic_usb_dev *usbdev = tx_priv->usbdev;
++ struct aicwf_usb_buf *usb_buf;
++ u8* buf;
++ int ret = 0;
++ int curr_len = 0;
++ unsigned long flags;
++
++ if (aicwf_is_framequeue_empty(&tx_priv->txq)) {
++ ret = -1;
++ AICWFDBG(LOGERROR, "no buf to send\n");
++ return ret;
++ }
++
++ if (usbdev->state != USB_UP_ST) {
++ AICWFDBG(LOGERROR, "usb state is not up!\n");
++ ret = -ENODEV;
++ return ret;
++ }
++ usb_buf = aicwf_usb_tx_dequeue(usbdev, &usbdev->tx_free_list,
++ &usbdev->tx_free_count, &usbdev->tx_free_lock);
++ if (!usb_buf) {
++ AICWFDBG(LOGERROR, "free:%d, post:%d\n", usbdev->tx_free_count, usbdev->tx_post_count);
++ ret = -ENOMEM;
++ return ret;
++ }
++
++ usb_buf->aggr_cnt = 0;
++ spin_lock_bh(&usbdev->tx_priv->txdlock);
++ tx_priv->head = usb_buf->skb->data;
++ tx_priv->tail = usb_buf->skb->data;
++
++ while (!aicwf_is_framequeue_empty(&usbdev->tx_priv->txq)) {
++ if (usbdev->state != USB_UP_ST) {
++ AICWFDBG(LOGERROR, "usb state is not up, break!\n");
++ ret = -ENODEV;
++ break;
++ }
++
++ if (usb_buf->aggr_cnt == 10) {
++ break;
++ }
++ spin_lock_bh(&usbdev->tx_priv->txqlock);
++ pkt = aicwf_frame_dequeue(&usbdev->tx_priv->txq);
++ if (pkt == NULL) {
++ AICWFDBG(LOGERROR, "txq no pkt\n");
++ spin_unlock_bh(&usbdev->tx_priv->txqlock);
++ /* drop the aggregation lock and return the unused usb_buf to the
++ * free list before bailing out */
++ spin_unlock_bh(&usbdev->tx_priv->txdlock);
++ aicwf_usb_tx_queue(usbdev, &usbdev->tx_free_list, usb_buf,
++ &usbdev->tx_free_count, &usbdev->tx_free_lock);
++ ret = -1;
++ return ret;
++ }
++ atomic_dec(&usbdev->tx_priv->tx_pktcnt);
++ spin_unlock_bh(&usbdev->tx_priv->txqlock);
++ if(tx_priv==NULL || tx_priv->tail==NULL || pkt==NULL) {
++ AICWFDBG(LOGERROR, "null error\n");
++ }
++ aicwf_usb_aggr(tx_priv, pkt);
++ usb_buf->aggr_cnt++;
++ }
++
++ tx_buf = usb_buf->skb;
++ buf = tx_buf->data;
++
++ curr_len = tx_priv->tail - tx_priv->head;
++
++ AICWFDBG(LOGTRACE, "%s len %d, cnt %d\n", __func__,curr_len, usb_buf->aggr_cnt);
++ tx_buf->len = tx_priv->tail - tx_priv->head;
++ spin_unlock_bh(&usbdev->tx_priv->txdlock);
++ usb_fill_bulk_urb(usb_buf->urb, usbdev->udev, usbdev->bulk_out_pipe,
++ buf, curr_len, aicwf_usb_tx_complete, usb_buf);
++ usb_buf->urb->transfer_flags |= URB_ZERO_PACKET;
++
++ aicwf_usb_tx_queue(usbdev, &usbdev->tx_post_list, usb_buf,
++ &usbdev->tx_post_count, &usbdev->tx_post_lock);
++
++ flow_ctrl:
++ spin_lock_irqsave(&usbdev->tx_flow_lock, flags);
++ if (usbdev->tx_free_count < AICWF_USB_TX_LOW_WATER) {
++ usbdev->tbusy = true;
++ aicwf_usb_tx_flowctrl(usbdev->rwnx_hw, true);
++ }
++ spin_unlock_irqrestore(&usbdev->tx_flow_lock, flags);
++
++ return ret;
++}
++
++#endif
++
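++/* Drain tx_post_list and submit each pending URB. With CONFIG_USB_TX_AGGR the
++ * aggregation queue is flushed into a single URB first. On submit failure the
++ * buffer is either re-queued (aggregated path) or dropped back to the free
++ * list after undoing the per-STA flow-control accounting. */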
++static void aicwf_usb_tx_process(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++ int ret = 0;
++ u8* data = NULL;
++
++#ifdef CONFIG_USB_TX_AGGR
++ if (!aicwf_is_framequeue_empty(&usb_dev->tx_priv->txq)) {
++ if (aicwf_usb_send(usb_dev->tx_priv)) {
++ AICWFDBG(LOGERROR, "%s no buf send\n", __func__);
++ }
++ }
++#endif
++
++ while(!list_empty(&usb_dev->tx_post_list)) {
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\n");
++ return;
++ }
++
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ if(!usb_buf) {
++ usb_err("can not get usb_buf from tx_post_list!\n");
++ return;
++ }
++ data = usb_buf->skb->data;
++
++ ret = usb_submit_urb(usb_buf->urb, GFP_KERNEL);
++ if (ret) {
++ AICWFDBG(LOGERROR, "aicwf_usb_bus_tx usb_submit_urb FAILED err:%d\n", ret);
++ #ifdef CONFIG_USB_TX_AGGR
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ break;
++ #else
++ goto fail;
++ #endif
++ }
++
++ continue;
++#ifndef CONFIG_USB_TX_AGGR
++fail:
++ usb_txc_sta_flowctrl(usb_buf, usb_dev);
++ dev_kfree_skb(usb_buf->skb);
++ usb_buf->skb = NULL;
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++#endif
++ }
++}
++
++#ifdef CONFIG_TX_TASKLET
++void aicwf_tasklet_tx_process(struct aic_usb_dev *usb_dev){
++ aicwf_usb_tx_process(usb_dev);
++}
++#endif
++
++static inline void aic_thread_wait_stop(void)
++{
++#if 1// PLATFORM_LINUX
++ #if 0
++ while (!kthread_should_stop())
++ rtw_msleep_os(10);
++ #else
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ #endif
++#endif
++}
++
++
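++/* Bus TX worker thread: optionally promoted to SCHED_FIFO via the
++ * bustx_thread_prio module parameter, it waits on bustx_trgg and calls
++ * aicwf_usb_tx_process() until the bus goes down. */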
++int usb_bustx_thread(void *data)
++{
++ struct aicwf_bus *bus = (struct aicwf_bus *)data;
++ struct aic_usb_dev *usbdev = bus->bus_priv.usb;
++
++#ifdef CONFIG_TXRX_THREAD_PRIO
++ if (bustx_thread_prio > 0) {
++ struct sched_param param;
++ param.sched_priority = (bustx_thread_prio < MAX_RT_PRIO)?bustx_thread_prio:(MAX_RT_PRIO-1);
++ sched_setscheduler(current, SCHED_FIFO, &param);
++ }
++#endif
++ AICWFDBG(LOGINFO, "%s the policy of current thread is:%d\n", __func__, current->policy);
++ AICWFDBG(LOGINFO, "%s the rt_priority of current thread is:%d\n", __func__, current->rt_priority);
++ AICWFDBG(LOGINFO, "%s the current pid is:%d\n", __func__, current->pid);
++
++
++ while (1) {
++ #if 0
++ if(kthread_should_stop()) {
++ usb_err("usb bustx thread stop 2\n");
++ break;
++ }
++ #endif
++ if (!wait_for_completion_interruptible(&bus->bustx_trgg)) {
++ if(usbdev->bus_if->state == BUS_DOWN_ST){
++ AICWFDBG(LOGINFO, "usb bustx thread will to stop\n");
++ break;
++ }
++ #ifdef CONFIG_USB_TX_AGGR
++ if ((usbdev->tx_post_count > 0) || !aicwf_is_framequeue_empty(&usbdev->tx_priv->txq))
++ #else
++ if (usbdev->tx_post_count > 0)
++ #endif
++ aicwf_usb_tx_process(usbdev);
++ }
++ }
++
++ aic_thread_wait_stop();
++ AICWFDBG(LOGINFO, "usb bustx thread stop\n");
++
++ return 0;
++}
++
++int usb_busrx_thread(void *data)
++{
++ struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
++ struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if;
++
++#ifdef CONFIG_TXRX_THREAD_PRIO
++ if (busrx_thread_prio > 0) {
++ struct sched_param param;
++ param.sched_priority = (busrx_thread_prio < MAX_RT_PRIO)?busrx_thread_prio:(MAX_RT_PRIO-1);
++ sched_setscheduler(current, SCHED_FIFO, &param);
++ }
++#endif
++ AICWFDBG(LOGINFO, "%s the policy of current thread is:%d\n", __func__, current->policy);
++ AICWFDBG(LOGINFO, "%s the rt_priority of current thread is:%d\n", __func__, current->rt_priority);
++ AICWFDBG(LOGINFO, "%s the current pid is:%d\n", __func__, current->pid);
++
++ while (1) {
++#if 0
++ if(kthread_should_stop()) {
++ usb_err("usb busrx thread stop 2\n");
++ break;
++ }
++#endif
++ //rx_priv->rx_thread_working = 0;//AIDEN
++ if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) {
++ if(bus_if->state == BUS_DOWN_ST){
++ AICWFDBG(LOGINFO, "usb busrx thread will to stop\n");
++ break;
++ }
++ //rx_priv->rx_thread_working = 1;//AIDEN
++ aicwf_process_rxframes(rx_priv);
++ }
++ }
++
++ aic_thread_wait_stop();
++ AICWFDBG(LOGINFO, "usb busrx thread stop\n");
++
++ return 0;
++}
++
++#ifdef CONFIG_USB_MSG_IN_EP
++int usb_msg_busrx_thread(void *data)
++{
++ struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
++ struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if;
++
++#ifdef CONFIG_TXRX_THREAD_PRIO
++ if (busrx_thread_prio > 0) {
++ struct sched_param param;
++ param.sched_priority = (busrx_thread_prio < MAX_RT_PRIO)?busrx_thread_prio:(MAX_RT_PRIO-1);
++ sched_setscheduler(current, SCHED_FIFO, &param);
++ }
++#endif
++ AICWFDBG(LOGINFO, "%s the policy of current thread is:%d\n", __func__, current->policy);
++ AICWFDBG(LOGINFO, "%s the rt_priority of current thread is:%d\n", __func__, current->rt_priority);
++ AICWFDBG(LOGINFO, "%s the current pid is:%d\n", __func__, current->pid);
++
++
++
++ while (1) {
++ if(kthread_should_stop()) {
++ usb_err("usb msg busrx thread stop\n");
++ break;
++ }
++ if (!wait_for_completion_interruptible(&bus_if->msg_busrx_trgg)) {
++ if(bus_if->state == BUS_DOWN_ST)
++ break;
++ aicwf_process_msg_rxframes(rx_priv);
++ }
++ }
++
++ return 0;
++}
++#endif
++
++
++static void aicwf_usb_send_msg_complete(struct urb *urb)
++{
++ struct aic_usb_dev *usb_dev = (struct aic_usb_dev *) urb->context;
++
++ usb_dev->msg_finished = true;
++ if (waitqueue_active(&usb_dev->msg_wait))
++ wake_up(&usb_dev->msg_wait);
++}
++
++static int aicwf_usb_bus_txmsg(struct device *dev, u8 *buf, u32 len)
++{
++ int ret = 0;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++
++ if (usb_dev->state != USB_UP_ST)
++ return -EIO;
++
++ if (buf == NULL || len == 0 || usb_dev->msg_out_urb == NULL)
++ return -EINVAL;
++
++#if 0
++ if (test_and_set_bit(0, &usb_dev->msg_busy)) {
++ usb_err("In a control frame option, can't tx!\n");
++ return -EIO;
++ }
++#endif
++
++ usb_dev->msg_finished = false;
++
++#ifdef CONFIG_USB_MSG_OUT_EP
++ if (usb_dev->msg_out_pipe) {
++ usb_fill_bulk_urb(usb_dev->msg_out_urb,
++ usb_dev->udev,
++ usb_dev->msg_out_pipe,
++ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
++ } else {
++ usb_fill_bulk_urb(usb_dev->msg_out_urb,
++ usb_dev->udev,
++ usb_dev->bulk_out_pipe,
++ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
++ }
++#else
++ usb_fill_bulk_urb(usb_dev->msg_out_urb,
++ usb_dev->udev,
++ usb_dev->bulk_out_pipe,
++ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
++#endif
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ usb_dev->msg_out_urb->transfer_dma = usb_dev->cmd_dma_trans_addr;
++ usb_dev->msg_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++ #endif
++#ifdef CONFIG_USE_USB_ZERO_PACKET
++ usb_dev->msg_out_urb->transfer_flags |= URB_ZERO_PACKET;
++#endif
++ ret = usb_submit_urb(usb_dev->msg_out_urb, GFP_ATOMIC);
++ if (ret) {
++ usb_err("usb_submit_urb failed %d\n", ret);
++ goto exit;
++ }
++
++ ret = wait_event_timeout(usb_dev->msg_wait,
++ usb_dev->msg_finished, msecs_to_jiffies(CMD_TX_TIMEOUT));
++ if (!ret) {
++ if (usb_dev->msg_out_urb)
++ usb_kill_urb(usb_dev->msg_out_urb);
++ usb_err("Txmsg wait timed out\n");
++ ret = -EIO;
++ goto exit;
++ }
++
++ if (usb_dev->msg_finished == false) {
++ usb_err("Txmsg timed out\n");
++ ret = -ETIMEDOUT;
++ goto exit;
++ }
++exit:
++#if 0
++ clear_bit(0, &usb_dev->msg_busy);
++#endif
++ return ret;
++}
++
++
++static void aicwf_usb_free_urb(struct list_head *q, spinlock_t *qlock)
++{
++ struct aicwf_usb_buf *usb_buf, *tmp;
++ unsigned long flags;
++
++ spin_lock_irqsave(qlock, flags);
++ list_for_each_entry_safe(usb_buf, tmp, q, list) {
++ spin_unlock_irqrestore(qlock, flags);
++ if (!usb_buf->urb) {
++ usb_err("bad usb_buf\n");
++ spin_lock_irqsave(qlock, flags);
++ break;
++ }
++ #ifdef CONFIG_USB_TX_AGGR
++ if (usb_buf->skb) {
++ dev_kfree_skb(usb_buf->skb);
++ }
++ #endif
++ usb_free_urb(usb_buf->urb);
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ // free dma buf if needed
++ if (usb_buf->data_buf) {
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ usb_free_coherent(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr);
++ #else
++ usb_buffer_free(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr);
++ #endif
++ usb_buf->data_buf = NULL;
++ usb_buf->data_dma_trans_addr = 0x0;
++ }
++ #endif
++ list_del_init(&usb_buf->list);
++ spin_lock_irqsave(qlock, flags);
++ }
++ spin_unlock_irqrestore(qlock, flags);
++}
++
++static int aicwf_usb_alloc_rx_urb(struct aic_usb_dev *usb_dev)
++{
++ int i;
++
++ AICWFDBG(LOGINFO, "%s AICWF_USB_RX_URBS:%d \r\n", __func__, AICWF_USB_RX_URBS);
++ for (i = 0; i < AICWF_USB_RX_URBS; i++) {
++ struct aicwf_usb_buf *usb_buf = &usb_dev->usb_rx_buf[i];
++
++ usb_buf->usbdev = usb_dev;
++ usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!usb_buf->urb) {
++ usb_err("could not allocate rx data urb\n");
++ goto err;
++ }
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ // dma buf unused
++ usb_buf->data_buf = NULL;
++ usb_buf->data_dma_trans_addr = 0x0;
++ #endif
++ list_add_tail(&usb_buf->list, &usb_dev->rx_free_list);
++ }
++ return 0;
++
++err:
++ aicwf_usb_free_urb(&usb_dev->rx_free_list, &usb_dev->rx_free_lock);
++ return -ENOMEM;
++}
++
++static int aicwf_usb_alloc_tx_urb(struct aic_usb_dev *usb_dev)
++{
++ int i;
++
++ AICWFDBG(LOGINFO, "%s AICWF_USB_TX_URBS:%d \r\n", __func__, AICWF_USB_TX_URBS);
++ for (i = 0; i < AICWF_USB_TX_URBS; i++) {
++ struct aicwf_usb_buf *usb_buf = &usb_dev->usb_tx_buf[i];
++
++ usb_buf->usbdev = usb_dev;
++ usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!usb_buf->urb) {
++ usb_err("could not allocate tx data urb\n");
++ goto err;
++ }
++ #ifdef CONFIG_USB_TX_AGGR
++ usb_buf->skb = dev_alloc_skb(MAX_USB_AGGR_TXPKT_LEN);
++ #endif
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ // alloc dma buf
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ usb_buf->data_buf = usb_alloc_coherent(usb_dev->udev, DATA_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &usb_buf->data_dma_trans_addr);
++ #else
++ usb_buf->data_buf = usb_buffer_alloc(usb_dev->udev, DATA_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &usb_buf->data_dma_trans_addr);
++ #endif
++ if (usb_buf->data_buf == NULL) {
++ usb_err("could not allocate tx data dma buf\n");
++ goto err;
++ }
++ #endif
++ list_add_tail(&usb_buf->list, &usb_dev->tx_free_list);
++ (usb_dev->tx_free_count)++;
++ }
++ return 0;
++
++err:
++ aicwf_usb_free_urb(&usb_dev->tx_free_list, &usb_dev->tx_free_lock);
++ return -ENOMEM;
++}
++
++#ifdef CONFIG_USB_MSG_IN_EP
++static int aicwf_usb_alloc_msg_rx_urb(struct aic_usb_dev *usb_dev)
++{
++ int i;
++
++ AICWFDBG(LOGINFO, "%s AICWF_USB_MSG_RX_URBS:%d \r\n", __func__, AICWF_USB_MSG_RX_URBS);
++
++ for (i = 0; i < AICWF_USB_MSG_RX_URBS; i++) {
++ struct aicwf_usb_buf *usb_buf = &usb_dev->usb_msg_rx_buf[i];
++
++ usb_buf->usbdev = usb_dev;
++ usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!usb_buf->urb) {
++ usb_err("could not allocate rx data urb\n");
++ goto err;
++ }
++ list_add_tail(&usb_buf->list, &usb_dev->msg_rx_free_list);
++ }
++ return 0;
++
++err:
++ aicwf_usb_free_urb(&usb_dev->msg_rx_free_list, &usb_dev->msg_rx_free_lock);
++ return -ENOMEM;
++}
++#endif
++
++static void aicwf_usb_state_change(struct aic_usb_dev *usb_dev, int state)
++{
++ int old_state;
++
++ if (usb_dev->state == state)
++ return;
++
++ old_state = usb_dev->state;
++ usb_dev->state = state;
++
++ if (state == USB_DOWN_ST) {
++ usb_dev->bus_if->state = BUS_DOWN_ST;
++ }
++ if (state == USB_UP_ST) {
++ usb_dev->bus_if->state = BUS_UP_ST;
++ }
++}
++
++int align_param = 8;
++module_param(align_param, int, 0660);
++
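++/* Per-STA flow control (CONFIG_PER_STA_FC): count pending frames per station
++ * and, for AP interfaces, stop that station's netdev queues once the per-STA
++ * high-water mark is reached. */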
++static void usb_tx_flow_ctrl(struct rwnx_txhdr *txhdr, struct aic_usb_dev *usb_dev, struct rwnx_hw *rwnx_hw)
++{
++#ifdef CONFIG_PER_STA_FC
++ struct rwnx_sta *sta;
++ u8 sta_idx;
++ unsigned long flags;
++
++ //printk("txdata: sta %d\n", txhdr->sw_hdr->desc.host.staid);
++ sta_idx = txhdr->sw_hdr->desc.host.staid;
++ if(sta_idx < NX_REMOTE_STA_MAX && !(txhdr->sw_hdr->desc.host.flags & TXU_CNTRL_MGMT)) {
++ struct rwnx_vif *vif = NULL;
++ sta = &rwnx_hw->sta_table[sta_idx];
++ vif = rwnx_hw->vif_table[sta->vif_idx];
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ atomic_inc(&rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt);
++ //printk("sta %d pending %d >= 64, flowctrl=%d\n", sta->sta_idx, sta->tx_pending_cnt, sta->flowctrl);
++ if(RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP) {
++ if((atomic_read(&rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt) >= AICWF_USB_FC_PERSTA_HIGH_WATER && rwnx_hw->sta_flowctrl[sta_idx].flowctrl == 0) ||
++ rwnx_hw->sta_flowctrl[sta_idx].flowctrl) {
++ //AICWFDBG(LOGDEBUG, "sta 0x%x:0x%x, %d pending %d, stop\n", sta->mac_addr[4], sta->mac_addr[5], sta->sta_idx, atomic_read(&rwnx_hw->sta_flowctrl[sta_idx].tx_pending_cnt));
++ if(!usb_dev->tbusy)
++ rwnx_stop_sta_all_queues(sta, usb_dev->rwnx_hw);
++ rwnx_hw->sta_flowctrl[sta_idx].flowctrl = 1;
++ }
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++ }
++#endif
++}
++
++#ifdef CONFIG_USB_TX_AGGR
++static int aicwf_usb_bus_txdata(struct device *dev, struct sk_buff *pkt)
++{
++ uint prio;
++ int ret = -EBADE;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usbdev = bus_if->bus_priv.usb;
++
++ //printk("%s\n", __func__);
++ prio = (pkt->priority & 0x7);
++ spin_lock_bh(&usbdev->tx_priv->txqlock);
++ if (!aicwf_frame_enq(usbdev->dev, &usbdev->tx_priv->txq, pkt, prio)) {
++ aicwf_dev_skb_free(pkt);
++ spin_unlock_bh(&usbdev->tx_priv->txqlock);
++ return -ENOSR;
++ } else {
++ ret = 0;
++ }
++
++ if (bus_if->state != BUS_UP_ST) {
++ usb_err("bus_if stopped\n");
++ spin_unlock_bh(&usbdev->tx_priv->txqlock);
++ return -1;
++ }
++
++ atomic_inc(&usbdev->tx_priv->tx_pktcnt);
++ spin_unlock_bh(&usbdev->tx_priv->txqlock);
++ complete(&bus_if->bustx_trgg);
++
++ return ret;
++}
++
++#else
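++/* Non-aggregated TX path: prefix each frame with a 4-byte USB header (length
++ * low byte, length high nibble, type 0x01 = data, reserved byte) and the
++ * firmware tx descriptor, then queue it on tx_post_list for transmission. */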
++static int aicwf_usb_bus_txdata(struct device *dev, struct sk_buff *skb)
++{
++ u8 *buf;
++ u16 buf_len = 0;
++ u16 adjust_len = 0;
++ struct aicwf_usb_buf *usb_buf;
++ int ret = 0;
++ unsigned long flags;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++ struct rwnx_txhdr *txhdr = (struct rwnx_txhdr *)skb->data;
++ struct rwnx_hw *rwnx_hw = usb_dev->rwnx_hw;
++ u8 usb_header[4];
++ u8 adj_buf[4] = {0};
++ u16 index = 0;
++ bool need_cfm = false;
++#ifdef CONFIG_USB_ALIGN_DATA//AIDEN
++ int align;
++#endif
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\n");
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr);
++ dev_kfree_skb_any(skb);
++ return -EIO;
++ }
++
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_free_list,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++ if (!usb_buf) {
++ usb_err("free:%d, post:%d\n", usb_dev->tx_free_count, usb_dev->tx_post_count);
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr);
++ dev_kfree_skb_any(skb);
++ ret = -ENOMEM;
++ goto flow_ctrl;
++ }
++
++ usb_tx_flow_ctrl(txhdr, usb_dev, rwnx_hw);
++
++ if (txhdr->sw_hdr->need_cfm) {
++ need_cfm = true;
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ buf = usb_buf->data_buf;
++ #else
++ buf = kmalloc(skb->len + 1, GFP_ATOMIC/*GFP_KERNEL*/);
++ #endif
++ index += sizeof(usb_header);
++ memcpy(&buf[index], (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api));
++ index += sizeof(struct txdesc_api);
++ memcpy(&buf[index], &skb->data[txhdr->sw_hdr->headroom], skb->len - txhdr->sw_hdr->headroom);
++ index += skb->len - txhdr->sw_hdr->headroom;
++ buf_len = index;
++ if (buf_len & (TX_ALIGNMENT - 1)) {
++ adjust_len = roundup(buf_len, TX_ALIGNMENT)-buf_len;
++ memcpy(&buf[buf_len], adj_buf, adjust_len);
++ buf_len += adjust_len;
++ }
++ usb_header[0] = (buf_len & 0xff);
++ usb_header[1] = ((buf_len >> 8) & 0x0f);
++ usb_header[2] = 0x01; //data
++ usb_header[3] = 0; //reserved
++ memcpy(&buf[0], usb_header, sizeof(usb_header));
++ usb_buf->skb = (struct sk_buff *)buf;
++ } else {
++ skb_pull(skb, txhdr->sw_hdr->headroom);
++ skb_push(skb, sizeof(struct txdesc_api));
++ memcpy(&skb->data[0], (u8 *)(long)&txhdr->sw_hdr->desc, sizeof(struct txdesc_api));
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, txhdr->sw_hdr);
++
++ skb_push(skb, sizeof(usb_header));
++ usb_header[0] = (skb->len & 0xff);
++ usb_header[1] = ((skb->len >> 8) & 0x0f);
++ usb_header[2] = 0x01; //data
++ usb_header[3] = 0; //reserved
++ memcpy(&skb->data[0], usb_header, sizeof(usb_header));
++
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ buf = usb_buf->data_buf;
++ memcpy(&buf[0], skb->data, skb->len);
++ #else
++ buf = skb->data;
++ #endif
++ buf_len = skb->len;
++
++ usb_buf->skb = skb;
++ }
++ usb_buf->usbdev = usb_dev;
++ if (need_cfm)
++ usb_buf->cfm = true;
++ else
++ usb_buf->cfm = false;
++
++
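++ /* Without URB_ZERO_PACKET, a transfer that is an exact multiple of the
++ * 512-byte bulk packet size is padded with one extra byte so the device
++ * can detect the end of the transfer. */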
++#ifndef CONFIG_USE_USB_ZERO_PACKET
++ if((buf_len % 512) == 0){
++ printk("%s send zero package buf_len: %d\r\n", __func__, buf_len);
++ if(txhdr->sw_hdr->need_cfm){
++ buf[buf_len] = 0x00;
++ buf_len = buf_len + 1;
++ }else{
++ skb_put(skb, 1);
++ skb->data[buf_len] = 0x00;
++ buf = skb->data;
++ buf_len = skb->len;
++ }
++ }
++#endif
++
++#ifdef CONFIG_USB_ALIGN_DATA
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ #error "CONFIG_USB_NO_TRANS_DMA_MAP not supported"
++ #endif
++ usb_buf->usb_align_data = (u8*)kmalloc(sizeof(u8) * buf_len + align_param, GFP_ATOMIC);
++
++ align = ((unsigned long)(usb_buf->usb_align_data)) & (align_param - 1);
++ memcpy(usb_buf->usb_align_data + (align_param - align), buf, buf_len);
++
++ usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe,
++ usb_buf->usb_align_data + (align_param - align), buf_len, aicwf_usb_tx_complete, usb_buf);
++#else
++ usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe,
++ buf, buf_len, aicwf_usb_tx_complete, usb_buf);
++#endif
++
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ usb_buf->urb->transfer_dma = usb_buf->data_dma_trans_addr;
++ usb_buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++ #endif
++#ifdef CONFIG_USE_USB_ZERO_PACKET
++ usb_buf->urb->transfer_flags |= URB_ZERO_PACKET;
++#endif
++
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++
++#ifdef CONFIG_TX_TASKLET
++ tasklet_schedule(&usb_dev->xmit_tasklet);
++#else
++ complete(&bus_if->bustx_trgg);
++#endif
++
++ ret = 0;
++
++ flow_ctrl:
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ if (usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER) {
++ AICWFDBG(LOGDEBUG, "usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER:%d\r\n",
++ usb_dev->tx_free_count);
++ usb_dev->tbusy = true;
++ aicwf_usb_tx_flowctrl(usb_dev->rwnx_hw, true);
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++
++ return ret;
++}
++#endif
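++/* Bring the bus up: mark the device USB_UP_ST and prepare the RX, TX and,
++ * where used, message RX paths. */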
++static int aicwf_usb_bus_start(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++
++ if (usb_dev->state == USB_UP_ST)
++ return 0;
++
++ aicwf_usb_state_change(usb_dev, USB_UP_ST);
++ aicwf_usb_rx_prepare(usb_dev);
++ aicwf_usb_tx_prepare(usb_dev);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ aicwf_usb_msg_rx_prepare(usb_dev);
++ }
++#endif
++
++ return 0;
++}
++
++static void aicwf_usb_cancel_all_urbs_(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf, *tmp;
++ unsigned long flags;
++
++ if (usb_dev->msg_out_urb)
++ usb_kill_urb(usb_dev->msg_out_urb);
++
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ list_for_each_entry_safe(usb_buf, tmp, &usb_dev->tx_post_list, list) {
++ spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags);
++ if (!usb_buf->urb) {
++ usb_err("bad usb_buf\n");
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ break;
++ }
++ usb_kill_urb(usb_buf->urb);
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ // free dma buf if needed
++ if (usb_buf->data_buf) {
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ usb_free_coherent(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr);
++ #else
++ usb_buffer_free(usb_buf->usbdev->udev, DATA_BUF_MAX, usb_buf->data_buf, usb_buf->data_dma_trans_addr);
++ #endif
++ usb_buf->data_buf = NULL;
++ usb_buf->data_dma_trans_addr = 0x0;
++ } else {
++ usb_err("bad usb dma buf\n");
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ break;
++ }
++ #endif
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags);
++
++ usb_kill_anchored_urbs(&usb_dev->rx_submitted);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ usb_kill_anchored_urbs(&usb_dev->msg_rx_submitted);
++ }
++#endif
++}
++
++void aicwf_usb_cancel_all_urbs(struct aic_usb_dev *usb_dev){
++ aicwf_usb_cancel_all_urbs_(usb_dev);
++}
++
++
++static void aicwf_usb_bus_stop(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++
++ AICWFDBG(LOGINFO, "%s\r\n", __func__);
++ if (usb_dev == NULL)
++ return;
++
++ if (usb_dev->state == USB_DOWN_ST)
++ return;
++
++ if(g_rwnx_plat->wait_disconnect_cb == true){
++ atomic_set(&aicwf_deinit_atomic, 1);
++ up(&aicwf_deinit_sem);
++ }
++ aicwf_usb_state_change(usb_dev, USB_DOWN_ST);
++ //aicwf_usb_cancel_all_urbs(usb_dev);//AIDEN
++}
++
++static void aicwf_usb_deinit(struct aic_usb_dev *usbdev)
++{
++ cancel_work_sync(&usbdev->rx_urb_work);
++ aicwf_usb_free_urb(&usbdev->rx_free_list, &usbdev->rx_free_lock);
++ aicwf_usb_free_urb(&usbdev->tx_free_list, &usbdev->tx_free_lock);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usbdev->chipid != PRODUCT_ID_AIC8801 &&
++ usbdev->chipid != PRODUCT_ID_AIC8800D81){
++ cancel_work_sync(&usbdev->msg_rx_urb_work);
++ aicwf_usb_free_urb(&usbdev->msg_rx_free_list, &usbdev->msg_rx_free_lock);
++ }
++#endif
++
++ usb_free_urb(usbdev->msg_out_urb);
++}
++
++static void aicwf_usb_rx_urb_work(struct work_struct *work)
++{
++ struct aic_usb_dev *usb_dev = container_of(work, struct aic_usb_dev, rx_urb_work);
++
++ aicwf_usb_rx_submit_all_urb(usb_dev);
++}
++
++#ifdef CONFIG_USB_MSG_IN_EP
++static void aicwf_usb_msg_rx_urb_work(struct work_struct *work)
++{
++ struct aic_usb_dev *usb_dev = container_of(work, struct aic_usb_dev, msg_rx_urb_work);
++
++ aicwf_usb_msg_rx_submit_all_urb(usb_dev);
++}
++#endif
++
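++/* One-time USB setup: initialise the wait queue, anchors, locks and free
++ * lists, allocate the RX/TX (and optional message RX) URB pools, and the
++ * single message-out URB used for commands. */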
++static int aicwf_usb_init(struct aic_usb_dev *usb_dev)
++{
++ int ret = 0;
++
++ usb_dev->tbusy = false;
++ usb_dev->state = USB_DOWN_ST;
++
++ init_waitqueue_head(&usb_dev->msg_wait);
++ init_usb_anchor(&usb_dev->rx_submitted);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ init_usb_anchor(&usb_dev->msg_rx_submitted);
++ }
++#endif
++
++ spin_lock_init(&usb_dev->tx_free_lock);
++ spin_lock_init(&usb_dev->tx_post_lock);
++ spin_lock_init(&usb_dev->rx_free_lock);
++ spin_lock_init(&usb_dev->tx_flow_lock);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ spin_lock_init(&usb_dev->msg_rx_free_lock);
++ }
++#endif
++
++ INIT_LIST_HEAD(&usb_dev->rx_free_list);
++ INIT_LIST_HEAD(&usb_dev->tx_free_list);
++ INIT_LIST_HEAD(&usb_dev->tx_post_list);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ INIT_LIST_HEAD(&usb_dev->msg_rx_free_list);
++ }
++#endif
++
++ atomic_set(&rx_urb_cnt, 0);
++
++ usb_dev->tx_free_count = 0;
++ usb_dev->tx_post_count = 0;
++
++ ret = aicwf_usb_alloc_rx_urb(usb_dev);
++ if (ret) {
++ goto error;
++ }
++ ret = aicwf_usb_alloc_tx_urb(usb_dev);
++ if (ret) {
++ goto error;
++ }
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ ret = aicwf_usb_alloc_msg_rx_urb(usb_dev);
++ if (ret) {
++ goto error;
++ }
++ }
++#endif
++
++
++ usb_dev->msg_out_urb = usb_alloc_urb(0, GFP_ATOMIC);
++ if (!usb_dev->msg_out_urb) {
++ usb_err("usb_alloc_urb (msg out) failed\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ INIT_WORK(&usb_dev->rx_urb_work, aicwf_usb_rx_urb_work);
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ INIT_WORK(&usb_dev->msg_rx_urb_work, aicwf_usb_msg_rx_urb_work);
++ }
++#endif
++
++ return ret;
++ error:
++ usb_err("failed!\n");
++ aicwf_usb_deinit(usb_dev);
++ return ret;
++}
++
++
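++/* Walk the interface descriptor: the first bulk IN/OUT endpoints become the
++ * data pipes; additional bulk endpoints are used as message IN/OUT pipes
++ * when CONFIG_USB_MSG_IN_EP / CONFIG_USB_MSG_OUT_EP are enabled. */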
++static int aicwf_parse_usb(struct aic_usb_dev *usb_dev, struct usb_interface *interface)
++{
++ struct usb_interface_descriptor *interface_desc;
++ struct usb_host_interface *host_interface;
++ struct usb_endpoint_descriptor *endpoint;
++ struct usb_device *usb = usb_dev->udev;
++ int i, endpoints;
++ u8 endpoint_num;
++ int ret = 0;
++
++ usb_dev->bulk_in_pipe = 0;
++ usb_dev->bulk_out_pipe = 0;
++#ifdef CONFIG_USB_MSG_OUT_EP
++ usb_dev->msg_out_pipe = 0;
++#endif
++#ifdef CONFIG_USB_MSG_IN_EP
++ usb_dev->msg_in_pipe = 0;
++#endif
++
++ host_interface = &interface->altsetting[0];
++ interface_desc = &host_interface->desc;
++ endpoints = interface_desc->bNumEndpoints;
++ AICWFDBG(LOGINFO, "%s endpoints = %d\n", __func__, endpoints);
++
++ /* Check device configuration */
++ if (usb->descriptor.bNumConfigurations != 1) {
++ usb_err("Number of configurations: %d not supported\n",
++ usb->descriptor.bNumConfigurations);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ /* Check device class */
++#ifndef CONFIG_USB_BT
++ if (usb->descriptor.bDeviceClass != 0x00) {
++ usb_err("DeviceClass %d not supported\n",
++ usb->descriptor.bDeviceClass);
++ ret = -ENODEV;
++ goto exit;
++ }
++#endif
++
++ /* Check interface number */
++#ifdef CONFIG_USB_BT
++ if (usb->actconfig->desc.bNumInterfaces != 3) {
++#else
++ if (usb->actconfig->desc.bNumInterfaces != 1) {
++#endif
++ AICWFDBG(LOGERROR, "Number of interfaces: %d not supported\n",
++ usb->actconfig->desc.bNumInterfaces);
++ if(usb_dev->chipid == PRODUCT_ID_AIC8800DC){
++ AICWFDBG(LOGERROR, "AIC8800DC change to AIC8800DW\n");
++ usb_dev->chipid = PRODUCT_ID_AIC8800DW;
++ }else{
++ ret = -ENODEV;
++ goto exit;
++ }
++ }
++
++ if ((interface_desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
++ (interface_desc->bInterfaceSubClass != 0xff) ||
++ (interface_desc->bInterfaceProtocol != 0xff)) {
++ usb_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n",
++ interface_desc->bInterfaceNumber, interface_desc->bInterfaceClass,
++ interface_desc->bInterfaceSubClass, interface_desc->bInterfaceProtocol);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ for (i = 0; i < endpoints; i++) {
++ endpoint = &host_interface->endpoint[i].desc;
++ endpoint_num = usb_endpoint_num(endpoint);
++
++ if (usb_endpoint_dir_in(endpoint) &&
++ usb_endpoint_xfer_bulk(endpoint)) {
++ if (!usb_dev->bulk_in_pipe) {
++ usb_dev->bulk_in_pipe = usb_rcvbulkpipe(usb, endpoint_num);
++ }
++#ifdef CONFIG_USB_MSG_IN_EP
++ else if (!usb_dev->msg_in_pipe) {
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ usb_dev->msg_in_pipe = usb_rcvbulkpipe(usb, endpoint_num);
++ }
++ }
++#endif
++ }
++
++ if (usb_endpoint_dir_out(endpoint) &&
++ usb_endpoint_xfer_bulk(endpoint)) {
++ if (!usb_dev->bulk_out_pipe)
++ {
++ usb_dev->bulk_out_pipe = usb_sndbulkpipe(usb, endpoint_num);
++ }
++#ifdef CONFIG_USB_MSG_OUT_EP
++ else if (!usb_dev->msg_out_pipe) {
++ usb_dev->msg_out_pipe = usb_sndbulkpipe(usb, endpoint_num);
++ }
++#endif
++
++ }
++ }
++
++ if (usb_dev->bulk_in_pipe == 0) {
++ usb_err("No RX (in) Bulk EP found\n");
++ ret = -ENODEV;
++ goto exit;
++ }
++ if (usb_dev->bulk_out_pipe == 0) {
++ usb_err("No TX (out) Bulk EP found\n");
++ ret = -ENODEV;
++ goto exit;
++ }
++#ifdef CONFIG_USB_MSG_OUT_EP
++ if (usb_dev->msg_out_pipe == 0) {
++ usb_err("No TX Msg (out) Bulk EP found\n");
++ }
++#endif
++#ifdef CONFIG_USB_MSG_IN_EP
++ if(usb_dev->chipid != PRODUCT_ID_AIC8801 &&
++ usb_dev->chipid != PRODUCT_ID_AIC8800D81){
++ if (usb_dev->msg_in_pipe == 0) {
++ usb_err("No RX Msg (in) Bulk EP found\n");
++ }
++ }
++#endif
++
++ if (usb->speed == USB_SPEED_HIGH){
++ AICWFDBG(LOGINFO, "Aic high speed USB device detected\n");
++ }else{
++ AICWFDBG(LOGINFO, "Aic full speed USB device detected\n");
++ }
++
++ exit:
++ return ret;
++}
++
++
++
++static struct aicwf_bus_ops aicwf_usb_bus_ops = {
++ .start = aicwf_usb_bus_start,
++ .stop = aicwf_usb_bus_stop,
++ .txdata = aicwf_usb_bus_txdata,
++ .txmsg = aicwf_usb_bus_txmsg,
++};
++
++
++#ifdef CONFIG_GPIO_WAKEUP
++
++static irqreturn_t rwnx_irq_handler(int irq, void *para)
++{
++ unsigned long irqflags;
++ spin_lock_irqsave(&irq_lock, irqflags);
++ disable_irq_nosync(hostwake_irq_num);
++ //do something
++ printk("%s gpio irq trigger\r\n", __func__);
++ spin_unlock_irqrestore(&irq_lock, irqflags);
++ atomic_dec(&irq_count);
++ return IRQ_HANDLED;
++}
++
++
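++/* Request the platform host-wake GPIO interrupt and mark it wakeup-capable
++ * so the chip can wake the host from suspend. */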
++static int rwnx_register_hostwake_irq(struct device *dev)
++{
++ int ret = 0;
++ uint irq_flags = 0;
++
++ spin_lock_init(&irq_lock);
++
++//Setting hostwake gpio for platform
++//For Rockchip
++#ifdef CONFIG_PLATFORM_ROCKCHIP
++ hostwake_irq_num = rockchip_wifi_get_oob_irq();
++ printk("%s hostwake_irq_num:%d \r\n", __func__, hostwake_irq_num);
++ irq_flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE) & IRQF_TRIGGER_MASK;
++ printk("%s irq_flags:%d \r\n", __func__, irq_flags);
++ wakeup_enable = 1;
++#endif //CONFIG_PLATFORM_ROCKCHIP
++
++//For Allwinner
++#ifdef CONFIG_PLATFORM_ALLWINNER
++ int irq_flags;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
++ hostwake_irq_num = sunxi_wlan_get_oob_irq(&irq_flags, &wakeup_enable);
++#else
++ hostwake_irq_num = sunxi_wlan_get_oob_irq();
++ irq_flags = sunxi_wlan_get_oob_irq_flags();
++ wakeup_enable = 1;
++#endif
++#endif //CONFIG_PLATFORM_ALLWINNER
++
++
++ ret = request_irq(hostwake_irq_num,
++ rwnx_irq_handler, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND,
++ "rwnx_irq_handler", NULL);
++
++ enable_irq_wake(hostwake_irq_num);
++
++ return ret;
++}
++
++static int rwnx_unregister_hostwake_irq(struct device *dev)
++{
++ wakeup_enable = 0;
++
++ printk("%s hostwake_irq_num:%d \r\n", __func__, hostwake_irq_num);
++ disable_irq_wake(hostwake_irq_num);
++ free_irq(hostwake_irq_num, NULL);
++
++ return 0;
++}
++
++#endif //CONFIG_GPIO_WAKEUP
++
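++/* Map the USB product ID to the internal chip ID; AIC8800D81 additionally
++ * enables RX aggregation. */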
++static int aicwf_usb_chipmatch(struct aic_usb_dev *usb_dev, u16_l vid, u16_l pid){
++
++ if(pid == USB_PRODUCT_ID_AIC8801){
++ usb_dev->chipid = PRODUCT_ID_AIC8801;
++ AICWFDBG(LOGINFO, "%s USE AIC8801\r\n", __func__);
++ return 0;
++ }else if(pid == USB_PRODUCT_ID_AIC8800DC){
++ usb_dev->chipid = PRODUCT_ID_AIC8800DC;
++ AICWFDBG(LOGINFO, "%s USE AIC8800DC\r\n", __func__);
++ return 0;
++ }else if(pid == USB_PRODUCT_ID_AIC8800DW){
++ usb_dev->chipid = PRODUCT_ID_AIC8800DW;
++ AICWFDBG(LOGINFO, "%s USE AIC8800DW\r\n", __func__);
++ return 0;
++ }else if(pid == USB_PRODUCT_ID_AIC8800D81){
++ usb_dev->chipid = PRODUCT_ID_AIC8800D81;
++ aicwf_usb_rx_aggr = true;
++ AICWFDBG(LOGINFO, "%s USE AIC8800D81\r\n", __func__);
++ return 0;
++ }else{
++ return -1;
++ }
++}
++
++
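++/* Probe: identify the chip, parse the endpoints, allocate the URB pools,
++ * attach the aicwf bus layer and run the rwnx USB platform init before
++ * signalling aicwf_hostif_ready(). */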
++static int aicwf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
++{
++ int ret = 0;
++ struct usb_device *usb = interface_to_usbdev(intf);
++ struct aicwf_bus *bus_if = NULL;
++ struct device *dev = NULL;
++ struct aicwf_rx_priv* rx_priv = NULL;
++ struct aic_usb_dev *usb_dev = NULL;
++ #ifdef CONFIG_USB_TX_AGGR
++ struct aicwf_tx_priv *tx_priv = NULL;
++ #endif
++
++ usb_dev = kzalloc(sizeof(struct aic_usb_dev), GFP_ATOMIC);
++ if (!usb_dev) {
++ return -ENOMEM;
++ }
++
++ usb_dev->udev = usb;
++ usb_dev->dev = &usb->dev;
++ usb_set_intfdata(intf, usb_dev);
++
++ ret = aicwf_usb_chipmatch(usb_dev, id->idVendor, id->idProduct);
++
++ if (ret < 0) {
++ AICWFDBG(LOGERROR, "%s pid:0x%04X vid:0x%04X unsupport\n",
++ __func__, id->idVendor, id->idProduct);
++ goto out_free;
++ }
++
++ ret = aicwf_parse_usb(usb_dev, intf);
++ if (ret) {
++ AICWFDBG(LOGERROR, "aicwf_parse_usb err %d\n", ret);
++ goto out_free;
++ }
++
++ ret = aicwf_usb_init(usb_dev);
++ if (ret) {
++ AICWFDBG(LOGERROR, "aicwf_usb_init err %d\n", ret);
++ goto out_free;
++ }
++
++ bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_ATOMIC);
++ if (!bus_if) {
++ ret = -ENOMEM;
++ goto out_free_usb;
++ }
++
++ dev = usb_dev->dev;
++ bus_if->dev = dev;
++ usb_dev->bus_if = bus_if;
++ bus_if->bus_priv.usb = usb_dev;
++ dev_set_drvdata(dev, bus_if);
++
++ bus_if->ops = &aicwf_usb_bus_ops;
++
++ rx_priv = aicwf_rx_init(usb_dev);
++ if(!rx_priv) {
++ AICWFDBG(LOGERROR, "rx init failed\n");
++ ret = -1;
++ goto out_free_bus;
++ }
++ usb_dev->rx_priv = rx_priv;
++
++#ifdef CONFIG_USB_TX_AGGR
++ tx_priv = aicwf_tx_init(usb_dev);
++ if(!tx_priv) {
++ usb_err("tx init fail\n");
++ goto out_free_bus;
++ }
++ usb_dev->tx_priv = tx_priv;
++ aicwf_frame_queue_init(&tx_priv->txq, 8, TXQLEN);
++ spin_lock_init(&tx_priv->txqlock);
++ spin_lock_init(&tx_priv->txdlock);
++#endif
++
++ ret = aicwf_bus_init(0, dev);
++ if (ret < 0) {
++ AICWFDBG(LOGERROR, "aicwf_bus_init err %d\n", ret);
++ goto out_free_bus;
++ }
++
++ ret = aicwf_bus_start(bus_if);
++ if (ret < 0) {
++ AICWFDBG(LOGERROR, "aicwf_bus_start err %d\n", ret);
++ goto out_free_bus;
++ }
++
++ ret = aicwf_rwnx_usb_platform_init(usb_dev);
++ if (ret < 0) {
++ AICWFDBG(LOGERROR, "aicwf_rwnx_usb_platform_init err %d\n", ret);
++ goto out_free_bus;
++ }
++ aicwf_hostif_ready();
++
++#ifdef CONFIG_GPIO_WAKEUP
++ rwnx_register_hostwake_irq(usb_dev->dev);
++#endif
++
++ return 0;
++
++out_free_bus:
++ aicwf_bus_deinit(dev);
++ kfree(bus_if);
++out_free_usb:
++ aicwf_usb_deinit(usb_dev);
++out_free:
++ usb_err("failed with errno %d\n", ret);
++ kfree(usb_dev);
++ usb_set_intfdata(intf, NULL);
++ return ret;
++}
++
++static void aicwf_usb_disconnect(struct usb_interface *intf)
++{
++ struct aic_usb_dev *usb_dev =
++ (struct aic_usb_dev *) usb_get_intfdata(intf);
++ AICWFDBG(LOGINFO, "%s Enter\r\n", __func__);
++
++ if(g_rwnx_plat->wait_disconnect_cb == false){
++ atomic_set(&aicwf_deinit_atomic, 0);
++ down(&aicwf_deinit_sem);
++ }
++
++ if (!usb_dev){
++ AICWFDBG(LOGERROR, "%s usb_dev is null \r\n", __func__);
++ return;
++ }
++
++#if 0
++ if(timer_pending(&usb_dev->rwnx_hw->p2p_alive_timer) && usb_dev->rwnx_hw->is_p2p_alive == 1){
++ printk("%s del timer rwnx_hw->p2p_alive_timer \r\n", __func__);
++ rwnx_del_timer(&usb_dev->rwnx_hw->p2p_alive_timer);
++ }
++#endif
++ aicwf_bus_deinit(usb_dev->dev);
++ aicwf_usb_deinit(usb_dev);
++ rwnx_cmd_mgr_deinit(&usb_dev->cmd_mgr);
++
++#ifdef CONFIG_GPIO_WAKEUP
++ rwnx_unregister_hostwake_irq(usb_dev->dev);
++#endif
++
++ if (usb_dev->rx_priv)
++ aicwf_rx_deinit(usb_dev->rx_priv);
++
++ kfree(usb_dev->bus_if);
++ kfree(usb_dev);
++ AICWFDBG(LOGINFO, "%s exit\r\n", __func__);
++ up(&aicwf_deinit_sem);
++ atomic_set(&aicwf_deinit_atomic, 1);
++}
++
++static int aicwf_usb_suspend(struct usb_interface *intf, pm_message_t state)
++{
++ struct aic_usb_dev *usb_dev =
++ (struct aic_usb_dev *) usb_get_intfdata(intf);
++#ifdef CONFIG_GPIO_WAKEUP
++ struct rwnx_vif *rwnx_vif, *tmp;
++ //unsigned long irqflags;
++#endif
++
++ printk("%s enter\r\n", __func__);
++
++#ifdef CONFIG_GPIO_WAKEUP
++// spin_lock_irqsave(&irq_lock, irqflags);
++// rwnx_enable_hostwake_irq();
++// spin_unlock_irqrestore(&irq_lock, irqflags);
++ atomic_inc(&irq_count);
++
++ list_for_each_entry_safe(rwnx_vif, tmp, &usb_dev->rwnx_hw->vifs, list) {
++ if (rwnx_vif->ndev)
++ netif_device_detach(rwnx_vif->ndev);
++ }
++#endif
++
++ aicwf_usb_state_change(usb_dev, USB_SLEEP_ST);
++ aicwf_bus_stop(usb_dev->bus_if);
++
++
++ return 0;
++}
++
++static int aicwf_usb_resume(struct usb_interface *intf)
++{
++ struct aic_usb_dev *usb_dev =
++ (struct aic_usb_dev *) usb_get_intfdata(intf);
++#ifdef CONFIG_GPIO_WAKEUP
++ struct rwnx_vif *rwnx_vif, *tmp;
++// unsigned long irqflags;
++#endif
++ printk("%s enter\r\n", __func__);
++
++#ifdef CONFIG_GPIO_WAKEUP
++// spin_lock_irqsave(&irq_lock, irqflags);
++// rwnx_disable_hostwake_irq();
++// spin_unlock_irqrestore(&irq_lock, irqflags);
++ atomic_dec(&irq_count);
++
++ list_for_each_entry_safe(rwnx_vif, tmp, &usb_dev->rwnx_hw->vifs, list) {
++ if (rwnx_vif->ndev)
++ netif_device_attach(rwnx_vif->ndev);
++ }
++#endif
++
++ if (usb_dev->state == USB_UP_ST)
++ return 0;
++
++ aicwf_bus_start(usb_dev->bus_if);
++ return 0;
++}
++
++static int aicwf_usb_reset_resume(struct usb_interface *intf)
++{
++ return aicwf_usb_resume(intf);
++}
++
++static struct usb_device_id aicwf_usb_id_table[] = {
++#ifndef CONFIG_USB_BT
++ {USB_DEVICE(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800)},
++#else
++ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8801, 0xff, 0xff, 0xff)},
++ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800D81, 0xff, 0xff, 0xff)},
++ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800DC, 0xff, 0xff, 0xff)},
++ {USB_DEVICE(USB_VENDOR_ID_AIC, USB_PRODUCT_ID_AIC8800DW)},
++#endif
++ {}
++};
++
++MODULE_DEVICE_TABLE(usb, aicwf_usb_id_table);
++
++static struct usb_driver aicwf_usbdrvr = {
++ .name = KBUILD_MODNAME,
++ .probe = aicwf_usb_probe,
++ .disconnect = aicwf_usb_disconnect,
++ .id_table = aicwf_usb_id_table,
++ .suspend = aicwf_usb_suspend,
++ .resume = aicwf_usb_resume,
++ .reset_resume = aicwf_usb_reset_resume,
++#ifdef ANDROID_PLATFORM
++ .supports_autosuspend = 1,
++#else
++ .supports_autosuspend = 0,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ .disable_hub_initiated_lpm = 1,
++#endif
++};
++
++void aicwf_usb_register(void)
++{
++ if (usb_register(&aicwf_usbdrvr) < 0) {
++ usb_err("usb_register failed\n");
++ }
++}
++
++void aicwf_usb_exit(void)
++{
++ int retry = 5;
++ AICWFDBG(LOGINFO, "%s Enter\r\n", __func__);
++
++ AICWFDBG(LOGDEBUG, "%s in_interrupt:%d in_softirq:%d in_atomic:%d\r\n", __func__, (int)in_interrupt(), (int)in_softirq(), (int)in_atomic());
++
++ do{
++ AICWFDBG(LOGINFO, "aicwf_deinit_atomic is busy. waiting for 500ms retry:%d \r\n",
++ retry);
++ mdelay(500);
++ retry--;
++ if(retry == 0){
++ break;
++ }
++ }while(atomic_read(&aicwf_deinit_atomic) == 0);
++
++ atomic_set(&aicwf_deinit_atomic, 0);
++ if(down_timeout(&aicwf_deinit_sem, msecs_to_jiffies(SEM_TIMOUT)) != 0){
++ AICWFDBG(LOGERROR, "%s semaphore waiting timeout\r\n", __func__);
++ }
++
++ if(g_rwnx_plat){
++ g_rwnx_plat->wait_disconnect_cb = false;
++ }
++
++
++
++ if(!g_rwnx_plat || !g_rwnx_plat->enabled){
++ AICWFDBG(LOGINFO, "g_rwnx_plat is not ready. waiting for 500ms\r\n");
++ mdelay(500);
++ }
++
++#if 1
++ if(g_rwnx_plat && g_rwnx_plat->enabled){
++ rwnx_platform_deinit(g_rwnx_plat->usbdev->rwnx_hw);
++ }
++#endif
++
++ up(&aicwf_deinit_sem);
++ atomic_set(&aicwf_deinit_atomic, 1);
++
++ AICWFDBG(LOGINFO, "%s usb_deregister \r\n", __func__);
++
++ usb_deregister(&aicwf_usbdrvr);
++ //mdelay(500);
++ if(g_rwnx_plat){
++ kfree(g_rwnx_plat);
++ }
++
++ AICWFDBG(LOGINFO, "%s exit\r\n", __func__);
++
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_usb.h
+@@ -0,0 +1,173 @@
++/**
++ * aicwf_usb.h
++ *
++ * USB function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#ifndef _AICWF_USB_H_
++#define _AICWF_USB_H_
++
++#include <linux/usb.h>
++#include "rwnx_cmds.h"
++
++#ifdef AICWF_USB_SUPPORT
++
++/* USB Device ID */
++#define USB_VENDOR_ID_AIC 0xA69C
++
++#ifndef CONFIG_USB_BT
++#define USB_PRODUCT_ID_AIC8800 0x8800
++#else
++#define USB_PRODUCT_ID_AIC8801 0x8801
++#define USB_PRODUCT_ID_AIC8800DC 0x88dc
++#define USB_PRODUCT_ID_AIC8800DW 0x88dd
++#define USB_PRODUCT_ID_AIC8800D81 0x8d81
++
++#endif
++
++enum AICWF_IC{
++ PRODUCT_ID_AIC8801 = 0,
++ PRODUCT_ID_AIC8800DC,
++ PRODUCT_ID_AIC8800DW,
++ PRODUCT_ID_AIC8800D81
++};
++
++
++#define AICWF_USB_RX_URBS (200)//(200)
++#ifdef CONFIG_USB_MSG_IN_EP
++#define AICWF_USB_MSG_RX_URBS (100)
++#endif
++#ifdef CONFIG_USB_TX_AGGR
++#define TXQLEN (2048*4)
++#define AICWF_USB_TX_URBS (50)
++#else
++#define AICWF_USB_TX_URBS 200//(100)
++#endif
++#define AICWF_USB_TX_LOW_WATER (AICWF_USB_TX_URBS/4)//25%
++#define AICWF_USB_TX_HIGH_WATER (AICWF_USB_TX_LOW_WATER*3)//75%
++#define AICWF_USB_AGGR_MAX_PKT_SIZE (2048*30)
++#define AICWF_USB_MAX_PKT_SIZE (2048)
++#define AICWF_USB_FC_PERSTA_HIGH_WATER 64
++#define AICWF_USB_FC_PERSTA_LOW_WATER 16
++
++
++typedef enum {
++ USB_TYPE_DATA = 0X00,
++ USB_TYPE_CFG = 0X10,
++ USB_TYPE_CFG_CMD_RSP = 0X11,
++ USB_TYPE_CFG_DATA_CFM = 0X12,
++ USB_TYPE_CFG_PRINT = 0X13
++} usb_type;
++
++enum aicwf_usb_state {
++ USB_DOWN_ST,
++ USB_UP_ST,
++ USB_SLEEP_ST
++};
++
++struct aicwf_usb_buf {
++ struct list_head list;
++ struct aic_usb_dev *usbdev;
++ struct urb *urb;
++ struct sk_buff *skb;
++#ifdef CONFIG_PREALLOC_RX_SKB
++ struct rx_buff *rx_buff;
++#endif
++ #ifdef CONFIG_USB_NO_TRANS_DMA_MAP
++ u8 *data_buf;
++ dma_addr_t data_dma_trans_addr;
++ #endif
++ bool cfm;
++ #ifdef CONFIG_USB_TX_AGGR
++ u8 aggr_cnt;
++ #endif
++ u8* usb_align_data;
++};
++
++struct aic_usb_dev {
++ struct rwnx_hw *rwnx_hw;
++ struct aicwf_bus *bus_if;
++ struct usb_device *udev;
++ struct device *dev;
++ struct aicwf_rx_priv* rx_priv;
++ enum aicwf_usb_state state;
++ struct rwnx_cmd_mgr cmd_mgr;
++
++#ifdef CONFIG_USB_TX_AGGR
++ struct aicwf_tx_priv *tx_priv;
++#endif
++
++ struct usb_anchor rx_submitted;
++ struct work_struct rx_urb_work;
++#ifdef CONFIG_USB_MSG_IN_EP
++ struct usb_anchor msg_rx_submitted;
++ struct work_struct msg_rx_urb_work;
++#endif
++
++ spinlock_t rx_free_lock;
++ spinlock_t tx_free_lock;
++ spinlock_t tx_post_lock;
++ spinlock_t tx_flow_lock;
++#ifdef CONFIG_USB_MSG_IN_EP
++ spinlock_t msg_rx_free_lock;
++#endif
++
++ struct list_head rx_free_list;
++ struct list_head tx_free_list;
++ struct list_head tx_post_list;
++#ifdef CONFIG_USB_MSG_IN_EP
++ struct list_head msg_rx_free_list;
++#endif
++
++ uint bulk_in_pipe;
++ uint bulk_out_pipe;
++#ifdef CONFIG_USB_MSG_OUT_EP
++ uint msg_out_pipe;
++#endif
++#ifdef CONFIG_USB_MSG_IN_EP
++ uint msg_in_pipe;
++#endif
++
++ int tx_free_count;
++ int tx_post_count;
++
++ struct aicwf_usb_buf usb_tx_buf[AICWF_USB_TX_URBS];
++ struct aicwf_usb_buf usb_rx_buf[AICWF_USB_RX_URBS];
++#ifdef CONFIG_USB_MSG_IN_EP
++ struct aicwf_usb_buf usb_msg_rx_buf[AICWF_USB_MSG_RX_URBS];
++#endif
++
++ int msg_finished;
++ wait_queue_head_t msg_wait;
++ ulong msg_busy;
++ struct urb *msg_out_urb;
++ #ifdef CONFIG_USB_NO_TRANS_DMA_MAP
++ dma_addr_t cmd_dma_trans_addr;
++ #endif
++
++#ifdef CONFIG_RX_TASKLET//AIDEN tasklet
++ struct tasklet_struct recv_tasklet;
++#endif
++#ifdef CONFIG_TX_TASKLET//AIDEN tasklet
++ struct tasklet_struct xmit_tasklet;
++#endif
++ u16 chipid;
++ bool tbusy;
++};
++
++extern void aicwf_usb_exit(void);
++extern void aicwf_usb_register(void);
++extern void aicwf_usb_tx_flowctrl(struct rwnx_hw *rwnx_hw, bool state);
++#ifdef CONFIG_USB_MSG_IN_EP
++int usb_msg_busrx_thread(void *data);
++#endif
++int usb_bustx_thread(void *data);
++int usb_busrx_thread(void *data);
++
++
++extern void aicwf_hostif_ready(void);
++
++#endif /* AICWF_USB_SUPPORT */
++#endif /* _AICWF_USB_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_wext_linux.c
+@@ -0,0 +1,1201 @@
++#include <linux/etherdevice.h>
++#include <linux/netdevice.h>
++#include <net/netlink.h>
++#include <linux/wireless.h>
++#include <linux/nl80211.h>
++#include <net/iw_handler.h>
++#include <uapi/linux/if_arp.h>
++#include <linux/vmalloc.h>
++#include "aicwf_debug.h"
++#include "rwnx_msg_tx.h"
++#include "aicwf_wext_linux.h"
++#include "rwnx_defs.h"
++
++#define WLAN_CAPABILITY_ESS (1<<0)
++#define WLAN_CAPABILITY_IBSS (1<<1)
++#define WLAN_CAPABILITY_PRIVACY (1<<4)
++
++#define IEEE80211_HT_CAP_SGI_20 0x0020
++#define IEEE80211_HT_CAP_SGI_40 0x0040
++
++#define IEEE80211_HE_CH_BW_SET_160_80P80 (1 << 4)
++#define IEEE80211_HE_CH_BW_SET_160_IN_5G (1 << 3)
++#define IEEE80211_HE_CH_BW_SET_40_AND_80_IN_5G (1 << 2)
++#define IEEE80211_HE_CH_BW_SET_40_IN_2_4G (1 << 1)
++
++#if WIRELESS_EXT < 17
++ #define IW_QUAL_QUAL_INVALID 0x10
++ #define IW_QUAL_LEVEL_INVALID 0x20
++ #define IW_QUAL_NOISE_INVALID 0x40
++ #define IW_QUAL_QUAL_UPDATED 0x1
++ #define IW_QUAL_LEVEL_UPDATED 0x2
++ #define IW_QUAL_NOISE_UPDATED 0x4
++#endif
++
++#define MAX_WPA_IE_LEN (256)
++
++
++/* 20/40/80, ShortGI, MCS Rate */
++const u16 VHT_MCS_DATA_RATE[3][2][30] = {
++ {
++ {
++ 13, 26, 39, 52, 78, 104, 117, 130, 156, 156,
++ 26, 52, 78, 104, 156, 208, 234, 260, 312, 312,
++ 39, 78, 117, 156, 234, 312, 351, 390, 468, 520
++ }, /* Long GI, 20MHz */
++ {
++ 14, 29, 43, 58, 87, 116, 130, 144, 173, 173,
++ 29, 58, 87, 116, 173, 231, 260, 289, 347, 347,
++ 43, 87, 130, 173, 260, 347, 390, 433, 520, 578
++ }
++ }, /* Short GI, 20MHz */
++ {
++ {
++ 27, 54, 81, 108, 162, 216, 243, 270, 324, 360,
++ 54, 108, 162, 216, 324, 432, 486, 540, 648, 720,
++ 81, 162, 243, 324, 486, 648, 729, 810, 972, 1080
++ }, /* Long GI, 40MHz */
++ {
++ 30, 60, 90, 120, 180, 240, 270, 300, 360, 400,
++ 60, 120, 180, 240, 360, 480, 540, 600, 720, 800,
++ 90, 180, 270, 360, 540, 720, 810, 900, 1080, 1200
++ }
++ }, /* Short GI, 40MHz */
++ {
++ {
++ 59, 117, 176, 234, 351, 468, 527, 585, 702, 780,
++ 117, 234, 351, 468, 702, 936, 1053, 1170, 1404, 1560,
++ 176, 351, 527, 702, 1053, 1404, 1580, 1755, 2106, 2340
++ }, /* Long GI, 80MHz */
++ {
++ 65, 130, 195, 260, 390, 520, 585, 650, 780, 867,
++ 130, 260, 390, 520, 780, 1040, 1170, 1300, 1560, 1734,
++ 195, 390, 585, 780, 1170, 1560, 1755, 1950, 2340, 2600
++ } /* Short GI, 80MHz */
++ }
++};
++
++
++/*HE 20/40/80,MCS Rate */
++const u16 HE_MCS_DATA_RATE[3][30] = {
++ {
++ 9, 17, 26, 34, 52, 69, 77, 86, 103, 115,
++ 129, 143, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++ }, /* 20MHz */
++ {
++ 17, 34, 52, 69, 103, 138, 155, 172, 207, 229,
++ 258, 286, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++ }, /* 40MHz */
++ {
++ 36, 72, 108, 144, 216, 288, 324, 360, 432, 480,
++ 540, 601, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
++ }, /* 80MHz */
++
++};
++
++
++#if WIRELESS_EXT >= 17
++struct iw_statistics iwstats;
++
++static struct iw_statistics *aicwf_get_wireless_stats(struct net_device *dev)
++{
++ int tmp_level = -100;
++ int tmp_qual = 0;
++ int tmp_noise = 0;
++
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_sta *sta = NULL;
++
++ union rwnx_rate_ctrl_info *rate_info;
++ struct mm_get_sta_info_cfm cfm;
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ if(rwnx_vif->sta.ap){
++ sta = rwnx_vif->sta.ap;
++ rwnx_send_get_sta_info_req(rwnx_hw, sta->sta_idx, &cfm);
++ rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info;
++ tmp_level = cfm.rssi;
++ }
++
++
++ iwstats.qual.level = tmp_level;
++ iwstats.qual.qual = tmp_qual;
++ iwstats.qual.noise = tmp_noise;
++ iwstats.qual.updated = 0x07;
++ iwstats.qual.updated = iwstats.qual.updated | IW_QUAL_DBM;
++
++ return &iwstats;
++}
++#endif
++
++
++static int aicwf_get_name(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra)
++{
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_sta *sta = NULL;
++
++ union rwnx_rate_ctrl_info *rate_info;
++ struct mm_get_sta_info_cfm cfm;
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ if(rwnx_vif->sta.ap){
++ sta = rwnx_vif->sta.ap;
++ rwnx_send_get_sta_info_req(rwnx_hw, sta->sta_idx, &cfm);
++ rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info;
++
++ switch (rate_info->formatModTx) {
++ case FORMATMOD_NON_HT:
++ case FORMATMOD_NON_HT_DUP_OFDM:
++ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
++ break;
++ case FORMATMOD_HT_MF:
++ case FORMATMOD_HT_GF:
++ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11n");
++ break;
++ case FORMATMOD_VHT:
++ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11ac");
++ break;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ case FORMATMOD_HE_MU:
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11ax");
++ break;
++#else
++ //kernel does not support HE; still report 802.11ax
++ case FORMATMOD_HE_MU:
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11ax");
++ break;
++#endif
++ }
++
++
++ }else{
++ snprintf(wrqu->name, IFNAMSIZ, "unassociated");
++ }
++
++
++ return 0;
++}
++
++static int aicwf_get_freq(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra)
++{
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ if(rwnx_vif->sta.ap){
++ wrqu->freq.m = rwnx_vif->sta.ap->center_freq * 100000;
++ wrqu->freq.e = 1;
++ wrqu->freq.i = ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq);
++ }else{
++ wrqu->freq.m = 2412 * 100000;
++ wrqu->freq.e = 1;
++ wrqu->freq.i = 1;
++ }
++
++ return 0;
++}
++
++static int aicwf_get_mode(struct net_device *dev, struct iw_request_info *a,
++ union iwreq_data *wrqu, char *b)
++{
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ wrqu->mode = IW_MODE_AUTO;
++#if 0
++ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == _TRUE)
++ wrqu->mode = IW_MODE_INFRA;
++ else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE) ||
++ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE))
++
++ wrqu->mode = IW_MODE_ADHOC;
++ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
++ wrqu->mode = IW_MODE_MASTER;
++ else if (check_fwstate(pmlmepriv, WIFI_MONITOR_STATE) == _TRUE)
++ wrqu->mode = IW_MODE_MONITOR;
++ else
++ wrqu->mode = IW_MODE_AUTO;
++#endif
++
++ return 0;
++
++}
++
++
++static int aicwf_get_range(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra)
++{
++ struct iw_range *range = (struct iw_range *)extra;
++ u16 val = 0;
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++
++ wrqu->data.length = sizeof(*range);
++ memset(range, 0, sizeof(*range));
++
++ /* Let's try to keep this struct in the same order as in
++ * linux/include/wireless.h
++ */
++
++ /* TODO: See what values we can set, and remove the ones we can't
++ * set, or fill them with some default data.
++ */
++
++ /* ~5 Mb/s real (802.11b) */
++ range->throughput = 5 * 1000 * 1000;
++
++ /* TODO: Not used in 802.11b?
++ * range->min_nwid; Minimal NWID we are able to set */
++ /* TODO: Not used in 802.11b?
++ * range->max_nwid; Maximal NWID we are able to set */
++
++ /* Old Frequency (backward compat - moved lower ) */
++ /* range->old_num_channels;
++ * range->old_num_frequency;
++ * range->old_freq[6]; Filler to keep "version" at the same offset */
++
++ /* signal level threshold range */
++
++ /* Quality of link & SNR stuff */
++ /* Quality range (link, level, noise)
++ * If the quality is absolute, it will be in the range [0 ; max_qual],
++ * if the quality is dBm, it will be in the range [max_qual ; 0].
++ * Don't forget that we use 8 bit arithmetics...
++ *
++ * If percentage range is 0~100
++ * Signal strength dbm range logical is -100 ~ 0
++ * but usually value is -90 ~ -20
++ */
++ range->max_qual.qual = 100;
++#ifdef CONFIG_SIGNAL_DISPLAY_DBM
++ range->max_qual.level = (u8)-100;
++ range->max_qual.noise = (u8)-100;
++ range->max_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */
++ range->max_qual.updated |= IW_QUAL_DBM;
++#else /* !CONFIG_SIGNAL_DISPLAY_DBM */
++ /* percent values between 0 and 100. */
++ range->max_qual.level = 100;
++ range->max_qual.noise = 100;
++ range->max_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */
++#endif /* !CONFIG_SIGNAL_DISPLAY_DBM */
++
++ /* This should contain the average/typical values of the quality
++ * indicator. This should be the threshold between a "good" and
++ * a "bad" link (example : monitor going from green to orange).
++ * Currently, user space apps like quality monitors don't have any
++ * way to calibrate the measurement. With this, they can split
++ * the range between 0 and max_qual in different quality level
++ * (using a geometric subdivision centered on the average).
++ * I expect that people doing the user space apps will feedback
++ * us on which value we need to put in each driver... */
++ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
++#ifdef CONFIG_SIGNAL_DISPLAY_DBM
++ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
++ range->avg_qual.level = (u8)-70;
++ range->avg_qual.noise = 0;
++ range->avg_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */
++ range->avg_qual.updated |= IW_QUAL_DBM;
++#else /* !CONFIG_SIGNAL_DISPLAY_DBM */
++ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
++ range->avg_qual.level = 30;
++ range->avg_qual.noise = 100;
++ range->avg_qual.updated = IW_QUAL_ALL_UPDATED; /* Updated all three */
++#endif /* !CONFIG_SIGNAL_DISPLAY_DBM */
++#if 0
++ range->num_bitrates = RATE_COUNT;
++
++ for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
++ range->bitrate[i] = rtw_rates[i];
++
++ range->min_frag = MIN_FRAG_THRESHOLD;
++ range->max_frag = MAX_FRAG_THRESHOLD;
++#endif
++
++ range->pm_capa = 0;
++
++ range->we_version_compiled = WIRELESS_EXT;
++ range->we_version_source = 16;
++
++ /* range->retry_capa; What retry options are supported
++ * range->retry_flags; How to decode max/min retry limit
++ * range->r_time_flags; How to decode max/min retry life
++ * range->min_retry; Minimal number of retries
++ * range->max_retry; Maximal number of retries
++ * range->min_r_time; Minimal retry lifetime
++ * range->max_r_time; Maximal retry lifetime */
++#if 0
++ for (i = 0, val = 0; i < rfctl->max_chan_nums; i++) {
++
++ /* Include only legal frequencies for some countries */
++ if (rfctl->channel_set[i].ChannelNum != 0) {
++ range->freq[val].i = rfctl->channel_set[i].ChannelNum;
++ range->freq[val].m = rtw_ch2freq(rfctl->channel_set[i].ChannelNum) * 100000;
++ range->freq[val].e = 1;
++ val++;
++ }
++
++ if (val == IW_MAX_FREQUENCIES)
++ break;
++ }
++#endif
++ range->num_channels = val;
++ range->num_frequency = val;
++
++ /* Commented by Albert 2009/10/13
++ * The following code will provide the security capability to the network manager.
++ * If the driver doesn't provide this capability to the network manager,
++ * the WPA/WPA2 routers can't be chosen in the network manager. */
++
++ /*
++ #define IW_SCAN_CAPA_NONE 0x00
++ #define IW_SCAN_CAPA_ESSID 0x01
++ #define IW_SCAN_CAPA_BSSID 0x02
++ #define IW_SCAN_CAPA_CHANNEL 0x04
++ #define IW_SCAN_CAPA_MODE 0x08
++ #define IW_SCAN_CAPA_RATE 0x10
++ #define IW_SCAN_CAPA_TYPE 0x20
++ #define IW_SCAN_CAPA_TIME 0x40
++ */
++
++#if WIRELESS_EXT > 17
++ range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
++ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
++#endif
++
++#ifdef IW_SCAN_CAPA_ESSID /* WIRELESS_EXT > 21 */
++ range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE | IW_SCAN_CAPA_BSSID |
++ IW_SCAN_CAPA_CHANNEL | IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
++#endif
++
++
++
++ return 0;
++
++}
++
++
++static char *aicwf_get_iwe_stream_mac_addr(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ /* AP MAC address */
++ iwe->cmd = SIOCGIWAP;
++ iwe->u.ap_addr.sa_family = ARPHRD_ETHER;
++
++ memcpy(iwe->u.ap_addr.sa_data, scan_re->bss->bssid, ETH_ALEN);
++ start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_ADDR_LEN);
++ return start;
++}
++
++
++static inline char *aicwf_get_iwe_stream_essid(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload;
++ const u8 *ie = mgmt->u.beacon.variable;
++ u8 *ssid;
++ int ssid_len = 0;
++
++ //get ssid len from ie
++ ssid_len = ie[1];
++
++ //get ssid from ie
++ ssid = (u8*)vmalloc(sizeof(char)* (ssid_len + 1));
++ if (!ssid)
++ return start;
++ memset(ssid, 0, (ssid_len + 1));
++ memcpy(ssid, &ie[2], ssid_len);
++
++
++ //AICWFDBG(LOGDEBUG, "%s len:%d ssid:%s\r\n", __func__, ssid_len, ssid);
++
++ /* Add the ESSID */
++ iwe->cmd = SIOCGIWESSID;
++ iwe->u.data.flags = 1;
++ iwe->u.data.length = min((u16)ssid_len, (u16)32);
++ start = iwe_stream_add_point(info, start, stop, iwe, ssid);
++
++ vfree(ssid);
++
++ return start;
++}
++
++
++
++static inline char *aicwf_get_iwe_stream_protocol(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ u16 ht_cap = false;
++ u16 vht_cap = false;
++ u16 he_cap = false;
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload;
++ u8 *payload = mgmt->u.beacon.variable;
++ const u8 *ie_content;
++
++ /* parsing HT_CAP_IE */
++ ie_content = NULL;
++ ie_content = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, payload, scan_re->ind->length);
++ if (ie_content != NULL){
++ ht_cap = true;
++ }
++
++ /* parsing VHT_CAP_IE */
++ ie_content = NULL;
++ ie_content = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, payload, scan_re->ind->length);
++ if (ie_content != NULL){
++ vht_cap = true;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)|| defined(CONFIG_HE_FOR_OLD_KERNEL)
++ /* parsing HE_CAP_IE */
++ ie_content = NULL;
++ ie_content = cfg80211_find_ie(WLAN_EID_EXTENSION, payload, scan_re->ind->length);
++ if (ie_content != NULL && ie_content[2] == WLAN_EID_EXT_HE_CAPABILITY){
++ he_cap = true;
++ }
++#endif
++
++ /* Add the protocol name */
++ iwe->cmd = SIOCGIWNAME;
++
++ if (ieee80211_frequency_to_channel(scan_re->ind->center_freq) > 14) {
++ if (he_cap == true){
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11ax");
++ }else if(vht_cap == true){
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11ac");
++ }else{
++ if (ht_cap == true)
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11an");
++ else
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11a");
++ }
++ } else {
++ if(he_cap == true){
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11ax");
++ }else if (ht_cap == true){
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11bgn");
++ }else{
++ snprintf(iwe->u.name, IFNAMSIZ, "IEEE 802.11bg");
++ }
++ }
++
++ start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_CHAR_LEN);
++ return start;
++}
++
++
++
++static inline char *aicwf_get_iwe_stream_rssi(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ iwe->cmd = IWEVQUAL;
++
++ iwe->u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED
++ | IW_QUAL_NOISE_INVALID
++ | IW_QUAL_DBM;
++
++ iwe->u.qual.level = (u8)scan_re->ind->rssi;
++ iwe->u.qual.qual = 100;//scan_re->bss->signal;
++ iwe->u.qual.noise = 0;
++
++ start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_QUAL_LEN);
++
++ return start;
++}
++
++static inline char *aicwf_get_iwe_stream_chan(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ /* Add frequency/channel */
++ iwe->cmd = SIOCGIWFREQ;
++ iwe->u.freq.m = scan_re->ind->center_freq * 100000;
++ iwe->u.freq.e = 1;
++ iwe->u.freq.i = ieee80211_frequency_to_channel(scan_re->ind->center_freq);
++ start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_FREQ_LEN);
++
++ return start;
++}
++
++
++static inline char *aicwf_get_iwe_stream_mode(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++
++ u16 cap = scan_re->bss->capability;
++ /* Add mode */
++ if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_ESS)) {
++ iwe->cmd = SIOCGIWMODE;
++ if (cap & WLAN_CAPABILITY_ESS)
++ iwe->u.mode = IW_MODE_MASTER;
++ else
++ iwe->u.mode = IW_MODE_ADHOC;
++
++ start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_UINT_LEN);
++ }
++ return start;
++
++}
++
++static inline char *aicwf_get_iwe_stream_encryption(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload;
++ const u8 *ie = mgmt->u.beacon.variable;
++ u16 cap = scan_re->bss->capability;
++ u8 *ssid;
++ int ssid_len = 0;
++
++ //get ssid len from ie
++ ssid_len = ie[1];
++
++ //get ssid from ie
++ ssid = (u8*)vmalloc(sizeof(char)* (ssid_len + 1));
++ if (!ssid)
++ return start;
++ memset(ssid, 0, (ssid_len + 1));
++ memcpy(ssid, &ie[2], ssid_len);
++
++
++ /* Add encryption capability */
++ iwe->cmd = SIOCGIWENCODE;
++ if (cap & WLAN_CAPABILITY_PRIVACY)
++ iwe->u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
++ else
++ iwe->u.data.flags = IW_ENCODE_DISABLED;
++ iwe->u.data.length = 0;
++ start = iwe_stream_add_point(info, start, stop, iwe, ssid);
++ vfree(ssid);
++
++ return start;
++
++}
++
++static inline char *aicwf_get_iwe_stream_rate(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload;
++ u8 *payload = mgmt->u.beacon.variable;
++ const u8 *ie_content;
++
++ u16 mcs_rate = 0;
++ u8 bw_40MHz = 0;
++ u8 short_GI = 0;
++ u16 max_rate = 0;
++ u8 bw_160MHz = 0;
++
++ u16 ht_cap = false;
++ struct ieee80211_ht_cap *ht_capie;
++
++ u16 vht_cap = false;
++ u8 tx_mcs_map[2];
++ u8 tx_mcs_index = 0;
++ u16 vht_data_rate = 0;
++
++ u16 he_cap = false;
++ u8 he_ch_width_set = 0;
++ u8 he_bw = 0;
++
++ /* parsing HT_CAP_IE */
++ ie_content = NULL;
++ ie_content = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, payload, scan_re->ind->length);
++ if (ie_content != NULL){
++ ht_cap = true;
++ ht_capie = (struct ieee80211_ht_cap *)(ie_content + 2);
++ bw_40MHz = (ht_capie->cap_info & NL80211_CHAN_WIDTH_40) ? 1 : 0;
++ short_GI = (ht_capie->cap_info & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
++ memcpy(&mcs_rate, ht_capie->mcs.rx_mask, 2);
++ }
++
++
++
++ /* parsing VHT_CAP_IE */
++ ie_content = NULL;
++ ie_content = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, payload, scan_re->ind->length);
++ if (ie_content != NULL){
++
++ vht_cap = true;
++ bw_160MHz = ((*(u8*)(ie_content + 2)) >> 2) & ((u8)(0xFF >> (8 - (2))));
++ if (bw_160MHz){
++ short_GI = ((*(u8*)(ie_content + 2)) >> 6) & ((u8)(0xFF >> (8 - (1))));
++ }else{
++ short_GI = ((*(u8*)(ie_content + 2)) >> 5) & ((u8)(0xFF >> (8 - (1))));
++ }
++
++ memcpy(tx_mcs_map, ((ie_content + 2)+8), 2);
++
++ tx_mcs_index = (tx_mcs_map[0] & 0x0F) - 1;
++ if(ieee80211_frequency_to_channel(scan_re->ind->center_freq) > 14){
++ vht_data_rate = VHT_MCS_DATA_RATE[2][short_GI][tx_mcs_index];
++ }else{
++ vht_data_rate = VHT_MCS_DATA_RATE[1][short_GI][tx_mcs_index];
++ }
++ //TODO: take the antenna count into account for VHT (AC) rates
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)|| defined(CONFIG_HE_FOR_OLD_KERNEL)
++ /* parsing HE_CAP_IE */
++ ie_content = NULL;
++ /*0xFF+len+WLAN_EID_EXTENSION+playload */
++ ie_content = cfg80211_find_ie(WLAN_EID_EXTENSION, payload, scan_re->ind->length);
++ if (ie_content != NULL && ie_content[2] == WLAN_EID_EXT_HE_CAPABILITY){
++ he_cap = true;
++ he_ch_width_set = ie_content[8];
++ if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_160_80P80){
++ he_bw = NL80211_CHAN_WIDTH_80;
++ }else if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_160_IN_5G){
++ he_bw = NL80211_CHAN_WIDTH_160;
++ }else if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_40_AND_80_IN_5G){
++ he_bw = NL80211_CHAN_WIDTH_40;
++ }else if(he_ch_width_set & IEEE80211_HE_CH_BW_SET_40_IN_2_4G){
++ he_bw = NL80211_CHAN_WIDTH_40;
++ }else{
++ he_bw = NL80211_CHAN_WIDTH_20;
++ }
++ //TODO: take the antenna count into account for HE (AX) rates
++ }
++#endif
++
++ if(he_cap == true){
++ if(he_bw == NL80211_CHAN_WIDTH_20){
++ max_rate = 144;
++ }else if(he_bw == NL80211_CHAN_WIDTH_40){
++ max_rate = 287;
++ }else if(he_bw == NL80211_CHAN_WIDTH_80){
++ max_rate = 601;
++ }else if(he_bw == NL80211_CHAN_WIDTH_160){
++ max_rate = 1147;
++ }
++ max_rate = max_rate * 2;
++ }else if(vht_cap == true){
++ max_rate = vht_data_rate;
++ }else if (ht_cap == true) {
++ if (mcs_rate & 0x8000) /* MCS15 */
++ max_rate = (bw_40MHz) ? ((short_GI) ? 300 : 270) : ((short_GI) ? 144 : 130);
++
++ else if (mcs_rate & 0x0080) /* MCS7 */
++ max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 72 : 65);
++ else { /* default MCS7 */
++ max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 72 : 65);
++ }
++ }
++
++ iwe->cmd = SIOCGIWRATE;
++ iwe->u.bitrate.fixed = iwe->u.bitrate.disabled = 0;
++ iwe->u.bitrate.value = max_rate * 1000000;
++ start = iwe_stream_add_event(info, start, stop, iwe, IW_EV_PARAM_LEN);
++ return start ;
++}
++
++
++
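++/* Scan the information elements and copy out the WPA IE (vendor-specific,
++ * OUI 00:50:f2 type 1) and the RSN IE (element ID 0x30); returns the
++ * combined length of the two IEs. */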
++int aic_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
++{
++
++ u8 authmode;
++ u8 sec_idx;
++ u8 wpa_oui[4] = {0x00, 0x50, 0xf2, 0x01};
++ uint cnt = 0;
++
++ /* Search required WPA or WPA2 IE and copy to sec_ie[ ] */
++
++ cnt = 0;
++
++ sec_idx = 0;
++
++ while (cnt < in_len) {
++ authmode = in_ie[cnt];
++
++ if ((authmode == 0xdd/*_WPA_IE_ID_*/) && (memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4) == 0)) {
++ if (wpa_ie)
++ memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
++
++ *wpa_len = in_ie[cnt + 1] + 2;
++ cnt += in_ie[cnt + 1] + 2; /* get next */
++ } else {
++ if (authmode == 0x30/*_WPA2_IE_ID_*/) {
++ if (rsn_ie)
++ memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
++
++ *rsn_len = in_ie[cnt + 1] + 2;
++ cnt += in_ie[cnt + 1] + 2; /* get next */
++ } else {
++ cnt += in_ie[cnt + 1] + 2; /* get next */
++ }
++ }
++
++ }
++
++
++ return *rsn_len + *wpa_len;
++
++}
++
++
++static inline char *aicwf_get_iwe_stream_wpa_wpa2(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload;
++ u8 *payload = mgmt->u.beacon.variable;
++ int buf_size = MAX_WPA_IE_LEN * 2;
++ u8 *pbuf;
++
++ u8 wpa_ie[255] = {0}, rsn_ie[255] = {0};
++ u16 i, wpa_len = 0, rsn_len = 0;
++ u8 *p;
++ int out_len = 0;
++
++ pbuf = (u8*)vmalloc(sizeof(u8) * buf_size);
++
++ if (pbuf) {
++ p = pbuf;
++
++ /* parsing WPA/WPA2 IE */
++ out_len = aic_get_sec_ie(payload , scan_re->ind->length, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
++
++ if (wpa_len > 0) {
++
++ memset(pbuf, 0, buf_size);
++ p += sprintf(p, "wpa_ie=");
++ for (i = 0; i < wpa_len; i++)
++ p += sprintf(p, "%02x", wpa_ie[i]);
++
++ if (wpa_len > 100) {
++ printk("-----------------Len %d----------------\n", wpa_len);
++ for (i = 0; i < wpa_len; i++)
++ printk("%02x ", wpa_ie[i]);
++ printk("\n");
++ printk("-----------------Len %d----------------\n", wpa_len);
++ }
++
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVCUSTOM;
++ iwe->u.data.length = strlen(pbuf);
++ start = iwe_stream_add_point(info, start, stop, iwe, pbuf);
++
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVGENIE;
++ iwe->u.data.length = wpa_len;
++ start = iwe_stream_add_point(info, start, stop, iwe, wpa_ie);
++ }
++ if (rsn_len > 0) {
++
++ memset(pbuf, 0, buf_size);
++ p += sprintf(p, "rsn_ie=");
++ for (i = 0; i < rsn_len; i++)
++ p += sprintf(p, "%02x", rsn_ie[i]);
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVCUSTOM;
++ iwe->u.data.length = strlen(pbuf);
++ start = iwe_stream_add_point(info, start, stop, iwe, pbuf);
++
++ memset(iwe, 0, sizeof(*iwe));
++ iwe->cmd = IWEVGENIE;
++ iwe->u.data.length = rsn_len;
++ start = iwe_stream_add_point(info, start, stop, iwe, rsn_ie);
++ }
++
++ vfree(pbuf);
++ }
++ return start;
++}
++
++
++u8 aicwf_get_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
++{
++ u8 match = false;
++ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
++
++ if (ie_ptr == NULL)
++ return match;
++
++ eid = ie_ptr[0];
++
++ if ((eid == 0xdd/*_WPA_IE_ID_*/) && (memcmp(&ie_ptr[2], wps_oui, 4) == 0)) {
++ *wps_ielen = ie_ptr[1] + 2;
++ match = true;
++ }
++ return match;
++}
++
++
++static inline char *aicwf_get_iwe_stream_wps(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop, struct iw_event *iwe)
++{
++
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt*)scan_re->payload;
++ u8 *payload = mgmt->u.beacon.variable;
++
++ /* parsing WPS IE */
++ uint cnt = 0, total_ielen;
++ u8 *wpsie_ptr = NULL;
++ uint wps_ielen = 0;
++ u8 *ie_ptr = payload;
++
++ total_ielen = scan_re->ind->length;
++
++ while (cnt < total_ielen) {
++ if (aicwf_get_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) {
++ wpsie_ptr = &ie_ptr[cnt];
++ iwe->cmd = IWEVGENIE;
++ iwe->u.data.length = (u16)wps_ielen;
++ start = iwe_stream_add_point(info, start, stop, iwe, wpsie_ptr);
++ }
++ cnt += ie_ptr[cnt + 1] + 2; /* goto next */
++ }
++ return start;
++}
++
++
++static char *translate_scan(struct rwnx_hw* rwnx_hw,
++ struct iw_request_info *info, struct scanu_result_wext *scan_re,
++ char *start, char *stop)
++{
++ struct iw_event iwe;
++ memset(&iwe, 0, sizeof(iwe));
++
++
++ start = aicwf_get_iwe_stream_mac_addr(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_essid(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_protocol(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_chan(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_mode(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_encryption(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_rate(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_wpa_wpa2(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_wps(rwnx_hw, info, scan_re, start, stop, &iwe);
++ start = aicwf_get_iwe_stream_rssi(rwnx_hw, info, scan_re, start, stop, &iwe);
++
++ return start;
++}
++
++
++
++static int aicwf_get_wap(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra)
++{
++
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
++
++ memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
++ if (rwnx_vif->sta.ap)
++ memcpy(wrqu->ap_addr.sa_data, rwnx_vif->sta.ap->mac_addr, ETH_ALEN);
++
++ return 0;
++
++}
++
++
++extern uint8_t scanning;
++static int aicwf_set_scan(struct net_device *dev, struct iw_request_info *a,
++ union iwreq_data *wrqu, char *extra)
++{
++ int ret = 0;
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw;
++ struct cfg80211_scan_request *request;
++ int index = 0;
++ struct wiphy *wiphy = priv_to_wiphy(rwnx_hw);
++ unsigned long wext_scan_timeout;
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ if (wiphy == NULL) {
++ AICWFDBG(LOGERROR, "%s wiphy is NULL", __func__);
++ return -ENODEV;
++ }
++
++ if (rwnx_hw->wext_scan || scanning) {
++ AICWFDBG(LOGINFO, "is scanning, abort\n");
++ ret = rwnx_send_scanu_cancel_req(rwnx_hw, NULL);
++ if (ret)
++ return ret;
++ msleep(150);
++ }
++
++ rwnx_hw->wext_scan = 1;
++
++ /* cfg80211_scan_request ends with a flexible channels[] array, so
++ * reserve room for the channel pointers as well */
++ request = (struct cfg80211_scan_request *)vmalloc(sizeof(struct cfg80211_scan_request) +
++ rwnx_hw->support_freqs_number * sizeof(struct ieee80211_channel *));
++ if (request == NULL) {
++ rwnx_hw->wext_scan = 0;
++ return -ENOMEM;
++ }
++
++ request->n_channels = rwnx_hw->support_freqs_number;
++ request->n_ssids = 0;
++ request->no_cck = false;
++ request->ie = NULL;
++ request->ie_len = 0;
++
++ for(index = 0;index < rwnx_hw->support_freqs_number; index++){
++ request->channels[index] = ieee80211_get_channel(wiphy,
++ rwnx_hw->support_freqs[index]);
++ if(request->channels[index] == NULL){
++ AICWFDBG(LOGERROR, "%s ERROR!!! channels is NULL", __func__);
++ continue;
++ }
++ }
++
++ ret = rwnx_send_scanu_req(rwnx_hw, rwnx_vif, request);
++ if (ret) {
++ rwnx_hw->wext_scan = 0;
++ vfree(request);
++ return ret;
++ }
++
++ rwnx_vif->rwnx_hw->scan_request = request;
++ wext_scan_timeout = msecs_to_jiffies(5000);
++
++ if (!wait_for_completion_killable_timeout(&rwnx_hw->wext_scan_com, wext_scan_timeout)) {
++ AICWFDBG(LOGERROR, "%s WEXT scan timeout", __func__);
++ }
++
++ return 0;
++}
++
++static int aicwf_get_scan(struct net_device *dev, struct iw_request_info *a,
++ union iwreq_data *wrqu, char *extra)
++{
++ int ret = 0;
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw;
++ struct scanu_result_wext *scan_re;
++ struct scanu_result_wext *tmp;
++ char *start = extra;
++ char *stop = start + wrqu->data.length;
++
++ AICWFDBG(LOGDEBUG, "%s Enter %p %p len:%d \r\n", __func__, start, stop, wrqu->data.length);
++
++ //TODO: spinlock
++ list_for_each_entry_safe(scan_re, tmp, &rwnx_hw->wext_scanre_list, scanu_re_list) {
++ start = translate_scan(rwnx_hw, a, scan_re, start, stop);
++ if ((stop - start) < 768) {
++ return -E2BIG;
++ }
++
++ }
++
++ list_for_each_entry_safe(scan_re, tmp, &rwnx_hw->wext_scanre_list, scanu_re_list) {
++ list_del(&scan_re->scanu_re_list);
++ vfree(scan_re->payload);
++ vfree(scan_re->ind);
++ vfree(scan_re);
++ scan_re = NULL;
++ }
++
++
++ wrqu->data.length = start - extra;
++ wrqu->data.flags = 0;
++
++ return ret;
++
++}
++
++
++static int aicwf_get_essid(struct net_device *dev,
++ struct iw_request_info *a,
++ union iwreq_data *wrqu, char *extra)
++{
++ int ret = 0;
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw;
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ wrqu->essid.length = strlen(rwnx_hw->wext_essid);
++ memcpy(extra, rwnx_hw->wext_essid, strlen(rwnx_hw->wext_essid));
++ wrqu->essid.flags = 1;
++
++
++ return ret;
++
++}
++
++
++static int aicwf_get_nick(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra)
++{
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ if (extra) {
++ wrqu->data.length = 8;
++ wrqu->data.flags = 1;
++ memcpy(extra, "AIC@8800", 8);
++ }
++
++ return 0;
++
++}
++
++static int aicwf_get_rate(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *extra)
++{
++ u16 max_rate = 0;
++ struct rwnx_vif* rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw* rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_sta *sta = NULL;
++
++ union rwnx_rate_ctrl_info *rate_info;
++ struct mm_get_sta_info_cfm cfm;
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ if(rwnx_vif->sta.ap){
++ sta = rwnx_vif->sta.ap;
++ rwnx_send_get_sta_info_req(rwnx_hw, sta->sta_idx, &cfm);
++ rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info;
++
++ switch (rate_info->formatModTx) {
++ case FORMATMOD_NON_HT:
++ case FORMATMOD_NON_HT_DUP_OFDM:
++ /* legacy (11b/g) data rate */
++ max_rate = tx_legrates_lut_rate[rate_info->mcsIndexTx]/10;
++ break;
++ case FORMATMOD_HT_MF:
++ case FORMATMOD_HT_GF:
++ case FORMATMOD_VHT:
++ /* HT/VHT data rate from MCS index */
++ max_rate = VHT_MCS_DATA_RATE[rate_info->bwTx][1][rate_info->mcsIndexTx]/2;
++ break;
++ case FORMATMOD_HE_MU:
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ /* HE data rate from MCS index */
++ max_rate = HE_MCS_DATA_RATE[rate_info->bwTx][rate_info->mcsIndexTx];
++ break;
++ }
++ }
++
++ wrqu->bitrate.fixed = 0; /* rate is not fixed (automatic selection) */
++ wrqu->bitrate.value = max_rate * 1000000;
++
++ return 0;
++}
++
++
++static int aicwf_get_enc(struct net_device *dev,
++ struct iw_request_info *info,
++ union iwreq_data *wrqu, char *keybuf)
++{
++ int ret = 0;
++ struct iw_point *erq = &(wrqu->encoding);
++
++ AICWFDBG(LOGTRACE, "%s Enter", __func__);
++
++ erq->length = 0;
++ erq->flags |= IW_ENCODE_ENABLED;
++
++
++ return ret;
++
++}
++
++
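++/*
++ * Standard WEXT handler table. The wireless extensions core indexes this
++ * array by ioctl number relative to SIOCIWFIRST (SIOCSIWCOMMIT), so a NULL
++ * entry simply means the corresponding ioctl is not supported by this driver.
++ */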
++static iw_handler aic_handlers[] = {
++ NULL, /* SIOCSIWCOMMIT */
++ aicwf_get_name, /* SIOCGIWNAME */
++ NULL, /* SIOCSIWNWID */
++ NULL, /* SIOCGIWNWID */
++ NULL, /* SIOCSIWFREQ */
++ aicwf_get_freq, /* SIOCGIWFREQ */
++ NULL, /* SIOCSIWMODE */
++ aicwf_get_mode, /* SIOCGIWMODE */
++ NULL, /* SIOCSIWSENS */
++ NULL, /* SIOCGIWSENS */
++ NULL, /* SIOCSIWRANGE */
++ aicwf_get_range, /* SIOCGIWRANGE */
++ NULL, /* SIOCSIWPRIV */
++ NULL, /* SIOCGIWPRIV */
++ NULL, /* SIOCSIWSTATS */
++ NULL, /* SIOCGIWSTATS */
++ NULL, /* SIOCSIWSPY */
++ NULL, /* SIOCGIWSPY */
++ NULL, /* SIOCGIWTHRSPY */
++ NULL, /* SIOCSIWTHRSPY */
++ NULL, /* SIOCSIWAP */
++ aicwf_get_wap, /* SIOCGIWAP */
++ NULL, /* SIOCSIWMLME -- request MLME operation using struct iw_mlme */
++ NULL, /* SIOCGIWAPLIST -- deprecated */
++ aicwf_set_scan, /* SIOCSIWSCAN */
++ aicwf_get_scan, /* SIOCGIWSCAN */
++ NULL, /* SIOCSIWESSID */
++ aicwf_get_essid, /* SIOCGIWESSID */
++ NULL, /* SIOCSIWNICKN */
++ aicwf_get_nick, /* SIOCGIWNICKN */
++ NULL, /* -- hole -- */
++ NULL, /* -- hole -- */
++ NULL, /* SIOCSIWRATE */
++ aicwf_get_rate, /* SIOCGIWRATE */
++ NULL, /* SIOCSIWRTS */
++ NULL, /* SIOCGIWRTS */
++ NULL, /* SIOCSIWFRAG */
++ NULL, /* SIOCGIWFRAG */
++ NULL, /* SIOCSIWTXPOW */
++ NULL, /* SIOCGIWTXPOW */
++ NULL, /* SIOCSIWRETRY */
++ NULL, /* SIOCGIWRETRY */
++ NULL, /* SIOCSIWENCODE */
++ aicwf_get_enc, /* SIOCGIWENCODE */
++ NULL, /* SIOCSIWPOWER */
++ NULL, /* SIOCGIWPOWER */
++ NULL, /*---hole---*/
++ NULL, /*---hole---*/
++ NULL, /* SIOCSIWGENIE */
++ NULL, /* SIOCGIWGENIE */
++ NULL, /* SIOCSIWAUTH */
++ NULL, /* SIOCGIWAUTH */
++ NULL, /* SIOCSIWENCODEEXT */
++ NULL, /* SIOCGIWENCODEEXT */
++ NULL, /* SIOCSIWPMKSA */
++ NULL, /*---hole---*/
++};
++
++struct iw_handler_def aic_handlers_def = {
++ .standard = aic_handlers,
++ .num_standard = ARRAY_SIZE(aic_handlers),
++#if WIRELESS_EXT >= 17
++ .get_wireless_stats = aicwf_get_wireless_stats,
++#endif
++};
++
++void aicwf_set_wireless_ext(struct net_device *ndev, struct rwnx_hw *rwnx_hw)
++{
++
++ AICWFDBG(LOGINFO, "%s Enter", __func__);
++
++ init_completion(&rwnx_hw->wext_scan_com);
++ INIT_LIST_HEAD(&rwnx_hw->wext_scanre_list);
++
++ ndev->wireless_handlers = (struct iw_handler_def *)&aic_handlers_def;
++}
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/aicwf_wext_linux.h
+@@ -0,0 +1,11 @@
++
++struct scanu_result_wext{
++ struct list_head scanu_re_list;
++ struct cfg80211_bss *bss;
++ struct scanu_result_ind *ind;
++ u32_l *payload;
++};
++
++void aicwf_set_wireless_ext(struct net_device *ndev, struct rwnx_hw *rwnx_hw);
++void aicwf_scan_complete_event(struct net_device *dev);
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/hal_desc.h
+@@ -0,0 +1,376 @@
++/**
++ ******************************************************************************
++ *
++ * @file hal_desc.h
++ *
++ * @brief File containing the definition of HW descriptors.
++ *
++ * Contains the definition and structures used by HW
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _HAL_DESC_H_
++#define _HAL_DESC_H_
++
++#include "lmac_types.h"
++
++/* Rate and policy table */
++
++#define N_CCK 8
++#define N_OFDM 8
++#define N_HT (8 * 2 * 2 * 4)
++#define N_VHT (10 * 4 * 2 * 8)
++#define N_HE_SU (12 * 4 * 3 * 8)
++#define N_HE_MU (12 * 6 * 3 * 8)
++
++/* conversion table from NL80211 to MACHW enum */
++extern const int chnl2bw[];
++
++/* conversion table from MACHW to NL80211 enum */
++extern const int bw2chnl[];
++
++/* Rate cntrl info */
++#define MCS_INDEX_TX_RCX_OFT 0
++#define MCS_INDEX_TX_RCX_MASK (0x7F << MCS_INDEX_TX_RCX_OFT)
++#define BW_TX_RCX_OFT 7
++#define BW_TX_RCX_MASK (0x3 << BW_TX_RCX_OFT)
++#define SHORT_GI_TX_RCX_OFT 9
++#define SHORT_GI_TX_RCX_MASK (0x1 << SHORT_GI_TX_RCX_OFT)
++#define PRE_TYPE_TX_RCX_OFT 10
++#define PRE_TYPE_TX_RCX_MASK (0x1 << PRE_TYPE_TX_RCX_OFT)
++#define FORMAT_MOD_TX_RCX_OFT 11
++#define FORMAT_MOD_TX_RCX_MASK (0x7 << FORMAT_MOD_TX_RCX_OFT)
++
++/* Values for formatModTx */
++#define FORMATMOD_NON_HT 0
++#define FORMATMOD_NON_HT_DUP_OFDM 1
++#define FORMATMOD_HT_MF 2
++#define FORMATMOD_HT_GF 3
++#define FORMATMOD_VHT 4
++#define FORMATMOD_HE_SU 5
++#define FORMATMOD_HE_MU 6
++#define FORMATMOD_HE_ER 7
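++
++/* These format/modulation values are what the formatModTx field of
++ * union rwnx_rate_ctrl_info (below) carries; see e.g. the switch statement
++ * in aicwf_get_rate(). */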
++
++/* Values for navProtFrmEx */
++#define NAV_PROT_NO_PROT_BIT 0
++#define NAV_PROT_SELF_CTS_BIT 1
++#define NAV_PROT_RTS_CTS_BIT 2
++#define NAV_PROT_RTS_CTS_WITH_QAP_BIT 3
++#define NAV_PROT_STBC_BIT 4
++
++/* THD MACCTRLINFO2 fields, used in struct umacdesc umac.flags */
++/// WhichDescriptor definition - contains aMPDU bit and position value
++/// Offset of WhichDescriptor field in the MAC CONTROL INFO 2 word
++#define WHICHDESC_OFT 19
++/// Mask of the WhichDescriptor field
++#define WHICHDESC_MSK (0x07 << WHICHDESC_OFT)
++/// Only 1 THD possible, describing an unfragmented MSDU
++#define WHICHDESC_UNFRAGMENTED_MSDU (0x00 << WHICHDESC_OFT)
++/// THD describing the first MPDU of a fragmented MSDU
++#define WHICHDESC_FRAGMENTED_MSDU_FIRST (0x01 << WHICHDESC_OFT)
++/// THD describing intermediate MPDUs of a fragmented MSDU
++#define WHICHDESC_FRAGMENTED_MSDU_INT (0x02 << WHICHDESC_OFT)
++/// THD describing the last MPDU of a fragmented MSDU
++#define WHICHDESC_FRAGMENTED_MSDU_LAST (0x03 << WHICHDESC_OFT)
++/// THD for extra descriptor starting an AMPDU
++#define WHICHDESC_AMPDU_EXTRA (0x04 << WHICHDESC_OFT)
++/// THD describing the first MPDU of an A-MPDU
++#define WHICHDESC_AMPDU_FIRST (0x05 << WHICHDESC_OFT)
++/// THD describing intermediate MPDUs of an A-MPDU
++#define WHICHDESC_AMPDU_INT (0x06 << WHICHDESC_OFT)
++/// THD describing the last MPDU of an A-MPDU
++#define WHICHDESC_AMPDU_LAST (0x07 << WHICHDESC_OFT)
++
++/// aMPDU bit offset
++#define AMPDU_OFT 21
++/// aMPDU bit
++#define AMPDU_BIT CO_BIT(AMPDU_OFT)
++
++enum {
++ HW_RATE_1MBPS = 0,
++ HW_RATE_2MBPS = 1,
++ HW_RATE_5_5MBPS = 2,
++ HW_RATE_11MBPS = 3,
++ HW_RATE_6MBPS = 4,
++ HW_RATE_9MBPS = 5,
++ HW_RATE_12MBPS = 6,
++ HW_RATE_18MBPS = 7,
++ HW_RATE_24MBPS = 8,
++ HW_RATE_36MBPS = 9,
++ HW_RATE_48MBPS = 10,
++ HW_RATE_54MBPS = 11,
++ HW_RATE_MAX
++};
++
++union rwnx_mcs_index {
++ struct {
++ u32 mcs : 3;
++ u32 nss : 2;
++ } ht;
++ struct {
++ u32 mcs : 4;
++ u32 nss : 3;
++ } vht;
++ struct {
++ u32 mcs : 4;
++ u32 nss : 3;
++ } he;
++ u32 legacy : 7;
++};
++
++/* cf. RW-WLAN-nX-MAC-HW-UM */
++union rwnx_rate_ctrl_info {
++ struct {
++ u32 mcsIndexTx : 7;
++ u32 bwTx : 2;
++ u32 giAndPreTypeTx : 2;
++ u32 formatModTx : 3;
++ u32 navProtFrmEx : 3;
++ u32 mcsIndexProtTx : 7;
++ u32 bwProtTx : 2;
++ u32 formatModProtTx : 3;
++ u32 nRetry : 3;
++ };
++ u32 value;
++};
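++
++/*
++ * Example: decoding a raw rate control word through this union (the bit
++ * positions match the *_TX_RCX_* masks defined earlier in this file):
++ *
++ *   union rwnx_rate_ctrl_info info = { .value = raw };
++ *   mcs    = info.mcsIndexTx;   // bits 0..6,   MCS_INDEX_TX_RCX_MASK
++ *   bw     = info.bwTx;         // bits 7..8,   BW_TX_RCX_MASK
++ *   format = info.formatModTx;  // bits 11..13, FORMAT_MOD_TX_RCX_MASK
++ */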
++
++/* cf. RW-WLAN-nX-MAC-HW-UM */
++struct rwnx_power_ctrl_info {
++ u32 txPwrLevelPT : 8;
++ u32 txPwrLevelProtPT : 8;
++ u32 reserved :16;
++};
++
++/* cf. RW-WLAN-nX-MAC-HW-UM */
++union rwnx_pol_phy_ctrl_info_1 {
++ struct {
++ u32 rsvd1 : 3;
++ u32 bfFrmEx : 1;
++ u32 numExtnSS : 2;
++ u32 fecCoding : 1;
++ u32 stbc : 2;
++ u32 rsvd2 : 5;
++ u32 nTx : 3;
++ u32 nTxProt : 3;
++ };
++ u32 value;
++};
++
++/* cf. RW-WLAN-nX-MAC-HW-UM */
++union rwnx_pol_phy_ctrl_info_2 {
++ struct {
++ u32 antennaSet : 8;
++ u32 smmIndex : 8;
++ u32 beamFormed : 1;
++ };
++ u32 value;
++};
++
++/* cf. RW-WLAN-nX-MAC-HW-UM */
++union rwnx_pol_mac_ctrl_info_1 {
++ struct {
++ u32 keySRamIndex : 10;
++ u32 keySRamIndexRA : 10;
++ };
++ u32 value;
++};
++
++/* cf. RW-WLAN-nX-MAC-HW-UM */
++union rwnx_pol_mac_ctrl_info_2 {
++ struct {
++ u32 longRetryLimit : 8;
++ u32 shortRetryLimit : 8;
++ u32 rtsThreshold : 12;
++ };
++ u32 value;
++};
++
++#define POLICY_TABLE_PATTERN 0xBADCAB1E
++
++struct tx_policy_tbl {
++ /* Unique Pattern at the start of Policy Table */
++ u32 upatterntx;
++ /* PHY Control 1 Information used by MAC HW */
++ union rwnx_pol_phy_ctrl_info_1 phyctrlinfo_1;
++ /* PHY Control 2 Information used by MAC HW */
++ union rwnx_pol_phy_ctrl_info_2 phyctrlinfo_2;
++ /* MAC Control 1 Information used by MAC HW */
++ union rwnx_pol_mac_ctrl_info_1 macctrlinfo_1;
++ /* MAC Control 2 Information used by MAC HW */
++ union rwnx_pol_mac_ctrl_info_2 macctrlinfo_2;
++
++ union rwnx_rate_ctrl_info ratectrlinfos[NX_TX_MAX_RATES];
++ struct rwnx_power_ctrl_info powerctrlinfos[NX_TX_MAX_RATES];
++};
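++
++/* The firmware presumably checks upatterntx against POLICY_TABLE_PATTERN
++ * (0xBADCAB1E) to validate that the policy table has been filled in. */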
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++/**
++ * struct rwnx_hw_txstatus - Bitfield of confirmation status
++ *
++ * @tx_done: packet has been processed by the firmware.
++ * @retry_required: packet has been transmitted but not acknowledged.
++ * Driver must repush it.
++ * @sw_retry_required: packet has not been transmitted (FW wasn't able to push
++ * it when it received it: not active channel ...). Driver must repush it.
++ * @acknowledged: packet has been acknowledged by peer
++ */
++union rwnx_hw_txstatus {
++ struct {
++ u32 tx_done : 1;
++ u32 retry_required : 1;
++ u32 sw_retry_required : 1;
++ u32 acknowledged : 1;
++ u32 reserved :28;
++ };
++ u32 value;
++};
++
++/**
++ * struct tx_cfm_tag - Structure indicating the status and other
++ * information about the transmission
++ *
++ * @pn: PN that was used for the transmission
++ * @sn: Sequence number of the packet
++ * @timestamp: Timestamp of first transmission of this MPDU
++ * @credits: Number of credits to be reallocated for the txq that push this
++ * buffer (can be 0 or 1)
++ * @ampdu_size: Size of the A-MPDU in which the frame has been transmitted if
++ * this was the last frame of the A-MPDU, and 0 if the frame is not the last
++ * frame of an A-MPDU.
++ * 1 means that the frame has been transmitted as a singleton.
++ * @amsdu_size: Size, in bytes, allowed to create a-msdu.
++ * @status: transmission status
++ */
++struct tx_cfm_tag
++{
++ u16_l pn[4];
++ u16_l sn;
++ u16_l timestamp;
++ s8_l credits;
++ u8_l ampdu_size;
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ u16_l amsdu_size;
++#endif
++ union rwnx_hw_txstatus status;
++};
++
++/**
++ * struct rwnx_hw_txhdr - Hardware part of tx header
++ *
++ * @cfm: Information updated by fw/hardware after sending a frame
++ */
++struct rwnx_hw_txhdr {
++ struct tx_cfm_tag cfm;
++};
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/* Modem */
++
++#define MDM_PHY_CONFIG_TRIDENT 0
++#define MDM_PHY_CONFIG_ELMA 1
++#define MDM_PHY_CONFIG_KARST 2
++
++// MODEM features (from reg_mdm_stat.h)
++/// MUMIMOTX field bit
++#define MDM_MUMIMOTX_BIT ((u32)0x80000000)
++/// MUMIMOTX field position
++#define MDM_MUMIMOTX_POS 31
++/// MUMIMORX field bit
++#define MDM_MUMIMORX_BIT ((u32)0x40000000)
++/// MUMIMORX field position
++#define MDM_MUMIMORX_POS 30
++/// BFMER field bit
++#define MDM_BFMER_BIT ((u32)0x20000000)
++/// BFMER field position
++#define MDM_BFMER_POS 29
++/// BFMEE field bit
++#define MDM_BFMEE_BIT ((u32)0x10000000)
++/// BFMEE field position
++#define MDM_BFMEE_POS 28
++/// LDPCDEC field bit
++#define MDM_LDPCDEC_BIT ((u32)0x08000000)
++/// LDPCDEC field position
++#define MDM_LDPCDEC_POS 27
++/// LDPCENC field bit
++#define MDM_LDPCENC_BIT ((u32)0x04000000)
++/// LDPCENC field position
++#define MDM_LDPCENC_POS 26
++/// CHBW field mask
++#define MDM_CHBW_MASK ((u32)0x03000000)
++/// CHBW field LSB position
++#define MDM_CHBW_LSB 24
++/// CHBW field width
++#define MDM_CHBW_WIDTH ((u32)0x00000002)
++/// DSSSCCK field bit
++#define MDM_DSSSCCK_BIT ((u32)0x00800000)
++/// DSSSCCK field position
++#define MDM_DSSSCCK_POS 23
++/// VHT field bit
++#define MDM_VHT_BIT ((u32)0x00400000)
++/// VHT field position
++#define MDM_VHT_POS 22
++/// HE field bit
++#define MDM_HE_BIT ((u32)0x00200000)
++/// HE field position
++#define MDM_HE_POS 21
++/// ESS field bit
++#define MDM_ESS_BIT ((u32)0x00100000)
++/// ESS field position
++#define MDM_ESS_POS 20
++/// RFMODE field mask
++#define MDM_RFMODE_MASK ((u32)0x000F0000)
++/// RFMODE field LSB position
++#define MDM_RFMODE_LSB 16
++/// RFMODE field width
++#define MDM_RFMODE_WIDTH ((u32)0x00000004)
++/// NSTS field mask
++#define MDM_NSTS_MASK ((u32)0x0000F000)
++/// NSTS field LSB position
++#define MDM_NSTS_LSB 12
++/// NSTS field width
++#define MDM_NSTS_WIDTH ((u32)0x00000004)
++/// NSS field mask
++#define MDM_NSS_MASK ((u32)0x00000F00)
++/// NSS field LSB position
++#define MDM_NSS_LSB 8
++/// NSS field width
++#define MDM_NSS_WIDTH ((u32)0x00000004)
++/// NTX field mask
++#define MDM_NTX_MASK ((u32)0x000000F0)
++/// NTX field LSB position
++#define MDM_NTX_LSB 4
++/// NTX field width
++#define MDM_NTX_WIDTH ((u32)0x00000004)
++/// NRX field mask
++#define MDM_NRX_MASK ((u32)0x0000000F)
++/// NRX field LSB position
++#define MDM_NRX_LSB 0
++/// NRX field width
++#define MDM_NRX_WIDTH ((u32)0x00000004)
++
++#define __MDM_PHYCFG_FROM_VERS(v) (((v) & MDM_RFMODE_MASK) >> MDM_RFMODE_LSB)
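++
++/* The remaining fields are extracted the same way, e.g. the number-of-spatial-
++ * streams field of a version word is ((v) & MDM_NSS_MASK) >> MDM_NSS_LSB. */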
++
++#define RIU_FCU_PRESENT_MASK ((u32)0xFF000000)
++#define RIU_FCU_PRESENT_LSB 24
++
++#define __RIU_FCU_PRESENT(v) ((((v) & RIU_FCU_PRESENT_MASK) >> RIU_FCU_PRESENT_LSB) == 5)
++
++/// AGC load version field mask
++#define RIU_AGC_LOAD_MASK ((u32)0x00C00000)
++/// AGC load version field LSB position
++#define RIU_AGC_LOAD_LSB 22
++
++#define __RIU_AGCLOAD_FROM_VERS(v) (((v) & RIU_AGC_LOAD_MASK) >> RIU_AGC_LOAD_LSB)
++
++#define __FPGA_TYPE(v) (((v) & 0xFFFF0000) >> 16)
++
++#define __MDM_MAJOR_VERSION(v) (((v) & 0xFF000000) >> 24)
++#define __MDM_MINOR_VERSION(v) (((v) & 0x00FF0000) >> 16)
++
++
++#endif // _HAL_DESC_H_
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/ipc_compat.h
+@@ -0,0 +1,25 @@
++/**
++ ****************************************************************************************
++ *
++ * @file ipc_compat.h
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _IPC_H_
++#define _IPC_H_
++
++#define __INLINE static __attribute__((__always_inline__)) inline
++
++#define __ALIGN4 __aligned(4)
++
++#define ASSERT_ERR(condition) \
++ do { \
++ if (unlikely(!(condition))) { \
++ printk(KERN_ERR "%s:%d:ASSERT_ERR(" #condition ")\n", __FILE__, __LINE__); \
++ } \
++ } while(0)
++
++#endif /* _IPC_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/ipc_host.c
+@@ -0,0 +1,771 @@
++/**
++ ******************************************************************************
++ *
++ * @file ipc_host.c
++ *
++ * @brief IPC module.
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ******************************************************************************
++ */
++
++/*
++ * INCLUDE FILES
++ ******************************************************************************
++ */
++#ifndef __KERNEL__
++#include <stdio.h>
++#define REG_SW_SET_PROFILING(env, value) do{ }while(0)
++#define REG_SW_CLEAR_PROFILING(env, value) do{ }while(0)
++#define REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env) do{ }while(0)
++#define REG_SW_SET_HOSTBUF_IDX_PROFILING(env, val) do{ }while(0)
++#else
++#include <linux/spinlock.h>
++#include "rwnx_defs.h"
++#include "rwnx_prof.h"
++#endif
++
++#include "reg_ipc_app.h"
++#include "ipc_host.h"
++
++/*
++ * TYPES DEFINITION
++ ******************************************************************************
++ */
++
++const int nx_txdesc_cnt[] =
++{
++ NX_TXDESC_CNT0,
++ NX_TXDESC_CNT1,
++ NX_TXDESC_CNT2,
++ NX_TXDESC_CNT3,
++ #if NX_TXQ_CNT == 5
++ NX_TXDESC_CNT4,
++ #endif
++};
++
++const int nx_txdesc_cnt_msk[] =
++{
++ NX_TXDESC_CNT0 - 1,
++ NX_TXDESC_CNT1 - 1,
++ NX_TXDESC_CNT2 - 1,
++ NX_TXDESC_CNT3 - 1,
++ #if NX_TXQ_CNT == 5
++ NX_TXDESC_CNT4 - 1,
++ #endif
++};
++
++const int nx_txuser_cnt[] =
++{
++ CONFIG_USER_MAX,
++ CONFIG_USER_MAX,
++ CONFIG_USER_MAX,
++ CONFIG_USER_MAX,
++ #if NX_TXQ_CNT == 5
++ 1,
++ #endif
++};
++
++
++/*
++ * FUNCTIONS DEFINITIONS
++ ******************************************************************************
++ */
++/**
++ * ipc_host_rxdesc_handler() - Handle the reception of a Rx Descriptor
++ *
++ * @env: pointer to the IPC Host environment
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_RXDESC is set
++ */
++static void ipc_host_rxdesc_handler(struct ipc_host_env_tag *env)
++{
++ // For profiling
++ REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_RXDESC);
++
++ // LMAC has triggered an IT saying that a reception has occurred.
++ // Then we first need to check the validity of the current hostbuf, and the validity
++ // of the next hostbufs too, because it is likely that several hostbufs have been
++ // filled within the time needed for this irq handling
++ do {
++ #ifdef CONFIG_RWNX_FULLMAC
++ // call the external function to indicate that a RX descriptor is received
++ if (env->cb.recv_data_ind(env->pthis,
++ env->ipc_host_rxdesc_array[env->ipc_host_rxdesc_idx].hostid) != 0)
++ #else
++ // call the external function to indicate that a RX packet is received
++ if (env->cb.recv_data_ind(env->pthis,
++ env->ipc_host_rxbuf_array[env->ipc_host_rxbuf_idx].hostid) != 0)
++ #endif //(CONFIG_RWNX_FULLMAC)
++ break;
++
++ }while(1);
++
++ // For profiling
++ REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_RXDESC);
++}
++
++/**
++ * ipc_host_radar_handler() - Handle the reception of radar events
++ *
++ * @env: pointer to the IPC Host environment
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_RADAR is set
++ */
++static void ipc_host_radar_handler(struct ipc_host_env_tag *env)
++{
++#ifdef CONFIG_RWNX_RADAR
++ // LMAC has triggered an IT saying that a radar event has been sent to upper layer.
++ // Then we first need to check the validity of the current msg buf, and the validity
++ // of the next buffers too, because it is likely that several buffers have been
++ // filled within the time needed for this irq handling
++ // call the external function to indicate that a RX packet is received
++ spin_lock_bh(&((struct rwnx_hw *)env->pthis)->radar.lock);
++ while (env->cb.recv_radar_ind(env->pthis,
++ env->ipc_host_radarbuf_array[env->ipc_host_radarbuf_idx].hostid) == 0)
++ ;
++ spin_unlock_bh(&((struct rwnx_hw *)env->pthis)->radar.lock);
++#endif /* CONFIG_RWNX_RADAR */
++}
++
++/**
++ * ipc_host_unsup_rx_vec_handler() - Handle the reception of unsupported rx vector
++ *
++ * @env: pointer to the IPC Host environment
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_UNSUP_RX_VEC is set
++ */
++static void ipc_host_unsup_rx_vec_handler(struct ipc_host_env_tag *env)
++{
++ while (env->cb.recv_unsup_rx_vec_ind(env->pthis,
++ env->ipc_host_unsuprxvecbuf_array[env->ipc_host_unsuprxvecbuf_idx].hostid) == 0)
++ ;
++}
++
++/**
++ * ipc_host_msg_handler() - Handler for firmware message
++ *
++ * @env: pointer to the IPC Host environment
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_MSG is set
++ */
++static void ipc_host_msg_handler(struct ipc_host_env_tag *env)
++{
++ // For profiling
++ REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_MSG);
++
++ // LMAC has triggered an IT saying that a message has been sent to upper layer.
++ // Then we first need to check the validity of the current msg buf, and the validity
++ // of the next buffers too, because it is likely that several buffers have been
++ // filled within the time needed for this irq handling
++ // call the external function to indicate that a RX packet is received
++ while (env->cb.recv_msg_ind(env->pthis,
++ env->ipc_host_msgbuf_array[env->ipc_host_msge2a_idx].hostid) == 0)
++ ;
++
++
++ // For profiling
++ REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_MSG);
++
++}
++
++/**
++ * ipc_host_msgack_handler() - Handle the reception of message acknowledgement
++ *
++ * @env: pointer to the IPC Host environment
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_MSG_ACK is set
++ */
++static void ipc_host_msgack_handler(struct ipc_host_env_tag *env)
++{
++ void *hostid = env->msga2e_hostid;
++
++ ASSERT_ERR(hostid);
++ ASSERT_ERR(env->msga2e_cnt == (((struct lmac_msg *)(&env->shared->msg_a2e_buf.msg))->src_id & 0xFF));
++
++ env->msga2e_hostid = NULL;
++ env->msga2e_cnt++;
++ env->cb.recv_msgack_ind(env->pthis, hostid);
++}
++
++/**
++ * ipc_host_dbg_handler() - Handle the reception of Debug event
++ *
++ * @env: pointer to the IPC Host environment
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_DBG is set
++ */
++static void ipc_host_dbg_handler(struct ipc_host_env_tag *env)
++{
++ // For profiling
++ REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_DBG);
++
++ // LMAC has triggered an IT saying that a DBG message has been sent to upper layer.
++ // Then we first need to check the validity of the current buffer, and the validity
++ // of the next buffers too, because it is likely that several buffers have been
++ // filled within the time needed for this irq handling
++ // call the external function to indicate that a RX packet is received
++ while(env->cb.recv_dbg_ind(env->pthis,
++ env->ipc_host_dbgbuf_array[env->ipc_host_dbg_idx].hostid) == 0)
++ ;
++
++ // For profiling
++ REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_DBG);
++}
++
++/**
++ * ipc_host_tx_cfm_handler() - Handle the reception of TX confirmation
++ *
++ * @env: pointer to the IPC Host environment
++ * @queue_idx: index of the hardware on which the confirmation has been received
++ * @user_pos: index of the user position
++ *
++ * Called from general IRQ handler when status %IPC_IRQ_E2A_TXCFM is set
++ */
++static void ipc_host_tx_cfm_handler(struct ipc_host_env_tag *env,
++ const int queue_idx, const int user_pos)
++{
++ // TX confirmation descriptors have been received
++ REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM);
++ while (1)
++ {
++ // Get the used index and increase it. We do the increase before knowing if the
++ // current buffer is confirmed because the callback function may call the
++ // ipc_host_txdesc_get() in case flow control was enabled and the index has to be
++ // already at the good value to ensure that the test of FIFO full is correct
++ uint32_t used_idx = env->txdesc_used_idx[queue_idx][user_pos]++;
++ uint32_t used_idx_mod = used_idx & nx_txdesc_cnt_msk[queue_idx];
++ void *host_id = env->tx_host_id[queue_idx][user_pos][used_idx_mod];
++
++ // Reset the host id in the array
++ env->tx_host_id[queue_idx][user_pos][used_idx_mod] = 0;
++
++ // call the external function to indicate that a TX packet is freed
++ if (host_id == 0)
++ {
++ // No more confirmations, so put back the used index at its initial value
++ env->txdesc_used_idx[queue_idx][user_pos] = used_idx;
++ break;
++ }
++
++ if (env->cb.send_data_cfm(env->pthis, host_id) != 0)
++ {
++ // No more confirmations, so put back the used index at its initial value
++ env->txdesc_used_idx[queue_idx][user_pos] = used_idx;
++ env->tx_host_id[queue_idx][user_pos][used_idx_mod] = host_id;
++ // and exit the loop
++ break;
++ }
++
++ REG_SW_SET_PROFILING_CHAN(env->pthis, SW_PROF_CHAN_CTXT_CFM_HDL_BIT);
++ REG_SW_CLEAR_PROFILING_CHAN(env->pthis, SW_PROF_CHAN_CTXT_CFM_HDL_BIT);
++ }
++
++ REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM);
++}
++
++/**
++ ******************************************************************************
++ */
++bool ipc_host_tx_frames_pending(struct ipc_host_env_tag *env)
++{
++ int i, j;
++ bool tx_frames_pending = false;
++
++ for (i = 0; (i < IPC_TXQUEUE_CNT) && !tx_frames_pending; i++)
++ {
++ for (j = 0; j < nx_txuser_cnt[i]; j++)
++ {
++ uint32_t used_idx = env->txdesc_used_idx[i][j];
++ uint32_t free_idx = env->txdesc_free_idx[i][j];
++
++ // Check if this queue is empty or not
++ if (used_idx != free_idx)
++ {
++ // The queue is not empty, update the flag and exit
++ tx_frames_pending = true;
++ break;
++ }
++ }
++ }
++
++ return (tx_frames_pending);
++}
++
++/**
++ ******************************************************************************
++ */
++void *ipc_host_tx_flush(struct ipc_host_env_tag *env, const int queue_idx, const int user_pos)
++{
++ uint32_t used_idx = env->txdesc_used_idx[queue_idx][user_pos];
++ void *host_id = env->tx_host_id[queue_idx][user_pos][used_idx & nx_txdesc_cnt_msk[queue_idx]];
++
++ // call the external function to indicate that a TX packet is freed
++ if (host_id != 0)
++ {
++ // Reset the host id in the array
++ env->tx_host_id[queue_idx][user_pos][used_idx & nx_txdesc_cnt_msk[queue_idx]] = 0;
++
++ // Increment the used index
++ env->txdesc_used_idx[queue_idx][user_pos]++;
++ }
++
++ return (host_id);
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_init(struct ipc_host_env_tag *env,
++ struct ipc_host_cb_tag *cb,
++ struct ipc_shared_env_tag *shared_env_ptr,
++ void *pthis)
++{
++ unsigned int i;
++ unsigned int size;
++ unsigned int * dst;
++
++ // Reset the environments
++ // Reset the IPC Shared memory
++#if 0
++ /* check potential platform bug on multiple stores */
++ memset(shared_env_ptr, 0, sizeof(struct ipc_shared_env_tag));
++#else
++ dst = (unsigned int *)shared_env_ptr;
++ size = (unsigned int)sizeof(struct ipc_shared_env_tag);
++ for (i=0; i < size; i+=4)
++ {
++ *dst++ = 0;
++ }
++#endif
++ // Reset the IPC Host environment
++ memset(env, 0, sizeof(struct ipc_host_env_tag));
++
++ // Initialize the shared environment pointer
++ env->shared = shared_env_ptr;
++
++ // Save the callbacks in our own environment
++ env->cb = *cb;
++
++ // Save the pointer to the register base
++ env->pthis = pthis;
++
++ // Initialize buffers numbers and buffers sizes needed for DMA Receptions
++ env->rx_bufnb = IPC_RXBUF_CNT;
++ #ifdef CONFIG_RWNX_FULLMAC
++ env->rxdesc_nb = IPC_RXDESC_CNT;
++ #endif //(CONFIG_RWNX_FULLMAC)
++ env->radar_bufnb = IPC_RADARBUF_CNT;
++ env->radar_bufsz = sizeof(struct radar_pulse_array_desc);
++ env->unsuprxvec_bufnb = IPC_UNSUPRXVECBUF_CNT;
++ env->unsuprxvec_bufsz = max(sizeof(struct rx_vector_desc), (size_t) RADIOTAP_HDR_MAX_LEN) +
++ RADIOTAP_HDR_VEND_MAX_LEN + UNSUP_RX_VEC_DATA_LEN;
++ env->ipc_e2amsg_bufnb = IPC_MSGE2A_BUF_CNT;
++ env->ipc_e2amsg_bufsz = sizeof(struct ipc_e2a_msg);
++ env->ipc_dbg_bufnb = IPC_DBGBUF_CNT;
++ env->ipc_dbg_bufsz = sizeof(struct ipc_dbg_msg);
++
++ for (i = 0; i < CONFIG_USER_MAX; i++)
++ {
++ // Initialize the pointers to the hostid arrays
++ env->tx_host_id[0][i] = env->tx_host_id0[i];
++ env->tx_host_id[1][i] = env->tx_host_id1[i];
++ env->tx_host_id[2][i] = env->tx_host_id2[i];
++ env->tx_host_id[3][i] = env->tx_host_id3[i];
++ #if NX_TXQ_CNT == 5
++ env->tx_host_id[4][i] = NULL;
++ #endif
++
++ // Initialize the pointers to the TX descriptor arrays
++ env->txdesc[0][i] = shared_env_ptr->txdesc0[i];
++ env->txdesc[1][i] = shared_env_ptr->txdesc1[i];
++ env->txdesc[2][i] = shared_env_ptr->txdesc2[i];
++ env->txdesc[3][i] = shared_env_ptr->txdesc3[i];
++ #if NX_TXQ_CNT == 5
++ env->txdesc[4][i] = NULL;
++ #endif
++ }
++
++ #if NX_TXQ_CNT == 5
++ env->tx_host_id[4][0] = env->tx_host_id4[0];
++ env->txdesc[4][0] = shared_env_ptr->txdesc4[0];
++ #endif
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_patt_addr_push(struct ipc_host_env_tag *env, uint32_t addr)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ // Copy the address
++ shared_env_ptr->pattern_addr = addr;
++}
++
++/**
++ ******************************************************************************
++ */
++int ipc_host_rxbuf_push(struct ipc_host_env_tag *env,
++#ifdef CONFIG_RWNX_FULLMAC
++ uint32_t hostid,
++#endif
++ uint32_t hostbuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env->pthis);
++ REG_SW_SET_HOSTBUF_IDX_PROFILING(env->pthis, env->ipc_host_rxbuf_idx);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ // Copy the hostbuf (DMA address) in the ipc shared memory
++ shared_env_ptr->host_rxbuf[env->ipc_host_rxbuf_idx].hostid = hostid;
++ shared_env_ptr->host_rxbuf[env->ipc_host_rxbuf_idx].dma_addr = hostbuf;
++#else
++ // Save the hostid and the hostbuf in global array
++ env->ipc_host_rxbuf_array[env->ipc_host_rxbuf_idx].hostid = hostid;
++ env->ipc_host_rxbuf_array[env->ipc_host_rxbuf_idx].dma_addr = hostbuf;
++
++ shared_env_ptr->host_rxbuf[env->ipc_host_rxbuf_idx] = hostbuf;
++#endif //(CONFIG_RWNX_FULLMAC)
++
++ // Signal to the embedded CPU that at least one buffer is available
++ ipc_app2emb_trigger_set(shared_env_ptr, IPC_IRQ_A2E_RXBUF_BACK);
++
++ // Increment the array index
++ env->ipc_host_rxbuf_idx = (env->ipc_host_rxbuf_idx +1)%IPC_RXBUF_CNT;
++
++ return (0);
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++/**
++ ******************************************************************************
++ */
++int ipc_host_rxdesc_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ // Reset the RX Descriptor DMA Address and increment the counter
++ env->ipc_host_rxdesc_array[env->ipc_host_rxdesc_idx].dma_addr = hostbuf;
++ env->ipc_host_rxdesc_array[env->ipc_host_rxdesc_idx].hostid = hostid;
++
++ shared_env_ptr->host_rxdesc[env->ipc_host_rxdesc_idx].dma_addr = hostbuf;
++
++ // Signal to the embedded CPU that at least one descriptor is available
++ ipc_app2emb_trigger_set(shared_env_ptr, IPC_IRQ_A2E_RXDESC_BACK);
++
++ env->ipc_host_rxdesc_idx = (env->ipc_host_rxdesc_idx + 1) % IPC_RXDESC_CNT;
++
++ return (0);
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/**
++ ******************************************************************************
++ */
++int ipc_host_radarbuf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ // Save the hostid and the hostbuf in global array
++ env->ipc_host_radarbuf_array[env->ipc_host_radarbuf_idx].hostid = hostid;
++ env->ipc_host_radarbuf_array[env->ipc_host_radarbuf_idx].dma_addr = hostbuf;
++
++ // Copy the hostbuf (DMA address) in the ipc shared memory
++ shared_env_ptr->radarbuf_hostbuf[env->ipc_host_radarbuf_idx] = hostbuf;
++
++ // Increment the array index
++ env->ipc_host_radarbuf_idx = (env->ipc_host_radarbuf_idx +1)%IPC_RADARBUF_CNT;
++
++ return (0);
++}
++
++/**
++ ******************************************************************************
++ */
++
++int ipc_host_unsup_rx_vec_buf_push(struct ipc_host_env_tag *env,
++ void *hostid,
++ uint32_t hostbuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ env->ipc_host_unsuprxvecbuf_array[env->ipc_host_unsuprxvecbuf_idx].hostid = hostid;
++ env->ipc_host_unsuprxvecbuf_array[env->ipc_host_unsuprxvecbuf_idx].dma_addr = hostbuf;
++
++ // Copy the hostbuf (DMA address) in the ipc shared memory
++ shared_env_ptr->unsuprxvecbuf_hostbuf[env->ipc_host_unsuprxvecbuf_idx] = hostbuf;
++
++ // Increment the array index
++ env->ipc_host_unsuprxvecbuf_idx = (env->ipc_host_unsuprxvecbuf_idx + 1)%IPC_UNSUPRXVECBUF_CNT;
++
++ return (0);
++}
++
++/**
++ ******************************************************************************
++ */
++int ipc_host_msgbuf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ // Save the hostid and the hostbuf in global array
++ env->ipc_host_msgbuf_array[env->ipc_host_msge2a_idx].hostid = hostid;
++ env->ipc_host_msgbuf_array[env->ipc_host_msge2a_idx].dma_addr = hostbuf;
++
++ // Copy the hostbuf (DMA address) in the ipc shared memory
++ shared_env_ptr->msg_e2a_hostbuf_addr[env->ipc_host_msge2a_idx] = hostbuf;
++
++ // Increment the array index
++ env->ipc_host_msge2a_idx = (env->ipc_host_msge2a_idx +1)%IPC_MSGE2A_BUF_CNT;
++
++ return (0);
++}
++
++/**
++ ******************************************************************************
++ */
++int ipc_host_dbgbuf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ // Save the hostid and the hostbuf in global array
++ env->ipc_host_dbgbuf_array[env->ipc_host_dbg_idx].hostid = hostid;
++ env->ipc_host_dbgbuf_array[env->ipc_host_dbg_idx].dma_addr = hostbuf;
++
++ // Copy the hostbuf (DMA address) in the ipc shared memory
++ shared_env_ptr->dbg_hostbuf_addr[env->ipc_host_dbg_idx] = hostbuf;
++
++ // Increment the array index
++ env->ipc_host_dbg_idx = (env->ipc_host_dbg_idx +1)%IPC_DBGBUF_CNT;
++
++ return (0);
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_dbginfobuf_push(struct ipc_host_env_tag *env, uint32_t infobuf)
++{
++ struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++
++ // Copy the hostbuf (DMA address) in the ipc shared memory
++ shared_env_ptr->la_dbginfo_addr = infobuf;
++}
++
++/**
++ ******************************************************************************
++ */
++volatile struct txdesc_host *ipc_host_txdesc_get(struct ipc_host_env_tag *env, const int queue_idx, const int user_pos)
++{
++ volatile struct txdesc_host *txdesc_free;
++ uint32_t used_idx = env->txdesc_used_idx[queue_idx][user_pos];
++ uint32_t free_idx = env->txdesc_free_idx[queue_idx][user_pos];
++
++ ASSERT_ERR(queue_idx < IPC_TXQUEUE_CNT);
++ ASSERT_ERR((free_idx - used_idx) <= nx_txdesc_cnt[queue_idx]);
++
++ // Check if a free descriptor is available
++ if (free_idx != (used_idx + nx_txdesc_cnt[queue_idx]))
++ {
++ // Get the pointer to the first free descriptor
++ txdesc_free = env->txdesc[queue_idx][user_pos] + (free_idx & nx_txdesc_cnt_msk[queue_idx]);
++ }
++ else
++ {
++ txdesc_free = NULL;
++ }
++
++ return txdesc_free;
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_txdesc_push(struct ipc_host_env_tag *env, const int queue_idx,
++ const int user_pos, void *host_id)
++{
++ uint32_t free_idx = env->txdesc_free_idx[queue_idx][user_pos] & nx_txdesc_cnt_msk[queue_idx];
++ volatile struct txdesc_host *txdesc_pushed = env->txdesc[queue_idx][user_pos] + free_idx;
++
++
++ // Descriptor is now ready
++ txdesc_pushed->ready = 0xFFFFFFFF;
++
++ // Save the host id in the environment
++ env->tx_host_id[queue_idx][user_pos][free_idx] = host_id;
++
++ // Increment the index
++ env->txdesc_free_idx[queue_idx][user_pos]++;
++
++ // trigger interrupt!!!
++ //REG_SW_SET_PROFILING(env->pthis, CO_BIT(queue_idx+SW_PROF_IRQ_A2E_TXDESC_FIRSTBIT));
++ ipc_app2emb_trigger_setf(env->shared, CO_BIT(user_pos + queue_idx * CONFIG_USER_MAX +
++ IPC_IRQ_A2E_TXDESC_FIRSTBIT));
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_irq(struct ipc_host_env_tag *env, uint32_t status)
++{
++ // Acknowledge the pending interrupts
++ ipc_emb2app_ack_clear(env->shared, status);
++ // And re-read the status, just to be sure that the acknowledgment is
++ // effective when we start the interrupt handling
++ ipc_emb2app_status_get(env->shared);
++
++ // Optimized for only one IRQ at a time
++ if (status & IPC_IRQ_E2A_RXDESC)
++ {
++ // handle the RX descriptor reception
++ ipc_host_rxdesc_handler(env);
++ }
++ if (status & IPC_IRQ_E2A_MSG_ACK)
++ {
++ ipc_host_msgack_handler(env);
++ }
++ if (status & IPC_IRQ_E2A_MSG)
++ {
++ ipc_host_msg_handler(env);
++ }
++ if (status & IPC_IRQ_E2A_TXCFM)
++ {
++ int i;
++
++#ifdef __KERNEL__
++ spin_lock_bh(&((struct rwnx_hw *)env->pthis)->tx_lock);
++#endif
++ // handle the TX confirmation reception
++ for (i = 0; i < IPC_TXQUEUE_CNT; i++)
++ {
++ int j = 0;
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ for (; j < nx_txuser_cnt[i]; j++)
++#endif
++ {
++ uint32_t q_bit = CO_BIT(j + i * CONFIG_USER_MAX + IPC_IRQ_E2A_TXCFM_POS);
++ if (status & q_bit)
++ {
++ // handle the confirmation
++ ipc_host_tx_cfm_handler(env, i, j);
++ }
++ }
++ }
++#ifdef __KERNEL__
++ spin_unlock_bh(&((struct rwnx_hw *)env->pthis)->tx_lock);
++#endif
++ }
++ if (status & IPC_IRQ_E2A_RADAR)
++ {
++ // handle the radar event reception
++ ipc_host_radar_handler(env);
++ }
++
++ if (status & IPC_IRQ_E2A_UNSUP_RX_VEC)
++ {
++ // handle the unsupported rx vector reception
++ ipc_host_unsup_rx_vec_handler(env);
++ }
++
++ if (status & IPC_IRQ_E2A_DBG)
++ {
++ ipc_host_dbg_handler(env);
++ }
++
++ if (status & IPC_IRQ_E2A_TBTT_PRIM)
++ {
++ env->cb.prim_tbtt_ind(env->pthis);
++ }
++
++ if (status & IPC_IRQ_E2A_TBTT_SEC)
++ {
++ env->cb.sec_tbtt_ind(env->pthis);
++ }
++}
++
++/**
++ ******************************************************************************
++ */
++int ipc_host_msg_push(struct ipc_host_env_tag *env, void *msg_buf, uint16_t len)
++{
++ int i;
++ uint32_t *src, *dst;
++
++ REG_SW_SET_PROFILING(env->pthis, SW_PROF_IPC_MSGPUSH);
++
++ ASSERT_ERR(!env->msga2e_hostid);
++ ASSERT_ERR(round_up(len, 4) <= sizeof(env->shared->msg_a2e_buf.msg));
++
++ // Copy the message into the IPC MSG buffer
++#ifdef __KERNEL__
++ src = (uint32_t*)((struct rwnx_cmd *)msg_buf)->a2e_msg;
++#else
++ src = (uint32_t*) msg_buf;
++#endif
++ dst = (uint32_t*)&(env->shared->msg_a2e_buf.msg);
++
++ // Copy the message in the IPC queue
++ for (i=0; i<len; i+=4)
++ {
++ *dst++ = *src++;
++ }
++
++ env->msga2e_hostid = msg_buf;
++
++ // Trigger the irq to send the message to EMB
++ ipc_app2emb_trigger_set(env->shared, IPC_IRQ_A2E_MSG);
++
++ REG_SW_CLEAR_PROFILING(env->pthis, SW_PROF_IPC_MSGPUSH);
++
++ return (0);
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_enable_irq(struct ipc_host_env_tag *env, uint32_t value)
++{
++ // Enable the handled interrupts
++ ipc_emb2app_unmask_set(env->shared, value);
++}
++
++/**
++ ******************************************************************************
++ */
++void ipc_host_disable_irq(struct ipc_host_env_tag *env, uint32_t value)
++{
++ // Disable the handled interrupts
++ ipc_emb2app_unmask_clear(env->shared, value);
++}
++
++/**
++ ******************************************************************************
++ */
++uint32_t ipc_host_get_status(struct ipc_host_env_tag *env)
++{
++ volatile uint32_t status;
++
++ status = ipc_emb2app_status_get(env->shared);
++
++ return status;
++}
++
++/**
++ ******************************************************************************
++ */
++uint32_t ipc_host_get_rawstatus(struct ipc_host_env_tag *env)
++{
++ volatile uint32_t rawstatus;
++
++ rawstatus = ipc_emb2app_rawstatus_get(env->shared);
++
++ return rawstatus;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/ipc_host.h
+@@ -0,0 +1,508 @@
++/**
++ ******************************************************************************
++ *
++ * @file ipc_host.h
++ *
++ * @brief IPC module.
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ******************************************************************************
++ */
++#ifndef _IPC_HOST_H_
++#define _IPC_HOST_H_
++
++/*
++ * INCLUDE FILES
++ ******************************************************************************
++ */
++#include "ipc_shared.h"
++#ifndef __KERNEL__
++#include "arch.h"
++#else
++#include "ipc_compat.h"
++#endif
++
++/*
++ * ENUMERATION
++ ******************************************************************************
++ */
++
++enum ipc_host_desc_status
++{
++ /// Descriptor is IDLE
++ IPC_HOST_DESC_IDLE = 0,
++ /// Data can be forwarded
++ IPC_HOST_DESC_FORWARD,
++ /// Data has to be kept in UMAC memory
++ IPC_HOST_DESC_KEEP,
++ /// Delete stored packet
++ IPC_HOST_DESC_DELETE,
++ /// Update Frame Length status
++ IPC_HOST_DESC_LEN_UPDATE,
++};
++
++/**
++ ******************************************************************************
++ * @brief This structure is used to initialize the MAC SW
++ *
++ * The WLAN device driver provides functions call-back with this structure
++ ******************************************************************************
++ */
++struct ipc_host_cb_tag
++{
++ /// WLAN driver call-back function: send_data_cfm
++ int (*send_data_cfm)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: recv_data_ind
++ uint8_t (*recv_data_ind)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: recv_radar_ind
++ uint8_t (*recv_radar_ind)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: recv_unsup_rx_vec_ind
++ uint8_t (*recv_unsup_rx_vec_ind)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: recv_msg_ind
++ uint8_t (*recv_msg_ind)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: recv_msgack_ind
++ uint8_t (*recv_msgack_ind)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: recv_dbg_ind
++ uint8_t (*recv_dbg_ind)(void *pthis, void *host_id);
++
++ /// WLAN driver call-back function: prim_tbtt_ind
++ void (*prim_tbtt_ind)(void *pthis);
++
++ /// WLAN driver call-back function: sec_tbtt_ind
++ void (*sec_tbtt_ind)(void *pthis);
++
++};
++
++/*
++ * Struct used to store information about host buffers (DMA Address and local pointer)
++ */
++struct ipc_hostbuf
++{
++ void *hostid; ///< ptr to hostbuf client (ipc_host client) structure
++ uint32_t dma_addr; ///< ptr to real hostbuf dma address
++};
++
++/// Definition of the IPC Host environment structure.
++struct ipc_host_env_tag
++{
++ /// Structure containing the callback pointers
++ struct ipc_host_cb_tag cb;
++
++ /// Pointer to the shared environment
++ struct ipc_shared_env_tag *shared;
++
++ #ifdef CONFIG_RWNX_FULLMAC
++ // Array used to store the descriptor addresses
++ struct ipc_hostbuf ipc_host_rxdesc_array[IPC_RXDESC_CNT];
++ // Index of the host RX descriptor array (ipc_shared environment)
++ uint8_t ipc_host_rxdesc_idx;
++ /// Store the number of RX Descriptors
++ uint8_t rxdesc_nb;
++ #endif //(CONFIG_RWNX_FULLMAC)
++
++ /// Fields for Data Rx handling
++ // Index used for ipc_host_rxbuf_array to point to current buffer
++ uint8_t ipc_host_rxbuf_idx;
++ // Store the number of Rx Data buffers
++ uint32_t rx_bufnb;
++ // Store the size of the Rx Data buffers
++ uint32_t rx_bufsz;
++
++ /// Fields for Radar events handling
++ // Global array used to store the hostid and hostbuf addresses
++ struct ipc_hostbuf ipc_host_radarbuf_array[IPC_RADARBUF_CNT];
++ // Index used for ipc_host_rxbuf_array to point to current buffer
++ uint8_t ipc_host_radarbuf_idx;
++ // Store the number of radar event buffers
++ uint32_t radar_bufnb;
++ // Store the size of the radar event buffers
++ uint32_t radar_bufsz;
++
++ ///Fields for Unsupported frame handling
++ // Global array used to store the hostid and hostbuf addresses
++ struct ipc_hostbuf ipc_host_unsuprxvecbuf_array[IPC_UNSUPRXVECBUF_CNT];
++ // Index used for ipc_host_unsuprxvecbuf_array to point to current buffer
++ uint8_t ipc_host_unsuprxvecbuf_idx;
++ // Store the number of unsupported rx vector buffers
++ uint32_t unsuprxvec_bufnb;
++ // Store the size of unsupported rx vector buffers
++ uint32_t unsuprxvec_bufsz;
++
++ // Index used that points to the first free TX desc
++ uint32_t txdesc_free_idx[IPC_TXQUEUE_CNT][CONFIG_USER_MAX];
++ // Index used that points to the first used TX desc
++ uint32_t txdesc_used_idx[IPC_TXQUEUE_CNT][CONFIG_USER_MAX];
++ // Array storing the currently pushed host ids for the BK queue
++ void *tx_host_id0[CONFIG_USER_MAX][NX_TXDESC_CNT0];
++ // Array storing the currently pushed host ids for the BE queue
++ void *tx_host_id1[CONFIG_USER_MAX][NX_TXDESC_CNT1];
++ // Array storing the currently pushed host ids for the VI queue
++ void *tx_host_id2[CONFIG_USER_MAX][NX_TXDESC_CNT2];
++ // Array storing the currently pushed host ids for the VO queue
++ void *tx_host_id3[CONFIG_USER_MAX][NX_TXDESC_CNT3];
++ #if NX_TXQ_CNT == 5
++ // Array storing the currently pushed host ids for the BCN queue
++ void *tx_host_id4[1][NX_TXDESC_CNT4];
++ #endif
++ // Pointer to the different host ids arrays, per IPC queue
++ void **tx_host_id[IPC_TXQUEUE_CNT][CONFIG_USER_MAX];
++ // Pointer to the different TX descriptor arrays, per IPC queue
++ volatile struct txdesc_host *txdesc[IPC_TXQUEUE_CNT][CONFIG_USER_MAX];
++
++ /// Fields for Emb->App MSGs handling
++ // Global array used to store the hostid and hostbuf addresses for msg/ind
++ struct ipc_hostbuf ipc_host_msgbuf_array[IPC_MSGE2A_BUF_CNT];
++ // Index of the MSG E2A buffers array to point to current buffer
++ uint8_t ipc_host_msge2a_idx;
++ // Store the number of E2A MSG buffers
++ uint32_t ipc_e2amsg_bufnb;
++ // Store the size of the E2A MSG buffers
++ uint32_t ipc_e2amsg_bufsz;
++
++ /// E2A ACKs of A2E MSGs
++ uint8_t msga2e_cnt;
++ void *msga2e_hostid;
++
++ /// Fields for Debug MSGs handling
++ // Global array used to store the hostid and hostbuf addresses for Debug messages
++ struct ipc_hostbuf ipc_host_dbgbuf_array[IPC_DBGBUF_CNT];
++ // Index of the Debug messages buffers array to point to current buffer
++ uint8_t ipc_host_dbg_idx;
++ // Store the number of Debug messages buffers
++ uint32_t ipc_dbg_bufnb;
++ // Store the size of the Debug messages buffers
++ uint32_t ipc_dbg_bufsz;
++
++ /// Pointer to the attached object (used in callbacks and register accesses)
++ void *pthis;
++};
++
++extern const int nx_txdesc_cnt[];
++extern const int nx_txuser_cnt[];
++
++/**
++ ******************************************************************************
++ * @brief Return whether the queue whose index is passed as parameter is full.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] queue_idx Index of the queue to be checked
++ *
++ * @return true if the queue is full, false otherwise
++ *
++ ******************************************************************************
++ */
++__INLINE bool ipc_host_queue_full(struct ipc_host_env_tag *env,
++ const int queue_idx)
++{
++ return (env->txdesc_free_idx[queue_idx] ==
++ (env->txdesc_used_idx[queue_idx] + nx_txdesc_cnt[queue_idx]));
++}
++
++/**
++ ******************************************************************************
++ * @brief Initialize the IPC running on the Application CPU.
++ *
++ * This function:
++ * - initializes the IPC software environments
++ * - enables the interrupts in the IPC block
++ *
++ * @param[in] env Pointer to the IPC host environment
++ *
++ * @warning Since this function resets the IPC Shared memory, it must be called
++ * before the LMAC FW is launched because LMAC sets some init values in IPC
++ * Shared memory at boot.
++ *
++ ******************************************************************************
++ */
++void ipc_host_init(struct ipc_host_env_tag *env,
++ struct ipc_host_cb_tag *cb,
++ struct ipc_shared_env_tag *shared_env_ptr,
++ void *pthis);
++
++/** @addtogroup IPC_TX
++ * @{
++ */
++
++/**
++ ******************************************************************************
++ * @brief Retrieve a new free Tx descriptor (host side).
++ *
++ * This function returns a pointer to the next Tx descriptor available from
++ * queue queue_idx to the host driver. The driver has to fill it with the
++ * appropriate endianness and then send it to the embedded side with
++ * ipc_host_txdesc_push().
++ *
++ * This function should only be called once until ipc_host_txdesc_push() is called.
++ *
++ * This function will return NULL if the queue is full.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] queue_idx Queue index. The index can be inferred from the
++ * user priority of the incoming packet.
++ * @param[in] user_pos User position. If MU-MIMO is not used, this value
++ * shall be 0.
++ * @return Pointer to the next Tx descriptor free. This can
++ * point to the host memory or to shared memory,
++ * depending on IPC implementation.
++ *
++ ******************************************************************************
++ */
++volatile struct txdesc_host *ipc_host_txdesc_get(struct ipc_host_env_tag *env,
++ const int queue_idx,
++ const int user_pos);
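++
++/*
++ * Typical host-side TX flow (illustrative sketch only; the txdesc_host fields
++ * themselves are not shown here):
++ *
++ *   volatile struct txdesc_host *txdesc =
++ *           ipc_host_txdesc_get(env, queue_idx, user_pos);
++ *   if (txdesc) {
++ *           // fill *txdesc ...
++ *           ipc_host_txdesc_push(env, queue_idx, user_pos, host_id);
++ *   }
++ */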
++
++
++/**
++ ******************************************************************************
++ * @brief Push a filled Tx descriptor (host side).
++ *
++ * This function sets the next Tx descriptor available by the host side:
++ * - as used for the host side
++ * - as available for the emb side.
++ * The Tx descriptor must be correctly filled before calling this function.
++ *
++ * This function may trigger an IRQ to the emb CPU depending on the interrupt
++ * mitigation policy and on the push count.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] queue_idx Queue index. Same value as passed to ipc_host_txdesc_get()
++ * @param[in] user_pos User position. If MU-MIMO is not used, this value
++ * shall be 0.
++ * @param[in] host_id Parameter passed back by the IPC at TX confirmation,
++ * allowing the driver to retrieve the corresponding buffer
++ *
++ ******************************************************************************
++ */
++void ipc_host_txdesc_push(struct ipc_host_env_tag *env, const int queue_idx,
++ const int user_pos, void *host_id);
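++
++/*
++ * Illustrative (non-normative) Tx push sequence, assuming a hypothetical
++ * caller that has already resolved queue_idx and host_id:
++ *
++ *   volatile struct txdesc_host *txdesc;
++ *
++ *   txdesc = ipc_host_txdesc_get(env, queue_idx, 0);
++ *   if (!txdesc)
++ *       return -EBUSY;  // queue full, retry later
++ *   // fill txdesc->api.host (packet length, addresses, vif/sta ids, flags)
++ *   ipc_host_txdesc_push(env, queue_idx, 0, host_id);
++ */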
++
++
++/**
++ ******************************************************************************
++ * @brief Check if there are TX frames pending in the TX queues.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ *
++ * @return true if there are frames pending, false otherwise.
++ *
++ ******************************************************************************
++ */
++bool ipc_host_tx_frames_pending(struct ipc_host_env_tag *env);
++
++/**
++ ******************************************************************************
++ * @brief Get and flush a packet from the IPC queue passed as parameter.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] queue_idx Index of the queue to flush
++ * @param[in] user_pos User position to flush
++ *
++ * @return The flushed hostid if there is one, NULL otherwise.
++ *
++ ******************************************************************************
++ */
++void *ipc_host_tx_flush(struct ipc_host_env_tag *env, const int queue_idx,
++ const int user_pos);
++
++/// @} IPC_TX
++
++/** @addtogroup IPC_RX
++ * @{
++ */
++void ipc_host_patt_addr_push(struct ipc_host_env_tag *env, uint32_t addr);
++
++/**
++ ******************************************************************************
++ * @brief Push a pre-allocated buffer descriptor for Rx packet (host side)
++ *
++ * This function should be called by the host IRQ handler to supply the
++ * embedded side with a new empty buffer.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] hostid Packet ID used by the host (skbuff pointer on Linux)
++ * @param[in] hostbuf Pointer to the start of the buffer payload in the
++ * host memory (this may be inferred from the skbuff?)
++ * The length of this buffer should be predefined
++ * between host and emb statically (constant needed?).
++ *
++ ******************************************************************************
++ */
++int ipc_host_rxbuf_push(struct ipc_host_env_tag *env,
++#ifdef CONFIG_RWNX_FULLMAC
++ uint32_t hostid,
++#endif
++ uint32_t hostbuf);
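++
++/*
++ * Illustrative refill sketch (FULLMAC build assumed): once a received buffer
++ * has been handed over to the network stack, the driver maps a fresh buffer
++ * for DMA and pushes it back to the firmware:
++ *
++ *   // hostid: 32-bit packet identifier, hostbuf: DMA address of the payload
++ *   if (ipc_host_rxbuf_push(env, hostid, hostbuf))
++ *       ;  // no free slot in the shared array, keep the buffer for a retry
++ */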
++
++/**
++ ******************************************************************************
++ * @brief Push a pre-allocated Rx descriptor (host side)
++ *
++ * This function should be called by the host IRQ handler to supply the
++ * embedded side with a new empty buffer.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] hostid Address of packet for host
++ * @param[in] hostbuf Pointer to the start of the buffer payload in the
++ * host memory. The length of this buffer should be
++ * predefined between host and emb statically.
++ *
++ ******************************************************************************
++ */
++int ipc_host_rxdesc_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf);
++
++/**
++ ******************************************************************************
++ * @brief Push a pre-allocated radar event buffer descriptor
++ *
++ * This function is called at Init time to initialize all radar event buffers.
++ * Then each time the embedded side sends a radar event, this function is used to push
++ * back the same buffer once it has been handled.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] hostid Address of packet for host
++ * @param[in] hostbuf Pointer to the start of the buffer payload in the
++ * host memory. The length of this buffer should be
++ * predefined between host and emb statically.
++ *
++ ******************************************************************************
++ */
++int ipc_host_radarbuf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf);
++
++/**
++ ******************************************************************************
++ * @brief Push a pre-allocated unsupported rx vector buffer descriptor
++ *
++ * This function is called at Init time to initialize all unsupported rx vector
++ * buffers. Then each time the embedded side sends an unsupported rx vector, this
++ * function is used to push a new unsupported rx vector buffer.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] hostid Address of packet for host
++ * @param[in] hostbuf Pointer to the start of the buffer payload in the
++ * host memory. The length of this buffer should be
++ * predefined between host and emb statically.
++ *
++ ******************************************************************************
++ */
++int ipc_host_unsup_rx_vec_buf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf);
++
++/**
++ ******************************************************************************
++ * @brief Push a pre-allocated buffer descriptor for IPC MSGs (host side)
++ *
++ * This function is called at Init time to initialize all Emb2App message
++ * buffers. Then each time the embedded side sends an IPC message, this
++ * function is used to push back the same buffer once it has been handled.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] hostid Address of buffer for host
++ * @param[in] hostbuf Address of buffer for embedded
++ * The length of this buffer should be predefined
++ * between host and emb statically.
++ *
++ ******************************************************************************
++ */
++int ipc_host_msgbuf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf);
++
++/**
++ ******************************************************************************
++ * @brief Push a pre-allocated buffer descriptor for Debug messages (host side)
++ *
++ * This function is called at Init time to initialize all debug message
++ * buffers. Then each time the embedded side sends a debug message, this
++ * function is used to push back the same buffer once it has been handled.
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] hostid Address of buffer for host
++ * @param[in] hostbuf Address of buffer for embedded
++ * The length of this buffer should be predefined
++ * between host and emb statically.
++ *
++ ******************************************************************************
++ */
++int ipc_host_dbgbuf_push(struct ipc_host_env_tag *env, void *hostid,
++ uint32_t hostbuf);
++
++/**
++ ******************************************************************************
++ * @brief Push the pre-allocated logic analyzer and debug information buffer
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] infobuf Address of buffer for embedded
++ * The length of this buffer should be predefined
++ * between host and emb statically.
++ *
++ ******************************************************************************
++ */
++void ipc_host_dbginfobuf_push(struct ipc_host_env_tag *env, uint32_t infobuf);
++
++/// @} IPC_RX
++
++
++
++/** @addtogroup IPC_MISC
++ * @{
++ */
++
++/**
++ ******************************************************************************
++ * @brief Handle all IPC interrupts on the host side.
++ *
++ * The following interrupts should be handled:
++ * Tx confirmation, Rx buffer requests, Rx packet ready and kernel messages
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] status Bitfield of the pending interrupt sources to handle
++ *
++ ******************************************************************************
++ */
++void ipc_host_irq(struct ipc_host_env_tag *env, uint32_t status);
++
++/**
++ ******************************************************************************
++ * @brief Send a message to the embedded side
++ *
++ * @param[in] env Pointer to the IPC host environment
++ * @param[in] msg_buf Pointer to the message buffer
++ * @param[in] len Length of the message to be transmitted
++ *
++ * @return Non-zero value on failure
++ *
++ ******************************************************************************
++ */
++int ipc_host_msg_push(struct ipc_host_env_tag *env, void *msg_buf, uint16_t len);
++
++/**
++ ******************************************************************************
++ * @brief Enable IPC interrupts
++ *
++ * @param[in] env Global ipc_host environment pointer
++ * @param[in] value Bitfield of the interrupts to enable
++ *
++ * @warning After calling this function, IPC interrupts can be triggered at any
++ * time. Potentially, an interrupt could happen even before returning from the
++ * function if there is a request pending from the embedded side.
++ *
++ ******************************************************************************
++ */
++void ipc_host_enable_irq(struct ipc_host_env_tag *env, uint32_t value);
++void ipc_host_disable_irq(struct ipc_host_env_tag *env, uint32_t value);
++
++uint32_t ipc_host_get_status(struct ipc_host_env_tag *env);
++uint32_t ipc_host_get_rawstatus(struct ipc_host_env_tag *env);
++
++/// @} IPC_MISC
++
++
++#endif // _IPC_HOST_H_
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/ipc_shared.h
+@@ -0,0 +1,802 @@
++/**
++ ****************************************************************************************
++ *
++ * @file ipc_shared.h
++ *
++ * @brief Shared data between both IPC modules.
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _IPC_SHARED_H_
++#define _IPC_SHARED_H_
++
++/*
++ * INCLUDE FILES
++ ****************************************************************************************
++ */
++#include "ipc_compat.h"
++#include "lmac_mac.h"
++
++/*
++ * DEFINES AND MACROS
++ ****************************************************************************************
++ */
++#define CO_BIT(pos) (1U<<(pos))
++
++#define IPC_TXQUEUE_CNT NX_TXQ_CNT
++#define NX_TXDESC_CNT0 8
++#define NX_TXDESC_CNT1 64
++#define NX_TXDESC_CNT2 64
++#define NX_TXDESC_CNT3 32
++#if NX_TXQ_CNT == 5
++#define NX_TXDESC_CNT4 8
++#endif
++
++/*
++ * Number of Host buffers available for Data Rx handling (through DMA)
++ */
++#define IPC_RXBUF_CNT 128
++
++/*
++ * Number of shared descriptors available for Data RX handling
++ */
++#define IPC_RXDESC_CNT 128
++
++/*
++ * Number of Host buffers available for Radar events handling (through DMA)
++ */
++#define IPC_RADARBUF_CNT 16
++
++/*
++ * Number of Host buffers available for unsupported Rx vectors handling (through DMA)
++ */
++#define IPC_UNSUPRXVECBUF_CNT 8
++
++/*
++ * Size of RxVector
++ */
++#define IPC_RXVEC_SIZE 16
++
++/*
++ * Number of Host buffers available for Emb->App MSGs sending (through DMA)
++ */
++#ifdef CONFIG_RWNX_FULLMAC
++#define IPC_MSGE2A_BUF_CNT 64
++#endif
++/*
++ * Number of Host buffers available for Debug Messages sending (through DMA)
++ */
++#define IPC_DBGBUF_CNT 32
++
++/*
++ * Length used in MSGs structures
++ */
++#define IPC_A2E_MSG_BUF_SIZE 127 // size in 4-byte words
++#ifdef CONFIG_RWNX_FULLMAC
++#define IPC_E2A_MSG_SIZE_BASE 256 // size in 4-byte words
++#endif
++
++#ifdef CONFIG_RWNX_TL4
++#define IPC_E2A_MSG_PARAM_SIZE (IPC_E2A_MSG_SIZE_BASE + (IPC_E2A_MSG_SIZE_BASE / 2))
++#else
++#define IPC_E2A_MSG_PARAM_SIZE IPC_E2A_MSG_SIZE_BASE
++#endif
++
++/*
++ * Debug messages buffers size (in bytes)
++ */
++#define IPC_DBG_PARAM_SIZE 256
++
++/*
++ * Define used for Rx hostbuf validity.
++ * This value should appear only when hostbuf was used for a Reception.
++ */
++#define RX_DMA_OVER_PATTERN 0xAAAAAA00
++
++/*
++ * Define used for MSG buffers validity.
++ * This value will be written only when a MSG buffer is used for sending from Emb to App.
++ */
++#define IPC_MSGE2A_VALID_PATTERN 0xADDEDE2A
++
++/*
++ * Define used for Debug messages buffers validity.
++ * This value will be written only when a DBG buffer is used for sending from Emb to App.
++ */
++#define IPC_DBG_VALID_PATTERN 0x000CACA0
++
++/*
++ * Length of the receive vectors, in bytes
++ */
++#define DMA_HDR_PHYVECT_LEN 36
++
++/*
++ * Maximum number of payload addresses and lengths present in the descriptor
++ */
++#define NX_TX_PAYLOAD_MAX 6
++
++/*
++ * Message struct/ID API version
++ */
++#define MSG_API_VER 15
++
++/*
++ ****************************************************************************************
++ */
++// c.f LMAC/src/tx/tx_swdesc.h
++/// Descriptor filled by the Host
++struct hostdesc
++{
++ /// Pointer to packet payload
++ //u32_l packet_addr;
++ /// Size of the payload
++ u16_l packet_len;
++ u16_l flags_ext;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ /// Address of the status descriptor in host memory (used for confirmation upload)
++ u32_l status_desc_addr;
++ /// Destination Address
++ struct mac_addr eth_dest_addr;
++ /// Source Address
++ struct mac_addr eth_src_addr;
++ /// Ethernet Type
++ u16_l ethertype;
++#else /* ! CONFIG_RWNX_FULLMAC */
++#ifdef CONFIG_RWNX_AGG_TX
++ /// Sequence Number for AMPDU MPDUs - for a quick check whether it is allowed within the window
++ u16_l sn;
++#endif /* CONFIG_RWNX_AGG_TX */
++ /// Padding between the buffer control structure and the MPDU in host memory
++ u8_l padding;
++#endif /* CONFIG_RWNX_FULLMAC */
++ u8_l ac;
++ /// Packet TID (0xFF if not a QoS frame)
++ u8_l tid;
++ /// Interface Id
++ u8_l vif_idx;
++ /// Station Id (0xFF if station is unknown)
++ u8_l staid;
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ /// MU-MIMO information (GroupId and User Position in the group) - The GroupId
++ /// is located on bits 0-5 and the User Position on bits 6-7. The GroupId value is set
++ /// to 63 if MU-MIMO shall not be used
++ u8_l mumimo_info;
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++#ifdef CONFIG_RWNX_FULLMAC
++ /// TX flags
++ u16_l flags;
++#endif /* CONFIG_RWNX_FULLMAC */
++};
++
++/// Descriptor filled by the UMAC
++struct umacdesc
++{
++#ifdef CONFIG_RWNX_AGG_TX
++ /// First Sequence Number of the BlockAck window
++ u16_l sn_win;
++ /// Flags from UMAC (match tx_hd.macctrlinfo2 format)
++ u32_l flags;
++ /// PHY related flags field - rate, GI type, BW type - filled by driver
++ u32_l phy_flags;
++#endif //(CONFIG_RWNX_AGG_TX)
++};
++
++struct txdesc_api
++{
++ /// Information provided by Host
++ struct hostdesc host;
++};
++
++
++struct txdesc_host
++{
++ u32_l ready;
++
++ /// API of the embedded part
++ struct txdesc_api api;
++};
++
++/// Comes from ipc_dma.h
++/// Element in the pool of TX DMA bridge descriptors.
++struct dma_desc
++{
++ /** Application subsystem address which is used as source address for DMA payload
++ * transfer*/
++ u32_l src;
++ /** Points to the start of the embedded data buffer associated with this descriptor.
++ * This address acts as the destination address for the DMA payload transfer*/
++ u32_l dest;
++ /// Complete length of the buffer in memory
++ u16_l length;
++ /// Control word for the DMA engine (e.g. for interrupt generation)
++ u16_l ctrl;
++ /// Pointer to the next element of the chained list
++ u32_l next;
++};
++
++// Comes from la.h
++/// Length of the configuration data of a logic analyzer
++#define LA_CONF_LEN 10
++
++/// Structure containing the configuration data of a logic analyzer
++struct la_conf_tag
++{
++ u32_l conf[LA_CONF_LEN];
++ u32_l trace_len;
++ u32_l diag_conf;
++};
++
++/// Size of a logic analyzer memory
++#define LA_MEM_LEN (1024 * 1024)
++
++/// Type of errors
++enum
++{
++ /// Recoverable error, not requiring any action from Upper MAC
++ DBG_ERROR_RECOVERABLE = 0,
++ /// Fatal error, requiring Upper MAC to reset Lower MAC and HW and restart operation
++ DBG_ERROR_FATAL
++};
++
++/// Maximum length of the SW diag trace
++#define DBG_SW_DIAG_MAX_LEN 1024
++
++/// Maximum length of the error trace
++#define DBG_ERROR_TRACE_SIZE 256
++
++/// Number of MAC diagnostic port banks
++#define DBG_DIAGS_MAC_MAX 48
++
++/// Number of PHY diagnostic port banks
++#define DBG_DIAGS_PHY_MAX 32
++
++/// Maximum size of the RX header descriptor information in the debug dump
++#define DBG_RHD_MEM_LEN (5 * 1024)
++
++/// Maximum size of the RX buffer descriptor information in the debug dump
++#define DBG_RBD_MEM_LEN (5 * 1024)
++
++/// Maximum size of the TX header descriptor information in the debug dump
++#define DBG_THD_MEM_LEN (10 * 1024)
++
++/// Structure containing the information about the PHY channel that is used
++struct phy_channel_info
++{
++ /// PHY channel information 1
++ u32_l info1;
++ /// PHY channel information 2
++ u32_l info2;
++};
++
++/// Debug information forwarded to host when an error occurs
++struct dbg_debug_info_tag
++{
++ /// Type of error (0: recoverable, 1: fatal)
++ u32_l error_type;
++ /// Pointer to the first RX Header Descriptor chained to the MAC HW
++ u32_l rhd;
++ /// Size of the RX header descriptor buffer
++ u32_l rhd_len;
++ /// Pointer to the first RX Buffer Descriptor chained to the MAC HW
++ u32_l rbd;
++ /// Size of the RX buffer descriptor buffer
++ u32_l rbd_len;
++ /// Pointer to the first TX Header Descriptors chained to the MAC HW
++ u32_l thd[NX_TXQ_CNT];
++ /// Size of the TX header descriptor buffer
++ u32_l thd_len[NX_TXQ_CNT];
++ /// MAC HW diag configuration
++ u32_l hw_diag;
++ /// Error message
++ u32_l error[DBG_ERROR_TRACE_SIZE/4];
++ /// SW diag configuration length
++ u32_l sw_diag_len;
++ /// SW diag configuration
++ u32_l sw_diag[DBG_SW_DIAG_MAX_LEN/4];
++ /// PHY channel information
++ struct phy_channel_info chan_info;
++ /// Embedded LA configuration
++ struct la_conf_tag la_conf;
++ /// MAC diagnostic port state
++ u16_l diags_mac[DBG_DIAGS_MAC_MAX];
++ /// PHY diagnostic port state
++ u16_l diags_phy[DBG_DIAGS_PHY_MAX];
++ /// MAC HW RX Header descriptor pointer
++ u32_l rhd_hw_ptr;
++ /// MAC HW RX Buffer descriptor pointer
++ u32_l rbd_hw_ptr;
++};
++
++/// Full debug dump that is forwarded to host in case of error
++struct dbg_debug_dump_tag
++{
++ /// Debug information
++ struct dbg_debug_info_tag dbg_info;
++
++ /// RX header descriptor memory
++ u32_l rhd_mem[DBG_RHD_MEM_LEN/4];
++
++ /// RX buffer descriptor memory
++ u32_l rbd_mem[DBG_RBD_MEM_LEN/4];
++
++ /// TX header descriptor memory
++ u32_l thd_mem[NX_TXQ_CNT][DBG_THD_MEM_LEN/4];
++
++ /// Logic analyzer memory
++ u32_l la_mem[LA_MEM_LEN/4];
++};
++
++
++/// Number of pulses in a radar event structure
++#define RADAR_PULSE_MAX 4
++
++/// Definition of an array of radar pulses
++struct radar_pulse_array_desc
++{
++ /// Buffer containing the radar pulses
++ u32_l pulse[RADAR_PULSE_MAX];
++ /// Index of the radar detection chain that detected those pulses
++ u32_l idx;
++ /// Number of valid pulses in the buffer
++ u32_l cnt;
++};
++
++/// Bit mapping inside a radar pulse element
++struct radar_pulse {
++ s32_l freq:6; /** Freq (resolution is 2 MHz, range is [-Fadc/4 .. Fadc/4]) */
++ u32_l fom:4; /** Figure of Merit */
++ u32_l len:6; /** Length of the current radar pulse (resolution is 2us) */
++ u32_l rep:16; /** Time interval between the previous radar event
++ and the current one (in us) */
++};
++
++/// Definition of a RX vector descriptor
++struct rx_vector_desc
++{
++ /// PHY channel information
++ struct phy_channel_info phy_info;
++
++ /// RX vector 1
++ u32_l rx_vect1[IPC_RXVEC_SIZE/4];
++
++ /// Used to print a valid rx vector
++ u32_l pattern;
++};
++
++///
++struct rxdesc_tag
++{
++ /// Host Buffer Address
++ u32_l host_id;
++ /// Length
++ u32_l frame_len;
++ /// Status
++ u16_l status;
++};
++
++/**
++ ****************************************************************************************
++ * @defgroup IPC IPC
++ * @ingroup NXMAC
++ * @brief Inter Processor Communication module.
++ *
++ * The IPC module implements the protocol to communicate between the Host CPU
++ * and the Embedded CPU.
++ *
++ * @see http://en.wikipedia.org/wiki/Circular_buffer
++ * For more information about typical ring buffer usage and pitfalls.
++ ****************************************************************************************
++ */
++
++
++/**
++ ****************************************************************************************
++ * @addtogroup IPC_TX IPC Tx path
++ * @ingroup IPC
++ * @brief IPC Tx path structures and functions
++ *
++ * A typical use case of the IPC Tx path API:
++ * @msc
++ * hscale = "2";
++ *
++ * a [label=Driver],
++ * b [label="IPC host"],
++ * c [label="IPC emb"],
++ * d [label=Firmware];
++ *
++ * --- [label="Tx descriptor queue example"];
++ * a=>a [label="Driver receives a Tx packet from OS"];
++ * a=>b [label="ipc_host_txdesc_get()"];
++ * a<<b [label="struct txdesc_host *"];
++ * a=>a [label="Driver fill the descriptor"];
++ * a=>b [label="ipc_host_txdesc_push()"];
++ * ... [label="(several Tx desc can be pushed)"];
++ * b:>c [label="Tx desc queue filled IRQ"];
++ * c=>>d [label="EDCA sub-scheduler callback"];
++ * c<<d [label="Tx desc queue to pop"];
++ * c=>>d [label="UMAC Tx desc callback"];
++ * ... [label="(several Tx desc can be popped)"];
++ * d=>d [label="Packets are sent or discarded"];
++ * --- [label="Tx confirm queue example"];
++ * c<=d [label="ipc_emb_txcfm_push()"];
++ * c>>d [label="Request accepted"];
++ * ... [label="(several Tx cfm can be pushed)"];
++ * b<:c [label="Tx cfm queue filled IRQ"];
++ * a<<=b [label="Driver's Tx Confirm callback"];
++ * a=>b [label="ipc_host_txcfm_pop()"];
++ * a<<b [label="struct ipc_txcfm"];
++ * a<=a [label="Packets are freed by the driver"];
++ * @endmsc
++ *
++ * @{
++ ****************************************************************************************
++ */
++
++/// @} IPC_TX
++
++/**
++ ****************************************************************************************
++ * @defgroup IPC_RX IPC Rx path
++ * @ingroup IPC
++ * @brief IPC Rx path functions and structures
++ *
++ * A typical use case of the IPC Rx path API:
++ * @msc
++ * hscale = "2";
++ *
++ * a [label=Firmware],
++ * b [label="IPC emb"],
++ * c [label="IPC host"],
++ * d [label=Driver];
++ *
++ * --- [label="Rx buffer and desc queues usage example"];
++ * d=>c [label="ipc_host_rxbuf_push()"];
++ * d=>c [label="ipc_host_rxbuf_push()"];
++ * d=>c [label="ipc_host_rxbuf_push()"];
++ * ... [label="(several Rx buffer are pushed)"];
++ * a=>a [label=" Frame is received\n from the medium"];
++ * a<<b [label="struct ipc_rxbuf"];
++ * a=>a [label=" Firmware fill the buffer\n with received frame"];
++ * a<<b [label="Push accepted"];
++ * ... [label="(several Rx desc can be pushed)"];
++ * b:>c [label="Rx desc queue filled IRQ"];
++ * c=>>d [label="Driver Rx packet callback"];
++ * c<=d [label="ipc_host_rxdesc_pop()"];
++ * d=>d [label="Rx packet is handed \nover to the OS "];
++ * ... [label="(several Rx desc can be poped)"];
++ * --- [label="Rx buffer request exemple"];
++ * b:>c [label="Low Rx buffer count IRQ"];
++ * a<<b [label="struct ipc_rxbuf"];
++ * c=>>d [label="Driver Rx buffer callback"];
++ * d=>c [label="ipc_host_rxbuf_push()"];
++ * d=>c [label="ipc_host_rxbuf_push()"];
++ * d=>c [label="ipc_host_rxbuf_push()"];
++ * ... [label="(several Rx buffer are pushed)"];
++ * @endmsc
++ *
++ * @addtogroup IPC_RX
++ * @{
++ ****************************************************************************************
++ */
++
++/// @} IPC_RX
++
++
++
++/**
++ ****************************************************************************************
++ * @defgroup IPC_MISC IPC Misc
++ * @ingroup IPC
++ * @brief IPC miscellaneous functions
++ ****************************************************************************************
++ */
++/** IPC header structure. This structure is stored at the beginning of every IPC message.
++ * @warning This structure's size must NOT exceed 4 bytes.
++ */
++struct ipc_header
++{
++ /// IPC message type.
++ u16_l type;
++ /// IPC message size in number of bytes.
++ u16_l size;
++};
++
++struct ipc_msg_elt
++{
++ /// Message header (alignment forced on word size, see allocation in shared env).
++ struct ipc_header header __ALIGN4;
++};
++
++/// Message structure for MSGs from Emb to App
++struct ipc_e2a_msg
++{
++ u16_l id; ///< Message id.
++ u16_l dummy_dest_id;
++ u16_l dummy_src_id;
++ u16_l param_len; ///< Parameter embedded struct length.
++ u32_l pattern; ///< Used to stamp a valid MSG buffer
++ u32_l param[IPC_E2A_MSG_PARAM_SIZE]; ///< Parameter embedded struct. Must be word-aligned.
++};
++
++/// Message structure for Debug messages from Emb to App
++struct ipc_dbg_msg
++{
++ u32_l string[IPC_DBG_PARAM_SIZE/4]; ///< Debug string
++ u32_l pattern; ///< Used to stamp a valid buffer
++};
++
++/// Message structure for MSGs from App to Emb.
++/// Actually a sub-structure will be used when filling the messages.
++struct ipc_a2e_msg
++{
++ u32_l dummy_word; // used to cope with kernel message structure
++ u32_l msg[IPC_A2E_MSG_BUF_SIZE]; // body of the msg
++};
++
++struct ipc_shared_rx_buf
++{
++ /// Pointer to the hostbuf client (ipc_host client) structure
++ u32_l hostid;
++ /// Pointer to the real hostbuf DMA address
++ u32_l dma_addr;
++};
++
++struct ipc_shared_rx_desc
++{
++ /// DMA Address
++ u32_l dma_addr;
++};
++
++/// Structure containing FW characteristics for compatibility checking
++struct compatibility_tag {
++ /// Size of IPC shared memory
++ u16_l ipc_shared_size;
++ /// Message struct/ID API version
++ u16_l msg_api;
++ /// Version of IPC shared
++ u8_l ipc_shared_version;
++ /// Number of host buffers available for Emb->App MSGs sending
++ u8_l msge2a_buf_cnt;
++ /// Number of host buffers available for Debug Messages sending
++ u8_l dbgbuf_cnt;
++ /// Number of host buffers available for Radar events handling
++ u8_l radarbuf_cnt;
++ /// Number of host buffers available for unsupported Rx vectors handling
++ u8_l unsuprxvecbuf_cnt;
++ /// Number of shared descriptors available for Data RX handling
++ u8_l rxdesc_cnt;
++ /// Number of host buffers available for Data Rx handling
++ u8_l rxbuf_cnt;
++ /// Number of descriptors in BK TX queue (power of 2, min 4, max 64)
++ u8_l bk_txq;
++ /// Number of descriptors in BE TX queue (power of 2, min 4, max 64)
++ u8_l be_txq;
++ /// Number of descriptors in VI TX queue (power of 2, min 4, max 64)
++ u8_l vi_txq;
++ /// Number of descriptors in VO TX queue (power of 2, min 4, max 64)
++ u8_l vo_txq;
++ /// Number of descriptors in BCN TX queue (power of 2, min 4, max 64)
++ u8_l bcn_txq;
++};
++
++/*
++ * TYPE and STRUCT DEFINITIONS
++ ****************************************************************************************
++ */
++
++
++// Indexes are defined in the MIB shared structure
++struct ipc_shared_env_tag
++{
++ volatile struct compatibility_tag comp_info; //FW characteristics
++
++ volatile struct ipc_a2e_msg msg_a2e_buf; // room for MSG to be sent from App to Emb
++
++ // Fields for MSGs sending from Emb to App
++ volatile struct ipc_e2a_msg msg_e2a_buf; // room to build the MSG to be DMA Xferred
++ volatile struct dma_desc msg_dma_desc; // DMA descriptor for Emb->App MSGs Xfers
++ volatile u32_l msg_e2a_hostbuf_addr [IPC_MSGE2A_BUF_CNT]; // buffers @ for DMA Xfers
++
++ // Fields for Debug MSGs sending from Emb to App
++ volatile struct ipc_dbg_msg dbg_buf; // room to build the MSG to be DMA Xferred
++ volatile struct dma_desc dbg_dma_desc; // DMA descriptor for Emb->App MSGs Xfers
++ volatile u32_l dbg_hostbuf_addr [IPC_DBGBUF_CNT]; // buffers @ for MSGs DMA Xfers
++ volatile u32_l la_dbginfo_addr; // Host buffer address for the debug information
++ volatile u32_l pattern_addr;
++ volatile u32_l radarbuf_hostbuf [IPC_RADARBUF_CNT]; // buffers @ for Radar Events
++ volatile u32_l unsuprxvecbuf_hostbuf [IPC_UNSUPRXVECBUF_CNT]; // buffers @ for unsupported Rx vectors
++ volatile struct txdesc_host txdesc0[CONFIG_USER_MAX][NX_TXDESC_CNT0];
++ volatile struct txdesc_host txdesc1[CONFIG_USER_MAX][NX_TXDESC_CNT1];
++ volatile struct txdesc_host txdesc2[CONFIG_USER_MAX][NX_TXDESC_CNT2];
++ volatile struct txdesc_host txdesc3[CONFIG_USER_MAX][NX_TXDESC_CNT3];
++ #if NX_TXQ_CNT == 5
++ volatile struct txdesc_host txdesc4[1][NX_TXDESC_CNT4];
++ #endif
++ #ifdef CONFIG_RWNX_FULLMAC
++ // RX Descriptors Array
++ volatile struct ipc_shared_rx_desc host_rxdesc[IPC_RXDESC_CNT];
++ // RX Buffers Array
++ volatile struct ipc_shared_rx_buf host_rxbuf[IPC_RXBUF_CNT];
++ #else
++ // buffers @ for Data Rx
++ volatile u32_l host_rxbuf[IPC_RXBUF_CNT];
++ #endif /* CONFIG_RWNX_FULLMAC */
++
++ u32_l buffered[NX_REMOTE_STA_MAX][TID_MAX];
++
++ volatile uint16_t trace_pattern;
++ volatile uint32_t trace_start;
++ volatile uint32_t trace_end;
++ volatile uint32_t trace_size;
++ volatile uint32_t trace_offset;
++ volatile uint32_t trace_nb_compo;
++ volatile uint32_t trace_offset_compo;
++};
++
++extern struct ipc_shared_env_tag ipc_shared_env;
++
++
++/*
++ * IRQ BIT DEFINITIONS
++ ****************************************************************************************
++ */
++
++// IRQs from app to emb
++/// Interrupts bits used for the TX descriptors of the AC queues
++#ifdef CONFIG_RWNX_MUMIMO_TX
++#ifdef CONFIG_RWNX_OLD_IPC
++#error "MU-MIMO cannot be compiled for old IPC"
++#endif
++/// Interrupts bits used
++#if CONFIG_USER_MAX > 3
++#define IPC_IRQ_A2E_USER_MSK 0xF
++#elif CONFIG_USER_MAX > 2
++#define IPC_IRQ_A2E_USER_MSK 0x7
++#else
++#define IPC_IRQ_A2E_USER_MSK 0x3
++#endif
++
++/// Offset of the interrupts for AC0
++#define IPC_IRQ_A2E_AC0_OFT 8
++/// Mask of the interrupts for AC0
++#define IPC_IRQ_A2E_AC0_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC0_OFT)
++/// Offset of the interrupts for AC1
++#define IPC_IRQ_A2E_AC1_OFT (IPC_IRQ_A2E_AC0_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for AC1
++#define IPC_IRQ_A2E_AC1_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC1_OFT)
++/// Offset of the interrupts for AC2
++#define IPC_IRQ_A2E_AC2_OFT (IPC_IRQ_A2E_AC1_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for AC2
++#define IPC_IRQ_A2E_AC2_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC2_OFT)
++/// Offset of the interrupts for AC3
++#define IPC_IRQ_A2E_AC3_OFT (IPC_IRQ_A2E_AC2_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for AC3
++#define IPC_IRQ_A2E_AC3_MSK (IPC_IRQ_A2E_USER_MSK << IPC_IRQ_A2E_AC3_OFT)
++/// Offset of the interrupts for BCN
++#define IPC_IRQ_A2E_BCN_OFT (IPC_IRQ_A2E_AC3_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for BCN
++#define IPC_IRQ_A2E_BCN_MSK CO_BIT(IPC_IRQ_A2E_BCN_OFT)
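++
++/*
++ * Illustrative layout (not normative): with CONFIG_USER_MAX == 2 the user
++ * mask is 0x3, so the A2E Tx descriptor interrupts occupy bits 8-9 (AC0),
++ * 10-11 (AC1), 12-13 (AC2), 14-15 (AC3) and bit 16 (BCN).
++ */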
++
++#define IPC_IRQ_A2E_AC_TXDESC (IPC_IRQ_A2E_AC0_MSK | IPC_IRQ_A2E_AC1_MSK | \
++ IPC_IRQ_A2E_AC2_MSK | IPC_IRQ_A2E_AC3_MSK)
++
++/// Interrupts bits used for the TX descriptors of the BCN queue
++#if NX_TXQ_CNT < 5
++#define IPC_IRQ_A2E_BCN_TXDESC 0
++#else
++#define IPC_IRQ_A2E_BCN_TXDESC (0x01 << IPC_IRQ_A2E_BCN_OFT)
++#endif
++
++/// IPC TX descriptor interrupt mask
++#define IPC_IRQ_A2E_TXDESC (IPC_IRQ_A2E_AC_TXDESC | IPC_IRQ_A2E_BCN_TXDESC)
++#else
++/// IPC TX descriptor interrupt mask
++#define IPC_IRQ_A2E_TXDESC 0xFF00
++#endif
++
++#define IPC_IRQ_A2E_TXDESC_FIRSTBIT (8)
++#define IPC_IRQ_A2E_RXBUF_BACK CO_BIT(5)
++#define IPC_IRQ_A2E_RXDESC_BACK CO_BIT(4)
++
++#define IPC_IRQ_A2E_MSG CO_BIT(1)
++#define IPC_IRQ_A2E_DBG CO_BIT(0)
++
++#define IPC_IRQ_A2E_ALL (IPC_IRQ_A2E_TXDESC|IPC_IRQ_A2E_MSG|IPC_IRQ_A2E_DBG)
++
++// IRQs from emb to app
++#define IPC_IRQ_E2A_TXCFM_POS 7
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++#ifdef CONFIG_RWNX_OLD_IPC
++#error "MU-MIMO cannot be compiled for old IPC"
++#endif
++/// Interrupts bits used
++#if CONFIG_USER_MAX > 3
++#define IPC_IRQ_E2A_USER_MSK 0xF
++#elif CONFIG_USER_MAX > 2
++#define IPC_IRQ_E2A_USER_MSK 0x7
++#else
++#define IPC_IRQ_E2A_USER_MSK 0x3
++#endif
++
++/// Offset of the interrupts for AC0
++#define IPC_IRQ_E2A_AC0_OFT IPC_IRQ_E2A_TXCFM_POS
++/// Mask of the interrupts for AC0
++#define IPC_IRQ_E2A_AC0_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC0_OFT)
++/// Offset of the interrupts for AC1
++#define IPC_IRQ_E2A_AC1_OFT (IPC_IRQ_E2A_AC0_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for AC1
++#define IPC_IRQ_E2A_AC1_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC1_OFT)
++/// Offset of the interrupts for AC2
++#define IPC_IRQ_E2A_AC2_OFT (IPC_IRQ_E2A_AC1_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for AC2
++#define IPC_IRQ_E2A_AC2_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC2_OFT)
++/// Offset of the interrupts for AC3
++#define IPC_IRQ_E2A_AC3_OFT (IPC_IRQ_E2A_AC2_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for AC3
++#define IPC_IRQ_E2A_AC3_MSK (IPC_IRQ_E2A_USER_MSK << IPC_IRQ_E2A_AC3_OFT)
++/// Offset of the interrupts for BCN
++#define IPC_IRQ_E2A_BCN_OFT (IPC_IRQ_E2A_AC3_OFT + CONFIG_USER_MAX)
++/// Mask of the interrupts for BCN
++#define IPC_IRQ_E2A_BCN_MSK CO_BIT(IPC_IRQ_E2A_BCN_OFT)
++
++#define IPC_IRQ_E2A_AC_TXCFM (IPC_IRQ_E2A_AC0_MSK | IPC_IRQ_E2A_AC1_MSK | \
++ IPC_IRQ_E2A_AC2_MSK | IPC_IRQ_E2A_AC3_MSK)
++
++/// Interrupts bits used for the TX descriptors of the BCN queue
++#if NX_TXQ_CNT < 5
++#define IPC_IRQ_E2A_BCN_TXCFM 0
++#else
++#define IPC_IRQ_E2A_BCN_TXCFM (0x01 << IPC_IRQ_E2A_BCN_OFT)
++#endif
++
++/// IPC TX descriptor interrupt mask
++#define IPC_IRQ_E2A_TXCFM (IPC_IRQ_E2A_AC_TXCFM | IPC_IRQ_E2A_BCN_TXCFM)
++
++#else
++
++#define IPC_IRQ_E2A_TXCFM (((1 << NX_TXQ_CNT) - 1) << IPC_IRQ_E2A_TXCFM_POS)
++
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++
++#define IPC_IRQ_E2A_UNSUP_RX_VEC CO_BIT(7)
++#define IPC_IRQ_E2A_RADAR CO_BIT(6)
++#define IPC_IRQ_E2A_TBTT_SEC CO_BIT(5)
++#define IPC_IRQ_E2A_TBTT_PRIM CO_BIT(4)
++#define IPC_IRQ_E2A_RXDESC CO_BIT(3)
++#define IPC_IRQ_E2A_MSG_ACK CO_BIT(2)
++#define IPC_IRQ_E2A_MSG CO_BIT(1)
++#define IPC_IRQ_E2A_DBG CO_BIT(0)
++
++#define IPC_IRQ_E2A_ALL ( IPC_IRQ_E2A_TXCFM \
++ | IPC_IRQ_E2A_RXDESC \
++ | IPC_IRQ_E2A_MSG_ACK \
++ | IPC_IRQ_E2A_MSG \
++ | IPC_IRQ_E2A_DBG \
++ | IPC_IRQ_E2A_TBTT_PRIM \
++ | IPC_IRQ_E2A_TBTT_SEC \
++ | IPC_IRQ_E2A_RADAR \
++ | IPC_IRQ_E2A_UNSUP_RX_VEC)
++
++// FLAGS for RX desc
++#define IPC_RX_FORWARD CO_BIT(1)
++#define IPC_RX_INTRABSS CO_BIT(0)
++
++
++// IPC message TYPE
++enum
++{
++ IPC_MSG_NONE = 0,
++ IPC_MSG_WRAP,
++ IPC_MSG_KMSG,
++
++ IPC_DBG_STRING,
++
++};
++
++#endif // _IPC_SHARED_H_
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/lmac_mac.h
+@@ -0,0 +1,588 @@
++/**
++ ****************************************************************************************
++ *
++ * @file lmac_mac.h
++ *
++ * @brief MAC related definitions.
++ *
++ * Adapted from mac_types.h to use lmac_types.h instead of standard types
++ * eg: perl -pi -e '$_ =~ s/uint(\d{1,2})_t/u$1_l/g; \
++ * $_ =~ s/int(\d{1,2})_t/s$1_l/g; \
++ * $_ =~ s/CO_BIT/BIT/g;' lmac_mac.h
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef LMAC_MAC_H_
++#define LMAC_MAC_H_
++
++#include "lmac_types.h"
++
++/// Interface types
++enum mac_vif_type
++{
++ /// ESS STA interface
++ VIF_STA,
++ /// IBSS STA interface
++ VIF_IBSS,
++ /// AP interface
++ VIF_AP,
++ /// Mesh Point interface
++ VIF_MESH_POINT,
++ /// Monitor interface
++ VIF_MONITOR,
++ /// Unknown type
++ VIF_UNKNOWN
++};
++
++/// MAC address length in bytes.
++#define MAC_ADDR_LEN 6
++
++/// MAC address structure.
++struct mac_addr
++{
++ /// Array of 16-bit words that make up the MAC address.
++ u16_l array[MAC_ADDR_LEN/2];
++};
++
++/// SSID maximum length.
++#define MAC_SSID_LEN 32
++
++/// SSID.
++struct mac_ssid
++{
++ /// Actual length of the SSID.
++ u8_l length;
++ /// Array containing the SSID name.
++ u8_l array[MAC_SSID_LEN];
++};
++
++/// BSS type
++enum mac_bss_type
++{
++ INFRASTRUCTURE_MODE = 1,
++ INDEPENDENT_BSS_MODE,
++ ANY_BSS_MODE
++};
++
++/// Channel Band
++enum mac_chan_band
++{
++ /// 2.4GHz Band
++ PHY_BAND_2G4,
++ /// 5GHz band
++ PHY_BAND_5G,
++ /// Number of bands
++ PHY_BAND_MAX,
++};
++
++/// Operating Channel Bandwidth
++enum mac_chan_bandwidth
++{
++ /// 20MHz BW
++ PHY_CHNL_BW_20,
++ /// 40MHz BW
++ PHY_CHNL_BW_40,
++ /// 80MHz BW
++ PHY_CHNL_BW_80,
++ /// 160MHz BW
++ PHY_CHNL_BW_160,
++ /// 80+80MHz BW
++ PHY_CHNL_BW_80P80,
++ /// Reserved BW
++ PHY_CHNL_BW_OTHER,
++};
++
++/// max number of channels in the 2.4 GHz band
++#define MAC_DOMAINCHANNEL_24G_MAX 14
++
++/// max number of channels in the 5 GHz band
++#define MAC_DOMAINCHANNEL_5G_MAX 28
++
++/// Channel Flag
++enum mac_chan_flags
++{
++ /// Cannot initiate radiation on this channel
++ CHAN_NO_IR = BIT(0),
++ /// Channel is not allowed
++ CHAN_DISABLED = BIT(1),
++ /// Radar detection required on this channel
++ CHAN_RADAR = BIT(2),
++};
++
++/// Primary Channel definition
++struct mac_chan_def
++{
++ /// Frequency of the channel (in MHz)
++ u16_l freq;
++ /// RF band (@ref mac_chan_band)
++ u8_l band;
++ /// Additional information (@ref mac_chan_flags)
++ u8_l flags;
++ /// Max transmit power allowed on this channel (dBm)
++ s8_l tx_power;
++};
++
++/// Operating Channel
++struct mac_chan_op
++{
++ /// Band (@ref mac_chan_band)
++ u8_l band;
++ /// Channel type (@ref mac_chan_bandwidth)
++ u8_l type;
++ /// Frequency for Primary 20MHz channel (in MHz)
++ u16_l prim20_freq;
++ /// Frequency center of the contiguous channel or center of Primary 80+80 (in MHz)
++ u16_l center1_freq;
++ /// Frequency center of the non-contiguous secondary 80+80 (in MHz)
++ u16_l center2_freq;
++ /// Max transmit power allowed on this channel (dBm)
++ s8_l tx_power;
++ /// Additional information (@ref mac_chan_flags)
++ u8_l flags;
++};
++
++/// Cipher suites (order is important as it is used by MACHW)
++enum mac_cipher_suite
++{
++ /// 00-0F-AC 1
++ MAC_CIPHER_WEP40 = 0,
++ /// 00-0F-AC 2
++ MAC_CIPHER_TKIP = 1,
++ /// 00-0F-AC 4
++ MAC_CIPHER_CCMP = 2,
++ /// 00-0F-AC 5
++ MAC_CIPHER_WEP104 = 3,
++ /// 00-14-72 1
++ MAC_CIPHER_WPI_SMS4 = 4,
++ /// 00-0F-AC 6 (aka AES_CMAC)
++ MAC_CIPHER_BIP_CMAC_128 = 5,
++
++ // following cipher are not supported by MACHW
++ /// 00-0F-AC 08
++ MAC_CIPHER_GCMP_128,
++ /// 00-0F-AC 09
++ MAC_CIPHER_GCMP_256,
++ /// 00-0F-AC 10
++ MAC_CIPHER_CCMP_256,
++ /// 00-0F-AC 11
++ MAC_CIPHER_BIP_GMAC_128,
++ /// 00-0F-AC 12
++ MAC_CIPHER_BIP_GMAC_256,
++ /// 00-0F-AC 13
++ MAC_CIPHER_BIP_CMAC_256,
++
++ MAC_CIPHER_INVALID = 0xFF
++};
++
++/// Authentication and Key Management suite
++enum mac_akm_suite
++{
++ /// No security
++ MAC_AKM_NONE,
++ /// Pre RSN (WEP or WPA)
++ MAC_AKM_PRE_RSN,
++ /// 00-0F-AC 1
++ MAC_AKM_8021X,
++ /// 00-0F-AC 2
++ MAC_AKM_PSK,
++ /// 00-0F-AC 3
++ MAC_AKM_FT_8021X,
++ /// 00-0F-AC 4
++ MAC_AKM_FT_PSK,
++ /// 00-0F-AC 5
++ MAC_AKM_8021X_SHA256,
++ /// 00-0F-AC 6
++ MAC_AKM_PSK_SHA256,
++ /// 00-0F-AC 7
++ MAC_AKM_TDLS,
++ /// 00-0F-AC 8
++ MAC_AKM_SAE,
++ /// 00-0F-AC 9
++ MAC_AKM_FT_OVER_SAE,
++ /// 00-0F-AC 11
++ MAC_AKM_8021X_SUITE_B,
++ /// 00-0F-AC 12
++ MAC_AKM_8021X_SUITE_B_192,
++ /// 00-0F-AC 14
++ MAC_AKM_FILS_SHA256,
++ /// 00-0F-AC 15
++ MAC_AKM_FILS_SHA384,
++ /// 00-0F-AC 16
++ MAC_AKM_FT_FILS_SHA256,
++ /// 00-0F-AC 17
++ MAC_AKM_FT_FILS_SHA384,
++ /// 00-0F-AC 18
++ MAC_AKM_OWE,
++
++ /// 00-14-72 1
++ MAC_AKM_WAPI_CERT,
++ /// 00-14-72 2
++ MAC_AKM_WAPI_PSK,
++};
++
++/// Scan result element, parsed from beacon or probe response frames.
++struct mac_scan_result
++{
++ /// Scan result is valid
++ bool valid_flag;
++ /// Network BSSID.
++ struct mac_addr bssid;
++ /// Network name.
++ struct mac_ssid ssid;
++ /// Network type (@ref mac_bss_type).
++ u16_l bsstype;
++ /// Network channel.
++ struct mac_chan_def *chan;
++ /// Network beacon period (in TU).
++ u16_l beacon_period;
++ /// Capability information
++ u16_l cap_info;
++ /// Supported AKM (bit-field of @ref mac_akm_suite)
++ u32_l akm;
++ /// Group cipher (bit-field of @ref mac_cipher_suite)
++ u16_l group_cipher;
++ /// Pairwise cipher (bit-field of @ref mac_cipher_suite)
++ u16_l pairwise_cipher;
++ /// RSSI of the scanned BSS (in dBm)
++ s8_l rssi;
++ /// Multi-BSSID index (0 if this is the reference (i.e. transmitted) BSSID)
++ u8_l mluti_bssid_index;
++ /// Maximum BSSID indicator
++ u8_l max_bssid_indicator;
++};
++
++/// Legacy rate 802.11 definitions
++enum mac_legacy_rates
++{
++ /// DSSS/CCK 1Mbps
++ MAC_RATE_1MBPS = 2,
++ /// DSSS/CCK 2Mbps
++ MAC_RATE_2MBPS = 4,
++ /// DSSS/CCK 5.5Mbps
++ MAC_RATE_5_5MBPS = 11,
++ /// OFDM 6Mbps
++ MAC_RATE_6MBPS = 12,
++ /// OFDM 9Mbps
++ MAC_RATE_9MBPS = 18,
++ /// DSSS/CCK 11Mbps
++ MAC_RATE_11MBPS = 22,
++ /// OFDM 12Mbps
++ MAC_RATE_12MBPS = 24,
++ /// OFDM 18Mbps
++ MAC_RATE_18MBPS = 36,
++ /// OFDM 24Mbps
++ MAC_RATE_24MBPS = 48,
++ /// OFDM 36Mbps
++ MAC_RATE_36MBPS = 72,
++ /// OFDM 48Mbps
++ MAC_RATE_48MBPS = 96,
++ /// OFDM 54Mbps
++ MAC_RATE_54MBPS = 108
++};
++
++/// BSS Membership Selector definitions
++enum mac_bss_membership
++{
++ /// HT PHY
++ MAC_BSS_MEMBERSHIP_HT_PHY = 127,
++ /// VHT PHY
++ MAC_BSS_MEMBERSHIP_VHT_PHY = 126,
++};
++
++/// MAC rateset maximum length
++#define MAC_RATESET_LEN 12
++
++/// Structure containing the legacy rateset of a station
++struct mac_rateset
++{
++ /// Number of legacy rates supported
++ u8_l length;
++ /// Array of legacy rates
++ u8_l array[MAC_RATESET_LEN];
++};
++
++/// MAC Security Key maximum length
++#define MAC_SEC_KEY_LEN 32 // TKIP keys 256 bits (max length) with MIC keys
++
++/// Structure defining a security key
++struct mac_sec_key
++{
++ /// Key material length
++ u8_l length;
++ /// Key material
++ u32_l array[MAC_SEC_KEY_LEN/4];
++};
++
++/// Access Category enumeration
++enum mac_ac
++{
++ /// Background
++ AC_BK = 0,
++ /// Best-effort
++ AC_BE,
++ /// Video
++ AC_VI,
++ /// Voice
++ AC_VO,
++ /// Number of access categories
++ AC_MAX
++};
++
++/// Traffic ID enumeration
++enum mac_tid
++{
++ /// TID_0. Mapped to @ref AC_BE as per 802.11 standard.
++ TID_0,
++ /// TID_1. Mapped to @ref AC_BK as per 802.11 standard.
++ TID_1,
++ /// TID_2. Mapped to @ref AC_BK as per 802.11 standard.
++ TID_2,
++ /// TID_3. Mapped to @ref AC_BE as per 802.11 standard.
++ TID_3,
++ /// TID_4. Mapped to @ref AC_VI as per 802.11 standard.
++ TID_4,
++ /// TID_5. Mapped to @ref AC_VI as per 802.11 standard.
++ TID_5,
++ /// TID_6. Mapped to @ref AC_VO as per 802.11 standard.
++ TID_6,
++ /// TID_7. Mapped to @ref AC_VO as per 802.11 standard.
++ TID_7,
++ /// Non standard Management TID used internally
++ TID_MGT,
++ /// Number of TID supported
++ TID_MAX
++};
++
++/// MCS bitfield maximum size (in bytes)
++#define MAX_MCS_LEN 16 // 16 * 8 = 128
++
++/// MAC HT capability information element
++struct mac_htcapability
++{
++ /// HT capability information
++ u16_l ht_capa_info;
++ /// A-MPDU parameters
++ u8_l a_mpdu_param;
++ /// Supported MCS
++ u8_l mcs_rate[MAX_MCS_LEN];
++ /// HT extended capability information
++ u16_l ht_extended_capa;
++ /// Beamforming capability information
++ u32_l tx_beamforming_capa;
++ /// Antenna selection capability information
++ u8_l asel_capa;
++};
++
++/// MAC VHT capability information element
++struct mac_vhtcapability
++{
++ /// VHT capability information
++ u32_l vht_capa_info;
++ /// RX MCS map
++ u16_l rx_mcs_map;
++ /// RX highest data rate
++ u16_l rx_highest;
++ /// TX MCS map
++ u16_l tx_mcs_map;
++ /// TX highest data rate
++ u16_l tx_highest;
++};
++
++/// Length (in bytes) of the MAC HE capability field
++#define MAC_HE_MAC_CAPA_LEN 6
++/// Length (in bytes) of the PHY HE capability field
++#define MAC_HE_PHY_CAPA_LEN 11
++/// Maximum length (in bytes) of the PPE threshold data
++#define MAC_HE_PPE_THRES_MAX_LEN 25
++
++/// Structure listing the per-NSS, per-BW supported MCS combinations
++struct mac_he_mcs_nss_supp
++{
++ /// per-NSS supported MCS in RX, for BW <= 80MHz
++ u16_l rx_mcs_80;
++ /// per-NSS supported MCS in TX, for BW <= 80MHz
++ u16_l tx_mcs_80;
++ /// per-NSS supported MCS in RX, for BW = 160MHz
++ u16_l rx_mcs_160;
++ /// per-NSS supported MCS in TX, for BW = 160MHz
++ u16_l tx_mcs_160;
++ /// per-NSS supported MCS in RX, for BW = 80+80MHz
++ u16_l rx_mcs_80p80;
++ /// per-NSS supported MCS in TX, for BW = 80+80MHz
++ u16_l tx_mcs_80p80;
++};
++
++/// MAC HE capability information element
++struct mac_hecapability
++{
++ /// MAC HE capabilities
++ u8_l mac_cap_info[MAC_HE_MAC_CAPA_LEN];
++ /// PHY HE capabilities
++ u8_l phy_cap_info[MAC_HE_PHY_CAPA_LEN];
++ /// Supported MCS combinations
++ struct mac_he_mcs_nss_supp mcs_supp;
++ /// PPE Thresholds data
++ u8_l ppe_thres[MAC_HE_PPE_THRES_MAX_LEN];
++};
++
++/// Station flags
++enum mac_sta_flags
++{
++ /// Bit indicating that a STA has QoS (WMM) capability
++ STA_QOS_CAPA = BIT(0),
++ /// Bit indicating that a STA has HT capability
++ STA_HT_CAPA = BIT(1),
++ /// Bit indicating that a STA has VHT capability
++ STA_VHT_CAPA = BIT(2),
++ /// Bit indicating that a STA has MFP capability
++ STA_MFP_CAPA = BIT(3),
++ /// Bit indicating that the STA included the Operation Notification IE
++ STA_OPMOD_NOTIF = BIT(4),
++ /// Bit indicating that a STA has HE capability
++ STA_HE_CAPA = BIT(5),
++};
++
++/// Connection flags
++enum mac_connection_flags
++{
++ /// Flag indicating whether the control port is controlled by host or not
++ CONTROL_PORT_HOST = BIT(0),
++ /// Flag indicating whether the control port frame shall be sent unencrypted
++ CONTROL_PORT_NO_ENC = BIT(1),
++ /// Flag indicating whether HT and VHT shall be disabled or not
++ DISABLE_HT = BIT(2),
++ /// Flag indicating whether WPA or WPA2 authentication is in use
++ WPA_WPA2_IN_USE = BIT(3),
++ /// Flag indicating whether MFP is in use
++ MFP_IN_USE = BIT(4),
++ /// Flag indicating a reassociation (roaming)
++ REASSOCIATION = BIT(5),
++};
++
++#ifdef CONFIG_HE_FOR_OLD_KERNEL
++#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
++#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
++#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
++#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
++#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
++#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS 0x80
++#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS 0x01
++#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
++#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
++#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
++#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
++#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
++#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
++#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
++#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
++#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
++#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
++#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
++#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
++#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
++#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
++#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
++#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
++#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
++#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
++#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10
++#define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20
++
++struct ieee80211_he_cap_elem {
++ u8 mac_cap_info[6];
++ u8 phy_cap_info[11];
++} __packed;
++
++struct ieee80211_he_mcs_nss_supp {
++ __le16 rx_mcs_80;
++ __le16 tx_mcs_80;
++ __le16 rx_mcs_160;
++ __le16 tx_mcs_160;
++ __le16 rx_mcs_80p80;
++ __le16 tx_mcs_80p80;
++} __packed;
++
++#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
++#define WLAN_EID_EXTENSION 255
++/* Element ID Extensions for Element ID 255 */
++
++enum ieee80211_eid_ext {
++ WLAN_EID_EXT_ASSOC_DELAY_INFO = 1,
++ WLAN_EID_EXT_FILS_REQ_PARAMS = 2,
++ WLAN_EID_EXT_FILS_KEY_CONFIRM = 3,
++ WLAN_EID_EXT_FILS_SESSION = 4,
++ WLAN_EID_EXT_FILS_HLP_CONTAINER = 5,
++ WLAN_EID_EXT_FILS_IP_ADDR_ASSIGN = 6,
++ WLAN_EID_EXT_KEY_DELIVERY = 7,
++ WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
++ WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
++ WLAN_EID_EXT_FILS_NONCE = 13,
++ WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
++
++};
++
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++#define WLAN_EID_EXT_HE_CAPABILITY 35
++#define WLAN_EID_EXT_HE_OPERATION 36
++#define WLAN_EID_EXT_UORA 37
++#define WLAN_EID_EXT_HE_MU_EDCA 38
++#define WLAN_EID_EXT_HE_SPR 39
++#define WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET 41
++#define WLAN_EID_EXT_BSS_COLOR_CHG_ANN 42
++#define WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP 43
++#define WLAN_EID_EXT_ESS_REPORT 45
++#define WLAN_EID_EXT_OPS 46
++#define WLAN_EID_EXT_HE_BSS_LOAD 47
++#define WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME 52
++#define WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION 55
++#define WLAN_EID_EXT_NON_INHERITANCE 56
++#define WLAN_EID_EXT_KNOWN_BSSID 57
++#define WLAN_EID_EXT_SHORT_SSID_LIST 58
++#define WLAN_EID_EXT_HE_6GHZ_CAPA 59
++#define WLAN_EID_EXT_UL_MU_POWER_CAPA 60
++#define WLAN_EID_EXT_EHT_OPERATION 106
++#define WLAN_EID_EXT_EHT_MULTI_LINK 107
++#define WLAN_EID_EXT_EHT_CAPABILITY 108
++
++#endif
++
++struct ieee80211_sta_he_cap {
++ bool has_he;
++ struct ieee80211_he_cap_elem he_cap_elem;
++ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
++ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
++};
++
++struct ieee80211_sband_iftype_data {
++ u16 types_mask;
++ struct ieee80211_sta_he_cap he_cap;
++};
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++struct ieee80211_vht_mcs_info {
++ __le16 rx_mcs_map;
++ __le16 rx_highest;
++ __le16 tx_mcs_map;
++ __le16 tx_highest;
++} __packed;
++
++struct ieee80211_vht_cap {
++ __le32 vht_cap_info;
++ struct ieee80211_vht_mcs_info supp_mcs;
++};
++#define WLAN_EID_VHT_CAPABILITY 191
++
++struct ieee80211_sta_vht_cap {
++ bool vht_supported;
++ u32 cap; /* use IEEE80211_VHT_CAP_ */
++ struct ieee80211_vht_mcs_info vht_mcs;
++};
++#endif
++#endif // LMAC_MAC_H_
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/lmac_msg.h
+@@ -0,0 +1,3023 @@
++/**
++ ****************************************************************************************
++ *
++ * @file lmac_msg.h
++ *
++ * @brief Main definitions for message exchanges with LMAC
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef LMAC_MSG_H_
++#define LMAC_MSG_H_
++
++/*
++ * INCLUDE FILES
++ ****************************************************************************************
++ */
++// for MAC related elements (mac_addr, mac_ssid...)
++#include "lmac_mac.h"
++
++/*
++ ****************************************************************************************
++ */
++/////////////////////////////////////////////////////////////////////////////////
++// COMMUNICATION WITH LMAC LAYER
++/////////////////////////////////////////////////////////////////////////////////
++/* Task identifiers for communication between LMAC and DRIVER */
++enum
++{
++ TASK_NONE = (u8_l) -1,
++
++ // MAC Management task.
++ TASK_MM = 0,
++ // DEBUG task
++ TASK_DBG,
++ /// SCAN task
++ TASK_SCAN,
++ /// TDLS task
++ TASK_TDLS,
++ /// SCANU task
++ TASK_SCANU,
++ /// ME task
++ TASK_ME,
++ /// SM task
++ TASK_SM,
++ /// APM task
++ TASK_APM,
++ /// BAM task
++ TASK_BAM,
++ /// MESH task
++ TASK_MESH,
++ /// RXU task
++ TASK_RXU,
++ /// RM task
++ TASK_RM,
++#if defined CONFIG_RWNX_FULLMAC || defined CONFIG_RWNX_FHOST
++ // This is used to define the last task that is running on the EMB processor
++ TASK_LAST_EMB = TASK_RM,
++#else
++#error "Need to define SOFTMAC or FULLMAC"
++#endif
++ // nX API task
++ TASK_API,
++ TASK_MAX,
++};
++
++
++/// MAC HW states (copied from "hal_machw.h")
++enum
++{
++ /// MAC HW IDLE State.
++ HW_IDLE = 0,
++ /// MAC HW RESERVED State.
++ HW_RESERVED,
++ /// MAC HW DOZE State.
++ HW_DOZE,
++ /// MAC HW ACTIVE State.
++ HW_ACTIVE
++};
++
++/// Power Save mode setting
++enum mm_ps_mode_state
++{
++ MM_PS_MODE_OFF,
++ MM_PS_MODE_ON,
++ MM_PS_MODE_ON_DYN,
++};
++
++/// Status/error codes used in the MAC software.
++enum
++{
++ CO_OK,
++ CO_FAIL,
++ CO_EMPTY,
++ CO_FULL,
++ CO_BAD_PARAM,
++ CO_NOT_FOUND,
++ CO_NO_MORE_ELT_AVAILABLE,
++ CO_NO_ELT_IN_USE,
++ CO_BUSY,
++ CO_OP_IN_PROGRESS,
++};
++
++/// Remain on channel operation codes
++enum mm_remain_on_channel_op
++{
++ MM_ROC_OP_START = 0,
++ MM_ROC_OP_CANCEL,
++};
++
++#define DRV_TASK_ID 100
++
++/// Message Identifier. The number of messages is limited to 0xFFFF.
++/// The message ID is divided in two parts:
++/// - bits[15..10] : task index (no more than 64 tasks supported).
++/// - bits[9..0] : message index (no more than 1024 messages per task).
++typedef u16 lmac_msg_id_t;
++
++typedef u16 lmac_task_id_t;
++
++/// Build the first message ID of a task.
++#define LMAC_FIRST_MSG(task) ((lmac_msg_id_t)((task) << 10))
++
++#define MSG_T(msg) ((lmac_task_id_t)((msg) >> 10))
++#define MSG_I(msg) ((msg) & ((1<<10)-1))
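++
++/*
++ * Illustrative example: with TASK_SCAN == 2, LMAC_FIRST_MSG(TASK_SCAN) is
++ * 0x0800; for a message id of 0x0801, MSG_T() yields 2 (TASK_SCAN) and
++ * MSG_I() yields 1 (message index 1 within that task).
++ */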
++
++/// Message structure.
++struct lmac_msg
++{
++ lmac_msg_id_t id; ///< Message id.
++ lmac_task_id_t dest_id; ///< Destination kernel identifier.
++ lmac_task_id_t src_id; ///< Source kernel identifier.
++ u16 param_len; ///< Parameter embedded struct length.
++ u32 param[]; ///< Parameter embedded struct. Must be word-aligned.
++};
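++
++/*
++ * Sizing note (illustrative): a message carrying param_len bytes of
++ * parameters occupies sizeof(struct lmac_msg) + param_len bytes, the
++ * parameters being stored word-aligned in the trailing param[] array.
++ */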
++
++/// List of messages related to the task.
++enum mm_msg_tag
++{
++ /// RESET Request.
++ MM_RESET_REQ = LMAC_FIRST_MSG(TASK_MM),
++ /// RESET Confirmation.
++ MM_RESET_CFM,
++ /// START Request.
++ MM_START_REQ,
++ /// START Confirmation.
++ MM_START_CFM,
++ /// Read Version Request.
++ MM_VERSION_REQ,
++ /// Read Version Confirmation.
++ MM_VERSION_CFM,
++ /// ADD INTERFACE Request.
++ MM_ADD_IF_REQ,
++ /// ADD INTERFACE Confirmation.
++ MM_ADD_IF_CFM,
++ /// REMOVE INTERFACE Request.
++ MM_REMOVE_IF_REQ,
++ /// REMOVE INTERFACE Confirmation.
++ MM_REMOVE_IF_CFM,
++ /// STA ADD Request.
++ MM_STA_ADD_REQ,
++ /// STA ADD Confirm.
++ MM_STA_ADD_CFM,
++ /// STA DEL Request.
++ MM_STA_DEL_REQ,
++ /// STA DEL Confirm.
++ MM_STA_DEL_CFM,
++ /// RX FILTER CONFIGURATION Request.
++ MM_SET_FILTER_REQ,
++ /// RX FILTER CONFIGURATION Confirmation.
++ MM_SET_FILTER_CFM,
++ /// CHANNEL CONFIGURATION Request.
++ MM_SET_CHANNEL_REQ,
++ /// CHANNEL CONFIGURATION Confirmation.
++ MM_SET_CHANNEL_CFM,
++ /// DTIM PERIOD CONFIGURATION Request.
++ MM_SET_DTIM_REQ,
++ /// DTIM PERIOD CONFIGURATION Confirmation.
++ MM_SET_DTIM_CFM,
++ /// BEACON INTERVAL CONFIGURATION Request.
++ MM_SET_BEACON_INT_REQ,
++ /// BEACON INTERVAL CONFIGURATION Confirmation.
++ MM_SET_BEACON_INT_CFM,
++ /// BASIC RATES CONFIGURATION Request.
++ MM_SET_BASIC_RATES_REQ,
++ /// BASIC RATES CONFIGURATION Confirmation.
++ MM_SET_BASIC_RATES_CFM,
++ /// BSSID CONFIGURATION Request.
++ MM_SET_BSSID_REQ,
++ /// BSSID CONFIGURATION Confirmation.
++ MM_SET_BSSID_CFM,
++ /// EDCA PARAMETERS CONFIGURATION Request.
++ MM_SET_EDCA_REQ,
++ /// EDCA PARAMETERS CONFIGURATION Confirmation.
++ MM_SET_EDCA_CFM,
++ /// ABGN MODE CONFIGURATION Request.
++ MM_SET_MODE_REQ,
++ /// ABGN MODE CONFIGURATION Confirmation.
++ MM_SET_MODE_CFM,
++ /// Request setting the VIF active state (i.e associated or AP started)
++ MM_SET_VIF_STATE_REQ,
++ /// Confirmation of the @ref MM_SET_VIF_STATE_REQ message.
++ MM_SET_VIF_STATE_CFM,
++ /// SLOT TIME PARAMETERS CONFIGURATION Request.
++ MM_SET_SLOTTIME_REQ,
++ /// SLOT TIME PARAMETERS CONFIGURATION Confirmation.
++ MM_SET_SLOTTIME_CFM,
++ /// Power Mode Change Request.
++ MM_SET_IDLE_REQ,
++ /// Power Mode Change Confirm.
++ MM_SET_IDLE_CFM,
++ /// KEY ADD Request.
++ MM_KEY_ADD_REQ,
++ /// KEY ADD Confirm.
++ MM_KEY_ADD_CFM,
++ /// KEY DEL Request.
++ MM_KEY_DEL_REQ,
++ /// KEY DEL Confirm.
++ MM_KEY_DEL_CFM,
++ /// Block Ack agreement info addition
++ MM_BA_ADD_REQ,
++ /// Block Ack agreement info addition confirmation
++ MM_BA_ADD_CFM,
++ /// Block Ack agreement info deletion
++ MM_BA_DEL_REQ,
++ /// Block Ack agreement info deletion confirmation
++ MM_BA_DEL_CFM,
++ /// Indication of the primary TBTT to the upper MAC. Upon the reception of this
++ // message the upper MAC has to push the beacon(s) to the beacon transmission queue.
++ MM_PRIMARY_TBTT_IND,
++ /// Indication of the secondary TBTT to the upper MAC. Upon the reception of this
++ // message the upper MAC has to push the beacon(s) to the beacon transmission queue.
++ MM_SECONDARY_TBTT_IND,
++ /// Request for changing the TX power
++ MM_SET_POWER_REQ,
++ /// Confirmation of the TX power change
++ MM_SET_POWER_CFM,
++ /// Request to the LMAC to trigger the embedded logic analyzer and forward the debug
++ /// dump.
++ MM_DBG_TRIGGER_REQ,
++ /// Set Power Save mode
++ MM_SET_PS_MODE_REQ,
++ /// Set Power Save mode confirmation
++ MM_SET_PS_MODE_CFM,
++ /// Request to add a channel context
++ MM_CHAN_CTXT_ADD_REQ,
++ /// Confirmation of the channel context addition
++ MM_CHAN_CTXT_ADD_CFM,
++ /// Request to delete a channel context
++ MM_CHAN_CTXT_DEL_REQ,
++ /// Confirmation of the channel context deletion
++ MM_CHAN_CTXT_DEL_CFM,
++ /// Request to link a channel context to a VIF
++ MM_CHAN_CTXT_LINK_REQ,
++ /// Confirmation of the channel context link
++ MM_CHAN_CTXT_LINK_CFM,
++ /// Request to unlink a channel context from a VIF
++ MM_CHAN_CTXT_UNLINK_REQ,
++ /// Confirmation of the channel context unlink
++ MM_CHAN_CTXT_UNLINK_CFM,
++ /// Request to update a channel context
++ MM_CHAN_CTXT_UPDATE_REQ,
++ /// Confirmation of the channel context update
++ MM_CHAN_CTXT_UPDATE_CFM,
++ /// Request to schedule a channel context
++ MM_CHAN_CTXT_SCHED_REQ,
++ /// Confirmation of the channel context scheduling
++ MM_CHAN_CTXT_SCHED_CFM,
++ /// Request to change the beacon template in LMAC
++ MM_BCN_CHANGE_REQ,
++ /// Confirmation of the beacon change
++ MM_BCN_CHANGE_CFM,
++ /// Request to update the TIM in the beacon (i.e. to indicate traffic buffered at the AP)
++ MM_TIM_UPDATE_REQ,
++ /// Confirmation of the TIM update
++ MM_TIM_UPDATE_CFM,
++ /// Connection loss indication
++ MM_CONNECTION_LOSS_IND,
++ /// Channel context switch indication to the upper layers
++ MM_CHANNEL_SWITCH_IND,
++ /// Channel context pre-switch indication to the upper layers
++ MM_CHANNEL_PRE_SWITCH_IND,
++ /// Request to remain on channel or cancel remain on channel
++ MM_REMAIN_ON_CHANNEL_REQ,
++ /// Confirmation of the (cancel) remain on channel request
++ MM_REMAIN_ON_CHANNEL_CFM,
++ /// Remain on channel expired indication
++ MM_REMAIN_ON_CHANNEL_EXP_IND,
++ /// Indication of a PS state change of a peer device
++ MM_PS_CHANGE_IND,
++ /// Indication that some buffered traffic should be sent to the peer device
++ MM_TRAFFIC_REQ_IND,
++ /// Request to modify the STA Power-save mode options
++ MM_SET_PS_OPTIONS_REQ,
++ /// Confirmation of the PS options setting
++ MM_SET_PS_OPTIONS_CFM,
++ /// Indication of PS state change for a P2P VIF
++ MM_P2P_VIF_PS_CHANGE_IND,
++ /// Indication that CSA counter has been updated
++ MM_CSA_COUNTER_IND,
++ /// Channel occupation report indication
++ MM_CHANNEL_SURVEY_IND,
++ /// Message containing Beamformer Information
++ MM_BFMER_ENABLE_REQ,
++ /// Request to Start/Stop/Update NOA - GO Only
++ MM_SET_P2P_NOA_REQ,
++ /// Request to Start/Stop/Update Opportunistic PS - GO Only
++ MM_SET_P2P_OPPPS_REQ,
++ /// Start/Stop/Update NOA Confirmation
++ MM_SET_P2P_NOA_CFM,
++ /// Start/Stop/Update Opportunistic PS Confirmation
++ MM_SET_P2P_OPPPS_CFM,
++ /// P2P NoA Update Indication - GO Only
++ MM_P2P_NOA_UPD_IND,
++ /// Request to set RSSI threshold and RSSI hysteresis
++ MM_CFG_RSSI_REQ,
++ /// Indication that RSSI level is below or above the threshold
++ MM_RSSI_STATUS_IND,
++ /// Indication that CSA is done
++ MM_CSA_FINISH_IND,
++ /// Indication that CSA is in progress (resp. done) and traffic must be stopped (resp. restarted)
++ MM_CSA_TRAFFIC_IND,
++ /// Request to update the group information of a station
++ MM_MU_GROUP_UPDATE_REQ,
++ /// Confirmation of the @ref MM_MU_GROUP_UPDATE_REQ message
++ MM_MU_GROUP_UPDATE_CFM,
++ /// Request to initialize the antenna diversity algorithm
++ MM_ANT_DIV_INIT_REQ,
++ /// Request to stop the antenna diversity algorithm
++ MM_ANT_DIV_STOP_REQ,
++ /// Request to update the antenna switch status
++ MM_ANT_DIV_UPDATE_REQ,
++ /// Request to switch the antenna connected to path_0
++ MM_SWITCH_ANTENNA_REQ,
++ /// Indication that a packet loss has occurred
++ MM_PKTLOSS_IND,
++
++ MM_SET_ARPOFFLOAD_REQ,
++ MM_SET_ARPOFFLOAD_CFM,
++ MM_SET_AGG_DISABLE_REQ,
++ MM_SET_AGG_DISABLE_CFM,
++ MM_SET_COEX_REQ,
++ MM_SET_COEX_CFM,
++ MM_SET_RF_CONFIG_REQ,
++ MM_SET_RF_CONFIG_CFM,
++ MM_SET_RF_CALIB_REQ,
++ MM_SET_RF_CALIB_CFM,
++
++ /// MU EDCA PARAMETERS Configuration Request.
++ MM_SET_MU_EDCA_REQ,
++ /// MU EDCA PARAMETERS Configuration Confirmation.
++ MM_SET_MU_EDCA_CFM,
++ /// UORA PARAMETERS Configuration Request.
++ MM_SET_UORA_REQ,
++ /// UORA PARAMETERS Configuration Confirmation.
++ MM_SET_UORA_CFM,
++ /// TXOP RTS THRESHOLD Configuration Request.
++ MM_SET_TXOP_RTS_THRES_REQ,
++ /// TXOP RTS THRESHOLD Configuration Confirmation.
++ MM_SET_TXOP_RTS_THRES_CFM,
++ /// HE BSS Color Configuration Request.
++ MM_SET_BSS_COLOR_REQ,
++ /// HE BSS Color Configuration Confirmation.
++ MM_SET_BSS_COLOR_CFM,
++
++ MM_GET_MAC_ADDR_REQ,
++ MM_GET_MAC_ADDR_CFM,
++
++ MM_GET_STA_INFO_REQ,
++ MM_GET_STA_INFO_CFM,
++
++ MM_SET_TXPWR_IDX_LVL_REQ,
++ MM_SET_TXPWR_IDX_LVL_CFM,
++
++ MM_SET_TXPWR_OFST_REQ,
++ MM_SET_TXPWR_OFST_CFM,
++
++ MM_SET_STACK_START_REQ,
++ MM_SET_STACK_START_CFM,
++
++ MM_APM_STALOSS_IND,
++
++ MM_SET_VENDOR_HWCONFIG_REQ,
++ MM_SET_VENDOR_HWCONFIG_CFM,
++
++ MM_GET_FW_VERSION_REQ,
++ MM_GET_FW_VERSION_CFM,
++
++ /// MAX number of messages
++ MM_MAX,
++};
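++
++/*
++ * Illustrative note, not a guarantee of the interface: most request
++ * identifiers above are immediately followed by their confirmation, so
++ * generic completion handling can usually derive the expected confirmation
++ * id from the request id:
++ *
++ *   lmac_msg_id_t expected_cfm = req_id + 1;   // e.g. MM_START_REQ -> MM_START_CFM
++ *
++ * Indications and requests without a confirmation, such as
++ * MM_DBG_TRIGGER_REQ, do not follow this pattern and are handled explicitly.
++ */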
++
++/// Interface types
++enum
++{
++ /// ESS STA interface
++ MM_STA,
++ /// IBSS STA interface
++ MM_IBSS,
++ /// AP interface
++ MM_AP,
++ /// Mesh Point interface
++ MM_MESH_POINT,
++ /// Monitor interface
++ MM_MONITOR,
++};
++
++///BA agreement types
++enum
++{
++ ///BlockAck agreement for TX
++ BA_AGMT_TX,
++ ///BlockAck agreement for RX
++ BA_AGMT_RX,
++};
++
++///BA agreement related status
++enum
++{
++ ///Correct BA agreement establishment
++ BA_AGMT_ESTABLISHED,
++ ///BA agreement already exists for STA+TID requested, cannot override it (should have been deleted first)
++ BA_AGMT_ALREADY_EXISTS,
++ ///Correct BA agreement deletion
++ BA_AGMT_DELETED,
++ ///BA agreement for the (STA, TID) doesn't exist so nothing to delete
++ BA_AGMT_DOESNT_EXIST,
++};
++
++/// Features supported by LMAC - Positions
++enum mm_features
++{
++ /// Beaconing
++ MM_FEAT_BCN_BIT = 0,
++ /// Autonomous Beacon Transmission
++ MM_FEAT_AUTOBCN_BIT,
++ /// Scan in LMAC
++ MM_FEAT_HWSCAN_BIT,
++ /// Connection Monitoring
++ MM_FEAT_CMON_BIT,
++ /// Multi Role
++ MM_FEAT_MROLE_BIT,
++ /// Radar Detection
++ MM_FEAT_RADAR_BIT,
++ /// Power Save
++ MM_FEAT_PS_BIT,
++ /// UAPSD
++ MM_FEAT_UAPSD_BIT,
++ /// DPSM
++ MM_FEAT_DPSM_BIT,
++ /// A-MPDU
++ MM_FEAT_AMPDU_BIT,
++ /// A-MSDU
++ MM_FEAT_AMSDU_BIT,
++ /// Channel Context
++ MM_FEAT_CHNL_CTXT_BIT,
++ /// Packet reordering
++ MM_FEAT_REORD_BIT,
++ /// P2P
++ MM_FEAT_P2P_BIT,
++ /// P2P Go
++ MM_FEAT_P2P_GO_BIT,
++ /// UMAC Present
++ MM_FEAT_UMAC_BIT,
++ /// VHT support
++ MM_FEAT_VHT_BIT,
++ /// Beamformee
++ MM_FEAT_BFMEE_BIT,
++ /// Beamformer
++ MM_FEAT_BFMER_BIT,
++ /// WAPI
++ MM_FEAT_WAPI_BIT,
++ /// MFP
++ MM_FEAT_MFP_BIT,
++ /// Mu-MIMO RX support
++ MM_FEAT_MU_MIMO_RX_BIT,
++ /// Mu-MIMO TX support
++ MM_FEAT_MU_MIMO_TX_BIT,
++ /// Wireless Mesh Networking
++ MM_FEAT_MESH_BIT,
++ /// TDLS support
++ MM_FEAT_TDLS_BIT,
++ /// Antenna Diversity support
++ MM_FEAT_ANT_DIV_BIT,
++ /// UF support
++ MM_FEAT_UF_BIT,
++ /// A-MSDU maximum size (bit0)
++ MM_AMSDU_MAX_SIZE_BIT0,
++ /// A-MSDU maximum size (bit1)
++ MM_AMSDU_MAX_SIZE_BIT1,
++ /// MON_DATA support
++ MM_FEAT_MON_DATA_BIT,
++ /// HE (802.11ax) support
++ MM_FEAT_HE_BIT,
++};
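++
++/*
++ * Illustrative sketch: the positions above index bits in the feature word
++ * reported by the firmware (see the features field of struct mm_version_cfm
++ * below), so a single capability can be tested with a bit mask:
++ *
++ *   bool he_supported = !!(version_cfm->features & BIT(MM_FEAT_HE_BIT));
++ *
++ * The two MM_AMSDU_MAX_SIZE_BITx positions form a 2-bit field and are meant
++ * to be decoded together rather than tested individually.
++ */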
++
++/// Maximum number of words in the configuration buffer
++#define PHY_CFG_BUF_SIZE 16
++
++/// Structure containing the parameters of the PHY configuration
++struct phy_cfg_tag
++{
++ /// Buffer containing the parameters specific for the PHY used
++ u32_l parameters[PHY_CFG_BUF_SIZE];
++};
++
++/// Structure containing the parameters of the Trident PHY configuration
++struct phy_trd_cfg_tag
++{
++ /// MDM type(nxm)(upper nibble) and MDM2RF path mapping(lower nibble)
++ u8_l path_mapping;
++ /// TX DC offset compensation
++ u32_l tx_dc_off_comp;
++};
++
++/// Structure containing the parameters of the Karst PHY configuration
++struct phy_karst_cfg_tag
++{
++ /// TX IQ mismatch compensation in 2.4GHz
++ u32_l tx_iq_comp_2_4G[2];
++ /// RX IQ mismatch compensation in 2.4GHz
++ u32_l rx_iq_comp_2_4G[2];
++ /// TX IQ mismatch compensation in 5GHz
++ u32_l tx_iq_comp_5G[2];
++ /// RX IQ mismatch compensation in 5GHz
++ u32_l rx_iq_comp_5G[2];
++ /// RF path used by default (0 or 1)
++ u8_l path_used;
++};
++
++/// Structure containing the parameters of the @ref MM_START_REQ message
++struct mm_start_req
++{
++ /// PHY configuration
++ struct phy_cfg_tag phy_cfg;
++ /// UAPSD timeout
++ u32_l uapsd_timeout;
++ /// Local LP clock accuracy (in ppm)
++ u16_l lp_clk_accuracy;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_CHANNEL_REQ message
++struct mm_set_channel_req
++{
++ /// Channel information
++ struct mac_chan_op chan;
++ /// Index of the RF for which the channel has to be set (0: operating (primary), 1: secondary
++ /// RF (used for additional radar detection)). This parameter is reserved if no secondary RF
++ /// is available in the system
++ u8_l index;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_CHANNEL_CFM message
++struct mm_set_channel_cfm
++{
++ /// Radio index to be used in policy table
++ u8_l radio_idx;
++ /// TX power configured (in dBm)
++ s8_l power;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_DTIM_REQ message
++struct mm_set_dtim_req
++{
++ /// DTIM period
++ u8_l dtim_period;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_POWER_REQ message
++struct mm_set_power_req
++{
++ /// Index of the interface for which the parameter is configured
++ u8_l inst_nbr;
++ /// TX power (in dBm)
++ s8_l power;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_POWER_CFM message
++struct mm_set_power_cfm
++{
++ /// Radio index to be used in policy table
++ u8_l radio_idx;
++ /// TX power configured (in dBm)
++ s8_l power;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_BEACON_INT_REQ message
++struct mm_set_beacon_int_req
++{
++ /// Beacon interval
++ u16_l beacon_int;
++ /// Index of the interface for which the parameter is configured
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_BASIC_RATES_REQ message
++struct mm_set_basic_rates_req
++{
++ /// Basic rate set (as expected by bssBasicRateSet field of Rates MAC HW register)
++ u32_l rates;
++ /// Index of the interface for which the parameter is configured
++ u8_l inst_nbr;
++ /// Band on which the interface will operate
++ u8_l band;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_BSSID_REQ message
++struct mm_set_bssid_req
++{
++ /// BSSID to be configured in HW
++ struct mac_addr bssid;
++ /// Index of the interface for which the parameter is configured
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_FILTER_REQ message
++struct mm_set_filter_req
++{
++ /// RX filter to be put into rxCntrlReg HW register
++ u32_l filter;
++};
++
++/// Structure containing the parameters of the @ref MM_ADD_IF_REQ message.
++struct mm_add_if_req
++{
++ /// Type of the interface (AP, STA, ADHOC, ...)
++ u8_l type;
++ /// MAC ADDR of the interface to start
++ struct mac_addr addr;
++ /// P2P Interface
++ bool_l p2p;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_EDCA_REQ message
++struct mm_set_edca_req
++{
++ /// EDCA parameters of the queue (as expected by edcaACxReg HW register)
++ u32_l ac_param;
++ /// Flag indicating if UAPSD can be used on this queue
++ bool_l uapsd;
++ /// HW queue for which the parameters are configured
++ u8_l hw_queue;
++ /// Index of the interface for which the parameters are configured
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_MU_EDCA_REQ message
++struct mm_set_mu_edca_req
++{
++ /// MU EDCA parameters of the different HE queues
++ u32_l param[AC_MAX];
++};
++
++/// Structure containing the parameters of the @ref MM_SET_UORA_REQ message
++struct mm_set_uora_req
++{
++ /// Minimum exponent of OFDMA Contention Window.
++ u8_l eocw_min;
++ /// Maximum exponent of OFDMA Contention Window.
++ u8_l eocw_max;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_TXOP_RTS_THRES_REQ message
++struct mm_set_txop_rts_thres_req
++{
++ /// TXOP RTS threshold
++ u16_l txop_dur_rts_thres;
++ /// Index of the interface for which the parameter is configured
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_BSS_COLOR_REQ message
++struct mm_set_bss_color_req
++{
++ /// HE BSS color, formatted as per BSS_COLOR MAC HW register
++ u32_l bss_color;
++};
++
++struct mm_set_idle_req
++{
++ u8_l hw_idle;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_SLOTTIME_REQ message
++struct mm_set_slottime_req
++{
++ /// Slot time expressed in us
++ u8_l slottime;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_MODE_REQ message
++struct mm_set_mode_req
++{
++ /// abgnMode field of macCntrl1Reg register
++ u8_l abgnmode;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_VIF_STATE_REQ message
++struct mm_set_vif_state_req
++{
++ /// Association Id received from the AP (valid only if the VIF is of STA type)
++ u16_l aid;
++ /// Flag indicating if the VIF is active or not
++ bool_l active;
++ /// Interface index
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_ADD_IF_CFM message.
++struct mm_add_if_cfm
++{
++ /// Status of operation (different from 0 if unsuccessful)
++ u8_l status;
++ /// Interface index assigned by the LMAC
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_REMOVE_IF_REQ message.
++struct mm_remove_if_req
++{
++ /// Interface index assigned by the LMAC
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_VERSION_CFM message.
++struct mm_version_cfm
++{
++ /// Version of the LMAC FW
++ u32_l version_lmac;
++ /// Version1 of the MAC HW (as encoded in version1Reg MAC HW register)
++ u32_l version_machw_1;
++ /// Version2 of the MAC HW (as encoded in version2Reg MAC HW register)
++ u32_l version_machw_2;
++ /// Version1 of the PHY (depends on actual PHY)
++ u32_l version_phy_1;
++ /// Version2 of the PHY (depends on actual PHY)
++ u32_l version_phy_2;
++ /// Supported Features
++ u32_l features;
++ /// Maximum number of supported stations
++ u16_l max_sta_nb;
++ /// Maximum number of supported virtual interfaces
++ u8_l max_vif_nb;
++};
++
++/// Structure containing the parameters of the @ref MM_STA_ADD_REQ message.
++struct mm_sta_add_req
++{
++ /// Maximum A-MPDU size, in bytes, for HE frames
++ u32_l ampdu_size_max_he;
++ /// Maximum A-MPDU size, in bytes, for VHT frames
++ u32_l ampdu_size_max_vht;
++ /// PAID/GID
++ u32_l paid_gid;
++ /// Maximum A-MPDU size, in bytes, for HT frames
++ u16_l ampdu_size_max_ht;
++ /// MAC address of the station to be added
++ struct mac_addr mac_addr;
++ /// A-MPDU spacing, in us
++ u8_l ampdu_spacing_min;
++ /// Interface index
++ u8_l inst_nbr;
++ /// TDLS station
++ bool_l tdls_sta;
++ /// Indicate if the station is TDLS link initiator station
++ bool_l tdls_sta_initiator;
++ /// Indicate if the TDLS Channel Switch is allowed
++ bool_l tdls_chsw_allowed;
++};
++
++/// Structure containing the parameters of the @ref MM_STA_ADD_CFM message.
++struct mm_sta_add_cfm
++{
++ /// Status of the operation (different from 0 if unsuccessful)
++ u8_l status;
++ /// Index assigned by the LMAC to the newly added station
++ u8_l sta_idx;
++ /// MAC HW index of the newly added station
++ u8_l hw_sta_idx;
++};
++
++/// Structure containing the parameters of the @ref MM_STA_DEL_REQ message.
++struct mm_sta_del_req
++{
++ /// Index of the station to be deleted
++ u8_l sta_idx;
++};
++
++/// Structure containing the parameters of the @ref MM_STA_DEL_CFM message.
++struct mm_sta_del_cfm
++{
++ /// Status of the operation (different from 0 if unsuccessful)
++ u8_l status;
++};
++
++ /// Structure containing the parameters of the SET_POWER_MODE_REQ message.
++struct mm_setpowermode_req
++{
++ u8_l mode;
++ u8_l sta_idx;
++};
++
++ /// Structure containing the parameters of the SET_POWER_MODE_CFM message.
++struct mm_setpowermode_cfm
++{
++ u8_l status;
++};
++
++ /// Structure containing the parameters of the @ref MM_KEY_ADD_REQ message.
++struct mm_key_add_req
++{
++ /// Key index (valid only for default keys)
++ u8_l key_idx;
++ /// STA index (valid only for pairwise or mesh group keys)
++ u8_l sta_idx;
++ /// Key material
++ struct mac_sec_key key;
++ /// Cipher suite (WEP64, WEP128, TKIP, CCMP)
++ u8_l cipher_suite;
++ /// Index of the interface for which the key is set (valid only for default keys or mesh group keys)
++ u8_l inst_nbr;
++ /// A-MSDU SPP parameter
++ u8_l spp;
++ /// Indicate if provided key is a pairwise key or not
++ bool_l pairwise;
++};
++
++/// Structure containing the parameters of the @ref MM_KEY_ADD_CFM message.
++struct mm_key_add_cfm
++{
++ /// Status of the operation (different from 0 if unsuccessful)
++ u8_l status;
++ /// HW index of the key just added
++ u8_l hw_key_idx;
++};
++
++/// Structure containing the parameters of the @ref MM_KEY_DEL_REQ message.
++struct mm_key_del_req
++{
++ /// HW index of the key to be deleted
++ u8_l hw_key_idx;
++};
++
++/// Structure containing the parameters of the @ref MM_BA_ADD_REQ message.
++struct mm_ba_add_req
++{
++ ///Type of agreement (0: TX, 1: RX)
++ u8_l type;
++ ///Index of peer station with which the agreement is made
++ u8_l sta_idx;
++ ///TID for which the agreement is made with peer station
++ u8_l tid;
++ ///Buffer size - number of MPDUs that can be held in its buffer per TID
++ u8_l bufsz;
++ /// Start sequence number negotiated during BA setup - the one in the first aggregated MPDU takes precedence
++ u16_l ssn;
++};
++
++/// Structure containing the parameters of the @ref MM_BA_ADD_CFM message.
++struct mm_ba_add_cfm
++{
++ ///Index of peer station for which the agreement is being confirmed
++ u8_l sta_idx;
++ ///TID for which the agreement is being confirmed
++ u8_l tid;
++ /// Status of ba establishment
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref MM_BA_DEL_REQ message.
++struct mm_ba_del_req
++{
++ ///Type of agreement (0: TX, 1: RX)
++ u8_l type;
++ ///Index of peer station for which the agreement is being deleted
++ u8_l sta_idx;
++ ///TID for which the agreement is being deleted
++ u8_l tid;
++};
++
++/// Structure containing the parameters of the @ref MM_BA_DEL_CFM message.
++struct mm_ba_del_cfm
++{
++ ///Index of peer station for which the agreement deletion is being confirmed
++ u8_l sta_idx;
++ ///TID for which the agreement deletion is being confirmed
++ u8_l tid;
++ /// Status of ba deletion
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref MM_CHAN_CTXT_ADD_REQ message
++struct mm_chan_ctxt_add_req
++{
++ /// Operating channel
++ struct mac_chan_op chan;
++};
++
++ /// Structure containing the parameters of the @ref MM_CHAN_CTXT_ADD_CFM message
++struct mm_chan_ctxt_add_cfm
++{
++ /// Status of the addition
++ u8_l status;
++ /// Index of the new channel context
++ u8_l index;
++};
++
++
++/// Structure containing the parameters of the @ref MM_CHAN_CTXT_DEL_REQ message
++struct mm_chan_ctxt_del_req
++{
++ /// Index of the channel context to be deleted
++ u8_l index;
++};
++
++
++/// Structure containing the parameters of the @ref MM_CHAN_CTXT_LINK_REQ message
++struct mm_chan_ctxt_link_req
++{
++ /// VIF index
++ u8_l vif_index;
++ /// Channel context index
++ u8_l chan_index;
++ /// Indicate if this is a channel switch (unlink current ctx first if true)
++ u8_l chan_switch;
++};
++
++/// Structure containing the parameters of the @ref MM_CHAN_CTXT_UNLINK_REQ message
++struct mm_chan_ctxt_unlink_req
++{
++ /// VIF index
++ u8_l vif_index;
++};
++
++/// Structure containing the parameters of the @ref MM_CHAN_CTXT_UPDATE_REQ message
++struct mm_chan_ctxt_update_req
++{
++ /// Channel context index
++ u8_l chan_index;
++ /// New channel information
++ struct mac_chan_op chan;
++};
++
++/// Structure containing the parameters of the @ref MM_CHAN_CTXT_SCHED_REQ message
++struct mm_chan_ctxt_sched_req
++{
++ /// VIF index
++ u8_l vif_index;
++ /// Channel context index
++ u8_l chan_index;
++ /// Type of the scheduling request (0: normal scheduling, 1: derogatory
++ /// scheduling)
++ u8_l type;
++};
++
++/// Structure containing the parameters of the @ref MM_CHANNEL_SWITCH_IND message
++struct mm_channel_switch_ind
++{
++ /// Index of the channel context we will switch to
++ u8_l chan_index;
++ /// Indicate if the switch has been triggered by a Remain on channel request
++ bool_l roc;
++ /// VIF on which remain on channel operation has been started (if roc == 1)
++ u8_l vif_index;
++ /// Indicate if the switch has been triggered by a TDLS Remain on channel request
++ bool_l roc_tdls;
++};
++
++/// Structure containing the parameters of the @ref MM_CHANNEL_PRE_SWITCH_IND message
++struct mm_channel_pre_switch_ind
++{
++ /// Index of the channel context we will switch to
++ u8_l chan_index;
++};
++
++/// Structure containing the parameters of the @ref MM_CONNECTION_LOSS_IND message.
++struct mm_connection_loss_ind
++{
++ /// VIF instance number
++ u8_l inst_nbr;
++};
++
++
++/// Structure containing the parameters of the @ref MM_DBG_TRIGGER_REQ message.
++struct mm_dbg_trigger_req
++{
++ /// Error trace to be reported by the LMAC
++ char error[64];
++};
++
++/// Structure containing the parameters of the @ref MM_SET_PS_MODE_REQ message.
++struct mm_set_ps_mode_req
++{
++ /// Power Save is activated or deactivated
++ u8_l new_state;
++};
++
++/// Structure containing the parameters of the @ref MM_BCN_CHANGE_REQ message.
++#define BCN_MAX_CSA_CPT 2
++struct mm_bcn_change_req
++{
++ /// Pointer, in host memory, to the new beacon template
++ u32_l bcn_ptr;
++ /// Length of the beacon template
++ u16_l bcn_len;
++ /// Offset of the TIM IE in the beacon
++ u16_l tim_oft;
++ /// Length of the TIM IE
++ u8_l tim_len;
++ /// Index of the VIF for which the beacon is updated
++ u8_l inst_nbr;
++ /// Offset of CSA (channel switch announcement) counters (0 means no counter)
++ u8_l csa_oft[BCN_MAX_CSA_CPT];
++};
++
++
++/// Structure containing the parameters of the @ref MM_TIM_UPDATE_REQ message.
++struct mm_tim_update_req
++{
++ /// Association ID of the STA whose TIM bit has to be updated (0 for BC/MC traffic)
++ u16_l aid;
++ /// Flag indicating the availability of data packets for the given STA
++ u8_l tx_avail;
++ /// Index of the VIF for which the TIM is updated
++ u8_l inst_nbr;
++};
++
++/// Structure containing the parameters of the @ref MM_REMAIN_ON_CHANNEL_REQ message.
++struct mm_remain_on_channel_req
++{
++ /// Operation Code
++ u8_l op_code;
++ /// VIF Index
++ u8_l vif_index;
++ /// Band (2.4GHz or 5GHz)
++ u8_l band;
++ /// Channel type: 20,40,80,160 or 80+80 MHz
++ u8_l type;
++ /// Frequency for Primary 20MHz channel (in MHz)
++ u16_l prim20_freq;
++ /// Frequency for Center of the contiguous channel or center of Primary 80+80
++ u16_l center1_freq;
++ /// Frequency for Center of the non-contiguous secondary 80+80
++ u16_l center2_freq;
++ /// Duration (in ms)
++ u32_l duration_ms;
++ /// TX power (in dBm)
++ s8_l tx_power;
++};
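++
++/*
++ * Illustrative sketch, values are examples only: a remain-on-channel request
++ * for a 20MHz channel on 2.4GHz could be filled as below. The encodings of
++ * op_code, band and type are defined elsewhere in the driver; MM_ROC_OP_START,
++ * PHY_BAND_2G4 and PHY_CHNL_BW_20 are assumed names here.
++ *
++ *   struct mm_remain_on_channel_req req = {0};
++ *
++ *   req.op_code = MM_ROC_OP_START;
++ *   req.vif_index = vif_idx;
++ *   req.band = PHY_BAND_2G4;
++ *   req.type = PHY_CHNL_BW_20;
++ *   req.prim20_freq = 2437;     // channel 6
++ *   req.center1_freq = 2437;
++ *   req.duration_ms = 100;
++ *   req.tx_power = 20;
++ */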
++
++/// Structure containing the parameters of the @ref MM_REMAIN_ON_CHANNEL_CFM message
++struct mm_remain_on_channel_cfm
++{
++ /// Operation Code
++ u8_l op_code;
++ /// Status of the operation
++ u8_l status;
++ /// Channel Context index
++ u8_l chan_ctxt_index;
++};
++
++/// Structure containing the parameters of the @ref MM_REMAIN_ON_CHANNEL_EXP_IND message
++struct mm_remain_on_channel_exp_ind
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// Channel Context index
++ u8_l chan_ctxt_index;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_UAPSD_TMR_REQ message.
++struct mm_set_uapsd_tmr_req
++{
++ /// action: Start or Stop the timer
++ u8_l action;
++ /// timeout value, in milliseconds
++ u32_l timeout;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_UAPSD_TMR_CFM message.
++struct mm_set_uapsd_tmr_cfm
++{
++ /// Status of the operation (different from 0 if unsuccessful)
++ u8_l status;
++};
++
++
++/// Structure containing the parameters of the @ref MM_PS_CHANGE_IND message
++struct mm_ps_change_ind
++{
++ /// Index of the peer device that is switching its PS state
++ u8_l sta_idx;
++ /// New PS state of the peer device (0: active, 1: sleeping)
++ u8_l ps_state;
++};
++
++/// Structure containing the parameters of the @ref MM_P2P_VIF_PS_CHANGE_IND message
++struct mm_p2p_vif_ps_change_ind
++{
++ /// Index of the P2P VIF that is switching its PS state
++ u8_l vif_index;
++ /// New PS state of the P2P VIF interface (0: active, 1: sleeping)
++ u8_l ps_state;
++};
++
++/// Structure containing the parameters of the @ref MM_TRAFFIC_REQ_IND message
++struct mm_traffic_req_ind
++{
++ /// Index of the peer device that needs traffic
++ u8_l sta_idx;
++ /// Number of packets that need to be sent (if 0, all buffered traffic shall be sent and
++ /// if set to @ref PS_SP_INTERRUPTED, it means that current service period has been interrupted)
++ u8_l pkt_cnt;
++ /// Flag indicating if the traffic request concerns U-APSD queues or not
++ bool_l uapsd;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_PS_OPTIONS_REQ message.
++struct mm_set_ps_options_req
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// Listen interval (0 if wake up shall be based on DTIM period)
++ u16_l listen_interval;
++ /// Flag indicating if we shall listen to the BC/MC traffic or not
++ bool_l dont_listen_bc_mc;
++};
++
++/// Structure containing the parameters of the @ref MM_CSA_COUNTER_IND message
++struct mm_csa_counter_ind
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// Updated CSA counter value
++ u8_l csa_count;
++};
++
++/// Structure containing the parameters of the @ref MM_CHANNEL_SURVEY_IND message
++struct mm_channel_survey_ind
++{
++ /// Frequency of the channel
++ u16_l freq;
++ /// Noise in dBm
++ s8_l noise_dbm;
++ /// Amount of time spent on the channel (in ms)
++ u32_l chan_time_ms;
++ /// Amount of time the primary channel was sensed busy
++ u32_l chan_time_busy_ms;
++};
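++
++/*
++ * Illustrative worked example: the two time counters above let the host
++ * derive a busy ratio for the surveyed channel:
++ *
++ *   u32 busy_pct = ind->chan_time_ms ?
++ *       (ind->chan_time_busy_ms * 100) / ind->chan_time_ms : 0;
++ */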
++
++/// Structure containing the parameters of the @ref MM_BFMER_ENABLE_REQ message.
++struct mm_bfmer_enable_req
++{
++ /**
++ * Address of the beamforming report space allocated in host memory
++ * (Valid only if vht_su_bfmee is true)
++ */
++ u32_l host_bfr_addr;
++ /**
++ * Size of the beamforming report space allocated in host memory. This space should
++ * be twice the maximum size of the expected beamforming reports as the FW will
++ * divide it in two in order to be able to upload a new report while another one is
++ * used in transmission
++ */
++ u16_l host_bfr_size;
++ /// AID
++ u16_l aid;
++ /// Station Index
++ u8_l sta_idx;
++ /// Maximum number of spatial streams the station can receive
++ u8_l rx_nss;
++ /**
++ * Indicate if peer STA is MU Beamformee (VHT) capable
++ * (Valid only if vht_su_bfmee is true)
++ */
++ bool_l vht_mu_bfmee;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_P2P_NOA_REQ message.
++struct mm_set_p2p_noa_req
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// Allocated NOA Instance Number - Valid only if count = 0
++ u8_l noa_inst_nb;
++ /// Count
++ u8_l count;
++ /// Indicate if NoA can be paused for traffic reason
++ bool_l dyn_noa;
++ /// Duration (in us)
++ u32_l duration_us;
++ /// Interval (in us)
++ u32_l interval_us;
++ /// Start Time offset from next TBTT (in us)
++ u32_l start_offset;
++};
++
++#ifdef AICWF_ARP_OFFLOAD
++struct mm_set_arpoffload_en_req
++{
++ u32_l ipaddr;
++ u8_l enable;
++ u8_l vif_idx;
++};
++
++struct mm_set_arpoffload_en_cfm
++{
++ u8_l status;
++};
++#endif
++
++struct mm_set_agg_disable_req
++{
++ u8_l disable;
++ u8_l staidx;
++};
++
++struct mm_set_coex_req
++{
++ u8_l bt_on;
++ u8_l disable_coexnull;
++ u8_l enable_nullcts;
++ u8_l enable_periodic_timer;
++ u8_l coex_timeslot_set;
++ u32_l coex_timeslot[2];
++};
++
++#if 0
++struct mm_set_rf_config_req
++{
++ u8_l def_band;
++ u8_l config_type;
++ u16_l offset;
++ u16_l len;
++ u16_l set;
++ u32_l rx_gain_24g[48][4];
++ u32_l rx_gain_5g[32][4];
++ u32_l tx_gain[32];
++};
++#endif
++struct mm_set_rf_config_req
++{
++ u8_l table_sel;
++ u8_l table_ofst;
++ u8_l table_num;
++ u8_l deft_page;
++ u32_l data[64];
++};
++
++
++struct mm_set_rf_calib_req
++{
++ u32_l cal_cfg_24g;
++ u32_l cal_cfg_5g;
++ u32_l param_alpha;
++ u32_l bt_calib_en;
++ u32_l bt_calib_param;
++ u8_l xtal_cap;
++ u8_l xtal_cap_fine;
++
++};
++
++struct mm_set_rf_calib_cfm
++{
++ u32_l rxgain_24g_addr;
++ u32_l rxgain_5g_addr;
++ u32_l txgain_24g_addr;
++ u32_l txgain_5g_addr;
++};
++
++struct mm_get_mac_addr_req
++{
++ u32_l get;
++};
++
++struct mm_get_mac_addr_cfm
++{
++ u8_l mac_addr[6];
++};
++
++struct mm_get_sta_info_req
++{
++ u8_l sta_idx;
++};
++
++struct mm_get_sta_info_cfm
++{
++ u32_l rate_info;
++ u32_l txfailed;
++ u8 rssi;
++};
++
++typedef struct
++{
++ u8_l enable;
++ u8_l dsss;
++ u8_l ofdmlowrate_2g4;
++ u8_l ofdm64qam_2g4;
++ u8_l ofdm256qam_2g4;
++ u8_l ofdm1024qam_2g4;
++ u8_l ofdmlowrate_5g;
++ u8_l ofdm64qam_5g;
++ u8_l ofdm256qam_5g;
++ u8_l ofdm1024qam_5g;
++} txpwr_lvl_conf_t;
++
++typedef struct
++{
++ u8_l enable;
++ s8_l pwrlvl_11b_11ag_2g4[12];
++ s8_l pwrlvl_11n_11ac_2g4[10];
++ s8_l pwrlvl_11ax_2g4[12];
++} txpwr_lvl_conf_v2_t;
++
++typedef struct
++{
++ u8_l enable;
++ s8_l pwrlvl_11b_11ag_2g4[12];
++ s8_l pwrlvl_11n_11ac_2g4[10];
++ s8_l pwrlvl_11ax_2g4[12];
++ s8_l pwrlvl_11a_5g[12];
++ s8_l pwrlvl_11n_11ac_5g[10];
++ s8_l pwrlvl_11ax_5g[12];
++} txpwr_lvl_conf_v3_t;
++
++struct mm_set_txpwr_lvl_req
++{
++ union {
++ txpwr_lvl_conf_t txpwr_lvl;
++ txpwr_lvl_conf_v2_t txpwr_lvl_v2;
++ txpwr_lvl_conf_v3_t txpwr_lvl_v3;
++ };
++};
++
++typedef struct
++{
++ u8_l loss_enable;
++ u8_l loss_value;
++} txpwr_loss_conf_t;
++
++typedef struct
++{
++ int8_t enable;
++ int8_t dsss;
++ int8_t ofdmlowrate_2g4;
++ int8_t ofdm64qam_2g4;
++ int8_t ofdm256qam_2g4;
++ int8_t ofdm1024qam_2g4;
++ int8_t ofdmlowrate_5g;
++ int8_t ofdm64qam_5g;
++ int8_t ofdm256qam_5g;
++ int8_t ofdm1024qam_5g;
++
++} txpwr_idx_conf_t;
++
++struct mm_set_txpwr_idx_req
++{
++ txpwr_idx_conf_t txpwr_idx;
++};
++
++typedef struct
++{
++ int8_t enable;
++ int8_t chan_1_4;
++ int8_t chan_5_9;
++ int8_t chan_10_13;
++ int8_t chan_36_64;
++ int8_t chan_100_120;
++ int8_t chan_122_140;
++ int8_t chan_142_165;
++} txpwr_ofst_conf_t;
++
++struct mm_set_txpwr_ofst_req
++{
++ txpwr_ofst_conf_t txpwr_ofst;
++};
++
++typedef struct
++{
++ u8_l enable;
++ u8_l xtal_cap;
++ u8_l xtal_cap_fine;
++} xtal_cap_conf_t;
++
++
++struct mm_set_stack_start_req
++{
++ u8_l is_stack_start;
++ u8_l efuse_valid;
++ u8_l set_vendor_info;
++ u8_l fwtrace_redir;
++};
++
++struct mm_set_stack_start_cfm
++{
++ u8_l is_5g_support;
++ u8_l vendor_info;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_P2P_OPPPS_REQ message.
++struct mm_set_p2p_oppps_req
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// CTWindow
++ u8_l ctwindow;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_P2P_NOA_CFM message.
++struct mm_set_p2p_noa_cfm
++{
++ /// Request status
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref MM_SET_P2P_OPPPS_CFM message.
++struct mm_set_p2p_oppps_cfm
++{
++ /// Request status
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref MM_P2P_NOA_UPD_IND message.
++struct mm_p2p_noa_upd_ind
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// NOA Instance Number
++ u8_l noa_inst_nb;
++ /// NoA Type
++ u8_l noa_type;
++ /// Count
++ u8_l count;
++ /// Duration (in us)
++ u32_l duration_us;
++ /// Interval (in us)
++ u32_l interval_us;
++ /// Start Time
++ u32_l start_time;
++};
++
++/// Structure containing the parameters of the @ref MM_CFG_RSSI_REQ message
++struct mm_cfg_rssi_req
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// RSSI threshold
++ s8_l rssi_thold;
++ /// RSSI hysteresis
++ u8_l rssi_hyst;
++};
++
++/// Structure containing the parameters of the @ref MM_RSSI_STATUS_IND message
++struct mm_rssi_status_ind
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// Status of the RSSI
++ bool_l rssi_status;
++ /// Current RSSI
++ s8_l rssi;
++};
++
++/// Structure containing the parameters of the @ref MM_PKTLOSS_IND message
++struct mm_pktloss_ind
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// Address of the STA for which there is a packet loss
++ struct mac_addr mac_addr;
++ /// Number of packets lost
++ u32 num_packets;
++};
++
++/// Structure containing the parameters of the @ref MM_CSA_FINISH_IND message
++struct mm_csa_finish_ind
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// Status of the operation
++ u8_l status;
++ /// New channel ctx index
++ u8_l chan_idx;
++};
++
++/// Structure containing the parameters of the @ref MM_CSA_TRAFFIC_IND message
++struct mm_csa_traffic_ind
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// Indicates whether TX traffic is enabled or disabled
++ bool_l enable;
++};
++
++/// Structure containing the parameters of the @ref MM_MU_GROUP_UPDATE_REQ message.
++ /// Size allocated for the structure depends on the number of groups
++struct mm_mu_group_update_req
++{
++ /// Station index
++ u8_l sta_idx;
++ /// Number of groups the STA belongs to
++ u8_l group_cnt;
++ /// Group information
++ struct
++ {
++ /// Group Id
++ u8_l group_id;
++ /// User position
++ u8_l user_pos;
++ } groups[0];
++};
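++
++/*
++ * Illustrative sketch: as groups[] is sized at runtime, the allocation for
++ * this message must account for group_cnt entries, for instance:
++ *
++ *   size_t len = sizeof(struct mm_mu_group_update_req) +
++ *                group_cnt * sizeof(((struct mm_mu_group_update_req *)0)->groups[0]);
++ */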
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For Scan messages
++///////////////////////////////////////////////////////////////////////////////
++enum scan_msg_tag
++{
++ /// Scanning start Request.
++ SCAN_START_REQ = LMAC_FIRST_MSG(TASK_SCAN),
++ /// Scanning start Confirmation.
++ SCAN_START_CFM,
++ /// End of scanning indication.
++ SCAN_DONE_IND,
++ /// Cancel scan request
++ SCAN_CANCEL_REQ,
++ /// Cancel scan confirmation
++ SCAN_CANCEL_CFM,
++
++ /// MAX number of messages
++ SCAN_MAX,
++};
++
++/// Maximum number of SSIDs in a scan request
++#define SCAN_SSID_MAX 3
++
++/// Maximum number of channels in a scan request
++#define SCAN_CHANNEL_MAX (MAC_DOMAINCHANNEL_24G_MAX + MAC_DOMAINCHANNEL_5G_MAX)
++
++/// Maximum length of the ProbeReq IEs (SoftMAC mode)
++#define SCAN_MAX_IE_LEN 300
++
++/// Maximum number of PHY bands supported
++#define SCAN_BAND_MAX 2
++
++/// Structure containing the parameters of the @ref SCAN_START_REQ message
++struct scan_start_req
++{
++ /// List of channel to be scanned
++ struct mac_chan_def chan[SCAN_CHANNEL_MAX];
++ /// List of SSIDs to be scanned
++ struct mac_ssid ssid[SCAN_SSID_MAX];
++ /// BSSID to be scanned
++ struct mac_addr bssid;
++ /// Pointer (in host memory) to the additional IEs that need to be added to the ProbeReq
++ /// (following the SSID element)
++ u32_l add_ies;
++ /// Length of the additional IEs
++ u16_l add_ie_len;
++ /// Index of the VIF that is scanning
++ u8_l vif_idx;
++ /// Number of channels to scan
++ u8_l chan_cnt;
++ /// Number of SSIDs to scan for
++ u8_l ssid_cnt;
++ /// no CCK - For P2P frames not being sent at CCK rate in 2GHz band.
++ bool no_cck;
++};
++
++/// Structure containing the parameters of the @ref SCAN_START_CFM message
++struct scan_start_cfm
++{
++ /// Status of the request
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref SCAN_CANCEL_REQ message
++struct scan_cancel_req
++{
++};
++
++ /// Structure containing the parameters of the @ref SCAN_CANCEL_CFM message
++struct scan_cancel_cfm
++{
++ /// Status of the request
++ u8_l status;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For Scanu messages
++///////////////////////////////////////////////////////////////////////////////
++/// Messages that are logically related to the task.
++enum
++{
++ /// Scan request from host.
++ SCANU_START_REQ = LMAC_FIRST_MSG(TASK_SCANU),
++ /// Scanning start Confirmation.
++ SCANU_START_CFM,
++ /// Join request
++ SCANU_JOIN_REQ,
++ /// Join confirmation.
++ SCANU_JOIN_CFM,
++ /// Scan result indication.
++ SCANU_RESULT_IND,
++ /// Fast scan request from any other module.
++ SCANU_FAST_REQ,
++ /// Confirmation of fast scan request.
++ SCANU_FAST_CFM,
++
++ SCANU_VENDOR_IE_REQ,
++ SCANU_VENDOR_IE_CFM,
++ SCANU_START_CFM_ADDTIONAL,
++ SCANU_CANCEL_REQ,
++ SCANU_CANCEL_CFM,
++
++ /// MAX number of messages
++ SCANU_MAX,
++};
++
++/// Maximum length of the additional ProbeReq IEs (FullMAC mode)
++#define SCANU_MAX_IE_LEN 200
++
++/// Structure containing the parameters of the @ref SCANU_START_REQ message
++struct scanu_start_req
++{
++ /// List of channel to be scanned
++ struct mac_chan_def chan[SCAN_CHANNEL_MAX];
++ /// List of SSIDs to be scanned
++ struct mac_ssid ssid[SCAN_SSID_MAX];
++ /// BSSID to be scanned (or WILDCARD BSSID if no BSSID is searched in particular)
++ struct mac_addr bssid;
++ /// Address (in host memory) of the additional IEs that need to be added to the ProbeReq
++ /// (following the SSID element)
++ u32_l add_ies;
++ /// Length of the additional IEs
++ u16_l add_ie_len;
++ /// Index of the VIF that is scanning
++ u8_l vif_idx;
++ /// Number of channels to scan
++ u8_l chan_cnt;
++ /// Number of SSIDs to scan for
++ u8_l ssid_cnt;
++ /// no CCK - For P2P frames not being sent at CCK rate in 2GHz band.
++ bool no_cck;
++ /// Scan duration, in us
++ u32_l duration;
++};
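++
++/*
++ * Illustrative sketch, field values are examples only: a minimal single-SSID
++ * scan request could be filled roughly as below, with the channel list copied
++ * from the driver's own channel table. mac_addr_bcst is assumed to be the
++ * driver's wildcard/broadcast BSSID constant.
++ *
++ *   struct scanu_start_req req = {0};
++ *
++ *   req.vif_idx = vif_idx;
++ *   req.chan_cnt = n_chans;                   // <= SCAN_CHANNEL_MAX
++ *   memcpy(req.chan, chan_list, n_chans * sizeof(struct mac_chan_def));
++ *   req.ssid_cnt = 1;                         // <= SCAN_SSID_MAX
++ *   req.ssid[0] = wanted_ssid;
++ *   req.bssid = mac_addr_bcst;
++ *   req.add_ies = 0;                          // no additional ProbeReq IEs
++ *   req.add_ie_len = 0;
++ *   req.no_cck = false;
++ *   req.duration = 0;                         // let the firmware choose
++ */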
++
++struct scanu_vendor_ie_req
++{
++ u16_l add_ie_len;
++ u8_l vif_idx;
++ u8_l ie[256];
++};
++
++/// Structure containing the parameters of the @ref SCANU_START_CFM message
++struct scanu_start_cfm
++{
++ /// Index of the VIF that was scanning
++ u8_l vif_idx;
++ /// Status of the request
++ u8_l status;
++ /// Number of scan results available
++ u8_l result_cnt;
++};
++
++ /// Parameters of the @ref SCANU_RESULT_IND message
++struct scanu_result_ind
++{
++ /// Length of the frame
++ u16_l length;
++ /// Frame control field of the frame.
++ u16_l framectrl;
++ /// Center frequency on which we received the packet
++ u16_l center_freq;
++ /// PHY band
++ u8_l band;
++ /// Index of the station that sent the frame. 0xFF if unknown.
++ u8_l sta_idx;
++ /// Index of the VIF that received the frame. 0xFF if unknown.
++ u8_l inst_nbr;
++ /// RSSI of the received frame.
++ s8_l rssi;
++ /// Frame payload.
++ u32_l payload[];
++};
++
++/// Structure containing the parameters of the message.
++struct scanu_fast_req
++{
++ /// The SSID to scan in the channel.
++ struct mac_ssid ssid;
++ /// BSSID.
++ struct mac_addr bssid;
++ /// Probe delay.
++ u16_l probe_delay;
++ /// Minimum channel time.
++ u16_l minch_time;
++ /// Maximum channel time.
++ u16_l maxch_time;
++ /// The channel number to scan.
++ u16_l ch_nbr;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For ME messages
++///////////////////////////////////////////////////////////////////////////////
++/// Messages that are logically related to the task.
++enum
++{
++ /// Configuration request from host.
++ ME_CONFIG_REQ = LMAC_FIRST_MSG(TASK_ME),
++ /// Configuration confirmation.
++ ME_CONFIG_CFM,
++ /// Configuration request from host.
++ ME_CHAN_CONFIG_REQ,
++ /// Configuration confirmation.
++ ME_CHAN_CONFIG_CFM,
++ /// Set control port state for a station.
++ ME_SET_CONTROL_PORT_REQ,
++ /// Control port setting confirmation.
++ ME_SET_CONTROL_PORT_CFM,
++ /// TKIP MIC failure indication.
++ ME_TKIP_MIC_FAILURE_IND,
++ /// Add a station to the FW (AP mode)
++ ME_STA_ADD_REQ,
++ /// Confirmation of the STA addition
++ ME_STA_ADD_CFM,
++ /// Delete a station from the FW (AP mode)
++ ME_STA_DEL_REQ,
++ /// Confirmation of the STA deletion
++ ME_STA_DEL_CFM,
++ /// Indication of a TX RA/TID queue credit update
++ ME_TX_CREDITS_UPDATE_IND,
++ /// Request indicating to the FW that there is traffic buffered on host
++ ME_TRAFFIC_IND_REQ,
++ /// Confirmation that the @ref ME_TRAFFIC_IND_REQ has been executed
++ ME_TRAFFIC_IND_CFM,
++ /// Request of RC statistics to a station
++ ME_RC_STATS_REQ,
++ /// RC statistics confirmation
++ ME_RC_STATS_CFM,
++ /// RC fixed rate request
++ ME_RC_SET_RATE_REQ,
++ /// Configure monitor interface
++ ME_CONFIG_MONITOR_REQ,
++ /// Configure monitor interface response
++ ME_CONFIG_MONITOR_CFM,
++ /// Setting power Save mode request from host
++ ME_SET_PS_MODE_REQ,
++ /// Set power Save mode confirmation
++ ME_SET_PS_MODE_CFM,
++ /// Setting Low Power level request from host
++ ME_SET_LP_LEVEL_REQ,
++ /// Set Low Power level confirmation
++ ME_SET_LP_LEVEL_CFM,
++ /// MAX number of messages
++ ME_MAX,
++};
++
++ /// Structure containing the parameters of the @ref ME_CONFIG_REQ message
++struct me_config_req
++{
++ /// HT Capabilities
++ struct mac_htcapability ht_cap;
++ /// VHT Capabilities
++ struct mac_vhtcapability vht_cap;
++ /// HE capabilities
++ struct mac_hecapability he_cap;
++ /// Lifetime of packets sent under a BlockAck agreement (expressed in TUs)
++ u16_l tx_lft;
++ /// Maximum supported BW
++ u8_l phy_bw_max;
++ /// Boolean indicating if HT is supported or not
++ bool_l ht_supp;
++ /// Boolean indicating if VHT is supported or not
++ bool_l vht_supp;
++ /// Boolean indicating if HE is supported or not
++ bool_l he_supp;
++ /// Boolean indicating if HE OFDMA UL is enabled or not
++ bool_l he_ul_on;
++ /// Boolean indicating if PS mode shall be enabled or not
++ bool_l ps_on;
++ /// Boolean indicating if Antenna Diversity shall be enabled or not
++ bool_l ant_div_on;
++ /// Boolean indicating if Dynamic PS mode shall be used or not
++ bool_l dpsm;
++};
++
++/// Structure containing the parameters of the @ref ME_CHAN_CONFIG_REQ message
++struct me_chan_config_req
++{
++ /// List of 2.4GHz supported channels
++ struct mac_chan_def chan2G4[MAC_DOMAINCHANNEL_24G_MAX];
++ /// List of 5GHz supported channels
++ struct mac_chan_def chan5G[MAC_DOMAINCHANNEL_5G_MAX];
++ /// Number of 2.4GHz channels in the list
++ u8_l chan2G4_cnt;
++ /// Number of 5GHz channels in the list
++ u8_l chan5G_cnt;
++};
++
++/// Structure containing the parameters of the @ref ME_SET_CONTROL_PORT_REQ message
++struct me_set_control_port_req
++{
++ /// Index of the station for which the control port is opened
++ u8_l sta_idx;
++ /// Control port state
++ bool_l control_port_open;
++};
++
++/// Structure containing the parameters of the @ref ME_TKIP_MIC_FAILURE_IND message
++struct me_tkip_mic_failure_ind
++{
++ /// Address of the sending STA
++ struct mac_addr addr;
++ /// TSC value
++ u64_l tsc;
++ /// Boolean indicating if the packet was a group or unicast one (true if group)
++ bool_l ga;
++ /// Key Id
++ u8_l keyid;
++ /// VIF index
++ u8_l vif_idx;
++};
++
++/// Structure containing the parameters of the @ref ME_STA_ADD_REQ message
++struct me_sta_add_req
++{
++ /// MAC address of the station to be added
++ struct mac_addr mac_addr;
++ /// Supported legacy rates
++ struct mac_rateset rate_set;
++ /// HT Capabilities
++ struct mac_htcapability ht_cap;
++ /// VHT Capabilities
++ struct mac_vhtcapability vht_cap;
++ /// HE capabilities
++ struct mac_hecapability he_cap;
++ /// Flags giving additional information about the station (@ref mac_sta_flags)
++ u32_l flags;
++ /// Association ID of the station
++ u16_l aid;
++ /// Bit field indicating which queues have U-APSD enabled
++ u8_l uapsd_queues;
++ /// Maximum size, in frames, of an APSD service period
++ u8_l max_sp_len;
++ /// Operation mode information (valid if bit @ref STA_OPMOD_NOTIF is
++ /// set in the flags)
++ u8_l opmode;
++ /// Index of the VIF the station is attached to
++ u8_l vif_idx;
++ /// Whether the station is a TDLS station
++ bool_l tdls_sta;
++ /// Indicate if the station is TDLS link initiator station
++ bool_l tdls_sta_initiator;
++ /// Indicate if the TDLS Channel Switch is allowed
++ bool_l tdls_chsw_allowed;
++};
++
++/// Structure containing the parameters of the @ref ME_STA_ADD_CFM message
++struct me_sta_add_cfm
++{
++ /// Station index
++ u8_l sta_idx;
++ /// Status of the station addition
++ u8_l status;
++ /// PM state of the station
++ u8_l pm_state;
++};
++
++/// Structure containing the parameters of the @ref ME_STA_DEL_REQ message.
++struct me_sta_del_req
++{
++ /// Index of the station to be deleted
++ u8_l sta_idx;
++ /// Whether the station is a TDLS station
++ bool_l tdls_sta;
++};
++
++/// Structure containing the parameters of the @ref ME_TX_CREDITS_UPDATE_IND message.
++struct me_tx_credits_update_ind
++{
++ /// Index of the station for which the credits are updated
++ u8_l sta_idx;
++ /// TID for which the credits are updated
++ u8_l tid;
++ /// Offset to be applied on the credit count
++ s8_l credits;
++};
++
++/// Structure containing the parameters of the @ref ME_TRAFFIC_IND_REQ message.
++struct me_traffic_ind_req
++{
++ /// Index of the station for which UAPSD traffic is available on host
++ u8_l sta_idx;
++ /// Flag indicating the availability of UAPSD packets for the given STA
++ u8_l tx_avail;
++ /// Indicate if traffic is on uapsd-enabled queues
++ bool_l uapsd;
++};
++
++struct mm_apm_staloss_ind
++{
++ u8_l sta_idx;
++ u8_l vif_idx;
++ u8_l mac_addr[6];
++};
++
++enum vendor_hwconfig_tag {
++ ACS_TXOP_REQ = 0,
++ CHANNEL_ACCESS_REQ,
++ MAC_TIMESCALE_REQ,
++ CCA_THRESHOLD_REQ,
++ BWMODE_REQ,
++};
++
++enum {
++ BWMODE20M = 0,
++ BWMODE10M,
++ BWMODE5M,
++};
++
++struct mm_set_acs_txop_req
++{
++ u32_l hwconfig_id;
++ u16_l txop_bk;
++ u16_l txop_be;
++ u16_l txop_vi;
++ u16_l txop_vo;
++};
++
++struct mm_set_channel_access_req
++{
++ u32_l hwconfig_id;
++ u32_l edca[4];
++ u8_l vif_idx;
++ u8_l retry_cnt;
++ u8_l rts_en;
++ u8_l long_nav_en;
++ u8_l cfe_en;
++ u8_l rc_retry_cnt[3];
++};
++
++struct mm_set_mac_timescale_req
++{
++ u32_l hwconfig_id;
++ u8_l sifsA_time;
++ u8_l sifsB_time;
++ u8_l slot_time;
++ u8_l rx_startdelay_ofdm;
++ u8_l rx_startdelay_long;
++ u8_l rx_startdelay_short;
++};
++
++struct mm_set_cca_threshold_req
++{
++ u32_l hwconfig_id;
++ u8_l auto_cca_en;
++ s8_l cca20p_rise_th;
++ s8_l cca20s_rise_th;
++ s8_l cca20p_fall_th;
++ s8_l cca20s_fall_th;
++
++};
++
++struct mm_set_bwmode_req
++{
++ u32_l hwconfig_id;
++ u8_l bwmode;
++};
++
++struct mm_set_txop_req
++{
++ u16_l txop_bk;
++ u16_l txop_be;
++ u16_l txop_vi;
++ u16_l txop_vo;
++ u8_l long_nav_en;
++ u8_l cfe_en;
++};
++
++struct mm_set_vendor_trx_param_req
++{
++ u32_l edca[4];
++ u8_l vif_idx;
++ u8_l retry_cnt;
++};
++
++struct mm_get_fw_version_cfm
++{
++ u8_l fw_version_len;
++ u8_l fw_version[63];
++};
++
++/// Structure containing the parameters of the @ref ME_RC_STATS_REQ message.
++struct me_rc_stats_req
++{
++ /// Index of the station for which the RC statistics are requested
++ u8_l sta_idx;
++};
++
++/// Structure containing the rate control statistics
++struct rc_rate_stats
++{
++ /// Number of attempts (per sampling interval)
++ u16_l attempts;
++ /// Number of success (per sampling interval)
++ u16_l success;
++ /// Estimated probability of success (EWMA)
++ u16_l probability;
++ /// Rate configuration of the sample
++ u16_l rate_config;
++ union
++ {
++ struct {
++ /// Number of times the sample has been skipped (per sampling interval)
++ u8_l sample_skipped;
++ /// Whether the old probability is available
++ bool_l old_prob_available;
++ /// Whether the rate can be used in the retry chain
++ bool_l rate_allowed;
++ };
++ struct {
++ /// RU size and UL length received in the latest HE trigger frame
++ u16_l ru_and_length;
++ };
++ };
++};
++
++/// Number of RC samples
++#define RC_MAX_N_SAMPLE 10
++/// Index of the HE statistics element in the table
++#define RC_HE_STATS_IDX RC_MAX_N_SAMPLE
++
++/// Structure containing the parameters of the @ref ME_RC_STATS_CFM message.
++struct me_rc_stats_cfm
++{
++ /// Index of the station for which the RC statistics are provided
++ u8_l sta_idx;
++ /// Number of samples used in the RC algorithm
++ u16_l no_samples;
++ /// Number of MPDUs transmitted (per sampling interval)
++ u16_l ampdu_len;
++ /// Number of AMPDUs transmitted (per sampling interval)
++ u16_l ampdu_packets;
++ /// Average number of MPDUs in each AMPDU frame (EWMA)
++ u32_l avg_ampdu_len;
++ /// Current step 0 of the retry chain
++ u8_l sw_retry_step;
++ /// Trial transmission period
++ u8_l sample_wait;
++ /// Retry chain steps
++ u16_l retry_step_idx[4];
++ /// RC statistics - Max number of RC samples, plus one for the HE TB statistics
++ struct rc_rate_stats rate_stats[RC_MAX_N_SAMPLE + 1];
++ /// Throughput - Max number of RC samples, plus one for the HE TB statistics
++ u32_l tp[RC_MAX_N_SAMPLE + 1];
++};
++
++/// Structure containing the parameters of the @ref ME_RC_SET_RATE_REQ message.
++struct me_rc_set_rate_req
++{
++ /// Index of the station for which the fixed rate is set
++ u8_l sta_idx;
++ /// Rate configuration to be set
++ u16_l fixed_rate_cfg;
++};
++
++/// Structure containing the parameters of the @ref ME_CONFIG_MONITOR_REQ message.
++struct me_config_monitor_req
++{
++ /// Channel to configure
++ struct mac_chan_op chan;
++ /// Is channel data valid
++ bool_l chan_set;
++ /// Enable report of unsupported HT frames
++ bool_l uf;
++ /// Enable auto-reply when the MAC address matches
++ bool_l auto_reply;
++};
++
++/// Structure containing the parameters of the @ref ME_CONFIG_MONITOR_CFM message.
++struct me_config_monitor_cfm
++{
++ /// Channel context index
++ u8_l chan_index;
++ /// Channel parameters
++ struct mac_chan_op chan;
++};
++
++/// Structure containing the parameters of the @ref ME_SET_PS_MODE_REQ message.
++struct me_set_ps_mode_req
++{
++ /// Power Save is activated or deactivated
++ u8_l ps_state;
++};
++
++/// Structure containing the parameters of the @ref ME_SET_LP_LEVEL_REQ message.
++struct me_set_lp_level_req
++{
++ /// Low Power level
++ u8_l lp_level;
++};
++
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For SM messages
++///////////////////////////////////////////////////////////////////////////////
++/// Message API of the SM task
++enum sm_msg_tag
++{
++ /// Request to connect to an AP
++ SM_CONNECT_REQ = LMAC_FIRST_MSG(TASK_SM),
++ /// Confirmation of connection
++ SM_CONNECT_CFM,
++ /// Indicates that the SM has associated with the AP
++ SM_CONNECT_IND,
++ /// Request to disconnect
++ SM_DISCONNECT_REQ,
++ /// Confirmation of disconnection
++ SM_DISCONNECT_CFM,
++ /// Indicates that the SM has disassociated from the AP
++ SM_DISCONNECT_IND,
++ /// Request to start external authentication
++ SM_EXTERNAL_AUTH_REQUIRED_IND,
++ /// Response to external authentication request
++ SM_EXTERNAL_AUTH_REQUIRED_RSP,
++
++ /// MAX number of messages
++ SM_MAX,
++};
++
++/// Structure containing the parameters of @ref SM_CONNECT_REQ message.
++struct sm_connect_req
++{
++ /// SSID to connect to
++ struct mac_ssid ssid;
++ /// BSSID to connect to (if not specified, set this field to WILDCARD BSSID)
++ struct mac_addr bssid;
++ /// Channel on which we have to connect (if not specified, set -1 in the chan.freq field)
++ struct mac_chan_def chan;
++ /// Connection flags (see @ref mac_connection_flags)
++ u32_l flags;
++ /// Control port Ethertype (in network endianness)
++ u16_l ctrl_port_ethertype;
++ /// Length of the association request IEs
++ u16_l ie_len;
++ /// Listen interval to be used for this connection
++ u16_l listen_interval;
++ /// Flag indicating if we have to wait for the BC/MC traffic after the beacon or not
++ bool_l dont_wait_bcmc;
++ /// Authentication type
++ u8_l auth_type;
++ /// UAPSD queues (bit0: VO, bit1: VI, bit2: BE, bit3: BK)
++ u8_l uapsd_queues;
++ /// VIF index
++ u8_l vif_idx;
++ /// Buffer containing the additional information elements to be put in the
++ /// association request
++ u32_l ie_buf[64];
++};
++
++/// Structure containing the parameters of the @ref SM_CONNECT_CFM message.
++struct sm_connect_cfm
++{
++ /// Status. If 0, it means that the connection procedure will be performed and that
++ /// a subsequent @ref SM_CONNECT_IND message will be forwarded once the procedure is
++ /// completed
++ u8_l status;
++};
++
++#define SM_ASSOC_IE_LEN 800
++/// Structure containing the parameters of the @ref SM_CONNECT_IND message.
++struct sm_connect_ind
++{
++ /// Status code of the connection procedure
++ u16_l status_code;
++ /// BSSID
++ struct mac_addr bssid;
++ /// Flag indicating if the indication refers to an internal roaming or to a host request
++ bool_l roamed;
++ /// Index of the VIF for which the association process is complete
++ u8_l vif_idx;
++ /// Index of the STA entry allocated for the AP
++ u8_l ap_idx;
++ /// Index of the LMAC channel context the connection is attached to
++ u8_l ch_idx;
++ /// Flag indicating if the AP is supporting QoS
++ bool_l qos;
++ /// ACM bits set in the AP WMM parameter element
++ u8_l acm;
++ /// Length of the AssocReq IEs
++ u16_l assoc_req_ie_len;
++ /// Length of the AssocRsp IEs
++ u16_l assoc_rsp_ie_len;
++ /// IE buffer
++ u32_l assoc_ie_buf[SM_ASSOC_IE_LEN/4];
++
++ u16_l aid;
++ u8_l band;
++ u16_l center_freq;
++ u8_l width;
++ u32_l center_freq1;
++ u32_l center_freq2;
++
++ /// EDCA parameters
++ u32_l ac_param[AC_MAX];
++};
++
++/// Structure containing the parameters of the @ref SM_DISCONNECT_REQ message.
++struct sm_disconnect_req
++{
++ /// Reason of the deauthentication.
++ u16_l reason_code;
++ /// Index of the VIF.
++ u8_l vif_idx;
++};
++
++ /// Structure containing the parameters of the SM_ASSOCIATION_IND message
++struct sm_association_ind
++{
++ /// MAC address of the STA
++ struct mac_addr me_mac_addr;
++};
++
++
++/// Structure containing the parameters of the @ref SM_DISCONNECT_IND message.
++struct sm_disconnect_ind
++{
++ /// Reason of the disconnection.
++ u16_l reason_code;
++ /// Index of the VIF.
++ u8_l vif_idx;
++ /// FT over DS is ongoing
++ bool_l ft_over_ds;
++ u8_l reassoc;
++};
++
++/// Structure containing the parameters of the @ref SM_EXTERNAL_AUTH_REQUIRED_IND
++struct sm_external_auth_required_ind
++{
++ /// Index of the VIF.
++ u8_l vif_idx;
++ /// SSID to authenticate to
++ struct mac_ssid ssid;
++ /// BSSID to authenticate to
++ struct mac_addr bssid;
++ /// AKM suite of the respective authentication
++ u32_l akm;
++};
++
++/// Structure containing the parameters of the @ref SM_EXTERNAL_AUTH_REQUIRED_RSP
++struct sm_external_auth_required_rsp
++{
++ /// Index of the VIF.
++ u8_l vif_idx;
++ /// Authentication status
++ u16_l status;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For APM messages
++///////////////////////////////////////////////////////////////////////////////
++/// Message API of the APM task
++enum apm_msg_tag
++{
++ /// Request to start the AP.
++ APM_START_REQ = LMAC_FIRST_MSG(TASK_APM),
++ /// Confirmation of the AP start.
++ APM_START_CFM,
++ /// Request to stop the AP.
++ APM_STOP_REQ,
++ /// Confirmation of the AP stop.
++ APM_STOP_CFM,
++ /// Request to start CAC
++ APM_START_CAC_REQ,
++ /// Confirmation of the CAC start
++ APM_START_CAC_CFM,
++ /// Request to stop CAC
++ APM_STOP_CAC_REQ,
++ /// Confirmation of the CAC stop
++ APM_STOP_CAC_CFM,
++
++ APM_SET_BEACON_IE_REQ,
++ APM_SET_BEACON_IE_CFM,
++ /// MAX number of messages
++ APM_MAX,
++};
++
++/// Structure containing the parameters of the @ref APM_START_REQ message.
++struct apm_start_req
++{
++ /// Basic rate set
++ struct mac_rateset basic_rates;
++ /// Control channel on which we have to enable the AP
++ struct mac_chan_def chan;
++ /// Center frequency of the first segment
++ u32_l center_freq1;
++ /// Center frequency of the second segment (only in 80+80 configuration)
++ u32_l center_freq2;
++ /// Width of channel
++ u8_l ch_width;
++ /// Address, in host memory, to the beacon template
++ u32_l bcn_addr;
++ /// Length of the beacon template
++ u16_l bcn_len;
++ /// Offset of the TIM IE in the beacon
++ u16_l tim_oft;
++ /// Beacon interval
++ u16_l bcn_int;
++ /// Flags (@ref mac_connection_flags)
++ u32_l flags;
++ /// Control port Ethertype
++ u16_l ctrl_port_ethertype;
++ /// Length of the TIM IE
++ u8_l tim_len;
++ /// Index of the VIF for which the AP is started
++ u8_l vif_idx;
++};
++
++struct apm_set_bcn_ie_req
++{
++ u8_l vif_idx;
++ u16_l bcn_ie_len;
++ u8_l bcn_ie[512];
++};
++
++/// Structure containing the parameters of the @ref APM_START_CFM message.
++struct apm_start_cfm
++{
++ /// Status of the AP starting procedure
++ u8_l status;
++ /// Index of the VIF for which the AP is started
++ u8_l vif_idx;
++ /// Index of the channel context attached to the VIF
++ u8_l ch_idx;
++ /// Index of the STA used for BC/MC traffic
++ u8_l bcmc_idx;
++};
++
++/// Structure containing the parameters of the @ref APM_STOP_REQ message.
++struct apm_stop_req
++{
++ /// Index of the VIF for which the AP has to be stopped
++ u8_l vif_idx;
++};
++
++/// Structure containing the parameters of the @ref APM_START_CAC_REQ message.
++struct apm_start_cac_req
++{
++ /// Control channel on which we have to start the CAC
++ struct mac_chan_def chan;
++ /// Center frequency of the first segment
++ u32_l center_freq1;
++ /// Center frequency of the second segment (only in 80+80 configuration)
++ u32_l center_freq2;
++ /// Width of channel
++ u8_l ch_width;
++ /// Index of the VIF for which the CAC is started
++ u8_l vif_idx;
++};
++
++/// Structure containing the parameters of the @ref APM_START_CAC_CFM message.
++struct apm_start_cac_cfm
++{
++ /// Status of the CAC starting procedure
++ u8_l status;
++ /// Index of the channel context attached to the VIF for CAC
++ u8_l ch_idx;
++};
++
++/// Structure containing the parameters of the @ref APM_STOP_CAC_REQ message.
++struct apm_stop_cac_req
++{
++ /// Index of the VIF for which the CAC has to be stopped
++ u8_l vif_idx;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For MESH messages
++///////////////////////////////////////////////////////////////////////////////
++
++/// Maximum length of the Mesh ID
++#define MESH_MESHID_MAX_LEN (32)
++
++/// Message API of the MESH task
++enum mesh_msg_tag
++{
++ /// Request to start the MP
++ MESH_START_REQ = LMAC_FIRST_MSG(TASK_MESH),
++ /// Confirmation of the MP start.
++ MESH_START_CFM,
++
++ /// Request to stop the MP.
++ MESH_STOP_REQ,
++ /// Confirmation of the MP stop.
++ MESH_STOP_CFM,
++
++ /// Request to update the MP
++ MESH_UPDATE_REQ,
++ /// Confirmation of the MP update
++ MESH_UPDATE_CFM,
++
++ /// Request information about a given link
++ MESH_PEER_INFO_REQ,
++ /// Response to the MESH_PEER_INFO_REQ message
++ MESH_PEER_INFO_CFM,
++
++ /// Request automatic establishment of a path with a given mesh STA
++ MESH_PATH_CREATE_REQ,
++ /// Confirmation to the MESH_PATH_CREATE_REQ message
++ MESH_PATH_CREATE_CFM,
++
++ /// Request a path update (delete path, modify next hop mesh STA)
++ MESH_PATH_UPDATE_REQ,
++ /// Confirmation to the MESH_PATH_UPDATE_REQ message
++ MESH_PATH_UPDATE_CFM,
++
++ /// Indication from Host that the indicated Mesh Interface is a proxy for an external STA
++ MESH_PROXY_ADD_REQ,
++
++ /// Indicate that a connection has been established or lost
++ MESH_PEER_UPDATE_IND,
++ /// Notification that a connection has been established or lost (when MPM handled by userspace)
++ MESH_PEER_UPDATE_NTF = MESH_PEER_UPDATE_IND,
++
++ /// Indicate that a path is now active or inactive
++ MESH_PATH_UPDATE_IND,
++ /// Indicate that proxy information has been updated
++ MESH_PROXY_UPDATE_IND,
++
++ /// MAX number of messages
++ MESH_MAX,
++};
++
++/// Structure containing the parameters of the @ref MESH_START_REQ message.
++struct mesh_start_req
++{
++ /// Basic rate set
++ struct mac_rateset basic_rates;
++ /// Control channel on which we have to enable the AP
++ struct mac_chan_def chan;
++ /// Center frequency of the first segment
++ u32_l center_freq1;
++ /// Center frequency of the second segment (only in 80+80 configuration)
++ u32_l center_freq2;
++ /// Width of channel
++ u8_l ch_width;
++ /// DTIM Period
++ u8_l dtim_period;
++ /// Beacon Interval
++ u16_l bcn_int;
++ /// Index of the VIF for which the MP is started
++ u8_l vif_index;
++ /// Length of the Mesh ID
++ u8_l mesh_id_len;
++ /// Mesh ID
++ u8_l mesh_id[MESH_MESHID_MAX_LEN];
++ /// Address of the IEs to download
++ u32_l ie_addr;
++ /// Length of the provided IEs
++ u8_l ie_len;
++ /// Indicate if Mesh Peering Management (MPM) protocol is handled in userspace
++ bool_l user_mpm;
++ /// Indicate if Mesh Point is using authentication
++ bool_l is_auth;
++ /// Indicate which authentication method is used
++ u8_l auth_id;
++};
++
++/// Structure containing the parameters of the @ref MESH_START_CFM message.
++struct mesh_start_cfm
++{
++ /// Status of the MP starting procedure
++ u8_l status;
++ /// Index of the VIF for which the MP is started
++ u8_l vif_idx;
++ /// Index of the channel context attached to the VIF
++ u8_l ch_idx;
++ /// Index of the STA used for BC/MC traffic
++ u8_l bcmc_idx;
++};
++
++/// Structure containing the parameters of the @ref MESH_STOP_REQ message.
++struct mesh_stop_req
++{
++ /// Index of the VIF for which the MP has to be stopped
++ u8_l vif_idx;
++};
++
++/// Structure containing the parameters of the @ref MESH_STOP_CFM message.
++struct mesh_stop_cfm
++{
++ /// Index of the VIF for which the MP has to be stopped
++ u8_l vif_idx;
++ /// Status
++ u8_l status;
++};
++
++/// Bit fields for mesh_update_req message's flags value
++enum mesh_update_flags_bit
++{
++ /// Root Mode
++ MESH_UPDATE_FLAGS_ROOT_MODE_BIT = 0,
++ /// Gate Mode
++ MESH_UPDATE_FLAGS_GATE_MODE_BIT,
++ /// Mesh Forwarding
++ MESH_UPDATE_FLAGS_MESH_FWD_BIT,
++ /// Local Power Save Mode
++ MESH_UPDATE_FLAGS_LOCAL_PSM_BIT,
++};
++
++/// Structure containing the parameters of the @ref MESH_UPDATE_REQ message.
++struct mesh_update_req
++{
++ /// Flags, indicate fields which have been updated
++ u8_l flags;
++ /// VIF Index
++ u8_l vif_idx;
++ /// Root Mode
++ u8_l root_mode;
++ /// Gate Announcement
++ bool_l gate_announ;
++ /// Mesh Forwarding
++ bool_l mesh_forward;
++ /// Local PS Mode
++ u8_l local_ps_mode;
++};
++
++/// Structure containing the parameters of the @ref MESH_UPDATE_CFM message.
++struct mesh_update_cfm
++{
++ /// Status
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref MESH_PEER_INFO_REQ message.
++struct mesh_peer_info_req
++{
++ /// Index of the station allocated for the peer
++ u8_l sta_idx;
++};
++
++/// Structure containing the parameters of the @ref MESH_PEER_INFO_CFM message.
++struct mesh_peer_info_cfm
++{
++ /// Response status
++ u8_l status;
++ /// Index of the station allocated for the peer
++ u8_l sta_idx;
++ /// Local Link ID
++ u16_l local_link_id;
++ /// Peer Link ID
++ u16_l peer_link_id;
++ /// Local PS Mode
++ u8_l local_ps_mode;
++ /// Peer PS Mode
++ u8_l peer_ps_mode;
++ /// Non-peer PS Mode
++ u8_l non_peer_ps_mode;
++ /// Link State
++ u8_l link_state;
++};
++
++/// Structure containing the parameters of the @ref MESH_PATH_CREATE_REQ message.
++struct mesh_path_create_req
++{
++ /// Index of the interface on which path has to be created
++ u8_l vif_idx;
++ /// Indicate if originator MAC Address is provided
++ bool_l has_orig_addr;
++ /// Path Target MAC Address
++ struct mac_addr tgt_mac_addr;
++ /// Originator MAC Address
++ struct mac_addr orig_mac_addr;
++};
++
++/// Structure containing the parameters of the @ref MESH_PATH_CREATE_CFM message.
++struct mesh_path_create_cfm
++{
++ /// Confirmation status
++ u8_l status;
++ /// VIF Index
++ u8_l vif_idx;
++};
++
++/// Structure containing the parameters of the @ref MESH_PATH_UPDATE_REQ message.
++struct mesh_path_update_req
++{
++ /// Indicate if path must be deleted
++ bool_l delete;
++ /// Index of the interface on which path has to be created
++ u8_l vif_idx;
++ /// Path Target MAC Address
++ struct mac_addr tgt_mac_addr;
++ /// Next Hop MAC Address
++ struct mac_addr nhop_mac_addr;
++};
++
++/// Structure containing the parameters of the @ref MESH_PATH_UPDATE_CFM message.
++struct mesh_path_update_cfm
++{
++ /// Confirmation status
++ u8_l status;
++ /// VIF Index
++ u8_l vif_idx;
++};
++
++/// Structure containing the parameters of the @ref MESH_PROXY_ADD_REQ message.
++struct mesh_proxy_add_req
++{
++ /// VIF Index
++ u8_l vif_idx;
++ /// MAC Address of the External STA
++ struct mac_addr ext_sta_addr;
++};
++
++/// Structure containing the parameters of the @ref MESH_PROXY_UPDATE_IND
++struct mesh_proxy_update_ind
++{
++ /// Indicate if proxy information has been added or deleted
++ bool_l delete;
++ /// Indicate if we are a proxy for the external STA
++ bool_l local;
++ /// VIF Index
++ u8_l vif_idx;
++ /// MAC Address of the External STA
++ struct mac_addr ext_sta_addr;
++ /// MAC Address of the proxy (only valid if local is false)
++ struct mac_addr proxy_mac_addr;
++};
++
++/// Structure containing the parameters of the @ref MESH_PEER_UPDATE_IND message.
++struct mesh_peer_update_ind
++{
++ /// Indicate if connection has been established or lost
++ bool_l estab;
++ /// VIF Index
++ u8_l vif_idx;
++ /// STA Index
++ u8_l sta_idx;
++ /// Peer MAC Address
++ struct mac_addr peer_addr;
++};
++
++/// Structure containing the parameters of the @ref MESH_PEER_UPDATE_NTF message.
++struct mesh_peer_update_ntf
++{
++ /// VIF Index
++ u8_l vif_idx;
++ /// STA Index
++ u8_l sta_idx;
++ /// Mesh Link State
++ u8_l state;
++};
++
++/// Structure containing the parameters of the @ref MESH_PATH_UPDATE_IND message.
++struct mesh_path_update_ind
++{
++ /// Indicate if path is deleted or not
++ bool_l delete;
++ /// Indicate if path is towards an external STA (not part of MBSS)
++ bool_l ext_sta;
++ /// VIF Index
++ u8_l vif_idx;
++ /// Path Index
++ u8_l path_idx;
++ /// Target MAC Address
++ struct mac_addr tgt_mac_addr;
++ /// External STA MAC Address (only if ext_sta is true)
++ struct mac_addr ext_sta_mac_addr;
++ /// Next Hop STA Index
++ u8_l nhop_sta_idx;
++};
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For Debug messages
++///////////////////////////////////////////////////////////////////////////////
++
++/// Messages related to Debug Task
++enum dbg_msg_tag
++{
++ /// Memory read request
++ DBG_MEM_READ_REQ = LMAC_FIRST_MSG(TASK_DBG),
++ /// Memory read confirm
++ DBG_MEM_READ_CFM,
++ /// Memory write request
++ DBG_MEM_WRITE_REQ,
++ /// Memory write confirm
++ DBG_MEM_WRITE_CFM,
++ /// Module filter request
++ DBG_SET_MOD_FILTER_REQ,
++ /// Module filter confirm
++ DBG_SET_MOD_FILTER_CFM,
++ /// Severity filter request
++ DBG_SET_SEV_FILTER_REQ,
++ /// Severity filter confirm
++ DBG_SET_SEV_FILTER_CFM,
++ /// LMAC/MAC HW fatal error indication
++ DBG_ERROR_IND,
++ /// Request to get system statistics
++ DBG_GET_SYS_STAT_REQ,
++ /// Confirmation of system statistics
++ DBG_GET_SYS_STAT_CFM,
++ /// Memory block write request
++ DBG_MEM_BLOCK_WRITE_REQ,
++ /// Memory block write confirm
++ DBG_MEM_BLOCK_WRITE_CFM,
++ /// Start app request
++ DBG_START_APP_REQ,
++ /// Start app confirm
++ DBG_START_APP_CFM,
++ /// Start npc request
++ DBG_START_NPC_REQ,
++ /// Start npc confirm
++ DBG_START_NPC_CFM,
++ /// Memory mask write request
++ DBG_MEM_MASK_WRITE_REQ,
++ /// Memory mask write confirm
++ DBG_MEM_MASK_WRITE_CFM,
++
++ DBG_RFTEST_CMD_REQ,
++ DBG_RFTEST_CMD_CFM,
++ DBG_BINDING_REQ,
++ DBG_BINDING_CFM,
++ DBG_BINDING_IND,
++
++ DBG_CUSTOM_MSG_REQ,
++ DBG_CUSTOM_MSG_CFM,
++ DBG_CUSTOM_MSG_IND,
++
++ DBG_GPIO_WRITE_REQ,
++ DBG_GPIO_WRITE_CFM,
++ DBG_GPIO_READ_REQ,
++ DBG_GPIO_READ_CFM,
++ DBG_GPIO_INIT_REQ,
++ DBG_GPIO_INIT_CFM,
++
++ /// Max number of Debug messages
++ DBG_MAX,
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_READ_REQ message.
++struct dbg_mem_read_req
++{
++ u32_l memaddr;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_READ_CFM message.
++struct dbg_mem_read_cfm
++{
++ u32_l memaddr;
++ u32_l memdata;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_WRITE_REQ message.
++struct dbg_mem_write_req
++{
++ u32_l memaddr;
++ u32_l memdata;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_WRITE_CFM message.
++struct dbg_mem_write_cfm
++{
++ u32_l memaddr;
++ u32_l memdata;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_MASK_WRITE_REQ message.
++struct dbg_mem_mask_write_req
++{
++ u32_l memaddr;
++ u32_l memmask;
++ u32_l memdata;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_MASK_WRITE_CFM message.
++struct dbg_mem_mask_write_cfm
++{
++ u32_l memaddr;
++ u32_l memdata;
++};
++
++struct dbg_rftest_cmd_req
++{
++ u32_l cmd;
++ u32_l argc;
++ u8_l argv[30];
++};
++
++struct dbg_rftest_cmd_cfm
++{
++ u32_l rftest_result[18];
++};
++
++struct dbg_gpio_write_req {
++ uint8_t gpio_idx;
++ uint8_t gpio_val;
++};
++
++struct dbg_gpio_read_req {
++ uint8_t gpio_idx;
++};
++
++struct dbg_gpio_read_cfm {
++ uint8_t gpio_idx;
++ uint8_t gpio_val;
++};
++
++struct dbg_gpio_init_req {
++ uint8_t gpio_idx;
++ uint8_t gpio_dir; // 1: output, 0: input
++ uint8_t gpio_val; // for output: 1 = high, 0 = low
++};
++
++/// Structure containing the parameters of the @ref DBG_SET_MOD_FILTER_REQ message.
++struct dbg_set_mod_filter_req
++{
++ /// Bit field indicating for each module if the traces are enabled or not
++ u32_l mod_filter;
++};
++
++/// Structure containing the parameters of the @ref DBG_SET_SEV_FILTER_REQ message.
++struct dbg_set_sev_filter_req
++{
++ /// Bit field indicating the severity threshold for the traces
++ u32_l sev_filter;
++};
++
++/// Structure containing the parameters of the @ref DBG_GET_SYS_STAT_CFM message.
++struct dbg_get_sys_stat_cfm
++{
++ /// Time spent in CPU sleep since last reset of the system statistics
++ u32_l cpu_sleep_time;
++ /// Time spent in DOZE since last reset of the system statistics
++ u32_l doze_time;
++ /// Total time spent since last reset of the system statistics
++ u32_l stats_time;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_WRITE_REQ message.
++struct dbg_mem_block_write_req
++{
++ u32_l memaddr;
++ u32_l memsize;
++ u32_l memdata[512 / sizeof(u32_l)];//1024 for 8801
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_WRITE_CFM message.
++struct dbg_mem_block_write_cfm
++{
++ u32_l wstatus;
++};
++
++/// Structure containing the parameters of the @ref DBG_START_APP_REQ message.
++struct dbg_start_app_req
++{
++ u32_l bootaddr;
++ u32_l boottype;
++};
++
++/// Structure containing the parameters of the @ref DBG_START_APP_CFM message.
++struct dbg_start_app_cfm
++{
++ u32_l bootstatus;
++};
++
++enum {
++ HOST_START_APP_AUTO = 1,
++ HOST_START_APP_CUSTOM,
++#ifdef CONFIG_USB_BT
++ HOST_START_APP_REBOOT,
++#endif // (CONFIG_USB_BT)
++ HOST_START_APP_FNCALL = 4,
++ HOST_START_APP_DUMMY = 5,
++};
++
++
++
++///////////////////////////////////////////////////////////////////////////////
++/////////// For TDLS messages
++///////////////////////////////////////////////////////////////////////////////
++
++/// List of messages related to the task.
++enum tdls_msg_tag
++{
++ /// TDLS channel Switch Request.
++ TDLS_CHAN_SWITCH_REQ = LMAC_FIRST_MSG(TASK_TDLS),
++ /// TDLS channel switch confirmation.
++ TDLS_CHAN_SWITCH_CFM,
++ /// TDLS channel switch indication.
++ TDLS_CHAN_SWITCH_IND,
++ /// TDLS channel switch to base channel indication.
++ TDLS_CHAN_SWITCH_BASE_IND,
++ /// TDLS cancel channel switch request.
++ TDLS_CANCEL_CHAN_SWITCH_REQ,
++ /// TDLS cancel channel switch confirmation.
++ TDLS_CANCEL_CHAN_SWITCH_CFM,
++ /// TDLS peer power save indication.
++ TDLS_PEER_PS_IND,
++ /// TDLS peer traffic indication request.
++ TDLS_PEER_TRAFFIC_IND_REQ,
++ /// TDLS peer traffic indication confirmation.
++ TDLS_PEER_TRAFFIC_IND_CFM,
++ /// MAX number of messages
++ TDLS_MAX
++};
++
++/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_REQ message
++struct tdls_chan_switch_req
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// STA Index
++ u8_l sta_idx;
++ /// MAC address of the TDLS station
++ struct mac_addr peer_mac_addr;
++ bool_l initiator;
++ /// Band (2.4GHz or 5GHz)
++ u8_l band;
++ /// Channel type: 20,40,80,160 or 80+80 MHz
++ u8_l type;
++ /// Frequency for Primary 20MHz channel (in MHz)
++ u16_l prim20_freq;
++ /// Frequency for Center of the contiguous channel or center of Primary 80+80
++ u16_l center1_freq;
++ /// Frequency for Center of the non-contiguous secondary 80+80
++ u16_l center2_freq;
++ /// TX power (in dBm)
++ s8_l tx_power;
++ /// Operating class
++ u8_l op_class;
++};
++
++/// Structure containing the parameters of the @ref TDLS_CANCEL_CHAN_SWITCH_REQ message
++struct tdls_cancel_chan_switch_req
++{
++ /// Index of the VIF
++ u8_l vif_index;
++ /// STA Index
++ u8_l sta_idx;
++ /// MAC address of the TDLS station
++ struct mac_addr peer_mac_addr;
++};
++
++
++/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_CFM message
++struct tdls_chan_switch_cfm
++{
++ /// Status of the operation
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref TDLS_CANCEL_CHAN_SWITCH_CFM message
++struct tdls_cancel_chan_switch_cfm
++{
++ /// Status of the operation
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_IND message
++struct tdls_chan_switch_ind
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// Channel Context Index
++ u8_l chan_ctxt_index;
++ /// Status of the operation
++ u8_l status;
++};
++
++/// Structure containing the parameters of the @ref TDLS_CHAN_SWITCH_BASE_IND message
++struct tdls_chan_switch_base_ind
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// Channel Context index
++ u8_l chan_ctxt_index;
++};
++
++/// Structure containing the parameters of the @ref TDLS_PEER_PS_IND message
++struct tdls_peer_ps_ind
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// STA Index
++ u8_l sta_idx;
++ /// MAC ADDR of the TDLS STA
++ struct mac_addr peer_mac_addr;
++ /// Flag to indicate if the TDLS peer is going to sleep
++ bool ps_on;
++};
++
++/// Structure containing the parameters of the @ref TDLS_PEER_TRAFFIC_IND_REQ message
++struct tdls_peer_traffic_ind_req
++{
++ /// VIF Index
++ u8_l vif_index;
++ /// STA Index
++ u8_l sta_idx;
++ /// MAC ADDR of the TDLS STA
++ struct mac_addr peer_mac_addr;
++ /// Dialog token
++ u8_l dialog_token;
++ /// TID of the latest MPDU transmitted over the TDLS direct link to the TDLS STA
++ u8_l last_tid;
++ /// Sequence number of the latest MPDU transmitted over the TDLS direct link
++ /// to the TDLS STA
++ u16_l last_sn;
++};
++
++/// Structure containing the parameters of the @ref TDLS_PEER_TRAFFIC_IND_CFM message
++struct tdls_peer_traffic_ind_cfm
++{
++ /// Status of the operation
++ u8_l status;
++};
++
++
++#endif // LMAC_MSG_H_
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/lmac_types.h
+@@ -0,0 +1,62 @@
++/**
++ ****************************************************************************************
++ *
++ * @file co_types.h
++ *
++ * @brief This file replaces the need to include the typical stdint or stdbool headers,
++ * which may not be available in all toolchains, and adds new types.
++ *
++ * Copyright (C) RivieraWaves 2009-2019
++ *
++ * $Rev: $
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _LMAC_INT_H_
++#define _LMAC_INT_H_
++
++
++/**
++ ****************************************************************************************
++ * @addtogroup CO_INT
++ * @ingroup COMMON
++ * @brief Common integer standard types (removes use of stdint)
++ *
++ * @{
++ ****************************************************************************************
++ */
++
++
++/*
++ * DEFINES
++ ****************************************************************************************
++ */
++
++#include <linux/version.h>
++#include <linux/types.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++#include <linux/bits.h>
++#else
++#include <linux/bitops.h>
++#endif
++
++#ifdef CONFIG_RWNX_TL4
++typedef uint16_t u8_l;
++typedef int16_t s8_l;
++typedef uint16_t bool_l;
++#else
++typedef uint8_t u8_l;
++typedef int8_t s8_l;
++typedef bool bool_l;
++#endif
++typedef uint16_t u16_l;
++typedef int16_t s16_l;
++typedef uint32_t u32_l;
++typedef int32_t s32_l;
++typedef uint64_t u64_l;
++
++
++
++/// @} CO_INT
++#endif // _LMAC_INT_H_
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/md5.c
+@@ -0,0 +1,161 @@
++#include <linux/memory.h>
++#include <linux/string.h>
++#include "md5.h"
++
++unsigned char PADDING[]={0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
++ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
++ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
++ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++
++void MD5Init(MD5_CTX *context)
++{
++ context->count[0] = 0;
++ context->count[1] = 0;
++ context->state[0] = 0x67452301;
++ context->state[1] = 0xEFCDAB89;
++ context->state[2] = 0x98BADCFE;
++ context->state[3] = 0x10325476;
++}
++void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen)
++{
++ unsigned int i = 0,index = 0,partlen = 0;
++ index = (context->count[0] >> 3) & 0x3F;
++ partlen = 64 - index;
++ context->count[0] += inputlen << 3;
++ if(context->count[0] < (inputlen << 3))
++ context->count[1]++;
++ context->count[1] += inputlen >> 29;
++
++ if(inputlen >= partlen)
++ {
++ memcpy(&context->buffer[index],input,partlen);
++ MD5Transform(context->state,context->buffer);
++ for(i = partlen;i+64 <= inputlen;i+=64)
++ MD5Transform(context->state,&input[i]);
++ index = 0;
++ }
++ else
++ {
++ i = 0;
++ }
++ memcpy(&context->buffer[index],&input[i],inputlen-i);
++}
++void MD5Final(MD5_CTX *context,unsigned char digest[16])
++{
++ unsigned int index = 0,padlen = 0;
++ unsigned char bits[8];
++ index = (context->count[0] >> 3) & 0x3F;
++ padlen = (index < 56)?(56-index):(120-index);
++ MD5Encode(bits,context->count,8);
++ MD5Update(context,PADDING,padlen);
++ MD5Update(context,bits,8);
++ MD5Encode(digest,context->state,16);
++}
++void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len)
++{
++ unsigned int i = 0,j = 0;
++ while(j < len)
++ {
++ output[j] = input[i] & 0xFF;
++ output[j+1] = (input[i] >> 8) & 0xFF;
++ output[j+2] = (input[i] >> 16) & 0xFF;
++ output[j+3] = (input[i] >> 24) & 0xFF;
++ i++;
++ j+=4;
++ }
++}
++void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len)
++{
++ unsigned int i = 0,j = 0;
++ while(j < len)
++ {
++ output[i] = (input[j]) |
++ (input[j+1] << 8) |
++ (input[j+2] << 16) |
++ (input[j+3] << 24);
++ i++;
++ j+=4;
++ }
++}
++void MD5Transform(unsigned int state[4],unsigned char block[64])
++{
++ unsigned int a = state[0];
++ unsigned int b = state[1];
++ unsigned int c = state[2];
++ unsigned int d = state[3];
++ unsigned int x[64];
++ MD5Decode(x,block,64);
++ FF(a, b, c, d, x[ 0], 7, 0xd76aa478); /* 1 */
++ FF(d, a, b, c, x[ 1], 12, 0xe8c7b756); /* 2 */
++ FF(c, d, a, b, x[ 2], 17, 0x242070db); /* 3 */
++ FF(b, c, d, a, x[ 3], 22, 0xc1bdceee); /* 4 */
++ FF(a, b, c, d, x[ 4], 7, 0xf57c0faf); /* 5 */
++ FF(d, a, b, c, x[ 5], 12, 0x4787c62a); /* 6 */
++ FF(c, d, a, b, x[ 6], 17, 0xa8304613); /* 7 */
++ FF(b, c, d, a, x[ 7], 22, 0xfd469501); /* 8 */
++ FF(a, b, c, d, x[ 8], 7, 0x698098d8); /* 9 */
++ FF(d, a, b, c, x[ 9], 12, 0x8b44f7af); /* 10 */
++ FF(c, d, a, b, x[10], 17, 0xffff5bb1); /* 11 */
++ FF(b, c, d, a, x[11], 22, 0x895cd7be); /* 12 */
++ FF(a, b, c, d, x[12], 7, 0x6b901122); /* 13 */
++ FF(d, a, b, c, x[13], 12, 0xfd987193); /* 14 */
++ FF(c, d, a, b, x[14], 17, 0xa679438e); /* 15 */
++ FF(b, c, d, a, x[15], 22, 0x49b40821); /* 16 */
++
++ /* Round 2 */
++ GG(a, b, c, d, x[ 1], 5, 0xf61e2562); /* 17 */
++ GG(d, a, b, c, x[ 6], 9, 0xc040b340); /* 18 */
++ GG(c, d, a, b, x[11], 14, 0x265e5a51); /* 19 */
++ GG(b, c, d, a, x[ 0], 20, 0xe9b6c7aa); /* 20 */
++ GG(a, b, c, d, x[ 5], 5, 0xd62f105d); /* 21 */
++ GG(d, a, b, c, x[10], 9, 0x2441453); /* 22 */
++ GG(c, d, a, b, x[15], 14, 0xd8a1e681); /* 23 */
++ GG(b, c, d, a, x[ 4], 20, 0xe7d3fbc8); /* 24 */
++ GG(a, b, c, d, x[ 9], 5, 0x21e1cde6); /* 25 */
++ GG(d, a, b, c, x[14], 9, 0xc33707d6); /* 26 */
++ GG(c, d, a, b, x[ 3], 14, 0xf4d50d87); /* 27 */
++ GG(b, c, d, a, x[ 8], 20, 0x455a14ed); /* 28 */
++ GG(a, b, c, d, x[13], 5, 0xa9e3e905); /* 29 */
++ GG(d, a, b, c, x[ 2], 9, 0xfcefa3f8); /* 30 */
++ GG(c, d, a, b, x[ 7], 14, 0x676f02d9); /* 31 */
++ GG(b, c, d, a, x[12], 20, 0x8d2a4c8a); /* 32 */
++
++ /* Round 3 */
++ HH(a, b, c, d, x[ 5], 4, 0xfffa3942); /* 33 */
++ HH(d, a, b, c, x[ 8], 11, 0x8771f681); /* 34 */
++ HH(c, d, a, b, x[11], 16, 0x6d9d6122); /* 35 */
++ HH(b, c, d, a, x[14], 23, 0xfde5380c); /* 36 */
++ HH(a, b, c, d, x[ 1], 4, 0xa4beea44); /* 37 */
++ HH(d, a, b, c, x[ 4], 11, 0x4bdecfa9); /* 38 */
++ HH(c, d, a, b, x[ 7], 16, 0xf6bb4b60); /* 39 */
++ HH(b, c, d, a, x[10], 23, 0xbebfbc70); /* 40 */
++ HH(a, b, c, d, x[13], 4, 0x289b7ec6); /* 41 */
++ HH(d, a, b, c, x[ 0], 11, 0xeaa127fa); /* 42 */
++ HH(c, d, a, b, x[ 3], 16, 0xd4ef3085); /* 43 */
++ HH(b, c, d, a, x[ 6], 23, 0x4881d05); /* 44 */
++ HH(a, b, c, d, x[ 9], 4, 0xd9d4d039); /* 45 */
++ HH(d, a, b, c, x[12], 11, 0xe6db99e5); /* 46 */
++ HH(c, d, a, b, x[15], 16, 0x1fa27cf8); /* 47 */
++ HH(b, c, d, a, x[ 2], 23, 0xc4ac5665); /* 48 */
++
++ /* Round 4 */
++ II(a, b, c, d, x[ 0], 6, 0xf4292244); /* 49 */
++ II(d, a, b, c, x[ 7], 10, 0x432aff97); /* 50 */
++ II(c, d, a, b, x[14], 15, 0xab9423a7); /* 51 */
++ II(b, c, d, a, x[ 5], 21, 0xfc93a039); /* 52 */
++ II(a, b, c, d, x[12], 6, 0x655b59c3); /* 53 */
++ II(d, a, b, c, x[ 3], 10, 0x8f0ccc92); /* 54 */
++ II(c, d, a, b, x[10], 15, 0xffeff47d); /* 55 */
++ II(b, c, d, a, x[ 1], 21, 0x85845dd1); /* 56 */
++ II(a, b, c, d, x[ 8], 6, 0x6fa87e4f); /* 57 */
++ II(d, a, b, c, x[15], 10, 0xfe2ce6e0); /* 58 */
++ II(c, d, a, b, x[ 6], 15, 0xa3014314); /* 59 */
++ II(b, c, d, a, x[13], 21, 0x4e0811a1); /* 60 */
++ II(a, b, c, d, x[ 4], 6, 0xf7537e82); /* 61 */
++ II(d, a, b, c, x[11], 10, 0xbd3af235); /* 62 */
++ II(c, d, a, b, x[ 2], 15, 0x2ad7d2bb); /* 63 */
++ II(b, c, d, a, x[ 9], 21, 0xeb86d391); /* 64 */
++ state[0] += a;
++ state[1] += b;
++ state[2] += c;
++ state[3] += d;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/md5.h
+@@ -0,0 +1,48 @@
++#ifndef MD5_H
++#define MD5_H
++
++typedef struct
++{
++ unsigned int count[2];
++ unsigned int state[4];
++ unsigned char buffer[64];
++}MD5_CTX;
++
++
++#define F(x,y,z) ((x & y) | (~x & z))
++#define G(x,y,z) ((x & z) | (y & ~z))
++#define H(x,y,z) (x^y^z)
++#define I(x,y,z) (y ^ (x | ~z))
++#define ROTATE_LEFT(x,n) ((x << n) | (x >> (32-n)))
++#define FF(a,b,c,d,x,s,ac) \
++ { \
++ a += F(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++#define GG(a,b,c,d,x,s,ac) \
++ { \
++ a += G(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++#define HH(a,b,c,d,x,s,ac) \
++ { \
++ a += H(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++#define II(a,b,c,d,x,s,ac) \
++ { \
++ a += I(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++void MD5Init(MD5_CTX *context);
++void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen);
++void MD5Final(MD5_CTX *context,unsigned char digest[16]);
++void MD5Transform(unsigned int state[4],unsigned char block[64]);
++void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len);
++void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len);
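++
++/*
++ * Typical call sequence (illustrative sketch; "data" and "data_len" are
++ * placeholders for the caller's buffer and its length in bytes):
++ *
++ *   MD5_CTX ctx;
++ *   unsigned char digest[16];
++ *
++ *   MD5Init(&ctx);
++ *   MD5Update(&ctx, data, data_len);
++ *   MD5Final(&ctx, digest);
++ */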
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/reg_access.h
+@@ -0,0 +1,161 @@
++/**
++ ******************************************************************************
++ *
++ * @file reg_access.h
++ *
++ * @brief Definitions and macros for MAC HW and platform register accesses
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef REG_ACCESS_H_
++#define REG_ACCESS_H_
++
++/*****************************************************************************
++ * Addresses within RWNX_ADDR_CPU
++ *****************************************************************************/
++#define RAM_LMAC_FW_ADDR 0x00150000
++
++#define ROM_FMAC_FW_ADDR 0x00010000
++#define RAM_FMAC_FW_ADDR 0x00120000
++#define ROM_FMAC_PATCH_ADDR 0x00180000
++#ifdef CONFIG_DPD
++#define ROM_FMAC_CALIB_ADDR 0x00130000
++#endif
++
++
++/*****************************************************************************
++ * Addresses within RWNX_ADDR_SYSTEM
++ *****************************************************************************/
++/* Shared RAM */
++#define SHARED_RAM_START_ADDR 0x00000000
++
++/* IPC registers */
++#define IPC_REG_BASE_ADDR 0x00800000
++
++/* System Controller Registers */
++#define SYSCTRL_SIGNATURE_ADDR 0x00900000
++// old diag register name
++#define SYSCTRL_DIAG_CONF_ADDR 0x00900068
++#define SYSCTRL_PHYDIAG_CONF_ADDR 0x00900074
++#define SYSCTRL_RIUDIAG_CONF_ADDR 0x00900078
++// new diag register name
++#define SYSCTRL_DIAG_CONF0 0x00900064
++#define SYSCTRL_DIAG_CONF1 0x00900068
++#define SYSCTRL_DIAG_CONF2 0x00900074
++#define SYSCTRL_DIAG_CONF3 0x00900078
++#define SYSCTRL_MISC_CNTL_ADDR 0x009000E0
++#define BOOTROM_ENABLE BIT(4)
++#define FPGA_B_RESET BIT(1)
++#define SOFT_RESET BIT(0)
++
++/* MAC platform */
++#define NXMAC_VERSION_1_ADDR 0x00B00004
++#define NXMAC_MU_MIMO_TX_BIT BIT(19)
++#define NXMAC_BFMER_BIT BIT(18)
++#define NXMAC_BFMEE_BIT BIT(17)
++#define NXMAC_MAC_80211MH_FORMAT_BIT BIT(16)
++#define NXMAC_COEX_BIT BIT(14)
++#define NXMAC_WAPI_BIT BIT(13)
++#define NXMAC_TPC_BIT BIT(12)
++#define NXMAC_VHT_BIT BIT(11)
++#define NXMAC_HT_BIT BIT(10)
++#define NXMAC_RCE_BIT BIT(8)
++#define NXMAC_CCMP_BIT BIT(7)
++#define NXMAC_TKIP_BIT BIT(6)
++#define NXMAC_WEP_BIT BIT(5)
++#define NXMAC_SECURITY_BIT BIT(4)
++#define NXMAC_SME_BIT BIT(3)
++#define NXMAC_HCCA_BIT BIT(2)
++#define NXMAC_EDCA_BIT BIT(1)
++#define NXMAC_QOS_BIT BIT(0)
++
++#define NXMAC_RX_CNTRL_ADDR 0x00B00060
++#define NXMAC_EN_DUPLICATE_DETECTION_BIT BIT(31)
++#define NXMAC_ACCEPT_UNKNOWN_BIT BIT(30)
++#define NXMAC_ACCEPT_OTHER_DATA_FRAMES_BIT BIT(29)
++#define NXMAC_ACCEPT_QO_S_NULL_BIT BIT(28)
++#define NXMAC_ACCEPT_QCFWO_DATA_BIT BIT(27)
++#define NXMAC_ACCEPT_Q_DATA_BIT BIT(26)
++#define NXMAC_ACCEPT_CFWO_DATA_BIT BIT(25)
++#define NXMAC_ACCEPT_DATA_BIT BIT(24)
++#define NXMAC_ACCEPT_OTHER_CNTRL_FRAMES_BIT BIT(23)
++#define NXMAC_ACCEPT_CF_END_BIT BIT(22)
++#define NXMAC_ACCEPT_ACK_BIT BIT(21)
++#define NXMAC_ACCEPT_CTS_BIT BIT(20)
++#define NXMAC_ACCEPT_RTS_BIT BIT(19)
++#define NXMAC_ACCEPT_PS_POLL_BIT BIT(18)
++#define NXMAC_ACCEPT_BA_BIT BIT(17)
++#define NXMAC_ACCEPT_BAR_BIT BIT(16)
++#define NXMAC_ACCEPT_OTHER_MGMT_FRAMES_BIT BIT(15)
++#define NXMAC_ACCEPT_BFMEE_FRAMES_BIT BIT(14)
++#define NXMAC_ACCEPT_ALL_BEACON_BIT BIT(13)
++#define NXMAC_ACCEPT_NOT_EXPECTED_BA_BIT BIT(12)
++#define NXMAC_ACCEPT_DECRYPT_ERROR_FRAMES_BIT BIT(11)
++#define NXMAC_ACCEPT_BEACON_BIT BIT(10)
++#define NXMAC_ACCEPT_PROBE_RESP_BIT BIT(9)
++#define NXMAC_ACCEPT_PROBE_REQ_BIT BIT(8)
++#define NXMAC_ACCEPT_MY_UNICAST_BIT BIT(7)
++#define NXMAC_ACCEPT_UNICAST_BIT BIT(6)
++#define NXMAC_ACCEPT_ERROR_FRAMES_BIT BIT(5)
++#define NXMAC_ACCEPT_OTHER_BSSID_BIT BIT(4)
++#define NXMAC_ACCEPT_BROADCAST_BIT BIT(3)
++#define NXMAC_ACCEPT_MULTICAST_BIT BIT(2)
++#define NXMAC_DONT_DECRYPT_BIT BIT(1)
++#define NXMAC_EXC_UNENCRYPTED_BIT BIT(0)
++
++#define NXMAC_DEBUG_PORT_SEL_ADDR 0x00B00510
++#define NXMAC_SW_SET_PROFILING_ADDR 0x00B08564
++#define NXMAC_SW_CLEAR_PROFILING_ADDR 0x00B08568
++
++/* Modem Status */
++#define MDM_HDMCONFIG_ADDR 0x00C00000
++
++/* Clock gating configuration */
++#define MDM_MEMCLKCTRL0_ADDR 0x00C00848
++#define MDM_CLKGATEFCTRL0_ADDR 0x00C00874
++#define CRM_CLKGATEFCTRL0_ADDR 0x00940010
++
++/* AGC (trident) */
++#define AGC_RWNXAGCCNTL_ADDR 0x00C02060
++
++/* LDPC RAM */
++#define PHY_LDPC_RAM_ADDR 0x00C09000
++
++/* FCU (elma) */
++#define FCU_RWNXFCAGCCNTL_ADDR 0x00C09034
++
++/* AGC RAM */
++#define PHY_AGC_UCODE_ADDR 0x00C0A000
++
++/* RIU */
++#define RIU_RWNXVERSION_ADDR 0x00C0B000
++#define RIU_RWNXDYNAMICCONFIG_ADDR 0x00C0B008
++#define RIU_AGCMEMBISTSTAT_ADDR 0x00C0B238
++#define RIU_AGCMEMSIGNATURESTAT_ADDR 0x00C0B23C
++#define RIU_RWNXAGCCNTL_ADDR 0x00C0B390
++
++/* FCU RAM */
++#define PHY_FCU_UCODE_ADDR 0x00C0E000
++
++/* RF ITF */
++#define FPGAB_MPIF_SEL_ADDR 0x00C10030
++#define RF_V6_DIAGPORT_CONF1_ADDR 0x00C10010
++#define RF_v6_PHYDIAG_CONF1_ADDR 0x00C10018
++
++#define RF_V7_DIAGPORT_CONF1_ADDR 0x00F10010
++#define RF_v7_PHYDIAG_CONF1_ADDR 0x00F10018
++
++/*****************************************************************************
++ * Macros for generated register files
++ *****************************************************************************/
++/* Macros for IPC registers access (used in reg_ipc_app.h) */
++#define REG_IPC_APP_RD(env, INDEX) \
++ (*(volatile u32*)((u8*)env + IPC_REG_BASE_ADDR + 4*(INDEX)))
++
++#define REG_IPC_APP_WR(env, INDEX, value) \
++ (*(volatile u32*)((u8*)env + IPC_REG_BASE_ADDR + 4*(INDEX)) = value)
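++
++/*
++ * Example usage, matching the accessors generated in reg_ipc_app.h:
++ *   u32 trig = REG_IPC_APP_RD(env, IPC_APP2EMB_TRIGGER_INDEX);
++ *   REG_IPC_APP_WR(env, IPC_APP2EMB_TRIGGER_INDEX, trig);
++ */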
++
++#endif /* REG_ACCESS_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/reg_ipc_app.h
+@@ -0,0 +1,299 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_ipc_app.h
++ *
++ * @brief IPC module register definitions
++ *
++ * Copyright (C) RivieraWaves 2011-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _REG_IPC_APP_H_
++#define _REG_IPC_APP_H_
++
++#ifndef __KERNEL__
++#include <stdint.h>
++#include "arch.h"
++#else
++#include "ipc_compat.h"
++#endif
++#include "reg_access.h"
++
++#define REG_IPC_APP_DECODING_MASK 0x0000007F
++
++/**
++ * @brief APP2EMB_TRIGGER register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ -----------
++ * 31:00 APP2EMB_TRIGGER 0x0
++ * </pre>
++ */
++#define IPC_APP2EMB_TRIGGER_ADDR 0x12000000
++#define IPC_APP2EMB_TRIGGER_OFFSET 0x00000000
++#define IPC_APP2EMB_TRIGGER_INDEX 0x00000000
++#define IPC_APP2EMB_TRIGGER_RESET 0x00000000
++
++__INLINE u32 ipc_app2emb_trigger_get(void *env)
++{
++ return REG_IPC_APP_RD(env, IPC_APP2EMB_TRIGGER_INDEX);
++}
++
++__INLINE void ipc_app2emb_trigger_set(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_APP2EMB_TRIGGER_INDEX, value);
++}
++
++// field definitions
++#define IPC_APP2EMB_TRIGGER_MASK ((u32)0xFFFFFFFF)
++#define IPC_APP2EMB_TRIGGER_LSB 0
++#define IPC_APP2EMB_TRIGGER_WIDTH ((u32)0x00000020)
++
++#define IPC_APP2EMB_TRIGGER_RST 0x0
++
++__INLINE u32 ipc_app2emb_trigger_getf(void *env)
++{
++ u32 localVal = REG_IPC_APP_RD(env, IPC_APP2EMB_TRIGGER_INDEX);
++ ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0);
++ return (localVal >> 0);
++}
++
++__INLINE void ipc_app2emb_trigger_setf(void *env, u32 app2embtrigger)
++{
++ ASSERT_ERR((((u32)app2embtrigger << 0) & ~((u32)0xFFFFFFFF)) == 0);
++ REG_IPC_APP_WR(env, IPC_APP2EMB_TRIGGER_INDEX, (u32)app2embtrigger << 0);
++}
++
++/**
++ * @brief EMB2APP_RAWSTATUS register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ -----------
++ * 31:00 EMB2APP_RAWSTATUS 0x0
++ * </pre>
++ */
++#define IPC_EMB2APP_RAWSTATUS_ADDR 0x12000004
++#define IPC_EMB2APP_RAWSTATUS_OFFSET 0x00000004
++#define IPC_EMB2APP_RAWSTATUS_INDEX 0x00000001
++#define IPC_EMB2APP_RAWSTATUS_RESET 0x00000000
++
++__INLINE u32 ipc_emb2app_rawstatus_get(void *env)
++{
++ return REG_IPC_APP_RD(env, IPC_EMB2APP_RAWSTATUS_INDEX);
++}
++
++__INLINE void ipc_emb2app_rawstatus_set(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_EMB2APP_RAWSTATUS_INDEX, value);
++}
++
++// field definitions
++#define IPC_EMB2APP_RAWSTATUS_MASK ((u32)0xFFFFFFFF)
++#define IPC_EMB2APP_RAWSTATUS_LSB 0
++#define IPC_EMB2APP_RAWSTATUS_WIDTH ((u32)0x00000020)
++
++#define IPC_EMB2APP_RAWSTATUS_RST 0x0
++
++__INLINE u32 ipc_emb2app_rawstatus_getf(void *env)
++{
++ u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_RAWSTATUS_INDEX);
++ ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0);
++ return (localVal >> 0);
++}
++
++/**
++ * @brief EMB2APP_ACK register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ -----------
++ * 31:00 EMB2APP_ACK 0x0
++ * </pre>
++ */
++#define IPC_EMB2APP_ACK_ADDR 0x12000008
++#define IPC_EMB2APP_ACK_OFFSET 0x00000008
++#define IPC_EMB2APP_ACK_INDEX 0x00000002
++#define IPC_EMB2APP_ACK_RESET 0x00000000
++
++__INLINE u32 ipc_emb2app_ack_get(void *env)
++{
++ return REG_IPC_APP_RD(env, IPC_EMB2APP_ACK_INDEX);
++}
++
++__INLINE void ipc_emb2app_ack_clear(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_EMB2APP_ACK_INDEX, value);
++}
++
++// field definitions
++#define IPC_EMB2APP_ACK_MASK ((u32)0xFFFFFFFF)
++#define IPC_EMB2APP_ACK_LSB 0
++#define IPC_EMB2APP_ACK_WIDTH ((u32)0x00000020)
++
++#define IPC_EMB2APP_ACK_RST 0x0
++
++__INLINE u32 ipc_emb2app_ack_getf(void *env)
++{
++ u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_ACK_INDEX);
++ ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0);
++ return (localVal >> 0);
++}
++
++__INLINE void ipc_emb2app_ack_clearf(void *env, u32 emb2appack)
++{
++ ASSERT_ERR((((u32)emb2appack << 0) & ~((u32)0xFFFFFFFF)) == 0);
++ REG_IPC_APP_WR(env, IPC_EMB2APP_ACK_INDEX, (u32)emb2appack << 0);
++}
++
++/**
++ * @brief EMB2APP_UNMASK_SET register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ -----------
++ * 31:00 EMB2APP_UNMASK 0x0
++ * </pre>
++ */
++#define IPC_EMB2APP_UNMASK_SET_ADDR 0x1200000C
++#define IPC_EMB2APP_UNMASK_SET_OFFSET 0x0000000C
++#define IPC_EMB2APP_UNMASK_SET_INDEX 0x00000003
++#define IPC_EMB2APP_UNMASK_SET_RESET 0x00000000
++
++__INLINE u32 ipc_emb2app_unmask_get(void *env)
++{
++ return REG_IPC_APP_RD(env, IPC_EMB2APP_UNMASK_SET_INDEX);
++}
++
++__INLINE void ipc_emb2app_unmask_set(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_SET_INDEX, value);
++}
++
++// field definitions
++#define IPC_EMB2APP_UNMASK_MASK ((u32)0xFFFFFFFF)
++#define IPC_EMB2APP_UNMASK_LSB 0
++#define IPC_EMB2APP_UNMASK_WIDTH ((u32)0x00000020)
++
++#define IPC_EMB2APP_UNMASK_RST 0x0
++
++__INLINE u32 ipc_emb2app_unmask_getf(void *env)
++{
++ u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_UNMASK_SET_INDEX);
++ ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0);
++ return (localVal >> 0);
++}
++
++__INLINE void ipc_emb2app_unmask_setf(void *env, u32 emb2appunmask)
++{
++ ASSERT_ERR((((u32)emb2appunmask << 0) & ~((u32)0xFFFFFFFF)) == 0);
++ REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_SET_INDEX, (u32)emb2appunmask << 0);
++}
++
++/**
++ * @brief EMB2APP_UNMASK_CLEAR register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ -----------
++ * 31:00 EMB2APP_UNMASK 0x0
++ * </pre>
++ */
++#define IPC_EMB2APP_UNMASK_CLEAR_ADDR 0x12000010
++#define IPC_EMB2APP_UNMASK_CLEAR_OFFSET 0x00000010
++#define IPC_EMB2APP_UNMASK_CLEAR_INDEX 0x00000004
++#define IPC_EMB2APP_UNMASK_CLEAR_RESET 0x00000000
++
++__INLINE void ipc_emb2app_unmask_clear(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_CLEAR_INDEX, value);
++}
++
++// fields defined in symmetrical set/clear register
++__INLINE void ipc_emb2app_unmask_clearf(void *env, u32 emb2appunmask)
++{
++ ASSERT_ERR((((u32)emb2appunmask << 0) & ~((u32)0xFFFFFFFF)) == 0);
++ REG_IPC_APP_WR(env, IPC_EMB2APP_UNMASK_CLEAR_INDEX, (u32)emb2appunmask << 0);
++}
++
++/**
++ * @brief EMB2APP_STATUS register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ -----------
++ * 31:00 EMB2APP_STATUS 0x0
++ * </pre>
++ */
++#ifdef CONFIG_RWNX_OLD_IPC
++#define IPC_EMB2APP_STATUS_ADDR 0x12000014
++#define IPC_EMB2APP_STATUS_OFFSET 0x00000014
++#define IPC_EMB2APP_STATUS_INDEX 0x00000005
++#else
++#define IPC_EMB2APP_STATUS_ADDR 0x1200001C
++#define IPC_EMB2APP_STATUS_OFFSET 0x0000001C
++#define IPC_EMB2APP_STATUS_INDEX 0x00000007
++#endif
++#define IPC_EMB2APP_STATUS_RESET 0x00000000
++
++__INLINE u32 ipc_emb2app_status_get(void *env)
++{
++ return REG_IPC_APP_RD(env, IPC_EMB2APP_STATUS_INDEX);
++}
++
++__INLINE void ipc_emb2app_status_set(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_EMB2APP_STATUS_INDEX, value);
++}
++
++// field definitions
++#define IPC_EMB2APP_STATUS_MASK ((u32)0xFFFFFFFF)
++#define IPC_EMB2APP_STATUS_LSB 0
++#define IPC_EMB2APP_STATUS_WIDTH ((u32)0x00000020)
++
++#define IPC_EMB2APP_STATUS_RST 0x0
++
++__INLINE u32 ipc_emb2app_status_getf(void *env)
++{
++ u32 localVal = REG_IPC_APP_RD(env, IPC_EMB2APP_STATUS_INDEX);
++ ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0);
++ return (localVal >> 0);
++}
++
++/**
++ * @brief APP_SIGNATURE register definition
++ * <pre>
++ * Bits Field Name Reset Value
++ * ----- ------------------ ----------
++ * 31:00 APP_SIGNATURE 0x0
++ * </pre>
++ */
++#define IPC_APP_SIGNATURE_ADDR 0x12000040
++#define IPC_APP_SIGNATURE_OFFSET 0x00000040
++#define IPC_APP_SIGNATURE_INDEX 0x00000010
++#define IPC_APP_SIGNATURE_RESET 0x00000000
++
++__INLINE u32 ipc_app_signature_get(void *env)
++{
++ return REG_IPC_APP_RD(env, IPC_APP_SIGNATURE_INDEX);
++}
++
++__INLINE void ipc_app_signature_set(void *env, u32 value)
++{
++ REG_IPC_APP_WR(env, IPC_APP_SIGNATURE_INDEX, value);
++}
++
++// field definitions
++#define IPC_APP_SIGNATURE_MASK ((u32)0xFFFFFFFF)
++#define IPC_APP_SIGNATURE_LSB 0
++#define IPC_APP_SIGNATURE_WIDTH ((u32)0x00000020)
++
++#define IPC_APP_SIGNATURE_RST 0x0
++
++__INLINE u32 ipc_app_signature_getf(void *env)
++{
++ u32 localVal = REG_IPC_APP_RD(env, IPC_APP_SIGNATURE_INDEX);
++ ASSERT_ERR((localVal & ~((u32)0xFFFFFFFF)) == 0);
++ return (localVal >> 0);
++}
++
++
++#endif // _REG_IPC_APP_H_
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/regdb.c
+@@ -0,0 +1,2873 @@
++#include <linux/nl80211.h>
++#include <net/cfg80211.h>
++#include <linux/version.h>
++
++//#include "regdb.h"
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
++#define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \
++{ \
++ .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \
++ .freq_range.end_freq_khz = MHZ_TO_KHZ(end), \
++ .freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw), \
++ .power_rule.max_antenna_gain = DBI_TO_MBI(gain),\
++ .power_rule.max_eirp = DBM_TO_MBM(eirp), \
++ .flags = reg_flags, \
++}
++#define NL80211_RRF_AUTO_BW 0
++#endif
++
++static const struct ieee80211_regdomain regdom_00 = {
++ .n_reg_rules = 2,
++ .alpha2 = "00",
++ .reg_rules = {
++ // 1...14
++ REG_RULE(2390 - 10, 2510 + 10, 40, 0, 20, 0),
++ // 36...165
++ REG_RULE(5150 - 10, 5970 + 10, 80, 0, 20, 0),
++ }
++};
++
++static const struct ieee80211_regdomain regdom_AD = {
++ .alpha2 = "AD",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5710, 80, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_AE = {
++ .alpha2 = "AE",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ //REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_AF = {
++ .alpha2 = "AF",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_AI = {
++ .alpha2 = "AI",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_AL = {
++ .alpha2 = "AL",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_AM = {
++ .alpha2 = "AM",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 18, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 18, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_AN = {
++ .alpha2 = "AN",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_AR = {
++ .alpha2 = "AR",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ //REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ // NL80211_RRF_AUTO_BW | 0),
++ //REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ // NL80211_RRF_DFS |
++ // NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5270, 5330, 40, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ //REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ // NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5815, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_AS = {
++ .alpha2 = "AS",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_AT = {
++ .alpha2 = "AT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_AU = {
++ .alpha2 = "AU",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_AW = {
++ .alpha2 = "AW",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_AZ = {
++ .alpha2 = "AZ",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 18, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 18, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_BA = {
++ .alpha2 = "BA",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BB = {
++ .alpha2 = "BB",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 23, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_BD = {
++ .alpha2 = "BD",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_BE = {
++ .alpha2 = "BE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BF = {
++ .alpha2 = "BF",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BG = {
++ .alpha2 = "BG",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BH = {
++ .alpha2 = "BH",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_BL = {
++ .alpha2 = "BL",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_BM = {
++ .alpha2 = "BM",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BN = {
++ .alpha2 = "BN",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_BO = {
++ .alpha2 = "BO",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_BR = {
++ .alpha2 = "BR",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BS = {
++ .alpha2 = "BS",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_BT = {
++ .alpha2 = "BT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_BY = {
++ .alpha2 = "BY",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_BZ = {
++ .alpha2 = "BZ",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_CA = {
++ .alpha2 = "CA",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CF = {
++ .alpha2 = "CF",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 40, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 40, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5730, 40, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 40, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CH = {
++ .alpha2 = "CH",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CI = {
++ .alpha2 = "CI",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CL = {
++ .alpha2 = "CL",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_CN = {
++ .alpha2 = "CN",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 23, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ REG_RULE_EXT(57240, 59400, 2160, 0, 28, 0, 0),
++ REG_RULE_EXT(59400, 63720, 2160, 0, 44, 0, 0),
++ REG_RULE_EXT(63720, 65880, 2160, 0, 28, 0, 0),
++ },
++ .n_reg_rules = 7
++};
++
++static const struct ieee80211_regdomain regdom_CO = {
++ .alpha2 = "CO",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CR = {
++ .alpha2 = "CR",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5730, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CX = {
++ .alpha2 = "CX",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CY = {
++ .alpha2 = "CY",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_CZ = {
++ .alpha2 = "CZ",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5150, 5250, 80, 0, 23, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5350, 80, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5470, 5725, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_DE = {
++ .alpha2 = "DE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5150, 5250, 80, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5350, 80, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5470, 5695, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ /*REG_RULE_EXT(5470, 5725, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),*/
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_DK = {
++ .alpha2 = "DK",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_DM = {
++ .alpha2 = "DM",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_DO = {
++ .alpha2 = "DO",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_DZ = {
++ .alpha2 = "DZ",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 23, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5670, 160, 0, 23, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_EC = {
++ .alpha2 = "EC",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5730, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_EE = {
++ .alpha2 = "EE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_EG = {
++ .alpha2 = "EG",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_ES = {
++ .alpha2 = "ES",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5150, 5250, 80, 0, 23, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5350, 80, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5470, 5725, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_ET = {
++ .alpha2 = "ET",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_FI = {
++ .alpha2 = "FI",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_FM = {
++ .alpha2 = "FM",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_FR = {
++ .alpha2 = "FR",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5695, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_GB = {
++ .alpha2 = "GB",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_GD = {
++ .alpha2 = "GD",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_GE = {
++ .alpha2 = "GE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 18, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 18, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_GF = {
++ .alpha2 = "GF",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_GH = {
++ .alpha2 = "GH",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_GL = {
++ .alpha2 = "GL",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5710, 80, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_GP = {
++ .alpha2 = "GP",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_GR = {
++ .alpha2 = "GR",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_GT = {
++ .alpha2 = "GT",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_GU = {
++ .alpha2 = "GU",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5730, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_GY = {
++ .alpha2 = "GY",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_HK = {
++ .alpha2 = "HK",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_HN = {
++ .alpha2 = "HN",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_HR = {
++ .alpha2 = "HR",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_HT = {
++ .alpha2 = "HT",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_HU = {
++ .alpha2 = "HU",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_ID = {
++ .alpha2 = "ID",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5735, 5815, 80, 0, 23, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_IE = {
++ .alpha2 = "IE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_IL = {
++ .alpha2 = "IL",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5150, 5250, 80, 0, 23, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5350, 80, 0, 23, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_IN = {
++ .alpha2 = "IN",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_IR = {
++ .alpha2 = "IR",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_IS = {
++ .alpha2 = "IS",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_IT = {
++ .alpha2 = "IT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_JM = {
++ .alpha2 = "JM",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_JO = {
++ .alpha2 = "JO",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 23, 0, 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_JP = {
++ .alpha2 = "JP",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(2474, 2494, 20, 0, 20, 0,
++ NL80211_RRF_NO_OFDM | 0),
++ REG_RULE_EXT(4910, 4990, 40, 0, 23, 0, 0),
++ REG_RULE_EXT(5030, 5090, 40, 0, 23, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 23, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 7
++};
++
++static const struct ieee80211_regdomain regdom_KE = {
++ .alpha2 = "KE",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 23, 0, 0),
++ REG_RULE_EXT(5490, 5570, 80, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5775, 40, 0, 23, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_KH = {
++ .alpha2 = "KH",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_KN = {
++ .alpha2 = "KN",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5815, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_KP = {
++ .alpha2 = "KP",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5630, 80, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5815, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_KR = {
++ .alpha2 = "KR",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_KW = {
++ .alpha2 = "KW",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_KY = {
++ .alpha2 = "KY",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_KZ = {
++ .alpha2 = "KZ",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 1
++};
++
++static const struct ieee80211_regdomain regdom_LB = {
++ .alpha2 = "LB",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_LC = {
++ .alpha2 = "LC",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5815, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_LI = {
++ .alpha2 = "LI",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_LK = {
++ .alpha2 = "LK",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5730, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_LS = {
++ .alpha2 = "LS",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_LT = {
++ .alpha2 = "LT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_LU = {
++ .alpha2 = "LU",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_LV = {
++ .alpha2 = "LV",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MA = {
++ .alpha2 = "MA",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_MC = {
++ .alpha2 = "MC",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MD = {
++ .alpha2 = "MD",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_ME = {
++ .alpha2 = "ME",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MF = {
++ .alpha2 = "MF",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MH = {
++ .alpha2 = "MH",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MK = {
++ .alpha2 = "MK",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MN = {
++ .alpha2 = "MN",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MO = {
++ .alpha2 = "MO",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 40, 0, 23, 0, 0),
++ REG_RULE_EXT(5250, 5330, 40, 0, 23, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 40, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MP = {
++ .alpha2 = "MP",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MQ = {
++ .alpha2 = "MQ",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MR = {
++ .alpha2 = "MR",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MT = {
++ .alpha2 = "MT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MU = {
++ .alpha2 = "MU",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MW = {
++ .alpha2 = "MW",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_MX = {
++ .alpha2 = "MX",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_MY = {
++ .alpha2 = "MY",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_NI = {
++ .alpha2 = "NI",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_NL = {
++ .alpha2 = "NL",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR |
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_NO = {
++ .alpha2 = "NO",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5150, 5250, 80, 0, 23, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5350, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5470, 5795, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5815, 5850, 35, 0, 33, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(17100, 17300, 200, 0, 20, 0, 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 7
++};
++
++static const struct ieee80211_regdomain regdom_NP = {
++ .alpha2 = "NP",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_NZ = {
++ .alpha2 = "NZ",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_OM = {
++ .alpha2 = "OM",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_PA = {
++ .alpha2 = "PA",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_PE = {
++ .alpha2 = "PE",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PF = {
++ .alpha2 = "PF",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_PG = {
++ .alpha2 = "PG",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PH = {
++ .alpha2 = "PH",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PK = {
++ .alpha2 = "PK",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_PL = {
++ .alpha2 = "PL",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PM = {
++ .alpha2 = "PM",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_PR = {
++ .alpha2 = "PR",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PT = {
++ .alpha2 = "PT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PW = {
++ .alpha2 = "PW",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_PY = {
++ .alpha2 = "PY",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_QA = {
++ .alpha2 = "QA",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 2
++};
++
++static const struct ieee80211_regdomain regdom_RE = {
++ .alpha2 = "RE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_RO = {
++ .alpha2 = "RO",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_RS = {
++ .alpha2 = "RS",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2400, 2483, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5150, 5350, 40, 0, 23, 0,
++ NL80211_RRF_NO_OUTDOOR | 0),
++ REG_RULE_EXT(5470, 5725, 20, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_RU = {
++ .alpha2 = "RU",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5650, 5730, 80, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_RW = {
++ .alpha2 = "RW",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_SA = {
++ .alpha2 = "SA",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_SE = {
++ .alpha2 = "SE",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_SG = {
++ .alpha2 = "SG",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_SI = {
++ .alpha2 = "SI",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_SK = {
++ .alpha2 = "SK",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_SN = {
++ .alpha2 = "SN",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_SR = {
++ .alpha2 = "SR",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_SV = {
++ .alpha2 = "SV",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_SY = {
++ .alpha2 = "SY",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 1
++};
++
++static const struct ieee80211_regdomain regdom_TC = {
++ .alpha2 = "TC",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_TD = {
++ .alpha2 = "TD",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_TG = {
++ .alpha2 = "TG",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5250, 5330, 40, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5710, 40, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_TH = {
++ .alpha2 = "TH",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_TN = {
++ .alpha2 = "TN",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_TR = {
++ .alpha2 = "TR",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_TT = {
++ .alpha2 = "TT",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_TW = {
++ .alpha2 = "TW",
++ .dfs_region = NL80211_DFS_JP,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5270, 5330, 40, 0, 17, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5590, 80, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5650, 5710, 40, 0, 30, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_UA = {
++ .alpha2 = "UA",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2400, 2483, 40, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR | 0),
++ REG_RULE_EXT(5150, 5350, 40, 0, 20, 0,
++ NL80211_RRF_NO_OUTDOOR | 0),
++ REG_RULE_EXT(5490, 5670, 80, 0, 20, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 20, 0, 0),
++ REG_RULE_EXT(57000, 66000, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_UG = {
++ .alpha2 = "UG",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_US = {
++ .alpha2 = "US",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ // 1...13
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ // 36 40 44 48
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ // 52 56 60 64
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ // 100 104 108 112 116 120 124
++ REG_RULE_EXT(5490, 5650, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ // 128 132 136 140
++ REG_RULE_EXT(5650, 5710, 40, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ // 149 153 157 161 165
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ REG_RULE_EXT(57240, 63720, 2160, 0, 40, 0, 0),
++ },
++ .n_reg_rules = 7
++};
++
++static const struct ieee80211_regdomain regdom_UY = {
++ .alpha2 = "UY",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_UZ = {
++ .alpha2 = "UZ",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ },
++ .n_reg_rules = 3
++};
++
++static const struct ieee80211_regdomain regdom_VC = {
++ .alpha2 = "VC",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_VE = {
++ .alpha2 = "VE",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 23, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 23, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_VI = {
++ .alpha2 = "VI",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2472, 40, 0, 30, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 24, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_VN = {
++ .alpha2 = "VN",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0, 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5490, 5730, 80, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_VU = {
++ .alpha2 = "VU",
++ .dfs_region = NL80211_DFS_FCC,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 17, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 24, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5730, 160, 0, 24, 0,
++ NL80211_RRF_DFS | 0),
++ REG_RULE_EXT(5735, 5835, 80, 0, 30, 0, 0),
++ },
++ .n_reg_rules = 5
++};
++
++static const struct ieee80211_regdomain regdom_WF = {
++ .alpha2 = "WF",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_YE = {
++ .alpha2 = "YE",
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ },
++ .n_reg_rules = 1
++};
++
++static const struct ieee80211_regdomain regdom_YT = {
++ .alpha2 = "YT",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_ZA = {
++ .alpha2 = "ZA",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5695, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ /*REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),*/
++ },
++ .n_reg_rules = 4
++};
++
++static const struct ieee80211_regdomain regdom_ZW = {
++ .alpha2 = "ZW",
++ .dfs_region = NL80211_DFS_ETSI,
++ .reg_rules = {
++ REG_RULE_EXT(2402, 2482, 40, 0, 20, 0, 0),
++ REG_RULE_EXT(5170, 5250, 80, 0, 20, 0,
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5250, 5330, 80, 0, 20, 0,
++ NL80211_RRF_DFS |
++ NL80211_RRF_AUTO_BW | 0),
++ REG_RULE_EXT(5490, 5710, 160, 0, 27, 0,
++ NL80211_RRF_DFS | 0),
++ },
++ .n_reg_rules = 4
++};
++
++const struct ieee80211_regdomain *reg_regdb[] = {
++ &regdom_00,
++ &regdom_AD,
++ &regdom_AE,
++ &regdom_AF,
++ &regdom_AI,
++ &regdom_AL,
++ &regdom_AM,
++ &regdom_AN,
++ &regdom_AR,
++ &regdom_AS,
++ &regdom_AT,
++ &regdom_AU,
++ &regdom_AW,
++ &regdom_AZ,
++ &regdom_BA,
++ &regdom_BB,
++ &regdom_BD,
++ &regdom_BE,
++ &regdom_BF,
++ &regdom_BG,
++ &regdom_BH,
++ &regdom_BL,
++ &regdom_BM,
++ &regdom_BN,
++ &regdom_BO,
++ &regdom_BR,
++ &regdom_BS,
++ &regdom_BT,
++ &regdom_BY,
++ &regdom_BZ,
++ &regdom_CA,
++ &regdom_CF,
++ &regdom_CH,
++ &regdom_CI,
++ &regdom_CL,
++ &regdom_CN,
++ &regdom_CO,
++ &regdom_CR,
++ &regdom_CX,
++ &regdom_CY,
++ &regdom_CZ,
++ &regdom_DE,
++ &regdom_DK,
++ &regdom_DM,
++ &regdom_DO,
++ &regdom_DZ,
++ &regdom_EC,
++ &regdom_EE,
++ &regdom_EG,
++ &regdom_ES,
++ &regdom_ET,
++ &regdom_FI,
++ &regdom_FM,
++ &regdom_FR,
++ &regdom_GB,
++ &regdom_GD,
++ &regdom_GE,
++ &regdom_GF,
++ &regdom_GH,
++ &regdom_GL,
++ &regdom_GP,
++ &regdom_GR,
++ &regdom_GT,
++ &regdom_GU,
++ &regdom_GY,
++ &regdom_HK,
++ &regdom_HN,
++ &regdom_HR,
++ &regdom_HT,
++ &regdom_HU,
++ &regdom_ID,
++ &regdom_IE,
++ &regdom_IL,
++ &regdom_IN,
++ &regdom_IR,
++ &regdom_IS,
++ &regdom_IT,
++ &regdom_JM,
++ &regdom_JO,
++ &regdom_JP,
++ &regdom_KE,
++ &regdom_KH,
++ &regdom_KN,
++ &regdom_KP,
++ &regdom_KR,
++ &regdom_KW,
++ &regdom_KY,
++ &regdom_KZ,
++ &regdom_LB,
++ &regdom_LC,
++ &regdom_LI,
++ &regdom_LK,
++ &regdom_LS,
++ &regdom_LT,
++ &regdom_LU,
++ &regdom_LV,
++ &regdom_MA,
++ &regdom_MC,
++ &regdom_MD,
++ &regdom_ME,
++ &regdom_MF,
++ &regdom_MH,
++ &regdom_MK,
++ &regdom_MN,
++ &regdom_MO,
++ &regdom_MP,
++ &regdom_MQ,
++ &regdom_MR,
++ &regdom_MT,
++ &regdom_MU,
++ &regdom_MW,
++ &regdom_MX,
++ &regdom_MY,
++ &regdom_NI,
++ &regdom_NL,
++ &regdom_NO,
++ &regdom_NP,
++ &regdom_NZ,
++ &regdom_OM,
++ &regdom_PA,
++ &regdom_PE,
++ &regdom_PF,
++ &regdom_PG,
++ &regdom_PH,
++ &regdom_PK,
++ &regdom_PL,
++ &regdom_PM,
++ &regdom_PR,
++ &regdom_PT,
++ &regdom_PW,
++ &regdom_PY,
++ &regdom_QA,
++ &regdom_RE,
++ &regdom_RO,
++ &regdom_RS,
++ &regdom_RU,
++ &regdom_RW,
++ &regdom_SA,
++ &regdom_SE,
++ &regdom_SG,
++ &regdom_SI,
++ &regdom_SK,
++ &regdom_SN,
++ &regdom_SR,
++ &regdom_SV,
++ &regdom_SY,
++ &regdom_TC,
++ &regdom_TD,
++ &regdom_TG,
++ &regdom_TH,
++ &regdom_TN,
++ &regdom_TR,
++ &regdom_TT,
++ &regdom_TW,
++ &regdom_UA,
++ &regdom_UG,
++ &regdom_US,
++ &regdom_UY,
++ &regdom_UZ,
++ &regdom_VC,
++ &regdom_VE,
++ &regdom_VI,
++ &regdom_VN,
++ &regdom_VU,
++ &regdom_WF,
++ &regdom_YE,
++ &regdom_YT,
++ &regdom_ZA,
++ &regdom_ZW,
++};
++
++int reg_regdb_size = ARRAY_SIZE(reg_regdb);
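++
++/*
++ * Illustrative only: a minimal sketch of how a caller could pick an entry
++ * out of reg_regdb[] by alpha2 country code. The helper name below is
++ * hypothetical and is not part of this driver; the real selection logic
++ * lives elsewhere in the driver.
++ */
++#if 0
++static const struct ieee80211_regdomain *reg_regdb_find(const char *alpha2)
++{
++ int i;
++
++ for (i = 0; i < reg_regdb_size; i++) {
++ const struct ieee80211_regdomain *rd = reg_regdb[i];
++
++ if (rd->alpha2[0] == alpha2[0] && rd->alpha2[1] == alpha2[1])
++ return rd;
++ }
++
++ return NULL; /* caller may fall back to regdom_00 (world domain) */
++}
++#endif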
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_bfmer.c
+@@ -0,0 +1,105 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_bfmer.c
++ *
++ * @brief VHT Beamformer function definitions
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ******************************************************************************
++ */
++
++/**
++ * INCLUDE FILES
++ ******************************************************************************
++ */
++
++#include <linux/slab.h>
++#include "rwnx_bfmer.h"
++
++/**
++ * FUNCTION DEFINITIONS
++ ******************************************************************************
++ */
++
++int rwnx_bfmer_report_add(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta,
++ unsigned int length)
++{
++ gfp_t flags;
++ struct rwnx_bfmer_report *bfm_report;
++
++ if (in_softirq())
++ flags = GFP_ATOMIC;
++ else
++ flags = GFP_KERNEL;
++
++ /* Allocate a structure that will contain the beamforming report */
++ bfm_report = kmalloc(sizeof(*bfm_report) + length, flags);
++
++
++ /* Check report allocation */
++ if (!bfm_report) {
++ /* Do not use beamforming */
++ return -1;
++ }
++
++ /* Store report length */
++ bfm_report->length = length;
++
++ /*
++ * Map the report buffer so that the MAC can DMA the received
++ * Beamforming Report directly into driver memory
++ */
++ bfm_report->dma_addr = dma_map_single(rwnx_hw->dev, &bfm_report->report[0],
++ length, DMA_FROM_DEVICE);
++
++ /* Check DMA mapping result */
++ if (dma_mapping_error(rwnx_hw->dev, bfm_report->dma_addr)) {
++ /* Free allocated report */
++ kfree(bfm_report);
++ /* And leave */
++ return -1;
++ }
++
++ /* Store report structure */
++ rwnx_sta->bfm_report = bfm_report;
++
++ return 0;
++}
++
++void rwnx_bfmer_report_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta)
++{
++ /* Verify if a report has been allocated */
++ if (rwnx_sta->bfm_report) {
++ struct rwnx_bfmer_report *bfm_report = rwnx_sta->bfm_report;
++
++ /* Unmap DMA region */
++ dma_unmap_single(rwnx_hw->dev, bfm_report->dma_addr,
++ bfm_report->length, DMA_FROM_DEVICE);
++
++ /* Free allocated report structure and clean the pointer */
++ kfree(bfm_report);
++ rwnx_sta->bfm_report = NULL;
++ }
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++u8 rwnx_bfmer_get_rx_nss(const struct ieee80211_vht_cap *vht_capa)
++{
++ int i;
++ u8 rx_nss = 0;
++ u16 rx_mcs_map = le16_to_cpu(vht_capa->supp_mcs.rx_mcs_map);
++
++ for (i = 7; i >= 0; i--) {
++ u8 mcs = (rx_mcs_map >> (2 * i)) & 3;
++
++ if (mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
++ rx_nss = i + 1;
++ break;
++ }
++ }
++
++ return rx_nss;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_bfmer.h
+@@ -0,0 +1,100 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_bfmer.h
++ *
++ * @brief VHT Beamformer function declarations
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_BFMER_H_
++#define _RWNX_BFMER_H_
++
++/**
++ * INCLUDE FILES
++ ******************************************************************************
++ */
++
++#include "rwnx_defs.h"
++
++/**
++ * DEFINES
++ ******************************************************************************
++ */
++
++/// Maximal supported report length (in bytes)
++#define RWNX_BFMER_REPORT_MAX_LEN 2048
++
++/// Size of the allocated report space (twice the maximum report length)
++#define RWNX_BFMER_REPORT_SPACE_SIZE (RWNX_BFMER_REPORT_MAX_LEN * 2)
++
++/**
++ * TYPE DEFINITIONS
++ ******************************************************************************
++ */
++
++/*
++ * Structure used to store a beamforming report.
++ */
++struct rwnx_bfmer_report {
++ dma_addr_t dma_addr; /* DMA address provided to the MAC for the
++ transfer of the Beamforming Report */
++ unsigned int length; /* Report Length */
++ u8 report[1]; /* Report to be used for VHT TX Beamforming */
++};
++
++/**
++ * FUNCTION DECLARATIONS
++ ******************************************************************************
++ */
++
++/**
++ ******************************************************************************
++ * @brief Allocate memory intended to contain the Beamforming Report received
++ * from a beamforming-capable peer.
++ * The provided length shall be large enough to contain the VHT Compressed
++ * Beamforming Report and the MU Exclusive part.
++ * It also performs a DMA mapping, providing an address to the HW
++ * responsible for the DMA transfer of the report.
++ * If successful, a struct rwnx_bfmer_report object is allocated and its
++ * address is stored in rwnx_sta->bfm_report.
++ *
++ * @param[in] rwnx_hw PHY Information
++ * @param[in] rwnx_sta Peer STA Information
++ * @param[in] length Memory size to be allocated
++ *
++ * @return 0 if operation is successful, else -1.
++ ******************************************************************************
++ */
++int rwnx_bfmer_report_add(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta,
++ unsigned int length);
++
++/**
++ ******************************************************************************
++ * @brief Free memory previously allocated for a Beamforming Report and
++ * unmap its DMA region.
++ *
++ * @param[in] rwnx_hw PHY Information
++ * @param[in] rwnx_sta Peer STA Information
++ *
++ ******************************************************************************
++ */
++void rwnx_bfmer_report_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta);
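++
++/*
++ * Minimal usage sketch (hypothetical call site, disabled): allocate the
++ * report buffer when a beamformee peer shows up and release it when the
++ * peer goes away. RWNX_BFMER_REPORT_SPACE_SIZE is used as the length so
++ * that both the compressed report and the MU exclusive part fit.
++ */
++#if 0
++static void example_bfmer_setup(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta)
++{
++ if (rwnx_bfmer_report_add(rwnx_hw, sta, RWNX_BFMER_REPORT_SPACE_SIZE))
++ return; /* allocation or DMA mapping failed, beamforming stays off */
++
++ /* ... beamforming can now be configured for this peer ... */
++
++ rwnx_bfmer_report_del(rwnx_hw, sta);
++}
++#endif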
++
++#ifdef CONFIG_RWNX_FULLMAC
++/**
++ ******************************************************************************
++ * @brief Parse a Rx VHT-MCS map in order to deduce the maximum number of
++ * Spatial Streams supported by a beamformee.
++ *
++ * @param[in] vht_capa Received VHT Capability field.
++ *
++ ******************************************************************************
++ */
++u8 rwnx_bfmer_get_rx_nss(const struct ieee80211_vht_cap *vht_capa);
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#endif /* _RWNX_BFMER_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cfgfile.c
+@@ -0,0 +1,237 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_cfgfile.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++#include <linux/firmware.h>
++#include <linux/if_ether.h>
++
++#include "rwnx_defs.h"
++#include "rwnx_cfgfile.h"
++
++/**
++ * Look for a line starting with "<tag_name>" in the configuration file data
++ * and return a pointer to the value that follows it, or NULL if no such tag
++ * is found.
++ */
++static const char *rwnx_find_tag(const u8 *file_data, unsigned int file_size,
++ const char *tag_name, unsigned int tag_len)
++{
++ unsigned int curr, line_start = 0, line_size;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Walk through all the lines of the configuration file */
++ while (line_start < file_size) {
++ /* Search the end of the current line (or the end of the file) */
++ for (curr = line_start; curr < file_size; curr++)
++ if (file_data[curr] == '\n')
++ break;
++
++ /* Compute the line size */
++ line_size = curr - line_start;
++
++ /* Check if this line contains the expected tag */
++ if ((line_size == (strlen(tag_name) + tag_len)) &&
++ (!strncmp(&file_data[line_start], tag_name, strlen(tag_name))))
++ return (&file_data[line_start + strlen(tag_name)]);
++
++ /* Move to next line */
++ line_start = curr + 1;
++ }
++
++ /* Tag not found */
++ return NULL;
++}
++
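++/*
++ * For reference, the configuration files parsed below are plain text with one
++ * "TAG=value" pair per line, e.g. (illustrative values only):
++ *
++ * MAC_ADDR=00:11:22:33:44:55
++ * TRD_PATH_MAPPING=00
++ * KARST_DEFAULT_PATH=00
++ *
++ * Note that rwnx_find_tag() only matches a line whose total length is exactly
++ * strlen(tag_name) + tag_len, so each value must use the exact width the
++ * caller asks for (e.g. "00:00:00:00:00:00" for MAC_ADDR).
++ */
++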
++/**
++ * Parse the Config file used at init time
++ */
++int rwnx_parse_configfile(struct rwnx_hw *rwnx_hw, const char *filename,
++ struct rwnx_conf_file *config)
++{
++ const struct firmware *config_fw;
++ u8 dflt_mac[ETH_ALEN] = { 0, 111, 111, 111, 111, 0 };
++ int ret;
++ const u8 *tag_ptr;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if ((ret = request_firmware(&config_fw, filename, rwnx_hw->dev))) {
++ printk(KERN_CRIT "%s: Failed to get %s (%d)\n", __func__, filename, ret);
++ return ret;
++ }
++
++ /* Get MAC Address */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "MAC_ADDR=", strlen("00:00:00:00:00:00"));
++ if (tag_ptr != NULL) {
++ u8 *addr = config->mac_addr;
++ if (sscanf(tag_ptr,
++ "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
++ addr + 0, addr + 1, addr + 2,
++ addr + 3, addr + 4, addr + 5) != ETH_ALEN)
++ memcpy(config->mac_addr, dflt_mac, ETH_ALEN);
++ } else
++ memcpy(config->mac_addr, dflt_mac, ETH_ALEN);
++
++ RWNX_DBG("MAC Address is:\n%pM\n", config->mac_addr);
++
++ /* Release the configuration file */
++ release_firmware(config_fw);
++
++ return 0;
++}
++
++/**
++ * Parse the PHY Config file used at init time
++ */
++int rwnx_parse_phy_configfile(struct rwnx_hw *rwnx_hw, const char *filename,
++ struct rwnx_phy_conf_file *config, int path)
++{
++ const struct firmware *config_fw;
++ int ret;
++ const u8 *tag_ptr;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if ((ret = request_firmware(&config_fw, filename, rwnx_hw->dev))) {
++ printk(KERN_CRIT "%s: Failed to get %s (%d)\n", __func__, filename, ret);
++ return ret;
++ }
++
++ /* Get Trident path mapping */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "TRD_PATH_MAPPING=", strlen("00"));
++ if (tag_ptr != NULL) {
++ u8 val;
++ if (sscanf(tag_ptr, "%hhx", &val) == 1)
++ config->trd.path_mapping = val;
++ else
++ config->trd.path_mapping = path;
++ } else
++ config->trd.path_mapping = path;
++
++ RWNX_DBG("Trident path mapping is: %d\n", config->trd.path_mapping);
++
++ /* Get DC offset compensation */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "TX_DC_OFF_COMP=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->trd.tx_dc_off_comp) != 1)
++ config->trd.tx_dc_off_comp = 0;
++ } else
++ config->trd.tx_dc_off_comp = 0;
++
++ RWNX_DBG("TX DC offset compensation is: %08X\n", config->trd.tx_dc_off_comp);
++
++ /* Get Karst TX IQ compensation value for path0 on 2.4GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_TX_IQ_COMP_2_4G_PATH_0=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_2_4G[0]) != 1)
++ config->karst.tx_iq_comp_2_4G[0] = 0x01000000;
++ } else
++ config->karst.tx_iq_comp_2_4G[0] = 0x01000000;
++
++ RWNX_DBG("Karst TX IQ compensation for path 0 on 2.4GHz is: %08X\n", config->karst.tx_iq_comp_2_4G[0]);
++
++ /* Get Karst TX IQ compensation value for path1 on 2.4GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_TX_IQ_COMP_2_4G_PATH_1=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_2_4G[1]) != 1)
++ config->karst.tx_iq_comp_2_4G[1] = 0x01000000;
++ } else
++ config->karst.tx_iq_comp_2_4G[1] = 0x01000000;
++
++ RWNX_DBG("Karst TX IQ compensation for path 1 on 2.4GHz is: %08X\n", config->karst.tx_iq_comp_2_4G[1]);
++
++ /* Get Karst RX IQ compensation value for path0 on 2.4GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_RX_IQ_COMP_2_4G_PATH_0=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_2_4G[0]) != 1)
++ config->karst.rx_iq_comp_2_4G[0] = 0x01000000;
++ } else
++ config->karst.rx_iq_comp_2_4G[0] = 0x01000000;
++
++ RWNX_DBG("Karst RX IQ compensation for path 0 on 2.4GHz is: %08X\n", config->karst.rx_iq_comp_2_4G[0]);
++
++ /* Get Karst RX IQ compensation value for path1 on 2.4GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_RX_IQ_COMP_2_4G_PATH_1=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_2_4G[1]) != 1)
++ config->karst.rx_iq_comp_2_4G[1] = 0x01000000;
++ } else
++ config->karst.rx_iq_comp_2_4G[1] = 0x01000000;
++
++ RWNX_DBG("Karst RX IQ compensation for path 1 on 2.4GHz is: %08X\n", config->karst.rx_iq_comp_2_4G[1]);
++
++ /* Get Karst TX IQ compensation value for path0 on 5GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_TX_IQ_COMP_5G_PATH_0=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_5G[0]) != 1)
++ config->karst.tx_iq_comp_5G[0] = 0x01000000;
++ } else
++ config->karst.tx_iq_comp_5G[0] = 0x01000000;
++
++ RWNX_DBG("Karst TX IQ compensation for path 0 on 5GHz is: %08X\n", config->karst.tx_iq_comp_5G[0]);
++
++ /* Get Karst TX IQ compensation value for path1 on 5GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_TX_IQ_COMP_5G_PATH_1=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.tx_iq_comp_5G[1]) != 1)
++ config->karst.tx_iq_comp_5G[1] = 0x01000000;
++ } else
++ config->karst.tx_iq_comp_5G[1] = 0x01000000;
++
++ RWNX_DBG("Karst TX IQ compensation for path 1 on 5GHz is: %08X\n", config->karst.tx_iq_comp_5G[1]);
++
++ /* Get Karst RX IQ compensation value for path0 on 5GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_RX_IQ_COMP_5G_PATH_0=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_5G[0]) != 1)
++ config->karst.rx_iq_comp_5G[0] = 0x01000000;
++ } else
++ config->karst.rx_iq_comp_5G[0] = 0x01000000;
++
++ RWNX_DBG("Karst RX IQ compensation for path 0 on 5GHz is: %08X\n", config->karst.rx_iq_comp_5G[0]);
++
++ /* Get Karst RX IQ compensation value for path1 on 5GHz */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_RX_IQ_COMP_5G_PATH_1=", strlen("00000000"));
++ if (tag_ptr != NULL) {
++ if (sscanf(tag_ptr, "%08x", &config->karst.rx_iq_comp_5G[1]) != 1)
++ config->karst.rx_iq_comp_5G[1] = 0x01000000;
++ } else
++ config->karst.rx_iq_comp_5G[1] = 0x01000000;
++
++ RWNX_DBG("Karst RX IQ compensation for path 1 on 5GHz is: %08X\n", config->karst.rx_iq_comp_5G[1]);
++
++ /* Get Karst default path */
++ tag_ptr = rwnx_find_tag(config_fw->data, config_fw->size,
++ "KARST_DEFAULT_PATH=", strlen("00"));
++ if (tag_ptr != NULL) {
++ u8 val;
++ if (sscanf(tag_ptr, "%hhx", &val) == 1)
++ config->karst.path_used = val;
++ else
++ config->karst.path_used = path;
++ } else
++ config->karst.path_used = path;
++
++ RWNX_DBG("Karst default path is: %d\n", config->karst.path_used);
++
++ /* Release the configuration file */
++ release_firmware(config_fw);
++
++ return 0;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cfgfile.h
+@@ -0,0 +1,35 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_cfgfile.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _RWNX_CFGFILE_H_
++#define _RWNX_CFGFILE_H_
++
++/*
++ * Structure used to retrieve information from the Config file used at Initialization time
++ */
++struct rwnx_conf_file {
++ u8 mac_addr[ETH_ALEN];
++};
++
++/*
++ * Structure used to retrieve information from the PHY Config file used at Initialization time
++ */
++struct rwnx_phy_conf_file {
++ struct phy_trd_cfg_tag trd;
++ struct phy_karst_cfg_tag karst;
++};
++
++int rwnx_parse_configfile(struct rwnx_hw *rwnx_hw, const char *filename,
++ struct rwnx_conf_file *config);
++
++int rwnx_parse_phy_configfile(struct rwnx_hw *rwnx_hw, const char *filename,
++ struct rwnx_phy_conf_file *config, int path);
++
++#endif /* _RWNX_CFGFILE_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cmds.c
+@@ -0,0 +1,541 @@
++/**
++ ******************************************************************************
++ *
++ * rwnx_cmds.c
++ *
++ * Handles queueing (push to IPC, ack/cfm from IPC) of commands issued to
++ * LMAC FW
++ *
++ * Copyright (C) RivieraWaves 2014-2019
++ *
++ ******************************************************************************
++ */
++
++//#define CREATE_TRACE_POINTS
++#include <linux/list.h>
++
++#include "rwnx_cmds.h"
++#include "rwnx_defs.h"
++#include "rwnx_strs.h"
++#include "rwnx_events.h"
++#include "aicwf_txrxif.h"
++#ifdef AICWF_SDIO_SUPPORT
++#include "aicwf_sdio.h"
++#else
++#include "aicwf_usb.h"
++#endif
++/**
++ *
++ */
++extern int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val);
++
++void rwnx_cmd_free(struct rwnx_cmd *cmd);
++
++static void cmd_dump(const struct rwnx_cmd *cmd)
++{
++ printk(KERN_CRIT "tkn[%d] flags:%04x result:%3d cmd:%4d-%-24s - reqcfm(%4d-%-s)\n",
++ cmd->tkn, cmd->flags, cmd->result, cmd->id, RWNX_ID2STR(cmd->id),
++ cmd->reqid, cmd->reqid != (lmac_msg_id_t)-1 ? RWNX_ID2STR(cmd->reqid) : "none");
++}
++
++/**
++ *
++ */
++static void cmd_complete(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd)
++{
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++ lockdep_assert_held(&cmd_mgr->lock);
++
++ list_del(&cmd->list);
++ cmd_mgr->queue_sz--;
++
++ cmd->flags |= RWNX_CMD_FLAG_DONE;
++ if (cmd->flags & RWNX_CMD_FLAG_NONBLOCK) {
++ rwnx_cmd_free(cmd);//kfree(cmd);AIDEN
++ } else {
++ if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) {
++ cmd->result = 0;
++ complete(&cmd->complete);
++ }
++ }
++}
++
++int cmd_mgr_queue_force_defer(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd)
++{
++ bool defer_push = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifdef CREATE_TRACE_POINTS
++ trace_msg_send(cmd->id);
++#endif
++ spin_lock_bh(&cmd_mgr->lock);
++
++ if (cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) {
++ printk(KERN_CRIT"cmd queue crashed\n");
++ cmd->result = -EPIPE;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -EPIPE;
++ }
++
++ #ifndef CONFIG_RWNX_FHOST
++ if (!list_empty(&cmd_mgr->cmds)) {
++ if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz) {
++ printk(KERN_CRIT"Too many cmds (%d) already queued\n",
++ cmd_mgr->max_queue_sz);
++ cmd->result = -ENOMEM;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -ENOMEM;
++ }
++ }
++ #endif
++
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH;
++ defer_push = true;
++
++ if (cmd->flags & RWNX_CMD_FLAG_REQ_CFM)
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_CFM;
++
++ cmd->tkn = cmd_mgr->next_tkn++;
++ cmd->result = -EINTR;
++
++ if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK))
++ init_completion(&cmd->complete);
++
++ list_add_tail(&cmd->list, &cmd_mgr->cmds);
++ cmd_mgr->queue_sz++;
++ spin_unlock_bh(&cmd_mgr->lock);
++
++ WAKE_CMD_WORK(cmd_mgr);
++ return 0;
++}
++
++void rwnx_msg_free_(struct lmac_msg *msg);
++
++
++static int cmd_mgr_queue(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd)
++{
++ int ret = 0;
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev = container_of(cmd_mgr, struct aic_sdio_dev, cmd_mgr);
++#endif
++#ifdef AICWF_USB_SUPPORT
++
++ struct aic_usb_dev *usbdev = container_of(cmd_mgr, struct aic_usb_dev, cmd_mgr);
++#endif
++ bool defer_push = false;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifdef CREATE_TRACE_POINTS
++ trace_msg_send(cmd->id);
++#endif
++ spin_lock_bh(&cmd_mgr->lock);
++
++ if (cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) {
++ printk(KERN_CRIT"cmd queue crashed\n");
++ cmd->result = -EPIPE;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -EPIPE;
++ }
++
++ #ifndef CONFIG_RWNX_FHOST
++ if (!list_empty(&cmd_mgr->cmds)) {
++ struct rwnx_cmd *last;
++
++ if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz) {
++ printk(KERN_CRIT"Too many cmds (%d) already queued\n",
++ cmd_mgr->max_queue_sz);
++ cmd->result = -ENOMEM;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -ENOMEM;
++ }
++ last = list_entry(cmd_mgr->cmds.prev, struct rwnx_cmd, list);
++ if (last->flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_PUSH | RWNX_CMD_FLAG_WAIT_CFM)) {
++#if 0 // queue even NONBLOCK command.
++ if (cmd->flags & RWNX_CMD_FLAG_NONBLOCK) {
++ printk(KERN_CRIT"cmd queue busy\n");
++ cmd->result = -EBUSY;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -EBUSY;
++ }
++#endif
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH;
++ defer_push = true;
++ }
++ }
++ #endif
++
++#if 0
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_ACK;
++#endif
++ if (cmd->flags & RWNX_CMD_FLAG_REQ_CFM)
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_CFM;
++
++ cmd->tkn = cmd_mgr->next_tkn++;
++ cmd->result = -EINTR;
++
++ if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK))
++ init_completion(&cmd->complete);
++
++ list_add_tail(&cmd->list, &cmd_mgr->cmds);
++ cmd_mgr->queue_sz++;
++
++ if(cmd->a2e_msg->id == ME_TRAFFIC_IND_REQ
++ #ifdef AICWF_ARP_OFFLOAD
++ || cmd->a2e_msg->id == MM_SET_ARPOFFLOAD_REQ
++ #endif
++ ) {
++ defer_push = true;
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH;
++ //printk("defer push: tkn=%d\r\n", cmd->tkn);
++ }
++
++ spin_unlock_bh(&cmd_mgr->lock);
++ if (!defer_push) {
++ AICWFDBG(LOGTRACE, "queue:id=%x, param_len=%u\n",cmd->a2e_msg->id, cmd->a2e_msg->param_len);
++
++ #ifdef AICWF_SDIO_SUPPORT
++ aicwf_set_cmd_tx((void *)(sdiodev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
++ #else
++ aicwf_set_cmd_tx((void *)(usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
++ #endif
++ //rwnx_ipc_msg_push(rwnx_hw, cmd, RWNX_CMD_A2EMSG_LEN(cmd->a2e_msg));
++
++ kfree(cmd->a2e_msg);
++ } else {
++ WAKE_CMD_WORK(cmd_mgr);
++ return 0;
++ }
++
++ if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) {
++ #ifdef CONFIG_RWNX_FHOST
++ if (wait_for_completion_killable(&cmd->complete)) {
++ cmd->result = -EINTR;
++ spin_lock_bh(&cmd_mgr->lock);
++ cmd_complete(cmd_mgr, cmd);
++ spin_unlock_bh(&cmd_mgr->lock);
++ /* TODO: kill the cmd at fw level */
++ }
++ #else
++ unsigned long tout = msecs_to_jiffies(RWNX_80211_CMD_TIMEOUT_MS /* AIDEN workaround: was "* cmd_mgr->queue_sz" */);
++ if (!wait_for_completion_killable_timeout(&cmd->complete, tout)) {
++ printk(KERN_CRIT"%s cmd timed-out cmd_mgr->queue_sz:%d\n", __func__,cmd_mgr->queue_sz);
++ #ifdef AICWF_SDIO_SUPPORT
++ ret = aicwf_sdio_writeb(sdiodev, SDIOWIFI_WAKEUP_REG, 2);
++ if (ret < 0) {
++ sdio_err("reg:%d write failed!\n", SDIOWIFI_WAKEUP_REG);
++ }
++ #endif
++
++ cmd_dump(cmd);
++ spin_lock_bh(&cmd_mgr->lock);
++
++ cmd_mgr->state = RWNX_CMD_MGR_STATE_CRASHED;
++ if (!(cmd->flags & RWNX_CMD_FLAG_DONE)) {
++ cmd->result = -ETIMEDOUT;
++ cmd_complete(cmd_mgr, cmd);
++ }
++ ret = -ETIMEDOUT;
++ spin_unlock_bh(&cmd_mgr->lock);
++ }
++ else{
++ rwnx_cmd_free(cmd);//kfree(cmd);AIDEN
++ if(!list_empty(&cmd_mgr->cmds) && usbdev->state == USB_UP_ST)
++ WAKE_CMD_WORK(cmd_mgr);
++ }
++ #endif
++ } else {
++ cmd->result = 0;
++ }
++ return ret;
++}
++
++/**
++ *
++ */
++static int cmd_mgr_llind(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd)
++{
++ struct rwnx_cmd *cur, *acked = NULL, *next = NULL;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ spin_lock_bh(&cmd_mgr->lock);
++ list_for_each_entry(cur, &cmd_mgr->cmds, list) {
++ if (!acked) {
++ if (cur->tkn == cmd->tkn) {
++ if (WARN_ON_ONCE(cur != cmd)) {
++ cmd_dump(cmd);
++ }
++ acked = cur;
++ continue;
++ }
++ }
++ if (cur->flags & RWNX_CMD_FLAG_WAIT_PUSH) {
++ next = cur;
++ break;
++ }
++ }
++ if (!acked) {
++ printk(KERN_CRIT "Error: acked cmd not found\n");
++ } else {
++ cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK;
++ if (RWNX_CMD_WAIT_COMPLETE(cmd->flags))
++ cmd_complete(cmd_mgr, cmd);
++ }
++
++ if (next) {
++ #if 0 //there is no ack
++ struct rwnx_hw *rwnx_hw = container_of(cmd_mgr, struct rwnx_hw, cmd_mgr);
++ next->flags &= ~RWNX_CMD_FLAG_WAIT_PUSH;
++ rwnx_ipc_msg_push(rwnx_hw, next, RWNX_CMD_A2EMSG_LEN(next->a2e_msg));
++ kfree(next->a2e_msg);
++ #endif
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++
++ return 0;
++}
++
++void cmd_mgr_task_process(struct work_struct *work)
++{
++ struct rwnx_cmd_mgr *cmd_mgr = container_of(work, struct rwnx_cmd_mgr, cmdWork);
++ struct rwnx_cmd *cur, *next = NULL;
++ unsigned long tout;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ while(1) {
++ next = NULL;
++ spin_lock_bh(&cmd_mgr->lock);
++
++ list_for_each_entry(cur, &cmd_mgr->cmds, list) {
++ if (cur->flags & RWNX_CMD_FLAG_WAIT_PUSH) { //just judge the first
++ next = cur;
++ }
++ break;
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++
++ if(next == NULL)
++ break;
++
++ if (next) {
++ #ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev = container_of(cmd_mgr, struct aic_sdio_dev, cmd_mgr);
++ #endif
++ #ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev = container_of(cmd_mgr, struct aic_usb_dev, cmd_mgr);
++ #endif
++ next->flags &= ~RWNX_CMD_FLAG_WAIT_PUSH;
++
++ //printk("cmd_process, cmd->id=%d, tkn=%d\r\n",next->reqid, next->tkn);
++ //rwnx_ipc_msg_push(rwnx_hw, next, RWNX_CMD_A2EMSG_LEN(next->a2e_msg));
++#ifdef AICWF_SDIO_SUPPORT
++ aicwf_set_cmd_tx((void *)(sdiodev), next->a2e_msg, sizeof(struct lmac_msg) + next->a2e_msg->param_len);
++#else
++ aicwf_set_cmd_tx((void *)(usbdev), next->a2e_msg, sizeof(struct lmac_msg) + next->a2e_msg->param_len);
++#endif
++ kfree(next->a2e_msg);
++
++ tout = msecs_to_jiffies(RWNX_80211_CMD_TIMEOUT_MS * cmd_mgr->queue_sz);
++ if (!wait_for_completion_killable_timeout(&next->complete, tout)) {
++ printk(KERN_CRIT"%s cmd timed-out cmd_mgr->queue_sz:%d\n", __func__, cmd_mgr->queue_sz);
++ cmd_dump(next);
++ spin_lock_bh(&cmd_mgr->lock);
++ //AIDEN workaround
++ cmd_mgr->state = RWNX_CMD_MGR_STATE_CRASHED;
++ if (!(next->flags & RWNX_CMD_FLAG_DONE)) {
++ next->result = -ETIMEDOUT;
++ cmd_complete(cmd_mgr, next);
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++ } else
++ rwnx_cmd_free(next);//kfree(next);AIDEN
++ }
++ }
++
++}
++
++
++static int cmd_mgr_run_callback(struct rwnx_hw *rwnx_hw, struct rwnx_cmd *cmd,
++ struct rwnx_cmd_e2amsg *msg, msg_cb_fct cb)
++{
++ int res;
++
++ if (! cb){
++ return 0;
++ }
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifndef CONFIG_DEBUG_ATOMIC_SLEEP
++ //spin_lock_bh(&rwnx_hw->cb_lock);
++#endif
++ res = cb(rwnx_hw, cmd, msg);
++#ifndef CONFIG_DEBUG_ATOMIC_SLEEP
++ //spin_unlock_bh(&rwnx_hw->cb_lock);
++#endif
++
++ return res;
++}
++
++/**
++ *
++ */
++static int cmd_mgr_msgind(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd_e2amsg *msg,
++ msg_cb_fct cb)
++{
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev = container_of(cmd_mgr, struct aic_sdio_dev, cmd_mgr);
++ struct rwnx_hw *rwnx_hw = sdiodev->rwnx_hw;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev = container_of(cmd_mgr, struct aic_usb_dev, cmd_mgr);
++ struct rwnx_hw *rwnx_hw = usbdev->rwnx_hw;
++#endif
++ struct rwnx_cmd *cmd, *pos;
++ bool found = false;
++
++ // RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifdef CREATE_TRACE_POINTS
++ trace_msg_recv(msg->id);
++#endif
++ AICWFDBG(LOGTRACE, "%s cmd->id=%d\n", __func__, msg->id);
++ spin_lock_bh(&cmd_mgr->lock);
++ list_for_each_entry_safe(cmd, pos, &cmd_mgr->cmds, list) {
++ if (cmd->reqid == msg->id &&
++ (cmd->flags & RWNX_CMD_FLAG_WAIT_CFM)) {
++
++ if (!cmd_mgr_run_callback(rwnx_hw, cmd, msg, cb)) {
++ found = true;
++ cmd->flags &= ~RWNX_CMD_FLAG_WAIT_CFM;
++
++ if (WARN((msg->param_len > RWNX_CMD_E2AMSG_LEN_MAX),
++ "Unexpect E2A msg len %d > %d\n", msg->param_len,
++ RWNX_CMD_E2AMSG_LEN_MAX)) {
++ msg->param_len = RWNX_CMD_E2AMSG_LEN_MAX;
++ }
++
++ if (cmd->e2a_msg && msg->param_len)
++ memcpy(cmd->e2a_msg, &msg->param, msg->param_len);
++
++ if (RWNX_CMD_WAIT_COMPLETE(cmd->flags))
++ cmd_complete(cmd_mgr, cmd);
++
++ break;
++ }
++ }
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++
++ if (!found)
++ cmd_mgr_run_callback(rwnx_hw, NULL, msg, cb);
++
++ return 0;
++}
++
++/**
++ *
++ */
++static void cmd_mgr_print(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ struct rwnx_cmd *cur;
++
++ spin_lock_bh(&cmd_mgr->lock);
++ RWNX_DBG("q_sz/max: %2d / %2d - next tkn: %d\n",
++ cmd_mgr->queue_sz, cmd_mgr->max_queue_sz,
++ cmd_mgr->next_tkn);
++ list_for_each_entry(cur, &cmd_mgr->cmds, list) {
++ cmd_dump(cur);
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++}
++
++static void cmd_mgr_drain(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ struct rwnx_cmd *cur, *nxt;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ spin_lock_bh(&cmd_mgr->lock);
++ list_for_each_entry_safe(cur, nxt, &cmd_mgr->cmds, list) {
++ list_del(&cur->list);
++ cmd_mgr->queue_sz--;
++ if (!(cur->flags & RWNX_CMD_FLAG_NONBLOCK))
++ complete(&cur->complete);
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++}
++
++void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ INIT_LIST_HEAD(&cmd_mgr->cmds);
++ cmd_mgr->state = RWNX_CMD_MGR_STATE_INITED;
++ spin_lock_init(&cmd_mgr->lock);
++ cmd_mgr->max_queue_sz = RWNX_CMD_MAX_QUEUED;
++ cmd_mgr->queue = &cmd_mgr_queue;
++ cmd_mgr->print = &cmd_mgr_print;
++ cmd_mgr->drain = &cmd_mgr_drain;
++ cmd_mgr->llind = &cmd_mgr_llind;
++ cmd_mgr->msgind = &cmd_mgr_msgind;
++
++ INIT_WORK(&cmd_mgr->cmdWork, cmd_mgr_task_process);
++ cmd_mgr->cmd_wq = create_singlethread_workqueue("cmd_wq");
++ if (!cmd_mgr->cmd_wq) {
++ txrx_err("insufficient memory to create cmd workqueue.\n");
++ return;
++ }
++}
++
++void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ cmd_mgr->print(cmd_mgr);
++ cmd_mgr->drain(cmd_mgr);
++ cmd_mgr->print(cmd_mgr);
++ flush_workqueue(cmd_mgr->cmd_wq);
++ destroy_workqueue(cmd_mgr->cmd_wq);
++ memset(cmd_mgr, 0, sizeof(*cmd_mgr));
++}
++
++
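++/*
++ * Serialise an LMAC command into the bus command buffer. The layout built
++ * below, as inferred from this code rather than a published spec, is:
++ * bytes 0-1 : payload length (len + 4), little endian, low 12 bits only
++ * byte 2 : type marker 0x11
++ * byte 3 : reserved (0)
++ * bytes 4-7 : dummy/padding word
++ * then : msg id, dest_id, src_id and param_len as 16-bit little
++ * endian values, followed by param_len bytes of parameters
++ */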
++void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len)
++{
++ u8 *buffer = NULL;
++ u16 index = 0;
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev = (struct aic_sdio_dev *)dev;
++ struct aicwf_bus *bus = sdiodev->bus_if;
++#else
++ struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev;
++ struct aicwf_bus *bus = NULL;
++ if (!usbdev->state) {
++ printk("down msg \n");
++ return;
++ }
++ bus = usbdev->bus_if;
++#endif
++ buffer = bus->cmd_buf;
++
++ memset(buffer, 0, CMD_BUF_MAX);
++ buffer[0] = (len+4) & 0x00ff;
++ buffer[1] = ((len+4) >> 8) &0x0f;
++ buffer[2] = 0x11;
++ buffer[3] = 0x0;
++ index += 4;
++ //there is a dummy word
++ index += 4;
++
++ //make sure little endian
++ put_u16(&buffer[index], msg->id);
++ index += 2;
++ put_u16(&buffer[index], msg->dest_id);
++ index += 2;
++ put_u16(&buffer[index], msg->src_id);
++ index += 2;
++ put_u16(&buffer[index], msg->param_len);
++ index += 2;
++ memcpy(&buffer[index], (u8 *)msg->param, msg->param_len);
++
++ aicwf_bus_txmsg(bus, buffer, len + 8);
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_cmds.h
+@@ -0,0 +1,120 @@
++/**
++ ******************************************************************************
++ *
++ * rwnx_cmds.h
++ *
++ * Copyright (C) RivieraWaves 2014-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_CMDS_H_
++#define _RWNX_CMDS_H_
++
++#include <linux/spinlock.h>
++#include <linux/completion.h>
++#include <linux/module.h>
++#include "lmac_msg.h"
++
++#ifdef CONFIG_RWNX_SDM
++#define RWNX_80211_CMD_TIMEOUT_MS (20 * 300)
++#elif defined(CONFIG_RWNX_FHOST)
++#define RWNX_80211_CMD_TIMEOUT_MS (10000)
++#else
++#define RWNX_80211_CMD_TIMEOUT_MS 4000//500//300
++#endif
++
++#define RWNX_CMD_FLAG_NONBLOCK BIT(0)
++#define RWNX_CMD_FLAG_REQ_CFM BIT(1)
++#define RWNX_CMD_FLAG_WAIT_PUSH BIT(2)
++#define RWNX_CMD_FLAG_WAIT_ACK BIT(3)
++#define RWNX_CMD_FLAG_WAIT_CFM BIT(4)
++#define RWNX_CMD_FLAG_DONE BIT(5)
++/* ATM IPC design makes it possible to get the CFM before the ACK,
++ * otherwise this could have simply been a state enum */
++#define RWNX_CMD_WAIT_COMPLETE(flags) \
++ (!(flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_CFM)))
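++
++/*
++ * Rough lifecycle of these flags, as a sketch of what rwnx_cmds.c does
++ * rather than a formal state machine: a queued command that expects a
++ * confirmation gets WAIT_CFM (plus WAIT_PUSH when it has to be deferred to
++ * the command workqueue); the confirmation handler clears WAIT_CFM, and once
++ * no WAIT_* bit is left RWNX_CMD_WAIT_COMPLETE() is true, DONE is set and
++ * the waiter is completed.
++ */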
++
++#define RWNX_CMD_MAX_QUEUED 16//8 AIDEN
++
++#ifdef CONFIG_RWNX_FHOST
++#include "ipc_fhost.h"
++#define rwnx_cmd_e2amsg ipc_fhost_msg
++#define rwnx_cmd_a2emsg ipc_fhost_msg
++#define RWNX_CMD_A2EMSG_LEN(m) (m->param_len)
++#define RWNX_CMD_E2AMSG_LEN_MAX IPC_FHOST_MSG_BUF_SIZE
++struct rwnx_term_stream;
++
++#else /* !CONFIG_RWNX_FHOST*/
++#include "ipc_shared.h"
++#define rwnx_cmd_e2amsg ipc_e2a_msg
++#define rwnx_cmd_a2emsg lmac_msg
++#define RWNX_CMD_A2EMSG_LEN(m) (sizeof(struct lmac_msg) + m->param_len)
++#define RWNX_CMD_E2AMSG_LEN_MAX (IPC_E2A_MSG_PARAM_SIZE * 4)
++
++#endif /* CONFIG_RWNX_FHOST*/
++
++struct rwnx_hw;
++struct rwnx_cmd;
++typedef int (*msg_cb_fct)(struct rwnx_hw *rwnx_hw, struct rwnx_cmd *cmd,
++ struct rwnx_cmd_e2amsg *msg);
++static inline void put_u16(u8 *buf, u16 data)
++{
++ buf[0] = (u8)(data&0x00ff);
++ buf[1] = (u8)((data >> 8)&0x00ff);
++}
++
++enum rwnx_cmd_mgr_state {
++ RWNX_CMD_MGR_STATE_DEINIT,
++ RWNX_CMD_MGR_STATE_INITED,
++ RWNX_CMD_MGR_STATE_CRASHED,
++};
++
++struct rwnx_cmd {
++ struct list_head list;
++ lmac_msg_id_t id;
++ lmac_msg_id_t reqid;
++ struct rwnx_cmd_a2emsg *a2e_msg;
++ char *e2a_msg;
++ u32 tkn;
++ u16 flags;
++
++ struct completion complete;
++ u32 result;
++ u8 used;
++ int array_id;
++ #ifdef CONFIG_RWNX_FHOST
++ struct rwnx_term_stream *stream;
++ #endif
++};
++
++struct rwnx_cmd_mgr {
++ enum rwnx_cmd_mgr_state state;
++ spinlock_t lock;
++ u32 next_tkn;
++ u32 queue_sz;
++ u32 max_queue_sz;
++
++ struct list_head cmds;
++
++ int (*queue)(struct rwnx_cmd_mgr *, struct rwnx_cmd *);
++ int (*llind)(struct rwnx_cmd_mgr *, struct rwnx_cmd *);
++ int (*msgind)(struct rwnx_cmd_mgr *, struct rwnx_cmd_e2amsg *, msg_cb_fct);
++ void (*print)(struct rwnx_cmd_mgr *);
++ void (*drain)(struct rwnx_cmd_mgr *);
++
++ struct work_struct cmdWork;
++ struct workqueue_struct *cmd_wq;
++};
++
++#define WAKE_CMD_WORK(cmd_mgr) \
++ do { \
++ queue_work((cmd_mgr)->cmd_wq, &cmd_mgr->cmdWork); \
++ } while (0)
++
++void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr);
++void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr);
++int cmd_mgr_queue_force_defer(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd);
++void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len);
++
++#endif /* _RWNX_CMDS_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_compat.h
+@@ -0,0 +1,428 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_compat.h
++ *
++ * Ensure driver compilation across supported Linux kernel versions (3.10
++ * and later).
++ *
++ * To avoid too many #if LINUX_VERSION_CODE checks in the code when a
++ * prototype changes between kernel versions:
++ * - For an external function, define a macro whose name is the function name
++ * with a _compat suffix and the prototype (actually the number of parameters)
++ * of the latest version. For the latest version this macro simply calls the
++ * function, and for older kernel versions it calls the function adapting
++ * the API.
++ * - For an internal function (e.g. cfg80211_ops) do the same, but the macro
++ * name doesn't need the _compat suffix when the function is not used
++ * directly by the driver.
++ *
++ * Copyright (C) RivieraWaves 2018
++ *
++ ******************************************************************************
++ */
++#ifndef _RWNX_COMPAT_H_
++#define _RWNX_COMPAT_H_
++#include <linux/version.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#error "Minimum kernel version supported is 3.10"
++#endif
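++
++/*
++ * Illustrative only: the pattern described above applied to a made-up
++ * function foo() whose prototype gained an extra "flags" argument in 4.0.
++ * Nothing below is a real kernel API; it only shows the _compat macro shape.
++ */
++#if 0
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
++#define foo_compat(dev, flags) foo(dev)
++#else
++#define foo_compat(dev, flags) foo(dev, flags)
++#endif
++#endif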
++
++/* Generic */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
++#define __bf_shf(x) (__builtin_ffsll(x) - 1)
++#define FIELD_PREP(_mask, _val) \
++ (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask))
++#else
++#include <linux/bitfield.h>
++#endif
++
++/* CFG80211 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
++#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK
++#endif
++
++#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 15, 60)
++#define IEEE80211_MAX_AMPDU_BUF IEEE80211_MAX_AMPDU_BUF_HE
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++#define IEEE80211_RADIOTAP_HE 23
++#define IEEE80211_RADIOTAP_HE_MU 24
++
++struct ieee80211_radiotap_he {
++ __le16 data1, data2, data3, data4, data5, data6;
++};
++
++enum ieee80211_radiotap_he_bits {
++ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
++ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
++ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
++ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
++ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
++
++ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
++ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
++ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
++ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
++ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
++ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
++ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
++ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
++ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
++ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
++ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
++ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
++ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
++ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
++
++ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
++ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
++ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
++ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
++ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
++ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
++ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
++ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
++ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
++ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
++ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
++
++ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
++ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
++ IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
++ IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
++ IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
++ IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
++ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
++ IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
++
++ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
++ IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
++ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
++ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
++ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
++ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
++
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
++
++ IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
++ IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
++ IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
++ IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
++
++ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
++ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
++ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
++ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
++ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
++ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
++ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
++ IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
++ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
++
++ IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
++ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
++ IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
++ IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
++};
++
++struct ieee80211_radiotap_he_mu {
++ __le16 flags1, flags2;
++ u8 ru_ch1[4];
++ u8 ru_ch2[4];
++};
++
++enum ieee80211_radiotap_he_mu_bits {
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
++
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN= 0x0400,
++ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
++};
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
++#define rwnx_cfg80211_add_iface(wiphy, name, name_assign_type, type, params) \
++ rwnx_cfg80211_add_iface(wiphy, name, type, u32 *flags, params)
++#else
++#define rwnx_cfg80211_add_iface(wiphy, name, name_assign_type, type, params) \
++ rwnx_cfg80211_add_iface(wiphy, name, name_assign_type, type, u32 *flags, params)
++#endif
++
++#define rwnx_cfg80211_change_iface(wiphy, dev, type, params) \
++ rwnx_cfg80211_change_iface(wiphy, dev, type, u32 *flags, params)
++
++#define CCFS0(vht) vht->center_freq_seg1_idx
++#define CCFS1(vht) vht->center_freq_seg2_idx
++
++#else
++#define CCFS0(vht) vht->center_freq_seg0_idx
++#define CCFS1(vht) vht->center_freq_seg1_idx
++
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#define cfg80211_cqm_rssi_notify(dev, event, level, gfp) \
++ cfg80211_cqm_rssi_notify(dev, event, gfp)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
++#define ieee80211_amsdu_to_8023s(skb, list, addr, iftype, extra_headroom, check_da, check_sa) \
++ ieee80211_amsdu_to_8023s(skb, list, addr, iftype, extra_headroom, false)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
++#define NUM_NL80211_BANDS IEEE80211_NUM_BANDS
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
++#define cfg80211_disconnected(dev, reason, ie, len, local, gfp) \
++ cfg80211_disconnected(dev, reason, ie, len, gfp)
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)) && !(defined CONFIG_VENDOR_RWNX)
++#define ieee80211_chandef_to_operating_class(chan_def, op_class) 0
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
++#define SURVEY_INFO_TIME SURVEY_INFO_CHANNEL_TIME
++#define SURVEY_INFO_TIME_BUSY SURVEY_INFO_CHANNEL_TIME_BUSY
++#define SURVEY_INFO_TIME_EXT_BUSY SURVEY_INFO_CHANNEL_TIME_EXT_BUSY
++#define SURVEY_INFO_TIME_RX SURVEY_INFO_CHANNEL_TIME_RX
++#define SURVEY_INFO_TIME_TX SURVEY_INFO_CHANNEL_TIME_TX
++
++#define SURVEY_TIME(s) s->channel_time
++#define SURVEY_TIME_BUSY(s) s->channel_time_busy
++#else
++#define SURVEY_TIME(s) s->time
++#define SURVEY_TIME_BUSY(s) s->time_busy
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
++#define cfg80211_ch_switch_started_notify(dev, chandef, count)
++
++#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
++#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
++#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
++#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
++#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
++#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
++#define NL80211_FEATURE_TDLS_CHANNEL_SWITCH 0
++
++#define STA_TDLS_INITIATOR(sta) 0
++
++#define REGULATORY_IGNORE_STALE_KICKOFF 0
++#else
++#define STA_TDLS_INITIATOR(sta) sta->tdls_initiator
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
++#define cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, flags) \
++ cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, flags, GFP_ATOMIC)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
++#define cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, flags) \
++ cfg80211_rx_mgmt(wdev, freq, rssi, buf, len, GFP_ATOMIC)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++
++#if 0
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
++#define rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, peer_capability, initiator, buf, len) \
++ rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, peer_capability, buf, len)
++#else
++#define rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, peer_capability, initiator, buf, len) \
++ rwnx_cfg80211_tdls_mgmt(wiphy, dev, peer, act, tok, status, buf, len)
++#endif
++#endif
++
++#include <linux/types.h>
++
++struct ieee80211_wmm_ac_param {
++ u8 aci_aifsn; /* AIFSN, ACM, ACI */
++ u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */
++ __le16 txop_limit;
++} __packed;
++
++struct ieee80211_wmm_param_ie {
++ u8 element_id; /* Element ID: 221 (0xdd); */
++ u8 len; /* Length: 24 */
++ /* required fields for WMM version 1 */
++ u8 oui[3]; /* 00:50:f2 */
++ u8 oui_type; /* 2 */
++ u8 oui_subtype; /* 1 */
++ u8 version; /* 1 for WMM version 1.0 */
++ u8 qos_info; /* AP/STA specific QoS info */
++ u8 reserved; /* 0 */
++ /* AC_BE, AC_BK, AC_VI, AC_VO */
++ struct ieee80211_wmm_ac_param ac[4];
++} __packed;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++enum {
++ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
++ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
++ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
++ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
++};
++#endif
++
++/* MAC80211 */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
++#define rwnx_ops_mgd_prepare_tx(hw, vif, duration) \
++ rwnx_ops_mgd_prepare_tx(hw, vif)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
++
++#define RX_ENC_HT(s) s->flag |= RX_FLAG_HT
++#define RX_ENC_HT_GF(s) s->flag |= (RX_FLAG_HT | RX_FLAG_HT_GF)
++#define RX_ENC_VHT(s) s->flag |= RX_FLAG_HT
++#define RX_ENC_HE(s) s->flag |= RX_FLAG_HT
++#define RX_ENC_FLAG_SHORT_GI(s) s->flag |= RX_FLAG_SHORT_GI
++#define RX_ENC_FLAG_SHORT_PRE(s) s->flag |= RX_FLAG_SHORTPRE
++#define RX_ENC_FLAG_LDPC(s) s->flag |= RX_FLAG_LDPC
++#define RX_BW_40MHZ(s) s->flag |= RX_FLAG_40MHZ
++#define RX_BW_80MHZ(s) s->vht_flag |= RX_VHT_FLAG_80MHZ
++#define RX_BW_160MHZ(s) s->vht_flag |= RX_VHT_FLAG_160MHZ
++#define RX_NSS(s) s->vht_nss
++
++#else
++#define RX_ENC_HT(s) s->encoding = RX_ENC_HT
++#define RX_ENC_HT_GF(s) { s->encoding = RX_ENC_HT; \
++ s->enc_flags |= RX_ENC_FLAG_HT_GF; }
++#define RX_ENC_VHT(s) s->encoding = RX_ENC_VHT
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++#define RX_ENC_HE(s) s->encoding = RX_ENC_VHT
++#else
++#define RX_ENC_HE(s) s->encoding = RX_ENC_HE
++#endif
++#define RX_ENC_FLAG_SHORT_GI(s) s->enc_flags |= RX_ENC_FLAG_SHORT_GI
++#define RX_ENC_FLAG_SHORT_PRE(s) s->enc_flags |= RX_ENC_FLAG_SHORTPRE
++#define RX_ENC_FLAG_LDPC(s) s->enc_flags |= RX_ENC_FLAG_LDPC
++#define RX_BW_40MHZ(s) s->bw = RATE_INFO_BW_40
++#define RX_BW_80MHZ(s) s->bw = RATE_INFO_BW_80
++#define RX_BW_160MHZ(s) s->bw = RATE_INFO_BW_160
++#define RX_NSS(s) s->nss
++
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
++#define ieee80211_cqm_rssi_notify(vif, event, level, gfp) \
++ ieee80211_cqm_rssi_notify(vif, event, gfp)
++#endif
++
++#ifndef CONFIG_VENDOR_RWNX_AMSDUS_TX
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
++#define rwnx_ops_ampdu_action(hw, vif, params) \
++ rwnx_ops_ampdu_action(hw, vif, enum ieee80211_ampdu_mlme_action action, \
++ struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
++#define rwnx_ops_ampdu_action(hw, vif, params) \
++ rwnx_ops_ampdu_action(hw, vif, enum ieee80211_ampdu_mlme_action action, \
++ struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, \
++ bool amsdu)
++#endif
++#endif /* CONFIG_VENDOR_RWNX_AMSDUS_TX */
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
++#define IEEE80211_HW_SUPPORT_FAST_XMIT 0
++#define ieee80211_hw_check(hw, feat) (hw->flags & IEEE80211_HW_##feat)
++#define ieee80211_hw_set(hw, feat) {hw->flags |= IEEE80211_HW_##feat;}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
++#define rwnx_ops_sw_scan_start(hw, vif, mac_addr) \
++ rwnx_ops_sw_scan_start(hw)
++#define rwnx_ops_sw_scan_complete(hw, vif) \
++ rwnx_ops_sw_scan_complete(hw)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++#define rwnx_ops_hw_scan(hw, vif, hw_req) \
++ rwnx_ops_hw_scan(hw, vif, struct cfg80211_scan_request *req)
++#endif
++
++/* NET */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
++#define rwnx_select_queue(dev, skb, sb_dev) \
++ rwnx_select_queue(dev, skb)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
++#define rwnx_select_queue(dev, skb, sb_dev) \
++ rwnx_select_queue(dev, skb, void *accel_priv, select_queue_fallback_t fallback)
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)
++#define rwnx_select_queue(dev, skb, sb_dev) \
++ rwnx_select_queue(dev, skb, sb_dev, select_queue_fallback_t fallback)
++#else
++#define rwnx_select_queue(dev, skb, sb_dev) \
++ rwnx_select_queue(dev, skb, sb_dev)
++#endif
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) && !(defined CONFIG_VENDOR_RWNX)
++#define sk_pacing_shift_update(sk, shift)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++#define alloc_netdev_mqs(size, name, assign, setup, txqs, rxqs) \
++ alloc_netdev_mqs(size, name, setup, txqs, rxqs)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++#define NET_NAME_UNKNOWN 0
++#endif
++
++/* TRACE */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
++#define trace_print_symbols_seq ftrace_print_symbols_seq
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++#define trace_seq_buffer_ptr(p) p->buffer + p->len
++#endif
++
++/* TIME */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
++#define time64_to_tm(t, o, tm) time_to_tm((time_t)t, o, tm)
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
++#define ktime_get_real_seconds get_seconds
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++typedef __s64 time64_t;
++#endif
++
++#endif /* _RWNX_COMPAT_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_debugfs.c
+@@ -0,0 +1,2251 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_debugfs.c
++ *
++ * @brief Definition of debugfs entries
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++
++#include <linux/kernel.h>
++#include <linux/kmod.h>
++#include <linux/debugfs.h>
++#include <linux/string.h>
++#include <linux/sort.h>
++
++#include "rwnx_debugfs.h"
++#include "rwnx_msg_tx.h"
++#include "rwnx_radar.h"
++#include "rwnx_tx.h"
++
++#ifdef CONFIG_DEBUG_FS
++#ifdef CONFIG_RWNX_FULLMAC
++static ssize_t rwnx_dbgfs_stats_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char *buf;
++ int ret;
++ int i, skipped;
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ int per;
++#endif
++ ssize_t read;
++ int bufsz = (NX_TXQ_CNT) * 20 + (ARRAY_SIZE(priv->stats.amsdus_rx) + 1) * 40
++ + (ARRAY_SIZE(priv->stats.ampdus_tx) * 30);
++
++ if (*ppos)
++ return 0;
++
++ buf = kmalloc(bufsz, GFP_ATOMIC);
++ if (buf == NULL)
++ return 0;
++
++ ret = scnprintf(buf, bufsz, "TXQs CFM balances ");
++ for (i = 0; i < NX_TXQ_CNT; i++)
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " [%1d]:%3d", i,
++ priv->stats.cfm_balance[i]);
++
++ ret += scnprintf(&buf[ret], bufsz - ret, "\n");
++
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ "\nAMSDU[len] done failed received\n");
++ for (i = skipped = 0; i < NX_TX_PAYLOAD_MAX; i++) {
++ if (priv->stats.amsdus[i].done) {
++ per = DIV_ROUND_UP((priv->stats.amsdus[i].failed) *
++ 100, priv->stats.amsdus[i].done);
++ } else if (priv->stats.amsdus_rx[i]) {
++ per = 0;
++ } else {
++ per = 0;
++ skipped = 1;
++ continue;
++ }
++ if (skipped) {
++ ret += scnprintf(&buf[ret], bufsz - ret, " ...\n");
++ skipped = 0;
++ }
++
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " [%2d] %10d %8d(%3d%%) %10d\n", i ? i + 1 : i,
++ priv->stats.amsdus[i].done,
++ priv->stats.amsdus[i].failed, per,
++ priv->stats.amsdus_rx[i]);
++ }
++
++ for (; i < ARRAY_SIZE(priv->stats.amsdus_rx); i++) {
++ if (!priv->stats.amsdus_rx[i]) {
++ skipped = 1;
++ continue;
++ }
++ if (skipped) {
++ ret += scnprintf(&buf[ret], bufsz - ret, " ...\n");
++ skipped = 0;
++ }
++
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " [%2d] %10d\n",
++ i + 1, priv->stats.amsdus_rx[i]);
++ }
++#else
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ "\nAMSDU[len] received\n");
++ for (i = skipped = 0; i < ARRAY_SIZE(priv->stats.amsdus_rx); i++) {
++ if (!priv->stats.amsdus_rx[i]) {
++ skipped = 1;
++ continue;
++ }
++ if (skipped) {
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " ...\n");
++ skipped = 0;
++ }
++
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " [%2d] %10d\n",
++ i + 1, priv->stats.amsdus_rx[i]);
++ }
++
++#endif /* CONFIG_RWNX_SPLIT_TX_BUF */
++
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ "\nAMPDU[len] done received\n");
++ for (i = skipped = 0; i < ARRAY_SIZE(priv->stats.ampdus_tx); i++) {
++ if (!priv->stats.ampdus_tx[i] && !priv->stats.ampdus_rx[i]) {
++ skipped = 1;
++ continue;
++ }
++ if (skipped) {
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " ...\n");
++ skipped = 0;
++ }
++
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ " [%2d] %9d %9d\n", i ? i + 1 : i,
++ priv->stats.ampdus_tx[i], priv->stats.ampdus_rx[i]);
++ }
++
++ ret += scnprintf(&buf[ret], bufsz - ret,
++ "#mpdu missed %9d\n",
++ priv->stats.ampdus_rx_miss);
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ kfree(buf);
++
++ return read;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++static ssize_t rwnx_dbgfs_stats_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++
++ /* Prevent interrupt preemption, as these statistics are updated in
++ * interrupt context */
++ spin_lock_bh(&priv->tx_lock);
++
++ memset(&priv->stats, 0, sizeof(priv->stats));
++
++ spin_unlock_bh(&priv->tx_lock);
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(stats);
++
++#define TXQ_STA_PREF "tid|"
++#define TXQ_STA_PREF_FMT "%3d|"
++
++#ifdef CONFIG_RWNX_FULLMAC
++#define TXQ_VIF_PREF "type|"
++#define TXQ_VIF_PREF_FMT "%4s|"
++#else
++#define TXQ_VIF_PREF "AC|"
++#define TXQ_VIF_PREF_FMT "%2s|"
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#define TXQ_HDR "idx| status|credit|ready|retry"
++#define TXQ_HDR_FMT "%3d|%s%s%s%s%s%s%s|%6d|%5d|%5d"
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++#ifdef CONFIG_RWNX_FULLMAC
++#define TXQ_HDR_SUFF "|amsdu"
++#define TXQ_HDR_SUFF_FMT "|%5d"
++#else
++#define TXQ_HDR_SUFF "|amsdu-ht|amdsu-vht"
++#define TXQ_HDR_SUFF_FMT "|%8d|%9d"
++#endif /* CONFIG_RWNX_FULLMAC */
++#else
++#define TXQ_HDR_SUFF ""
++#define TXQ_HDR_SUFF_FMT ""
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++
++#define TXQ_HDR_MAX_LEN (sizeof(TXQ_STA_PREF) + sizeof(TXQ_HDR) + sizeof(TXQ_HDR_SUFF) + 1)
++
++#ifdef CONFIG_RWNX_FULLMAC
++#define PS_HDR "Legacy PS: ready=%d, sp=%d / UAPSD: ready=%d, sp=%d"
++#define PS_HDR_LEGACY "Legacy PS: ready=%d, sp=%d"
++#define PS_HDR_UAPSD "UAPSD: ready=%d, sp=%d"
++#define PS_HDR_MAX_LEN sizeof("Legacy PS: ready=xxx, sp=xxx / UAPSD: ready=xxx, sp=xxx\n")
++#else
++#define PS_HDR ""
++#define PS_HDR_MAX_LEN 0
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#define STA_HDR "** STA %d (%pM)\n"
++#define STA_HDR_MAX_LEN sizeof("- STA xx (xx:xx:xx:xx:xx:xx)\n") + PS_HDR_MAX_LEN
++
++#ifdef CONFIG_RWNX_FULLMAC
++#define VIF_HDR "* VIF [%d] %s\n"
++#define VIF_HDR_MAX_LEN sizeof(VIF_HDR) + IFNAMSIZ
++#else
++#define VIF_HDR "* VIF [%d]\n"
++#define VIF_HDR_MAX_LEN sizeof(VIF_HDR)
++#endif
++
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++
++#ifdef CONFIG_RWNX_FULLMAC
++#define VIF_SEP "---------------------------------------\n"
++#else
++#define VIF_SEP "----------------------------------------------------\n"
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#else /* ! CONFIG_RWNX_AMSDUS_TX */
++#define VIF_SEP "---------------------------------\n"
++#endif /* CONFIG_RWNX_AMSDUS_TX*/
++
++#define VIF_SEP_LEN sizeof(VIF_SEP)
++
++#define CAPTION "status: L=in hwq list, F=stop full, P=stop sta PS, V=stop vif PS, C=stop channel, S=stop CSA, M=stop MU"
++#define CAPTION_LEN sizeof(CAPTION)
++
++#define STA_TXQ 0
++#define VIF_TXQ 1
++
++static int rwnx_dbgfs_txq(char *buf, size_t size, struct rwnx_txq *txq, int type, int tid, char *name)
++{
++ int res, idx = 0;
++
++ if (type == STA_TXQ) {
++ res = scnprintf(&buf[idx], size, TXQ_STA_PREF_FMT, tid);
++ idx += res;
++ size -= res;
++ } else {
++ res = scnprintf(&buf[idx], size, TXQ_VIF_PREF_FMT, name);
++ idx += res;
++ size -= res;
++ }
++
++ res = scnprintf(&buf[idx], size, TXQ_HDR_FMT, txq->idx,
++ (txq->status & RWNX_TXQ_IN_HWQ_LIST) ? "L" : " ",
++ (txq->status & RWNX_TXQ_STOP_FULL) ? "F" : " ",
++ (txq->status & RWNX_TXQ_STOP_STA_PS) ? "P" : " ",
++ (txq->status & RWNX_TXQ_STOP_VIF_PS) ? "V" : " ",
++ (txq->status & RWNX_TXQ_STOP_CHAN) ? "C" : " ",
++ (txq->status & RWNX_TXQ_STOP_CSA) ? "S" : " ",
++ (txq->status & RWNX_TXQ_STOP_MU_POS) ? "M" : " ",
++ txq->credits, skb_queue_len(&txq->sk_list),
++ txq->nb_retry);
++ idx += res;
++ size -= res;
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ if (type == STA_TXQ) {
++ res = scnprintf(&buf[idx], size, TXQ_HDR_SUFF_FMT,
++#ifdef CONFIG_RWNX_FULLMAC
++ txq->amsdu_len
++#else
++ txq->amsdu_ht_len_cap, txq->amsdu_vht_len_cap
++#endif /* CONFIG_RWNX_FULLMAC */
++ );
++ idx += res;
++ size -= res;
++ }
++#endif
++
++ res = scnprintf(&buf[idx], size, "\n");
++ idx += res;
++ size -= res;
++
++ return idx;
++}
++
++static int rwnx_dbgfs_txq_sta(char *buf, size_t size, struct rwnx_sta *rwnx_sta,
++ struct rwnx_hw *rwnx_hw)
++{
++ int tid, res, idx = 0;
++ struct rwnx_txq *txq;
++
++ res = scnprintf(&buf[idx], size, "\n" STA_HDR,
++ rwnx_sta->sta_idx,
++#ifdef CONFIG_RWNX_FULLMAC
++ rwnx_sta->mac_addr
++#endif /* CONFIG_RWNX_FULLMAC */
++ );
++ idx += res;
++ size -= res;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (rwnx_sta->ps.active) {
++ if (rwnx_sta->uapsd_tids &&
++ (rwnx_sta->uapsd_tids == ((1 << NX_NB_TXQ_PER_STA) - 1)))
++ res = scnprintf(&buf[idx], size, PS_HDR_UAPSD "\n",
++ rwnx_sta->ps.pkt_ready[UAPSD_ID],
++ rwnx_sta->ps.sp_cnt[UAPSD_ID]);
++ else if (rwnx_sta->uapsd_tids)
++ res = scnprintf(&buf[idx], size, PS_HDR "\n",
++ rwnx_sta->ps.pkt_ready[LEGACY_PS_ID],
++ rwnx_sta->ps.sp_cnt[LEGACY_PS_ID],
++ rwnx_sta->ps.pkt_ready[UAPSD_ID],
++ rwnx_sta->ps.sp_cnt[UAPSD_ID]);
++ else
++ res = scnprintf(&buf[idx], size, PS_HDR_LEGACY "\n",
++ rwnx_sta->ps.pkt_ready[LEGACY_PS_ID],
++ rwnx_sta->ps.sp_cnt[LEGACY_PS_ID]);
++ idx += res;
++ size -= res;
++ } else {
++ res = scnprintf(&buf[idx], size, "\n");
++ idx += res;
++ size -= res;
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++
++ res = scnprintf(&buf[idx], size, TXQ_STA_PREF TXQ_HDR TXQ_HDR_SUFF "\n");
++ idx += res;
++ size -= res;
++
++
++ foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
++ res = rwnx_dbgfs_txq(&buf[idx], size, txq, STA_TXQ, tid, NULL);
++ idx += res;
++ size -= res;
++ }
++
++ return idx;
++}
++
++static int rwnx_dbgfs_txq_vif(char *buf, size_t size, struct rwnx_vif *rwnx_vif,
++ struct rwnx_hw *rwnx_hw)
++{
++ int res, idx = 0;
++ struct rwnx_txq *txq;
++ struct rwnx_sta *rwnx_sta;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ res = scnprintf(&buf[idx], size, VIF_HDR, rwnx_vif->vif_index,
++ rwnx_vif->ndev ? rwnx_vif->ndev->name : "(none)");
++ idx += res;
++ size -= res;
++ if (!rwnx_vif->up || rwnx_vif->ndev == NULL)
++ return idx;
++
++#else
++ int ac;
++ char ac_name[2] = {'0', '\0'};
++
++ res = scnprintf(&buf[idx], size, VIF_HDR, rwnx_vif->vif_index);
++ idx += res;
++ size -= res;
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP ||
++ RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO ||
++ RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) {
++ res = scnprintf(&buf[idx], size, TXQ_VIF_PREF TXQ_HDR "\n");
++ idx += res;
++ size -= res;
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ res = rwnx_dbgfs_txq(&buf[idx], size, txq, VIF_TXQ, 0, "UNK");
++ idx += res;
++ size -= res;
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE);
++ res = rwnx_dbgfs_txq(&buf[idx], size, txq, VIF_TXQ, 0, "BCMC");
++ idx += res;
++ size -= res;
++ rwnx_sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index];
++ if (rwnx_sta->ps.active) {
++ res = scnprintf(&buf[idx], size, PS_HDR_LEGACY "\n",
++ rwnx_sta->ps.pkt_ready[LEGACY_PS_ID],
++ rwnx_sta->ps.sp_cnt[LEGACY_PS_ID]);
++ idx += res;
++ size -= res;
++ } else {
++ res = scnprintf(&buf[idx], size, "\n");
++ idx += res;
++ size -= res;
++ }
++
++ list_for_each_entry(rwnx_sta, &rwnx_vif->ap.sta_list, list) {
++ res = rwnx_dbgfs_txq_sta(&buf[idx], size, rwnx_sta, rwnx_hw);
++ idx += res;
++ size -= res;
++ }
++ } else if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION ||
++ RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) {
++ if (rwnx_vif->sta.ap) {
++ res = rwnx_dbgfs_txq_sta(&buf[idx], size, rwnx_vif->sta.ap, rwnx_hw);
++ idx += res;
++ size -= res;
++ }
++ }
++
++#else
++ res = scnprintf(&buf[idx], size, TXQ_VIF_PREF TXQ_HDR "\n");
++ idx += res;
++ size -= res;
++
++ foreach_vif_txq(rwnx_vif, txq, ac) {
++ ac_name[0]++;
++ res = rwnx_dbgfs_txq(&buf[idx], size, txq, VIF_TXQ, 0, ac_name);
++ idx += res;
++ size -= res;
++ }
++
++ list_for_each_entry(rwnx_sta, &rwnx_vif->stations, list) {
++ res = rwnx_dbgfs_txq_sta(&buf[idx], size, rwnx_sta, rwnx_hw);
++ idx += res;
++ size -= res;
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++ return idx;
++}
++
++static ssize_t rwnx_dbgfs_txq_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *rwnx_hw = file->private_data;
++ struct rwnx_vif *vif;
++ char *buf;
++ int idx, res;
++ ssize_t read;
++ size_t bufsz = ((NX_VIRT_DEV_MAX * (VIF_HDR_MAX_LEN + 2 * VIF_SEP_LEN)) +
++ (NX_REMOTE_STA_MAX * STA_HDR_MAX_LEN) +
++ ((NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX + NX_NB_TXQ) *
++ TXQ_HDR_MAX_LEN) + CAPTION_LEN);
++
++ /* everything is read in one go */
++ if (*ppos)
++ return 0;
++
++ bufsz = min_t(size_t, bufsz, count);
++
++ buf = kmalloc(bufsz, GFP_ATOMIC);
++ if (buf == NULL)
++ return 0;
++
++ bufsz--;
++ idx = 0;
++
++ res = scnprintf(&buf[idx], bufsz, CAPTION);
++ idx += res;
++ bufsz -= res;
++
++ //spin_lock_bh(&rwnx_hw->tx_lock);
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++ res = scnprintf(&buf[idx], bufsz, "\n"VIF_SEP);
++ idx += res;
++ bufsz -= res;
++ res = rwnx_dbgfs_txq_vif(&buf[idx], bufsz, vif, rwnx_hw);
++ idx += res;
++ bufsz -= res;
++ res = scnprintf(&buf[idx], bufsz, VIF_SEP);
++ idx += res;
++ bufsz -= res;
++ }
++ //spin_unlock_bh(&rwnx_hw->tx_lock);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, idx);
++ kfree(buf);
++
++ return read;
++}
++DEBUGFS_READ_FILE_OPS(txq);
++
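++/*
++ * Dump the survey results gathered during scan (used for ACS): for every
++ * channel of the enabled bands, report the time spent on the channel, the
++ * busy time and the measured noise, or "NOT AVAILABLE" when no survey data
++ * was collected for that channel.
++ */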
++static ssize_t rwnx_dbgfs_acsinfo_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ #ifdef CONFIG_RWNX_FULLMAC
++ struct wiphy *wiphy = priv->wiphy;
++ #endif //CONFIG_RWNX_FULLMAC
++ char *buf = NULL;
++ const size_t bufsz = (SCAN_CHANNEL_MAX + 1) * 43;
++ ssize_t size = 0;
++ int survey_cnt = 0;
++ int len = 0;
++ int band, chan_cnt;
++ int band_max = NL80211_BAND_5GHZ;
++
++ buf = kmalloc(bufsz, GFP_KERNEL);
++ if (buf == NULL)
++ return 0;
++ memset(buf, 0, bufsz);
++
++ if (priv->band_5g_support){
++ band_max = NL80211_BAND_5GHZ + 1;
++ }
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++
++ len += scnprintf(buf, min_t(size_t, bufsz - 1, count),
++ "FREQ TIME(ms) BUSY(ms) NOISE(dBm)\n");
++
++
++ //#ifdef USE_5G
++ //for (band = NL80211_BAND_2GHZ; band <= NL80211_BAND_5GHZ; band++) {
++ //#else
++ //for (band = NL80211_BAND_2GHZ; band < NL80211_BAND_5GHZ; band++) {
++ //#endif
++ for (band = NL80211_BAND_2GHZ; band < band_max; band++) {
++ for (chan_cnt = 0; chan_cnt < wiphy->bands[band]->n_channels; chan_cnt++) {
++ struct rwnx_survey_info *p_survey_info = &priv->survey[survey_cnt];
++ struct ieee80211_channel *p_chan = &wiphy->bands[band]->channels[chan_cnt];
++
++ if (p_survey_info->filled) {
++ len += scnprintf(&buf[len], min_t(size_t, bufsz - len - 1, count),
++ "%d %03d %03d %d\n",
++ p_chan->center_freq,
++ p_survey_info->chan_time_ms,
++ p_survey_info->chan_time_busy_ms,
++ p_survey_info->noise_dbm);
++ } else {
++ len += scnprintf(&buf[len], min_t(size_t, bufsz - len - 1, count),
++ "%d NOT AVAILABLE\n",
++ p_chan->center_freq);
++ }
++
++ survey_cnt++;
++ }
++ }
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++
++ size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++ kfree(buf);
++ buf = NULL;
++
++ return size;
++}
++
++DEBUGFS_READ_FILE_OPS(acsinfo);
++
++static ssize_t rwnx_dbgfs_fw_dbg_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ char help[]="usage: [MOD:<ALL|KE|DBG|IPC|DMA|MM|TX|RX|PHY>]* "
++ "[DBG:<NONE|CRT|ERR|WRN|INF|VRB>]\n";
++
++ return simple_read_from_buffer(user_buf, count, ppos, help, sizeof(help));
++}
++
++
++static ssize_t rwnx_dbgfs_fw_dbg_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int idx = 0;
++ u32 mod = 0;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++
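++/*
++ * Token helpers for parsing the "MOD:" and "DBG:" keywords of the fw_dbg
++ * entry (e.g. `echo "MOD:TX MOD:RX DBG:INF" > fw_dbg`): RWNX_MOD_TOKEN
++ * accumulates module bits into 'mod' and keeps scanning, while RWNX_DBG_TOKEN
++ * sets the severity level and jumps to dbg_done so the severity filter is
++ * sent to the firmware immediately.
++ */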
++#define RWNX_MOD_TOKEN(str, val) \
++ if (strncmp(&buf[idx], str, sizeof(str) - 1 ) == 0) { \
++ idx += sizeof(str) - 1; \
++ mod |= val; \
++ continue; \
++ }
++
++#define RWNX_DBG_TOKEN(str, val) \
++ if (strncmp(&buf[idx], str, sizeof(str) - 1) == 0) { \
++ idx += sizeof(str) - 1; \
++ dbg = val; \
++ goto dbg_done; \
++ }
++
++ while ((idx + 4) < len) {
++ if (strncmp(&buf[idx], "MOD:", 4) == 0) {
++ idx += 4;
++ RWNX_MOD_TOKEN("ALL", 0xffffffff);
++ RWNX_MOD_TOKEN("KE", BIT(0));
++ RWNX_MOD_TOKEN("DBG", BIT(1));
++ RWNX_MOD_TOKEN("IPC", BIT(2));
++ RWNX_MOD_TOKEN("DMA", BIT(3));
++ RWNX_MOD_TOKEN("MM", BIT(4));
++ RWNX_MOD_TOKEN("TX", BIT(5));
++ RWNX_MOD_TOKEN("RX", BIT(6));
++ RWNX_MOD_TOKEN("PHY", BIT(7));
++ idx++;
++ } else if (strncmp(&buf[idx], "DBG:", 4) == 0) {
++ u32 dbg = 0;
++ idx += 4;
++ RWNX_DBG_TOKEN("NONE", 0);
++ RWNX_DBG_TOKEN("CRT", 1);
++ RWNX_DBG_TOKEN("ERR", 2);
++ RWNX_DBG_TOKEN("WRN", 3);
++ RWNX_DBG_TOKEN("INF", 4);
++ RWNX_DBG_TOKEN("VRB", 5);
++ idx++;
++ continue;
++ dbg_done:
++ rwnx_send_dbg_set_sev_filter_req(priv, dbg);
++ } else {
++ idx++;
++ }
++ }
++
++ if (mod) {
++ rwnx_send_dbg_set_mod_filter_req(priv, mod);
++ }
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg);
++
++static ssize_t rwnx_dbgfs_sys_stats_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[3*64];
++ int len = 0;
++ ssize_t read;
++ int error = 0;
++ struct dbg_get_sys_stat_cfm cfm;
++ u32 sleep_int, sleep_frac, doze_int, doze_frac;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Get the information from the FW */
++ if ((error = rwnx_send_dbg_get_sys_stat_req(priv, &cfm)))
++ return error;
++
++ if (cfm.stats_time == 0)
++ return 0;
++
++ sleep_int = ((cfm.cpu_sleep_time * 100) / cfm.stats_time);
++ sleep_frac = (((cfm.cpu_sleep_time * 100) % cfm.stats_time) * 10) / cfm.stats_time;
++ doze_int = ((cfm.doze_time * 100) / cfm.stats_time);
++ doze_frac = (((cfm.doze_time * 100) % cfm.stats_time) * 10) / cfm.stats_time;
++
++ len += scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "\nSystem statistics:\n");
++ len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - 1, count),
++ " CPU sleep [%%]: %d.%d\n", sleep_int, sleep_frac);
++ len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - 1, count),
++ " Doze [%%]: %d.%d\n", doze_int, doze_frac);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(sys_stats);
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++static ssize_t rwnx_dbgfs_mu_group_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *rwnx_hw = file->private_data;
++ struct rwnx_mu_info *mu = &rwnx_hw->mu;
++ struct rwnx_mu_group *group;
++ size_t bufsz = NX_MU_GROUP_MAX * sizeof("xx = (xx - xx - xx - xx)\n") + 50;
++ char *buf;
++ int j, res, idx = 0;
++
++ if (*ppos)
++ return 0;
++
++ buf = kmalloc(bufsz, GFP_ATOMIC);
++ if (buf == NULL)
++ return 0;
++
++ res = scnprintf(&buf[idx], bufsz, "MU Group list (%d groups, %d users max)\n",
++ NX_MU_GROUP_MAX, CONFIG_USER_MAX);
++ idx += res;
++ bufsz -= res;
++
++ list_for_each_entry(group, &mu->active_groups, list) {
++ if (group->user_cnt) {
++ res = scnprintf(&buf[idx], bufsz, "%2d = (", group->group_id);
++ idx += res;
++ bufsz -= res;
++ for (j = 0; j < (CONFIG_USER_MAX - 1) ; j++) {
++ if (group->users[j])
++ res = scnprintf(&buf[idx], bufsz, "%2d - ",
++ group->users[j]->sta_idx);
++ else
++ res = scnprintf(&buf[idx], bufsz, ".. - ");
++
++ idx += res;
++ bufsz -= res;
++ }
++
++ if (group->users[j])
++ res = scnprintf(&buf[idx], bufsz, "%2d)\n",
++ group->users[j]->sta_idx);
++ else
++ res = scnprintf(&buf[idx], bufsz, "..)\n");
++
++ idx += res;
++ bufsz -= res;
++ }
++ }
++
++ res = simple_read_from_buffer(user_buf, count, ppos, buf, idx);
++ kfree(buf);
++
++ return res;
++}
++
++DEBUGFS_READ_FILE_OPS(mu_group);
++#endif
++
++#ifdef CONFIG_RWNX_P2P_DEBUGFS
++static ssize_t rwnx_dbgfs_oppps_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *rw_hw = file->private_data;
++ struct rwnx_vif *rw_vif;
++ char buf[32];
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++ int ctw;
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++
++ /* Read the written CT Window (provided in ms) value */
++ if (sscanf(buf, "ctw=%d", &ctw) > 0) {
++ /* Check if at least one VIF is configured as P2P GO */
++ list_for_each_entry(rw_vif, &rw_hw->vifs, list) {
++#ifdef CONFIG_RWNX_FULLMAC
++ if (RWNX_VIF_TYPE(rw_vif) == NL80211_IFTYPE_P2P_GO) {
++#endif /* CONFIG_RWNX_FULLMAC */
++ struct mm_set_p2p_oppps_cfm cfm;
++
++ /* Forward the request to the embedded firmware and wait for confirmation */
++ rwnx_send_p2p_oppps_req(rw_hw, rw_vif, (u8)ctw, &cfm);
++
++ break;
++ }
++ }
++ }
++
++ return count;
++}
++
++DEBUGFS_WRITE_FILE_OPS(oppps);
++
++static ssize_t rwnx_dbgfs_noa_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *rw_hw = file->private_data;
++ struct rwnx_vif *rw_vif;
++ char buf[64];
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++ int noa_count, interval, duration, dyn_noa;
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++
++ /* Read the written NOA information */
++ if (sscanf(buf, "count=%d interval=%d duration=%d dyn=%d",
++ &noa_count, &interval, &duration, &dyn_noa) > 0) {
++ /* Check if at least one VIF is configured as P2P GO */
++ list_for_each_entry(rw_vif, &rw_hw->vifs, list) {
++#ifdef CONFIG_RWNX_FULLMAC
++ if (RWNX_VIF_TYPE(rw_vif) == NL80211_IFTYPE_P2P_GO) {
++#endif /* CONFIG_RWNX_FULLMAC */
++ struct mm_set_p2p_noa_cfm cfm;
++
++ /* Forward the request to the embedded firmware and wait for confirmation */
++ rwnx_send_p2p_noa_req(rw_hw, rw_vif, noa_count, interval,
++ duration, (dyn_noa > 0), &cfm);
++
++ break;
++ }
++ }
++ }
++
++ return count;
++}
++
++DEBUGFS_WRITE_FILE_OPS(noa);
++#endif /* CONFIG_RWNX_P2P_DEBUGFS */
++
++static char fw_log_buffer[FW_LOG_SIZE];
++
++static ssize_t rwnx_dbgfs_fw_log_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ size_t not_cpy;
++ size_t nb_cpy;
++ char *log = fw_log_buffer;
++
++ printk("%s, %d, %p, %p\n", __func__, priv->debugfs.fw_log.buf.size, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.dataend);
++ //spin_lock_bh(&priv->debugfs.fw_log.lock);
++
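++ /*
++ * The firmware log is kept in a circular buffer: if the valid data
++ * reaches the end of the storage area, copy up to 'dataend' and wrap
++ * 'start' back to the beginning of the buffer; otherwise copy the whole
++ * valid region and advance 'start' past the bytes actually delivered to
++ * user space.
++ */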
++ if ((priv->debugfs.fw_log.buf.start + priv->debugfs.fw_log.buf.size) >= priv->debugfs.fw_log.buf.dataend) {
++ memcpy(log, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.dataend - priv->debugfs.fw_log.buf.start);
++ not_cpy = copy_to_user(user_buf, log, priv->debugfs.fw_log.buf.dataend - priv->debugfs.fw_log.buf.start);
++ nb_cpy = priv->debugfs.fw_log.buf.dataend - priv->debugfs.fw_log.buf.start - not_cpy;
++ priv->debugfs.fw_log.buf.start = priv->debugfs.fw_log.buf.data;
++ } else {
++ memcpy(log, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.size);
++ not_cpy = copy_to_user(user_buf, log, priv->debugfs.fw_log.buf.size);
++ nb_cpy = priv->debugfs.fw_log.buf.size - not_cpy;
++ priv->debugfs.fw_log.buf.start = priv->debugfs.fw_log.buf.start + priv->debugfs.fw_log.buf.size - not_cpy;
++ }
++
++ priv->debugfs.fw_log.buf.size -= nb_cpy;
++ //spin_unlock_bh(&priv->debugfs.fw_log.lock);
++
++ printk("nb_cpy=%lu, not_cpy=%lu, start=%p, end=%p\n", (long unsigned int)nb_cpy, (long unsigned int)not_cpy, priv->debugfs.fw_log.buf.start, priv->debugfs.fw_log.buf.end);
++ return nb_cpy;
++}
++
++static ssize_t rwnx_dbgfs_fw_log_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ //struct rwnx_hw *priv = file->private_data;
++
++ printk("%s\n", __func__);
++ return count;
++}
++DEBUGFS_READ_WRITE_FILE_OPS(fw_log);
++
++#ifdef CONFIG_RWNX_RADAR
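++/*
++ * Dump the radar pulses recorded by the selected detection chain (rd_idx):
++ * repetition interval (PRI), width, figure of merit and frequency offset of
++ * each buffered pulse, followed by the pattern-detector state.
++ */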
++static ssize_t rwnx_dbgfs_pulses_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos,
++ int rd_idx)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char *buf;
++ int len = 0;
++ int bufsz;
++ int i;
++ int index;
++ struct rwnx_radar_pulses *p = &priv->radar.pulses[rd_idx];
++ ssize_t read;
++
++ if (*ppos != 0)
++ return 0;
++
++ /* Prevent from interrupt preemption */
++ spin_lock_bh(&priv->radar.lock);
++ bufsz = p->count * 34 + 51;
++ bufsz += rwnx_radar_dump_pattern_detector(NULL, 0, &priv->radar, rd_idx);
++ buf = kmalloc(bufsz, GFP_ATOMIC);
++ if (buf == NULL) {
++ spin_unlock_bh(&priv->radar.lock);
++ return 0;
++ }
++
++ if (p->count) {
++ len += scnprintf(&buf[len], bufsz - len,
++ " PRI WIDTH FOM FREQ\n");
++ index = p->index;
++ for (i = 0; i < p->count; i++) {
++ struct radar_pulse *pulse;
++
++ if (index > 0)
++ index--;
++ else
++ index = RWNX_RADAR_PULSE_MAX - 1;
++
++ pulse = (struct radar_pulse *) &p->buffer[index];
++
++ len += scnprintf(&buf[len], bufsz - len,
++ "%05dus %03dus %2d%% %+3dMHz\n", pulse->rep,
++ 2 * pulse->len, 6 * pulse->fom, 2*pulse->freq);
++ }
++ }
++
++ len += rwnx_radar_dump_pattern_detector(&buf[len], bufsz - len,
++ &priv->radar, rd_idx);
++
++ spin_unlock_bh(&priv->radar.lock);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++ kfree(buf);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_pulses_prim_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_pulses_read(file, user_buf, count, ppos, 0);
++}
++
++DEBUGFS_READ_FILE_OPS(pulses_prim);
++
++static ssize_t rwnx_dbgfs_pulses_sec_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_pulses_read(file, user_buf, count, ppos, 1);
++}
++
++DEBUGFS_READ_FILE_OPS(pulses_sec);
++
++static ssize_t rwnx_dbgfs_detected_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char *buf;
++ int bufsz,len = 0;
++ ssize_t read;
++
++ if (*ppos != 0)
++ return 0;
++
++ bufsz = 5; // RIU:\n
++ bufsz += rwnx_radar_dump_radar_detected(NULL, 0, &priv->radar,
++ RWNX_RADAR_RIU);
++
++ if (priv->phy.cnt > 1) {
++ bufsz += 5; // FCU:\n
++ bufsz += rwnx_radar_dump_radar_detected(NULL, 0, &priv->radar,
++ RWNX_RADAR_FCU);
++ }
++
++ buf = kmalloc(bufsz, GFP_KERNEL);
++ if (buf == NULL) {
++ return 0;
++ }
++
++ len = scnprintf(&buf[len], bufsz, "RIU:\n");
++ len += rwnx_radar_dump_radar_detected(&buf[len], bufsz - len, &priv->radar,
++ RWNX_RADAR_RIU);
++
++ if (priv->phy.cnt > 1) {
++ len += scnprintf(&buf[len], bufsz - len, "FCU:\n");
++ len += rwnx_radar_dump_radar_detected(&buf[len], bufsz - len,
++ &priv->radar, RWNX_RADAR_FCU);
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++ kfree(buf);
++
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(detected);
++
++static ssize_t rwnx_dbgfs_enable_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "RIU=%d FCU=%d\n", priv->radar.dpd[RWNX_RADAR_RIU]->enabled,
++ priv->radar.dpd[RWNX_RADAR_FCU]->enabled);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_enable_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int val;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if (sscanf(buf, "RIU=%d", &val) > 0)
++ rwnx_radar_detection_enable(&priv->radar, val, RWNX_RADAR_RIU);
++
++ if (sscanf(buf, "FCU=%d", &val) > 0)
++ rwnx_radar_detection_enable(&priv->radar, val, RWNX_RADAR_FCU);
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(enable);
++
++static ssize_t rwnx_dbgfs_band_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "BAND=%d\n", priv->phy.sec_chan.band);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_band_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int val;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++ int band_max = NL80211_BAND_5GHZ;
++
++ if (priv->band_5g_support){
++ band_max = NL80211_BAND_5GHZ + 1;
++ }
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++// #ifdef USE_5G
++// if ((sscanf(buf, "%d", &val) > 0) && (val >= 0) && (val <= NL80211_BAND_5GHZ))
++// #else
++// if ((sscanf(buf, "%d", &val) > 0) && (val >= 0) && (val < NL80211_BAND_5GHZ))
++// #endif
++ if ((sscanf(buf, "%d", &val) > 0) && (val >= 0) && (val < band_max)){
++ priv->phy.sec_chan.band = val;
++ }
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(band);
++
++static ssize_t rwnx_dbgfs_type_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "TYPE=%d\n", priv->phy.sec_chan.type);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_type_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int val;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if ((sscanf(buf, "%d", &val) > 0) && (val >= PHY_CHNL_BW_20) &&
++ (val <= PHY_CHNL_BW_80P80))
++ priv->phy.sec_chan.type = val;
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(type);
++
++static ssize_t rwnx_dbgfs_prim20_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "PRIM20=%dMHz\n", priv->phy.sec_chan.prim20_freq);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_prim20_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int val;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if (sscanf(buf, "%d", &val) > 0)
++ priv->phy.sec_chan.prim20_freq = val;
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(prim20);
++
++static ssize_t rwnx_dbgfs_center1_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "CENTER1=%dMHz\n", priv->phy.sec_chan.center_freq1);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_center1_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int val;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if (sscanf(buf, "%d", &val) > 0)
++ priv->phy.sec_chan.center_freq1 = val;
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(center1);
++
++static ssize_t rwnx_dbgfs_center2_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "CENTER2=%dMHz\n", priv->phy.sec_chan.center_freq2);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_center2_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ int val;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if (sscanf(buf, "%d", &val) > 0)
++ priv->phy.sec_chan.center_freq2 = val;
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(center2);
++
++
++static ssize_t rwnx_dbgfs_set_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return 0;
++}
++
++static ssize_t rwnx_dbgfs_set_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++
++ rwnx_send_set_channel(priv, 1, NULL);
++ rwnx_radar_detection_enable(&priv->radar, RWNX_RADAR_DETECT_ENABLE,
++ RWNX_RADAR_FCU);
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(set);
++#endif /* CONFIG_RWNX_RADAR */
++
++static ssize_t rwnx_dbgfs_regdbg_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++
++ struct rwnx_hw *priv = file->private_data;
++ char buf[32];
++ u32 addr = 0, val = 0, oper = 0;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++ struct dbg_mem_read_cfm mem_read_cfm;
++ int ret;
++
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++
++ buf[len] = '\0';
++
++ if (sscanf(buf, "%x %x %x", &oper, &addr, &val) > 0)
++ printk("addr=%x, val=%x, oper=%x\n", addr, val, oper);
++
++ if (oper == 0) {
++ ret = rwnx_send_dbg_mem_read_req(priv, addr, &mem_read_cfm);
++ printk("[0x%x] = [0x%x]\n", mem_read_cfm.memaddr, mem_read_cfm.memdata);
++ }
++
++ return count;
++}
++
++DEBUGFS_WRITE_FILE_OPS(regdbg);
++
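++/*
++ * Parse a hardware-config request of the form "<id> <p0> ... <pN>" (all in
++ * hex). Each id expects a fixed number of fields (5, 13, 7 or 6, counting
++ * the id itself) and is forwarded to the firmware through
++ * rwnx_send_vendor_hwconfig_req().
++ */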
++static ssize_t rwnx_dbgfs_vendor_hwconfig_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[64];
++ int32_t addr[12];
++ u32_l hwconfig_id;
++ size_t len = min_t(size_t,count,sizeof(buf)-1);
++ int ret;
++ printk("%s\n",__func__);
++ //choose the type of write info by struct
++ //struct mm_set_vendor_trx_param_req trx_param;
++
++ if(copy_from_user(buf,user_buf,len)) {
++ return -EFAULT;
++ }
++
++ buf[len] = '\0';
++ ret = sscanf(buf, "%x %x %x %x %x %x %x %x %x %x %x %x %x",
++ &hwconfig_id, &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5], &addr[6], &addr[7], &addr[8], &addr[9], &addr[10], &addr[11]);
++ if(ret > 13) {
++ printk("param error > 13\n");
++ } else {
++ switch(hwconfig_id)
++ {
++ case 0:
++ if(ret != 5) {
++ printk("param error != 5\n");
++ break;}
++ ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr);
++ printk("ACS_TXOP_REQ bk:0x%x be:0x%x vi:0x%x vo:0x%x\n",addr[0], addr[1], addr[2], addr[3]);
++ break;
++ case 1:
++ if(ret != 13) {
++ printk("param error != 13\n");
++ break;}
++ ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr);
++ printk("CHANNEL_ACCESS_REQ edca:%x,%x,%x,%x, vif:%x, retry_cnt:%x, rts:%x, long_nav:%x, cfe:%x, rc_retry_cnt:%x:%x:%x\n",
++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], addr[11]);
++ break;
++ case 2:
++ if(ret != 7) {
++ printk("param error != 7\n");
++ break;}
++ ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr);
++ printk("MAC_TIMESCALE_REQ sifsA:%x,sifsB:%x,slot:%x,ofdm_delay:%x,long_delay:%x,short_delay:%x\n",
++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
++ break;
++ case 3:
++ if(ret != 6) {
++ printk("param error != 6\n");
++ break;}
++ addr[1] = ~addr[1] + 1;
++ addr[2] = ~addr[2] + 1;
++ addr[3] = ~addr[3] + 1;
++ addr[4] = ~addr[4] + 1;
++ ret = rwnx_send_vendor_hwconfig_req(priv, hwconfig_id, addr);
++ printk("CCA_THRESHOLD_REQ auto_cca:%d, cca20p_rise:%d cca20s_rise:%d cca20p_fail:%d cca20s_fail:%d\n",
++ addr[0], addr[1], addr[2], addr[3], addr[4]);
++ break;
++ default:
++ printk("param error\n");
++ break;
++ }
++ if(ret) {
++ printk("rwnx_send_vendor_hwconfig_req fail: %x\n", ret);
++ }
++ }
++
++ return count;
++}
++
++DEBUGFS_WRITE_FILE_OPS(vendor_hwconfig);
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++#define LINE_MAX_SZ 150
++
++struct st {
++ char line[LINE_MAX_SZ + 1];
++ unsigned int r_idx;
++};
++
++static int compare_idx(const void *st1, const void *st2)
++{
++ int index1 = ((struct st *)st1)->r_idx;
++ int index2 = ((struct st *)st2)->r_idx;
++
++ if (index1 > index2) return 1;
++ if (index1 < index2) return -1;
++
++ return 0;
++}
++
++static const int ru_size[] =
++{
++ 26,
++ 52,
++ 106,
++ 242,
++ 484,
++ 996
++};
++
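++/*
++ * Pretty-print a rate (CCK, OFDM, HT, VHT, HE SU or HE MU) into buf. When
++ * r_idx is not NULL, also compute the flat index of the rate in the global
++ * rate table and prepend it to the line.
++ */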
++static int print_rate(char *buf, int size, int format, int nss, int mcs, int bw,
++ int sgi, int pre, int *r_idx)
++{
++ int res = 0;
++ int bitrates_cck[4] = { 10, 20, 55, 110 };
++ int bitrates_ofdm[8] = { 6, 9, 12, 18, 24, 36, 48, 54};
++ char he_gi[3][4] = {"0.8", "1.6", "3.2"};
++
++ if (format < FORMATMOD_HT_MF) {
++ if (mcs < 4) {
++ if (r_idx) {
++ *r_idx = (mcs * 2) + pre;
++ res = scnprintf(buf, size - res, "%3d ", *r_idx);
++ }
++ res += scnprintf(&buf[res], size - res, "L-CCK/%cP %2u.%1uM ",
++ pre > 0 ? 'L' : 'S',
++ bitrates_cck[mcs] / 10,
++ bitrates_cck[mcs] % 10);
++ } else {
++ mcs -= 4;
++ if (r_idx) {
++ *r_idx = N_CCK + mcs;
++ res = scnprintf(buf, size - res, "%3d ", *r_idx);
++ }
++ res += scnprintf(&buf[res], size - res, "L-OFDM %2u.0M ",
++ bitrates_ofdm[mcs]);
++ }
++ } else if (format < FORMATMOD_VHT) {
++ if (r_idx) {
++ *r_idx = N_CCK + N_OFDM + nss * 32 + mcs * 4 + bw * 2 + sgi;
++ res = scnprintf(buf, size - res, "%3d ", *r_idx);
++ }
++ mcs += nss * 8;
++ res += scnprintf(&buf[res], size - res, "HT%d/%cGI MCS%-2d ",
++ 20 * (1 << bw), sgi ? 'S' : 'L', mcs);
++ } else if (format == FORMATMOD_VHT){
++ if (r_idx) {
++ *r_idx = N_CCK + N_OFDM + N_HT + nss * 80 + mcs * 8 + bw * 2 + sgi;
++ res = scnprintf(buf, size - res, "%3d ", *r_idx);
++ }
++ res += scnprintf(&buf[res], size - res, "VHT%d/%cGI%*cMCS%d/%1d ",
++ 20 * (1 << bw), sgi ? 'S' : 'L', bw > 2 ? 5 : 6, ' ',
++ mcs, nss + 1);
++ } else if (format == FORMATMOD_HE_SU){
++ if (r_idx) {
++ *r_idx = N_CCK + N_OFDM + N_HT + N_VHT + nss * 144 + mcs * 12 + bw * 3 + sgi;
++ res = scnprintf(buf, size - res, "%3d ", *r_idx);
++ }
++ res += scnprintf(&buf[res], size - res, "HE%d/GI%s%*cMCS%d/%1d%*c",
++ 20 * (1 << bw), he_gi[sgi], bw > 2 ? 4 : 5, ' ',
++ mcs, nss + 1, mcs > 9 ? 1 : 2, ' ');
++ } else {
++ if (r_idx) {
++ *r_idx = N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU + nss * 216 + mcs * 18 + bw * 3 + sgi;
++ res = scnprintf(buf, size - res, "%3d ", *r_idx);
++ }
++ res += scnprintf(&buf[res], size - res, "HEMU-%d/GI%s%*cMCS%d/%1d%*c",
++ ru_size[bw], he_gi[sgi], bw > 1 ? 1 : 2, ' ',
++ mcs, nss + 1, mcs > 9 ? 1 : 2, ' ');
++
++ }
++
++ return res;
++}
++
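++/*
++ * Decode a raw rate_config word (format, NSS, MCS, bandwidth, GI) and
++ * delegate to print_rate(). For HE MU the RU size is passed separately,
++ * since it is not carried in the rate_config word itself.
++ */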
++static int print_rate_from_cfg(char *buf, int size, u32 rate_config, int *r_idx, int ru_size)
++{
++ union rwnx_rate_ctrl_info *r_cfg = (union rwnx_rate_ctrl_info *)&rate_config;
++ union rwnx_mcs_index *mcs_index = (union rwnx_mcs_index *)&rate_config;
++ unsigned int ft, pre, gi, bw, nss, mcs, len;
++
++ ft = r_cfg->formatModTx;
++ pre = r_cfg->giAndPreTypeTx >> 1;
++ gi = r_cfg->giAndPreTypeTx;
++ bw = r_cfg->bwTx;
++ if (ft == FORMATMOD_HE_MU) {
++ mcs = mcs_index->he.mcs;
++ nss = mcs_index->he.nss;
++ bw = ru_size;
++ } else if (ft == FORMATMOD_HE_SU) {
++ mcs = mcs_index->he.mcs;
++ nss = mcs_index->he.nss;
++ } else if (ft == FORMATMOD_VHT) {
++ mcs = mcs_index->vht.mcs;
++ nss = mcs_index->vht.nss;
++ } else if (ft >= FORMATMOD_HT_MF) {
++ mcs = mcs_index->ht.mcs;
++ nss = mcs_index->ht.nss;
++ } else {
++ mcs = mcs_index->legacy;
++ nss = 0;
++ }
++
++ len = print_rate(buf, size, ft, nss, mcs, bw, gi, pre, r_idx);
++ return len;
++}
++
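++/*
++ * Inverse of the flat rate index: rebuild a rwnx_rate_ctrl_info from an index
++ * taken from the CCK, OFDM, HT, VHT, HE SU and HE MU ranges, in that order.
++ * For HE MU rates the RU size is returned through *ru_size.
++ */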
++static void idx_to_rate_cfg(int idx, union rwnx_rate_ctrl_info *r_cfg, int *ru_size)
++{
++ r_cfg->value = 0;
++ if (idx < N_CCK)
++ {
++ r_cfg->formatModTx = FORMATMOD_NON_HT;
++ r_cfg->giAndPreTypeTx = (idx & 1) << 1;
++ r_cfg->mcsIndexTx = idx / 2;
++ }
++ else if (idx < (N_CCK + N_OFDM))
++ {
++ r_cfg->formatModTx = FORMATMOD_NON_HT;
++ r_cfg->mcsIndexTx = idx - N_CCK + 4;
++ }
++ else if (idx < (N_CCK + N_OFDM + N_HT))
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ idx -= (N_CCK + N_OFDM);
++ r_cfg->formatModTx = FORMATMOD_HT_MF;
++ r->ht.nss = idx / (8*2*2);
++ r->ht.mcs = (idx % (8*2*2)) / (2*2);
++ r_cfg->bwTx = ((idx % (8*2*2)) % (2*2)) / 2;
++ r_cfg->giAndPreTypeTx = idx & 1;
++ }
++ else if (idx < (N_CCK + N_OFDM + N_HT + N_VHT))
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ idx -= (N_CCK + N_OFDM + N_HT);
++ r_cfg->formatModTx = FORMATMOD_VHT;
++ r->vht.nss = idx / (10*4*2);
++ r->vht.mcs = (idx % (10*4*2)) / (4*2);
++ r_cfg->bwTx = ((idx % (10*4*2)) % (4*2)) / 2;
++ r_cfg->giAndPreTypeTx = idx & 1;
++ }
++ else if (idx < (N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU))
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ idx -= (N_CCK + N_OFDM + N_HT + N_VHT);
++ r_cfg->formatModTx = FORMATMOD_HE_SU;
++ r->vht.nss = idx / (12*4*3);
++ r->vht.mcs = (idx % (12*4*3)) / (4*3);
++ r_cfg->bwTx = ((idx % (12*4*3)) % (4*3)) / 3;
++ r_cfg->giAndPreTypeTx = idx % 3;
++ }
++ else
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ BUG_ON(ru_size == NULL);
++
++ idx -= (N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU);
++ r_cfg->formatModTx = FORMATMOD_HE_MU;
++ r->vht.nss = idx / (12*6*3);
++ r->vht.mcs = (idx % (12*6*3)) / (6*3);
++ *ru_size = ((idx % (12*6*3)) % (6*3)) / 3;
++ r_cfg->giAndPreTypeTx = idx % 3;
++ r_cfg->bwTx = 0;
++ }
++}
++
++static void idx_to_rate_cfg1(unsigned int formatmod,
++ unsigned int mcs,unsigned int nss,
++ unsigned int bwTx,unsigned int gi,
++ union rwnx_rate_ctrl_info *r_cfg, int *ru_size)
++{
++ r_cfg->value = 0;
++
++ switch(formatmod){
++ case FORMATMOD_NON_HT:
++ {
++ r_cfg->formatModTx = formatmod;
++ r_cfg->giAndPreTypeTx = 1;
++ r_cfg->mcsIndexTx = mcs;
++ break;
++ }
++ case FORMATMOD_NON_HT_DUP_OFDM:
++ {
++ r_cfg->formatModTx = formatmod;
++ r_cfg->giAndPreTypeTx = gi;
++ r_cfg->mcsIndexTx = mcs;
++ break;
++ }
++ case FORMATMOD_HT_MF:
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ r_cfg->formatModTx = formatmod;
++ r->ht.nss = nss;
++ r->ht.mcs = mcs;
++ r_cfg->bwTx = bwTx;
++ r_cfg->giAndPreTypeTx = gi;
++ break;
++ }
++ case FORMATMOD_VHT:
++ case FORMATMOD_HE_SU:
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ r_cfg->formatModTx = formatmod;
++ r->vht.nss = nss;
++ r->vht.mcs = mcs;
++ r_cfg->bwTx = bwTx;
++ r_cfg->giAndPreTypeTx = gi;
++ break;
++ }
++ case FORMATMOD_HE_MU:
++ {
++ union rwnx_mcs_index *r = (union rwnx_mcs_index *)r_cfg;
++
++ r_cfg->formatModTx = formatmod;
++ r->he.nss = nss;
++ r->he.mcs = mcs;
++ r_cfg->bwTx = 0;
++ r_cfg->giAndPreTypeTx = gi;
++ break;
++ }
++ default:
++ printk("Don't have the formatmod");
++ }
++}
++
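++/*
++ * Read handler for the per-station "stats" entry: the station is identified
++ * by its MAC address, taken from the name of the parent debugfs directory.
++ * The rate-control statistics are fetched from the firmware, sorted by rate
++ * index and dumped together with the HE TB and A-MPDU summaries.
++ */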
++static ssize_t rwnx_dbgfs_rc_stats_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_hw *priv = file->private_data;
++ char *buf;
++ int bufsz, len = 0;
++ ssize_t read;
++ int i = 0;
++ int error = 0;
++ struct me_rc_stats_cfm me_rc_stats_cfm;
++ unsigned int no_samples;
++ struct st *st;
++ u8 mac[6];
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* everything should fit in one call */
++ if (*ppos)
++ return 0;
++
++ /* Get the station index from MAC address */
++ sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
++ &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
++ //if (mac == NULL)
++ // return 0;
++ sta = rwnx_get_sta(priv, mac);
++ if (sta == NULL)
++ return 0;
++
++ /* Forward the information to the LMAC */
++ if ((error = rwnx_send_me_rc_stats(priv, sta->sta_idx, &me_rc_stats_cfm)))
++ return error;
++
++ no_samples = me_rc_stats_cfm.no_samples;
++ if (no_samples == 0)
++ return 0;
++
++ bufsz = no_samples * LINE_MAX_SZ + 500;
++
++ buf = kmalloc(bufsz + 1, GFP_ATOMIC);
++ if (buf == NULL)
++ return 0;
++
++ st = kmalloc(sizeof(struct st) * no_samples, GFP_ATOMIC);
++ if (st == NULL)
++ {
++ kfree(buf);
++ return 0;
++ }
++
++ for (i = 0; i < no_samples; i++)
++ {
++ unsigned int tp, eprob;
++ len = print_rate_from_cfg(st[i].line, LINE_MAX_SZ,
++ me_rc_stats_cfm.rate_stats[i].rate_config,
++ &st[i].r_idx, 0);
++
++ if (me_rc_stats_cfm.sw_retry_step != 0)
++ {
++ len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c",
++ me_rc_stats_cfm.retry_step_idx[me_rc_stats_cfm.sw_retry_step] == i ? '*' : ' ');
++ }
++ else
++ {
++ len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, " ");
++ }
++ len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c",
++ me_rc_stats_cfm.retry_step_idx[0] == i ? 'T' : ' ');
++ len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c",
++ me_rc_stats_cfm.retry_step_idx[1] == i ? 't' : ' ');
++ len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, "%c ",
++ me_rc_stats_cfm.retry_step_idx[2] == i ? 'P' : ' ');
++
++ tp = me_rc_stats_cfm.tp[i] / 10;
++ len += scnprintf(&st[i].line[len], LINE_MAX_SZ - len, " %4u.%1u",
++ tp / 10, tp % 10);
++
++ eprob = ((me_rc_stats_cfm.rate_stats[i].probability * 1000) >> 16) + 1;
++ len += scnprintf(&st[i].line[len],LINE_MAX_SZ - len,
++ " %4u.%1u %5u(%6u) %6u",
++ eprob / 10, eprob % 10,
++ me_rc_stats_cfm.rate_stats[i].success,
++ me_rc_stats_cfm.rate_stats[i].attempts,
++ me_rc_stats_cfm.rate_stats[i].sample_skipped);
++ }
++ len = scnprintf(buf, bufsz ,
++ "\nTX rate info for %02X:%02X:%02X:%02X:%02X:%02X:\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
++
++ len += scnprintf(&buf[len], bufsz - len,
++ " # type rate tpt eprob ok( tot) skipped\n");
++
++ // add sorted statistics to the buffer
++ sort(st, no_samples, sizeof(st[0]), compare_idx, NULL);
++ for (i = 0; i < no_samples; i++)
++ {
++ len += scnprintf(&buf[len], bufsz - len, "%s\n", st[i].line);
++ }
++
++ // display HE TB statistics if any
++ if (me_rc_stats_cfm.rate_stats[RC_HE_STATS_IDX].rate_config != 0) {
++ unsigned int tp, eprob;
++ struct rc_rate_stats *rate_stats = &me_rc_stats_cfm.rate_stats[RC_HE_STATS_IDX];
++ int ru_index = rate_stats->ru_and_length & 0x07;
++ int ul_length = rate_stats->ru_and_length >> 3;
++
++ len += scnprintf(&buf[len], bufsz - len,
++ "\nHE TB rate info:\n");
++
++ len += scnprintf(&buf[len], bufsz - len,
++ " type rate tpt eprob ok( tot) ul_length\n ");
++ len += print_rate_from_cfg(&buf[len], bufsz - len, rate_stats->rate_config,
++ NULL, ru_index);
++
++ tp = me_rc_stats_cfm.tp[RC_HE_STATS_IDX] / 10;
++ len += scnprintf(&buf[len], bufsz - len, " %4u.%1u",
++ tp / 10, tp % 10);
++
++ eprob = ((rate_stats->probability * 1000) >> 16) + 1;
++ len += scnprintf(&buf[len],bufsz - len,
++ " %4u.%1u %5u(%6u) %6u\n",
++ eprob / 10, eprob % 10,
++ rate_stats->success,
++ rate_stats->attempts,
++ ul_length);
++ }
++
++ len += scnprintf(&buf[len], bufsz - len, "\n MPDUs AMPDUs AvLen trialP");
++ len += scnprintf(&buf[len], bufsz - len, "\n%6u %6u %3d.%1d %6u\n",
++ me_rc_stats_cfm.ampdu_len,
++ me_rc_stats_cfm.ampdu_packets,
++ me_rc_stats_cfm.avg_ampdu_len >> 16,
++ ((me_rc_stats_cfm.avg_ampdu_len * 10) >> 16) % 10,
++ me_rc_stats_cfm.sample_wait);
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++ kfree(buf);
++ kfree(st);
++
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(rc_stats);
++
++static ssize_t rwnx_dbgfs_rc_fixed_rate_idx_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_hw *priv = file->private_data;
++ u8 mac[6];
++ char buf[10];
++ int fixed_rate_idx = 1;
++ unsigned int formatmod, mcs, nss, bwTx, gi;
++ union rwnx_rate_ctrl_info rate_config;
++ int error = 0;
++ size_t len = min_t(size_t, count, sizeof(buf) - 1);
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Get the station index from MAC address */
++ sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
++ &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
++ //if (mac == NULL)
++ // return 0;
++ sta = rwnx_get_sta(priv, mac);
++ if (sta == NULL)
++ return 0;
++
++ /* Get the content of the file */
++ if (copy_from_user(buf, user_buf, len))
++ return -EFAULT;
++ buf[len] = '\0';
++ //sscanf(buf, "%i\n", &fixed_rate_idx);
++ sscanf(buf, "%u %u %u %u %u",&formatmod, &mcs, &nss, &bwTx, &gi);
++ //printk("%u %u %u %u %u\n",formatmod, mcs, nss, bwTx, gi);
++ /* Convert rate index into rate configuration */
++ if ((fixed_rate_idx < 0) || (fixed_rate_idx >= (N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU)))
++ {
++ // disable fixed rate
++ rate_config.value = (u32)-1;
++ }
++ else
++ {
++ //idx_to_rate_cfg(fixed_rate_idx, &rate_config, NULL);
++ idx_to_rate_cfg1(formatmod, mcs, nss, bwTx, gi, &rate_config, NULL);
++ }
++ /*union rwnx_rate_ctrl_info *r_cfg=&rate_config;
++ printk("formatModTx=%u mcsIndexTx=%u bwTx=%u giAndPreTypeTx=%u\n",r_cfg->formatModTx,r_cfg->mcsIndexTx,r_cfg->bwTx,r_cfg->giAndPreTypeTx);
++ printk("you wen ti");*/
++ // Forward the request to the LMAC
++ if ((error = rwnx_send_me_rc_set_rate(priv, sta->sta_idx,
++ (u16)rate_config.value)) != 0)
++ {
++ return error;
++ }
++
++ priv->debugfs.rc_config[sta->sta_idx] = (int)rate_config.value;
++ return len;
++
++}
++
++
++DEBUGFS_WRITE_FILE_OPS(rc_fixed_rate_idx);
++
++static ssize_t rwnx_dbgfs_last_rx_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_hw *priv = file->private_data;
++ struct rwnx_rx_rate_stats *rate_stats;
++ char *buf;
++ int bufsz, i, len = 0;
++ ssize_t read;
++ unsigned int fmt, pre, bw, nss, mcs, gi;
++ u8 mac[6];
++ struct rx_vector_1 *last_rx;
++ char hist[] = "##################################################";
++ int hist_len = sizeof(hist) - 1;
++ u8 nrx;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* everything should fit in one call */
++ if (*ppos)
++ return 0;
++
++ /* Get the station index from MAC address */
++ sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
++ &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
++ //if (mac == NULL)
++ // return 0;
++ sta = rwnx_get_sta(priv, mac);
++ if (sta == NULL)
++ return 0;
++
++ rate_stats = &sta->stats.rx_rate;
++ bufsz = (rate_stats->rate_cnt * ( 50 + hist_len) + 200);
++ buf = kmalloc(bufsz + 1, GFP_ATOMIC);
++ if (buf == NULL)
++ return 0;
++
++ // Get number of RX paths
++ nrx = (priv->version_cfm.version_phy_1 & MDM_NRX_MASK) >> MDM_NRX_LSB;
++
++ len += scnprintf(buf, bufsz,
++ "\nRX rate info for %02X:%02X:%02X:%02X:%02X:%02X:\n",
++ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
++
++ // Display Statistics
++ for (i = 0 ; i < rate_stats->size ; i++ )
++ {
++ if (rate_stats->table[i]) {
++ union rwnx_rate_ctrl_info rate_config;
++ int percent = (rate_stats->table[i] * 1000) / rate_stats->cpt;
++ int p;
++ int ru_size;
++
++ idx_to_rate_cfg(i, &rate_config, &ru_size);
++ len += print_rate_from_cfg(&buf[len], bufsz - len,
++ rate_config.value, NULL, ru_size);
++ p = (percent * hist_len) / 1000;
++ len += scnprintf(&buf[len], bufsz - len, ": %6d(%3d.%1d%%)%.*s\n",
++ rate_stats->table[i],
++ percent / 10, percent % 10, p, hist);
++ }
++ }
++
++ // Display detailed info of the last received rate
++ last_rx = &sta->stats.last_rx.rx_vect1;
++
++ len += scnprintf(&buf[len], bufsz - len,"\nLast received rate\n"
++ " type rate LDPC STBC BEAMFM DCM DOPPLER %s\n",
++ (nrx > 1) ? "rssi1(dBm) rssi2(dBm)" : "rssi(dBm)");
++
++ fmt = last_rx->format_mod;
++ bw = last_rx->ch_bw;
++ pre = last_rx->pre_type;
++ if (fmt >= FORMATMOD_HE_SU) {
++ mcs = last_rx->he.mcs;
++ nss = last_rx->he.nss;
++ gi = last_rx->he.gi_type;
++ if (fmt == FORMATMOD_HE_MU)
++ bw = last_rx->he.ru_size;
++ } else if (fmt == FORMATMOD_VHT) {
++ mcs = last_rx->vht.mcs;
++ nss = last_rx->vht.nss;
++ gi = last_rx->vht.short_gi;
++ } else if (fmt >= FORMATMOD_HT_MF) {
++ mcs = last_rx->ht.mcs % 8;
++ nss = last_rx->ht.mcs / 8;
++ gi = last_rx->ht.short_gi;
++ } else {
++ BUG_ON((mcs = legrates_lut[last_rx->leg_rate]) == -1);
++ nss = 0;
++ gi = 0;
++ }
++
++ len += print_rate(&buf[len], bufsz - len, fmt, nss, mcs, bw, gi, pre, NULL);
++
++ /* flags for HT/VHT/HE */
++ if (fmt >= FORMATMOD_HE_SU) {
++ len += scnprintf(&buf[len], bufsz - len, " %c %c %c %c %c",
++ last_rx->he.fec ? 'L' : ' ',
++ last_rx->he.stbc ? 'S' : ' ',
++ last_rx->he.beamformed ? 'B' : ' ',
++ last_rx->he.dcm ? 'D' : ' ',
++ last_rx->he.doppler ? 'D' : ' ');
++ } else if (fmt == FORMATMOD_VHT) {
++ len += scnprintf(&buf[len], bufsz - len, " %c %c %c ",
++ last_rx->vht.fec ? 'L' : ' ',
++ last_rx->vht.stbc ? 'S' : ' ',
++ last_rx->vht.beamformed ? 'B' : ' ');
++ } else if (fmt >= FORMATMOD_HT_MF) {
++ len += scnprintf(&buf[len], bufsz - len, " %c %c ",
++ last_rx->ht.fec ? 'L' : ' ',
++ last_rx->ht.stbc ? 'S' : ' ');
++ } else {
++ len += scnprintf(&buf[len], bufsz - len, " ");
++ }
++ if (nrx > 1) {
++ len += scnprintf(&buf[len], bufsz - len, " %-4d %d\n",
++ last_rx->rssi1, last_rx->rssi1);
++ } else {
++ len += scnprintf(&buf[len], bufsz - len, " %d\n", last_rx->rssi1);
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, len);
++
++ kfree(buf);
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_last_rx_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_hw *priv = file->private_data;
++ u8 mac[6];
++
++ /* Get the station index from MAC address */
++ sscanf(file->f_path.dentry->d_parent->d_iname, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
++ &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]);
++ //if (mac == NULL)
++ // return 0;
++ sta = rwnx_get_sta(priv, mac);
++ if (sta == NULL)
++ return 0;
++
++ /* Prevent interrupt preemption, as these statistics are updated in
++ * interrupt context */
++ spin_lock_bh(&priv->tx_lock);
++ memset(sta->stats.rx_rate.table, 0,
++ sta->stats.rx_rate.size * sizeof(sta->stats.rx_rate.table[0]));
++ sta->stats.rx_rate.cpt = 0;
++ sta->stats.rx_rate.rate_cnt = 0;
++ spin_unlock_bh(&priv->tx_lock);
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(last_rx);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#ifdef CONFIG_RWNX_FULLMAC
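++/*
++ * Worker consuming the rc_sta ring filled by _rwnx_dbgfs_rc_stat_write(): for
++ * each station index it either creates the per-station rate-control debugfs
++ * entries (stats, fixed_rate_idx, rx_rate) and allocates the RX rate table,
++ * or, if the directory already exists, tears everything down again, saving a
++ * possible fixed-rate setting for RC_CONFIG_DUR ms.
++ */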
++static void rwnx_rc_stat_work(struct work_struct *ws)
++{
++ struct rwnx_debugfs *rwnx_debugfs = container_of(ws, struct rwnx_debugfs,
++ rc_stat_work);
++ struct rwnx_hw *rwnx_hw = container_of(rwnx_debugfs, struct rwnx_hw,
++ debugfs);
++ struct rwnx_sta *sta;
++ uint8_t ridx, sta_idx;
++
++ ridx = rwnx_debugfs->rc_read;
++ sta_idx = rwnx_debugfs->rc_sta[ridx];
++ if (sta_idx > (NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX)) {
++ WARN(1, "Invalid sta index %d", sta_idx);
++ return;
++ }
++
++ rwnx_debugfs->rc_sta[ridx] = 0xFF;
++ ridx = (ridx + 1) % ARRAY_SIZE(rwnx_debugfs->rc_sta);
++ rwnx_debugfs->rc_read = ridx;
++ sta = &rwnx_hw->sta_table[sta_idx];
++ if (!sta) {
++ WARN(1, "Invalid sta %d", sta_idx);
++ return;
++ }
++
++ if (rwnx_debugfs->dir_sta[sta_idx] == NULL) {
++ /* register the sta */
++ struct dentry *dir_rc = rwnx_debugfs->dir_rc;
++ struct dentry *dir_sta;
++ struct dentry *file;
++ char sta_name[18];
++ struct rwnx_rx_rate_stats *rate_stats = &sta->stats.rx_rate;
++ int nb_rx_rate = N_CCK + N_OFDM;
++ struct rwnx_rc_config_save *rc_cfg, *next;
++
++ if (sta->sta_idx >= NX_REMOTE_STA_MAX) {
++ scnprintf(sta_name, sizeof(sta_name), "bc_mc");
++ } else {
++ scnprintf(sta_name, sizeof(sta_name), "%pM", sta->mac_addr);
++ }
++
++ if (!(dir_sta = debugfs_create_dir(sta_name, dir_rc)))
++ goto error;
++
++ rwnx_debugfs->dir_sta[sta->sta_idx] = dir_sta;
++
++ file = debugfs_create_file("stats", S_IRUSR, dir_sta, rwnx_hw,
++ &rwnx_dbgfs_rc_stats_ops);
++ if (IS_ERR_OR_NULL(file))
++ goto error_after_dir;
++
++ file = debugfs_create_file("fixed_rate_idx", S_IWUSR , dir_sta, rwnx_hw,
++ &rwnx_dbgfs_rc_fixed_rate_idx_ops);
++ if (IS_ERR_OR_NULL(file))
++ goto error_after_dir;
++
++ file = debugfs_create_file("rx_rate", S_IRUSR | S_IWUSR, dir_sta, rwnx_hw,
++ &rwnx_dbgfs_last_rx_ops);
++ if (IS_ERR_OR_NULL(file))
++ goto error_after_dir;
++
++ if (rwnx_hw->mod_params->ht_on)
++ nb_rx_rate += N_HT;
++
++ if (rwnx_hw->mod_params->vht_on)
++ nb_rx_rate += N_VHT;
++
++ if (rwnx_hw->mod_params->he_on)
++ nb_rx_rate += N_HE_SU + N_HE_MU;
++
++ rate_stats->table = kzalloc(nb_rx_rate * sizeof(rate_stats->table[0]),
++ GFP_KERNEL);
++ if (!rate_stats->table)
++ goto error_after_dir;
++
++ rate_stats->size = nb_rx_rate;
++ rate_stats->cpt = 0;
++ rate_stats->rate_cnt = 0;
++
++ /* By default, enable the rate controller */
++ rwnx_debugfs->rc_config[sta_idx] = -1;
++
++ /* Unless we already fix the rate for this station */
++ list_for_each_entry_safe(rc_cfg, next, &rwnx_debugfs->rc_config_save, list) {
++ if (jiffies_to_msecs(jiffies - rc_cfg->timestamp) > RC_CONFIG_DUR) {
++ list_del(&rc_cfg->list);
++ kfree(rc_cfg);
++ } else if (!memcmp(rc_cfg->mac_addr, sta->mac_addr, ETH_ALEN)) {
++ rwnx_debugfs->rc_config[sta_idx] = rc_cfg->rate;
++ list_del(&rc_cfg->list);
++ kfree(rc_cfg);
++ break;
++ }
++ }
++
++ if ((rwnx_debugfs->rc_config[sta_idx] >= 0) &&
++ rwnx_send_me_rc_set_rate(rwnx_hw, sta_idx,
++ (u16)rwnx_debugfs->rc_config[sta_idx]))
++ rwnx_debugfs->rc_config[sta_idx] = -1;
++
++ } else {
++ /* unregister the sta */
++ if (sta->stats.rx_rate.table) {
++ kfree(sta->stats.rx_rate.table);
++ sta->stats.rx_rate.table = NULL;
++ }
++ sta->stats.rx_rate.size = 0;
++ sta->stats.rx_rate.cpt = 0;
++ sta->stats.rx_rate.rate_cnt = 0;
++
++ /* If a fixed rate was set for this station, save the configuration in
++ case we reconnect to this station within RC_CONFIG_DUR msec */
++ if (rwnx_debugfs->rc_config[sta_idx] >= 0) {
++ struct rwnx_rc_config_save *rc_cfg;
++ rc_cfg = kmalloc(sizeof(*rc_cfg), GFP_KERNEL);
++ if (rc_cfg) {
++ rc_cfg->rate = rwnx_debugfs->rc_config[sta_idx];
++ rc_cfg->timestamp = jiffies;
++ memcpy(rc_cfg->mac_addr, sta->mac_addr, ETH_ALEN);
++ list_add_tail(&rc_cfg->list, &rwnx_debugfs->rc_config_save);
++ }
++ }
++
++ debugfs_remove_recursive(rwnx_debugfs->dir_sta[sta_idx]);
++ rwnx_debugfs->dir_sta[sta->sta_idx] = NULL;
++ }
++
++ return;
++
++ error_after_dir:
++ debugfs_remove_recursive(rwnx_debugfs->dir_sta[sta_idx]);
++ rwnx_debugfs->dir_sta[sta->sta_idx] = NULL;
++ error:
++ dev_err(rwnx_hw->dev,
++ "Error while (un)registering debug entry for sta %d\n", sta_idx);
++}
++
++void _rwnx_dbgfs_rc_stat_write(struct rwnx_debugfs *rwnx_debugfs, uint8_t sta_idx)
++{
++ uint8_t widx = rwnx_debugfs->rc_write;
++ if (rwnx_debugfs->rc_sta[widx] != 0xFF) {
++ WARN(1, "Overlap in debugfs rc_sta table\n");
++ }
++
++ if (rwnx_debugfs->unregistering)
++ return;
++
++ rwnx_debugfs->rc_sta[widx] = sta_idx;
++ widx = (widx + 1) % ARRAY_SIZE(rwnx_debugfs->rc_sta);
++ rwnx_debugfs->rc_write = widx;
++
++ schedule_work(&rwnx_debugfs->rc_stat_work);
++}
++
++void rwnx_dbgfs_register_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta)
++{
++ _rwnx_dbgfs_rc_stat_write(&rwnx_hw->debugfs, sta->sta_idx);
++}
++
++void rwnx_dbgfs_unregister_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta)
++{
++ _rwnx_dbgfs_rc_stat_write(&rwnx_hw->debugfs, sta->sta_idx);
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++int rwnx_dbgfs_register(struct rwnx_hw *rwnx_hw, const char *name)
++{
++#ifdef CONFIG_RWNX_FULLMAC
++ struct dentry *phyd = rwnx_hw->wiphy->debugfsdir;
++ struct dentry *dir_rc;
++#endif /* CONFIG_RWNX_FULLMAC */
++ struct rwnx_debugfs *rwnx_debugfs = &rwnx_hw->debugfs;
++ struct dentry *dir_drv, *dir_diags;
++
++ if (!(dir_drv = debugfs_create_dir(name, phyd)))
++ return -ENOMEM;
++
++ rwnx_debugfs->dir = dir_drv;
++ rwnx_debugfs->unregistering = false;
++
++ if (!(dir_diags = debugfs_create_dir("diags", dir_drv)))
++ goto err;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (!(dir_rc = debugfs_create_dir("rc", dir_drv)))
++ goto err;
++ rwnx_debugfs->dir_rc = dir_rc;
++ INIT_WORK(&rwnx_debugfs->rc_stat_work, rwnx_rc_stat_work);
++ INIT_LIST_HEAD(&rwnx_debugfs->rc_config_save);
++ rwnx_debugfs->rc_write = rwnx_debugfs->rc_read = 0;
++ memset(rwnx_debugfs->rc_sta, 0xFF, sizeof(rwnx_debugfs->rc_sta));
++#endif
++
++ DEBUGFS_ADD_U32(tcp_pacing_shift, dir_drv, &rwnx_hw->tcp_pacing_shift,
++ S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(stats, dir_drv, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(sys_stats, dir_drv, S_IRUSR);
++ DEBUGFS_ADD_FILE(txq, dir_drv, S_IRUSR);
++ DEBUGFS_ADD_FILE(acsinfo, dir_drv, S_IRUSR);
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ DEBUGFS_ADD_FILE(mu_group, dir_drv, S_IRUSR);
++#endif
++ DEBUGFS_ADD_FILE(regdbg, dir_drv, S_IWUSR);
++ DEBUGFS_ADD_FILE(vendor_hwconfig, dir_drv, S_IWUSR);
++
++#ifdef CONFIG_RWNX_P2P_DEBUGFS
++ {
++ /* Create a p2p directory */
++ struct dentry *dir_p2p;
++ if (!(dir_p2p = debugfs_create_dir("p2p", dir_drv)))
++ goto err;
++
++ /* Add file allowing to control Opportunistic PS */
++ DEBUGFS_ADD_FILE(oppps, dir_p2p, S_IRUSR);
++ /* Add file allowing to control Notice of Absence */
++ DEBUGFS_ADD_FILE(noa, dir_p2p, S_IRUSR);
++ }
++#endif /* CONFIG_RWNX_P2P_DEBUGFS */
++
++ if (rwnx_hw->fwlog_en) {
++ rwnx_fw_log_init(&rwnx_hw->debugfs.fw_log);
++ DEBUGFS_ADD_FILE(fw_log, dir_drv, S_IWUSR | S_IRUSR);
++ }
++#ifdef CONFIG_RWNX_RADAR
++ {
++ struct dentry *dir_radar, *dir_sec;
++ if (!(dir_radar = debugfs_create_dir("radar", dir_drv)))
++ goto err;
++
++ DEBUGFS_ADD_FILE(pulses_prim, dir_radar, S_IRUSR);
++ DEBUGFS_ADD_FILE(detected, dir_radar, S_IRUSR);
++ DEBUGFS_ADD_FILE(enable, dir_radar, S_IRUSR);
++
++ if (rwnx_hw->phy.cnt == 2) {
++ DEBUGFS_ADD_FILE(pulses_sec, dir_radar, S_IRUSR);
++
++ if (!(dir_sec = debugfs_create_dir("sec", dir_radar)))
++ goto err;
++
++ DEBUGFS_ADD_FILE(band, dir_sec, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(type, dir_sec, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(prim20, dir_sec, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(center1, dir_sec, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(center2, dir_sec, S_IWUSR | S_IRUSR);
++ DEBUGFS_ADD_FILE(set, dir_sec, S_IWUSR | S_IRUSR);
++ }
++ }
++#endif /* CONFIG_RWNX_RADAR */
++ return 0;
++
++err:
++ rwnx_dbgfs_unregister(rwnx_hw);
++ return -ENOMEM;
++}
++
++void rwnx_dbgfs_unregister(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_debugfs *rwnx_debugfs = &rwnx_hw->debugfs;
++#ifdef CONFIG_RWNX_FULLMAC
++ struct rwnx_rc_config_save *cfg, *next;
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++ list_for_each_entry_safe(cfg, next, &rwnx_debugfs->rc_config_save, list) {
++ list_del(&cfg->list);
++ kfree(cfg);
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ if (rwnx_hw->fwlog_en)
++ rwnx_fw_log_deinit(&rwnx_hw->debugfs.fw_log);
++
++ if (!rwnx_hw->debugfs.dir)
++ return;
++
++ rwnx_debugfs->unregistering = true;
++#ifdef CONFIG_RWNX_FULLMAC
++ flush_work(&rwnx_debugfs->rc_stat_work);
++#endif
++ debugfs_remove_recursive(rwnx_hw->debugfs.dir);
++ rwnx_hw->debugfs.dir = NULL;
++}
++
++#endif // CONFIG_RWNX_DEBUGFS
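
Editorial note: the rate-control debugfs entries above are created and removed from a work item rather than directly from the connect/disconnect path. rwnx_dbgfs_register_rc_stat() and rwnx_dbgfs_unregister_rc_stat() both push the station index into the rc_sta[] ring through _rwnx_dbgfs_rc_stat_write() (0xFF marking an empty slot) and schedule rc_stat_work; the handler then pops indices from rc_read and registers or unregisters depending on whether dir_sta[sta_idx] already exists. A minimal, self-contained sketch of that single-producer ring pattern, using hypothetical demo_* names rather than the driver's own, might look like this:

    /* Sketch only: same handoff idea as rc_sta[] above, with 0xFF as the
     * "empty slot" sentinel. Exactly one work item drains the ring, so no
     * locking is needed between push and pop here. */
    #include <linux/types.h>
    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/workqueue.h>

    #define DEMO_RING_SIZE 10
    #define DEMO_EMPTY     0xFF

    struct demo_ring {
        u8 slot[DEMO_RING_SIZE];
        u8 write;
        u8 read;
        struct work_struct work;
    };

    static void demo_ring_init(struct demo_ring *r, work_func_t fn)
    {
        memset(r->slot, DEMO_EMPTY, sizeof(r->slot));
        r->write = r->read = 0;
        INIT_WORK(&r->work, fn);
    }

    /* Producer side: called from the connect/disconnect path. */
    static void demo_ring_push(struct demo_ring *r, u8 idx)
    {
        WARN(r->slot[r->write] != DEMO_EMPTY, "ring overlap\n");
        r->slot[r->write] = idx;
        r->write = (r->write + 1) % DEMO_RING_SIZE;
        schedule_work(&r->work);
    }

    /* Consumer side: called from the work handler; returns DEMO_EMPTY once drained. */
    static u8 demo_ring_pop(struct demo_ring *r)
    {
        u8 idx = r->slot[r->read];

        if (idx != DEMO_EMPTY) {
            r->slot[r->read] = DEMO_EMPTY;
            r->read = (r->read + 1) % DEMO_RING_SIZE;
        }
        return idx;
    }
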
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_debugfs.h
+@@ -0,0 +1,203 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_debugfs.h
++ *
++ * @brief Miscellaneous utility function definitions
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++
++#ifndef _RWNX_DEBUGFS_H_
++#define _RWNX_DEBUGFS_H_
++
++#include <linux/workqueue.h>
++#include <linux/if_ether.h>
++#include <linux/version.h>
++#include "rwnx_fw_trace.h"
++
++struct rwnx_hw;
++struct rwnx_sta;
++
++/* some macros taken from iwlwifi */
++/* TODO: replace with generic read and fill read buffer in open to avoid double
++ * reads */
++#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
++ if (!debugfs_create_file(#name, mode, parent, rwnx_hw, \
++ &rwnx_dbgfs_##name##_ops)) \
++ goto err; \
++} while (0)
++
++#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
++ struct dentry *__tmp; \
++ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
++ parent, ptr); \
++ if (IS_ERR(__tmp) || !__tmp) \
++ goto err; \
++} while (0)
++
++#define DEBUGFS_ADD_X64(name, parent, ptr) do { \
++ struct dentry *__tmp; \
++ __tmp = debugfs_create_x64(#name, S_IWUSR | S_IRUSR, \
++ parent, ptr); \
++ if (IS_ERR(__tmp) || !__tmp) \
++ goto err; \
++} while (0)
++
++#define DEBUGFS_ADD_U64(name, parent, ptr, mode) do { \
++ struct dentry *__tmp; \
++ __tmp = debugfs_create_u64(#name, mode, \
++ parent, ptr); \
++ if (IS_ERR(__tmp) || !__tmp) \
++ goto err; \
++} while (0)
++
++#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
++ struct dentry *__tmp; \
++ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
++ parent, ptr); \
++ if (IS_ERR(__tmp) || !__tmp) \
++ goto err; \
++} while (0)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)
++#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \
++ debugfs_create_u32(#name, mode, \
++ parent, ptr); \
++} while (0)
++#else
++#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \
++ struct dentry *__tmp; \
++ __tmp = debugfs_create_u32(#name, mode, \
++ parent, ptr); \
++ if (IS_ERR(__tmp) || !__tmp) \
++ goto err; \
++ } while (0)
++#endif
++
++
++/* file operation */
++#define DEBUGFS_READ_FUNC(name) \
++ static ssize_t rwnx_dbgfs_##name##_read(struct file *file, \
++ char __user *user_buf, \
++ size_t count, loff_t *ppos);
++
++#define DEBUGFS_WRITE_FUNC(name) \
++ static ssize_t rwnx_dbgfs_##name##_write(struct file *file, \
++ const char __user *user_buf,\
++ size_t count, loff_t *ppos);
++
++#define DEBUGFS_OPEN_FUNC(name) \
++ static int rwnx_dbgfs_##name##_open(struct inode *inode, \
++ struct file *file);
++
++#define DEBUGFS_RELEASE_FUNC(name) \
++ static int rwnx_dbgfs_##name##_release(struct inode *inode, \
++ struct file *file);
++
++#define DEBUGFS_READ_FILE_OPS(name) \
++ DEBUGFS_READ_FUNC(name); \
++static const struct file_operations rwnx_dbgfs_##name##_ops = { \
++ .read = rwnx_dbgfs_##name##_read, \
++ .open = simple_open, \
++ .llseek = generic_file_llseek, \
++};
++
++#define DEBUGFS_WRITE_FILE_OPS(name) \
++ DEBUGFS_WRITE_FUNC(name); \
++static const struct file_operations rwnx_dbgfs_##name##_ops = { \
++ .write = rwnx_dbgfs_##name##_write, \
++ .open = simple_open, \
++ .llseek = generic_file_llseek, \
++};
++
++#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
++ DEBUGFS_READ_FUNC(name); \
++ DEBUGFS_WRITE_FUNC(name); \
++static const struct file_operations rwnx_dbgfs_##name##_ops = { \
++ .write = rwnx_dbgfs_##name##_write, \
++ .read = rwnx_dbgfs_##name##_read, \
++ .open = simple_open, \
++ .llseek = generic_file_llseek, \
++};
++
++#define DEBUGFS_READ_WRITE_OPEN_RELEASE_FILE_OPS(name) \
++ DEBUGFS_READ_FUNC(name); \
++ DEBUGFS_WRITE_FUNC(name); \
++ DEBUGFS_OPEN_FUNC(name); \
++ DEBUGFS_RELEASE_FUNC(name); \
++static const struct file_operations rwnx_dbgfs_##name##_ops = { \
++ .write = rwnx_dbgfs_##name##_write, \
++ .read = rwnx_dbgfs_##name##_read, \
++ .open = rwnx_dbgfs_##name##_open, \
++ .release = rwnx_dbgfs_##name##_release, \
++ .llseek = generic_file_llseek, \
++};
++
++
++#ifdef CONFIG_RWNX_DEBUGFS
++
++struct rwnx_debugfs {
++ unsigned long long rateidx;
++ struct dentry *dir;
++ bool trace_prst;
++
++ char helper_cmd[64];
++ //struct work_struct helper_work;
++ bool helper_scheduled;
++ spinlock_t umh_lock;
++ bool unregistering;
++
++#ifndef CONFIG_RWNX_FHOST
++ struct rwnx_fw_log fw_log;
++#endif /* CONFIG_RWNX_FHOST */
++
++#ifdef CONFIG_RWNX_FULLMAC
++ struct work_struct rc_stat_work;
++ uint8_t rc_sta[NX_REMOTE_STA_MAX];
++ uint8_t rc_write;
++ uint8_t rc_read;
++ struct dentry *dir_rc;
++ struct dentry *dir_sta[NX_REMOTE_STA_MAX];
++ int rc_config[NX_REMOTE_STA_MAX];
++ struct list_head rc_config_save;
++#endif
++};
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++// Max duration in msecs to save rate config for a sta after disconnection
++#define RC_CONFIG_DUR 600000
++
++struct rwnx_rc_config_save {
++ struct list_head list;
++ unsigned long timestamp;
++ int rate;
++ u8 mac_addr[ETH_ALEN];
++};
++#endif
++
++int rwnx_dbgfs_register(struct rwnx_hw *rwnx_hw, const char *name);
++void rwnx_dbgfs_unregister(struct rwnx_hw *rwnx_hw);
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_dbgfs_register_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta);
++void rwnx_dbgfs_unregister_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta);
++#endif
++#else
++
++struct rwnx_debugfs {
++};
++
++static inline int rwnx_dbgfs_register(struct rwnx_hw *rwnx_hw, const char *name) { return 0; }
++static inline void rwnx_dbgfs_unregister(struct rwnx_hw *rwnx_hw) {}
++#ifdef CONFIG_RWNX_FULLMAC
++static inline void rwnx_dbgfs_register_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) {}
++static inline void rwnx_dbgfs_unregister_rc_stat(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta) {}
++#endif
++#endif /* CONFIG_RWNX_DEBUGFS */
++
++
++#endif /* _RWNX_DEBUGFS_H_ */
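
Editorial note: the header above is mostly macro plumbing. DEBUGFS_READ_FILE_OPS() and its variants declare the handler prototype and the matching struct file_operations named rwnx_dbgfs_<name>_ops, while DEBUGFS_ADD_FILE() creates the entry and jumps to a local "err" label on failure, which is why it must be invoked from a function that has both rwnx_hw and that label in scope (as rwnx_dbgfs_register() does). A hedged sketch of a hypothetical read-only entry (the name "example" and its body are illustrative, not part of the driver) could look like:

    /* Hypothetical read-only debugfs entry built with the macros above. */
    static ssize_t rwnx_dbgfs_example_read(struct file *file,
                                           char __user *user_buf,
                                           size_t count, loff_t *ppos)
    {
        /* simple_open stores the data pointer passed to debugfs_create_file()
         * (rwnx_hw here) in file->private_data. */
        struct rwnx_hw *rwnx_hw = file->private_data;
        char buf[32];
        int len = scnprintf(buf, sizeof(buf), "pacing shift: %u\n",
                            rwnx_hw->tcp_pacing_shift);

        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
    }

    DEBUGFS_READ_FILE_OPS(example);

    /* ... then, inside rwnx_dbgfs_register() after dir_drv is created ... */
    /*     DEBUGFS_ADD_FILE(example, dir_drv, S_IRUSR);                    */
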
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_defs.h
+@@ -0,0 +1,767 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_defs.h
++ *
++ * @brief Main driver structure declarations for fullmac driver
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_DEFS_H_
++#define _RWNX_DEFS_H_
++
++#include <linux/interrupt.h>
++#include <linux/device.h>
++#include <linux/dmapool.h>
++#include <linux/skbuff.h>
++#include <net/cfg80211.h>
++#include <linux/slab.h>
++
++#include "rwnx_mod_params.h"
++#include "rwnx_debugfs.h"
++#include "rwnx_tx.h"
++#include "rwnx_rx.h"
++#include "rwnx_radar.h"
++#include "rwnx_utils.h"
++#include "rwnx_mu_group.h"
++#include "rwnx_platform.h"
++#include "rwnx_cmds.h"
++#include "rwnx_compat.h"
++
++#ifdef AICWF_SDIO_SUPPORT
++#include "aicwf_sdio.h"
++#include "sdio_host.h"
++#endif
++
++#ifdef AICWF_USB_SUPPORT
++#include "usb_host.h"
++#endif
++
++#ifdef CONFIG_BR_SUPPORT
++#include "aic_br_ext.h"
++#endif /* CONFIG_BR_SUPPORT */
++
++#define WPI_HDR_LEN 18
++#define WPI_PN_LEN 16
++#define WPI_PN_OFST 2
++#define WPI_MIC_LEN 16
++#define WPI_KEY_LEN 32
++#define WPI_SUBKEY_LEN 16 // WPI key is actually two 16-byte keys
++
++#define LEGACY_PS_ID 0
++#define UAPSD_ID 1
++
++#define PS_SP_INTERRUPTED 255
++#define MAC_ADDR_LEN 6
++
++
++#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION
++#define IEEE80211_MAX_AMPDU_BUF IEEE80211_MAX_AMPDU_BUF_HE
++#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB
++#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB
++#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU
++#endif
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++enum nl80211_ac {
++ NL80211_AC_VO,
++ NL80211_AC_VI,
++ NL80211_AC_BE,
++ NL80211_AC_BK,
++ NL80211_NUM_ACS
++};
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++struct ieee80211_vht_operation {
++ u8 vht_op_info_chwidth;
++ u8 vht_op_info_chan_center_freq_seg1_idx;
++ u8 vht_op_info_chan_center_freq_seg2_idx;
++ __le16 vht_basic_mcs_set;
++} __packed;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
++#define NL80211_IFTYPE_P2P_DEVICE 10
++#define IEEE80211_RADIOTAP_AMPDU_STATUS 20
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++#define IEEE80211_RADIOTAP_VHT 21
++#define IEEE80211_RADIOTAP_VHT_KNOWN_GI 0x0004
++#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH 0x0040
++
++#define IEEE80211_RADIOTAP_VHT_FLAG_STBC 0x01
++#define IEEE80211_RADIOTAP_VHT_FLAG_SGI 0x04
++
++#define NL80211_FEATURE_CELL_BASE_REG_HINTS 1 << 3
++#define NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL 1 << 4
++#define NL80211_FEATURE_SAE 1 << 5
++#define NL80211_FEATURE_LOW_PRIORITY_SCAN 1 << 6
++#define NL80211_FEATURE_SCAN_FLUSH 1 << 7
++#define NL80211_FEATURE_AP_SCAN 1 << 8
++#define NL80211_FEATURE_VIF_TXPOWER 1 << 9
++#define NL80211_FEATURE_NEED_OBSS_SCAN 1 << 10
++#define NL80211_FEATURE_P2P_GO_CTWIN 1 << 11
++#define NL80211_FEATURE_P2P_GO_OPPPS 1 << 12
++
++/* 802.11ac VHT Capabilities */
++#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
++#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
++#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
++#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
++#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
++#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
++#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
++#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
++#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
++#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
++#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
++#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
++#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
++#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
++#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
++#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000
++#define IEEE80211_VHT_CAP_SOUNDING_DIMENTION_MAX 0x00030000
++#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
++#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
++#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
++#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
++#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
++#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
++ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
++#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
++#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
++#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
++#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
++
++enum ieee80211_vht_mcs_support {
++ IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
++ IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
++ IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
++ IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
++};
++
++enum nl80211_chan_width {
++ NL80211_CHAN_WIDTH_20_NOHT,
++ NL80211_CHAN_WIDTH_20,
++ NL80211_CHAN_WIDTH_40,
++ NL80211_CHAN_WIDTH_80,
++ NL80211_CHAN_WIDTH_80P80,
++ NL80211_CHAN_WIDTH_160,
++};
++
++struct cfg80211_chan_def {
++ struct ieee80211_channel *chan;
++ enum nl80211_chan_width width;
++ u32 center_freq1;
++ u32 center_freq2;
++};
++
++enum nl80211_mesh_power_mode {
++ NL80211_MESH_POWER_UNKNOWN,
++ NL80211_MESH_POWER_ACTIVE,
++ NL80211_MESH_POWER_LIGHT_SLEEP,
++ NL80211_MESH_POWER_DEEP_SLEEP,
++ __NL80211_MESH_POWER_AFTER_LAST,
++ NL80211_MESH_POWER_MAX = __NL80211_MESH_POWER_AFTER_LAST - 1
++};
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
++#define NL80211_MESHCONF_POWER_MODE 26
++
++/*
++ * TDLS capabililites to be enabled in the 5th byte of the
++ * @WLAN_EID_EXT_CAPABILITY information element
++ */
++#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
++#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
++
++#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
++
++/* TDLS specific payload type in the LLC/SNAP header */
++#define WLAN_TDLS_SNAP_RFTYPE 0x2
++
++#endif
++
++/**
++ * struct rwnx_bcn - Information of the beacon in used (AP mode)
++ *
++ * @head: head portion of beacon (before TIM IE)
++ * @tail: tail portion of beacon (after TIM IE)
++ * @ies: extra IEs (not used ?)
++ * @head_len: length of head data
++ * @tail_len: length of tail data
++ * @ies_len: length of extra IEs data
++ * @tim_len: length of TIM IE
++ * @len: Total beacon len (head + tim + tail + extra)
++ * @dtim: dtim period
++ */
++struct rwnx_bcn {
++ u8 *head;
++ u8 *tail;
++ u8 *ies;
++ size_t head_len;
++ size_t tail_len;
++ size_t ies_len;
++ size_t tim_len;
++ size_t len;
++ u8 dtim;
++};
++
++/**
++ * struct rwnx_key - Key information
++ *
++ * @hw_idx: Index of the key from the hardware's point of view
++ */
++struct rwnx_key {
++ u8 hw_idx;
++};
++
++/**
++ * Structure containing information about a Mesh Path
++ */
++struct rwnx_mesh_path {
++ struct list_head list; /* For rwnx_vif.mesh_paths */
++ u8 path_idx; /* Path Index */
++ struct mac_addr tgt_mac_addr; /* Target MAC Address */
++ struct rwnx_sta *p_nhop_sta; /* Pointer to the Next Hop STA */
++};
++
++struct rwnx_mesh_proxy {
++ struct list_head list; /* For rwnx_vif.mesh_proxy */
++ struct mac_addr ext_sta_addr; /* Address of the External STA */
++ struct mac_addr proxy_addr; /* Proxy MAC Address */
++ bool local; /* Indicate if interface is a proxy for the device */
++};
++
++/**
++ * struct rwnx_csa - Information for CSA (Channel Switch Announcement)
++ *
++ * @vif: Pointer to the vif doing the CSA
++ * @bcn: Beacon to use after CSA
++ * @elem: IPC buffer to send the new beacon to the fw
++ * @chandef: defines the channel to use after the switch
++ * @count: Current csa counter
++ * @status: Status of the CSA at fw level
++ * @ch_idx: Index of the new channel context
++ * @work: work scheduled at the end of CSA
++ */
++struct rwnx_csa {
++ struct rwnx_vif *vif;
++ struct rwnx_bcn bcn;
++ struct rwnx_ipc_elem_var elem;
++ struct cfg80211_chan_def chandef;
++ int count;
++ int status;
++ int ch_idx;
++ struct work_struct work;
++};
++
++struct apm_probe_sta {
++ u8 sta_mac_addr[6];
++ u8 vif_idx;
++ u64 probe_id;
++ struct work_struct apmprobestaWork;
++ struct workqueue_struct *apmprobesta_wq;
++};
++
++/// Possible States of the TDLS link.
++enum tdls_status_tag {
++ /// TDLS link is not active (no TDLS peer connected)
++ TDLS_LINK_IDLE,
++ /// TDLS Setup Request transmitted
++ TDLS_SETUP_REQ_TX,
++ /// TDLS Setup Response transmitted
++ TDLS_SETUP_RSP_TX,
++ /// TDLS link is active (TDLS peer connected)
++ TDLS_LINK_ACTIVE,
++ /// TDLS Max Number of states.
++ TDLS_STATE_MAX
++};
++
++/*
++ * Structure used to save information relative to the TDLS peer.
++ * This is also linked within the rwnx_hw vifs list.
++ *
++ */
++struct rwnx_tdls {
++ bool active; /* Indicate if TDLS link is active */
++ bool initiator; /* Indicate if TDLS peer is the TDLS initiator */
++ bool chsw_en; /* Indicate if channel switch is enabled */
++ u8 last_tid; /* TID of the latest MPDU transmitted over the
++ TDLS direct link to the TDLS STA */
++ u16 last_sn; /* Sequence number of the latest MPDU transmitted
++ over the TDLS direct link to the TDLS STA */
++ bool ps_on; /* Indicate if the power save is enabled on the
++ TDLS STA */
++ bool chsw_allowed; /* Indicate if TDLS channel switch is allowed */
++};
++
++
++/**
++ * enum rwnx_ap_flags - AP flags
++ *
++ * @RWNX_AP_ISOLATE: Isolate clients (i.e. don't bridge packets transmitted by
++ * one client to another)
++ */
++enum rwnx_ap_flags {
++ RWNX_AP_ISOLATE = BIT(0),
++};
++
++/*
++ * Structure used to save information relative to the managed interfaces.
++ * This is also linked within the rwnx_hw vifs list.
++ *
++ */
++struct rwnx_vif {
++ struct list_head list;
++ struct rwnx_hw *rwnx_hw;
++ struct wireless_dev wdev;
++ struct net_device *ndev;
++ struct net_device_stats net_stats;
++ struct rwnx_key key[6];
++ atomic_t drv_conn_state;
++ u8 drv_vif_index; /* Identifier of the VIF in driver */
++ u8 vif_index; /* Identifier of the station in FW */
++ u8 ch_index; /* Channel context identifier */
++ bool up; /* Indicate if associated netdev is up
++ (i.e. Interface is created at fw level) */
++ bool use_4addr; /* Should we use 4-address mode */
++ bool is_resending; /* Indicate if a frame is being resent on this interface */
++ bool user_mpm; /* In case of Mesh Point VIF, indicate if MPM is handled by userspace */
++ bool roc_tdls; /* Indicate if the ROC has been called by a
++ TDLS station */
++ u8 tdls_status; /* Status of the TDLS link */
++ bool tdls_chsw_prohibited; /* Indicate if TDLS Channel Switch is prohibited */
++ bool wep_enabled; /* 1 if WEP is enabled */
++ bool wep_auth_err; /* 1 if auth status code is not supported auth alg when WEP enabled */
++ enum nl80211_auth_type last_auth_type; /* Authentication type (algorithm) sent in the last connection
++ when WEP enabled */
++ union
++ {
++ struct
++ {
++ struct rwnx_sta *ap; /* Pointer to the peer STA entry allocated for
++ the AP */
++ struct rwnx_sta *tdls_sta; /* Pointer to the TDLS station */
++ bool external_auth; /* Indicate if external authentication is in progress */
++ u8 group_cipher_type;
++ u8 paired_cipher_type;
++ //connected network info start
++ char ssid[33]; // SSID max is 32 bytes, plus one spare for '\0'
++ int ssid_len;
++ u8 bssid[ETH_ALEN];
++ //connected network info end
++ } sta;
++ struct
++ {
++ u16 flags; /* see rwnx_ap_flags */
++ struct list_head sta_list; /* List of STA connected to the AP */
++ struct rwnx_bcn bcn; /* beacon */
++ u8 bcmc_index; /* Index of the BCMC sta to use */
++ #if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL)
++ u8 aic_index;
++ #endif
++ struct rwnx_csa *csa;
++
++ struct list_head mpath_list; /* List of Mesh Paths used on this interface */
++ struct list_head proxy_list; /* List of Proxies Information used on this interface */
++ bool create_path; /* Indicate if we are waiting for a MESH_CREATE_PATH_CFM
++ message */
++ int generation; /* Increased each time the list of Mesh Paths is updated */
++ enum nl80211_mesh_power_mode mesh_pm; /* mesh power save mode currently set in firmware */
++ enum nl80211_mesh_power_mode next_mesh_pm; /* mesh power save mode for next peer */
++ } ap;
++ struct
++ {
++ struct rwnx_vif *master; /* pointer on master interface */
++ struct rwnx_sta *sta_4a;
++ } ap_vlan;
++ };
++
++ u8_l key_has_add;
++ u8_l is_p2p_vif;
++ struct apm_probe_sta sta_probe;
++
++ #ifdef CONFIG_BR_SUPPORT
++ spinlock_t br_ext_lock;
++ /* unsigned int macclone_completed; */
++ struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE];
++ int pppoe_connection_in_progress;
++ unsigned char pppoe_addr[MACADDRLEN];
++ unsigned char scdb_mac[MACADDRLEN];
++ unsigned char scdb_ip[4];
++ struct nat25_network_db_entry *scdb_entry;
++ unsigned char br_mac[MACADDRLEN];
++ unsigned char br_ip[4];
++
++ struct br_ext_info ethBrExtInfo;
++ #endif /* CONFIG_BR_SUPPORT */
++
++};
++
++#define RWNX_VIF_TYPE(rwnx_vif) (rwnx_vif->wdev.iftype)
++
++/**
++ * Structure used to store information relative to PS mode.
++ *
++ * @active: True when the sta is in PS mode.
++ * If false, other values should be ignored
++ * @pkt_ready: Number of packets buffered for the sta in drv's txq
++ * (1 counter for Legacy PS and 1 for U-APSD)
++ * @sp_cnt: Number of packets that remain to be pushed in the service period.
++ * 0 means that no service period is in progress
++ * (1 counter for Legacy PS and 1 for U-APSD)
++ */
++struct rwnx_sta_ps {
++ bool active;
++ u16 pkt_ready[2];
++ u16 sp_cnt[2];
++};
++
++/**
++ * struct rwnx_rx_rate_stats - Store statistics for RX rates
++ *
++ * @table: Table indicating how many frames have been received with each
++ * rate index. Rate index is the same as the one used by the RC algo for TX
++ * @size: Size of the table array
++ * @cpt: number of frames received
++ */
++struct rwnx_rx_rate_stats {
++ int *table;
++ int size;
++ int cpt;
++ int rate_cnt;
++};
++
++/**
++ * struct rwnx_sta_stats - Structure Used to store statistics specific to a STA
++ *
++ * @last_rx: Hardware vector of the last received frame
++ * @rx_rate: Statistics of the received rates
++ */
++struct rwnx_sta_stats {
++//#ifdef CONFIG_RWNX_DEBUGFS
++ struct hw_vect last_rx;
++ struct rwnx_rx_rate_stats rx_rate;
++//#endif
++};
++
++#if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL)
++struct aic_sta {
++ u8 sta_idx; /* Identifier of the station */
++ bool he; /* Flag indicating if the station supports HE */
++ bool vht; /* Flag indicating if the station supports VHT */
++};
++#endif
++
++/*
++ * Structure used to save information relative to the managed stations.
++ */
++struct rwnx_sta {
++ struct list_head list;
++ u16 aid; /* association ID */
++ u8 sta_idx; /* Identifier of the station */
++ u8 vif_idx; /* Identifier of the VIF (fw id) the station
++ belongs to */
++ u8 vlan_idx; /* Identifier of the VLAN VIF (fw id) the station
++ belongs to (= vif_idx if no vlan in used) */
++ enum nl80211_band band; /* Band */
++ enum nl80211_chan_width width; /* Channel width */
++ u16 center_freq; /* Center frequency */
++ u32 center_freq1; /* Center frequency 1 */
++ u32 center_freq2; /* Center frequency 2 */
++ u8 ch_idx; /* Identifier of the channel
++ context the station belongs to */
++ bool qos; /* Flag indicating if the station
++ supports QoS */
++ u8 acm; /* Bitfield indicating which queues
++ have AC mandatory */
++ u16 uapsd_tids; /* Bitfield indicating which tids are subject to
++ UAPSD */
++ u8 mac_addr[ETH_ALEN]; /* MAC address of the station */
++ struct rwnx_key key;
++ bool valid; /* Flag indicating if the entry is valid */
++ struct rwnx_sta_ps ps; /* Information when STA is in PS (AP only) */
++#ifdef CONFIG_RWNX_BFMER
++ struct rwnx_bfmer_report *bfm_report; /* Beamforming report to be used for
++ VHT TX Beamforming */
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ struct rwnx_sta_group_info group_info; /* MU grouping information for the STA */
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++#endif /* CONFIG_RWNX_BFMER */
++
++ bool ht; /* Flag indicating if the station
++ supports HT */
++ bool vht; /* Flag indicating if the station
++ supports VHT */
++ u32 ac_param[AC_MAX]; /* EDCA parameters */
++ struct rwnx_tdls tdls; /* TDLS station information */
++ struct rwnx_sta_stats stats;
++ enum nl80211_mesh_power_mode mesh_pm; /* link-specific mesh power save mode */
++};
++
++static inline const u8 *rwnx_sta_addr(struct rwnx_sta *rwnx_sta) {
++ return rwnx_sta->mac_addr;
++}
++
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++struct rwnx_amsdu_stats {
++ int done;
++ int failed;
++};
++#endif
++
++struct rwnx_stats {
++ int cfm_balance[NX_TXQ_CNT];
++ unsigned long last_rx, last_tx; /* jiffies */
++ int ampdus_tx[IEEE80211_MAX_AMPDU_BUF];
++ int ampdus_rx[IEEE80211_MAX_AMPDU_BUF];
++ int ampdus_rx_map[4];
++ int ampdus_rx_miss;
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ struct rwnx_amsdu_stats amsdus[NX_TX_PAYLOAD_MAX];
++#endif
++ int amsdus_rx[64];
++};
++
++struct rwnx_sec_phy_chan {
++ u16 prim20_freq;
++ u16 center_freq1;
++ u16 center_freq2;
++ enum nl80211_band band;
++ u8 type;
++};
++
++/* Structure that will contains all RoC information received from cfg80211 */
++struct rwnx_roc_elem {
++ struct wireless_dev *wdev;
++ struct ieee80211_channel *chan;
++ unsigned int duration;
++ /* Used to avoid calling the CFG80211 callback upon RoC expiration */
++ bool mgmt_roc;
++ /* Indicate if we have switched to the RoC channel */
++ bool on_chan;
++};
++
++/* Structure containing channel survey information received from MAC */
++struct rwnx_survey_info {
++ // Filled
++ u32 filled;
++ // Amount of time in ms the radio spent on the channel
++ u32 chan_time_ms;
++ // Amount of time the primary channel was sensed busy
++ u32 chan_time_busy_ms;
++ // Noise in dbm
++ s8 noise_dbm;
++};
++
++#define RWNX_CH_NOT_SET 0xFF
++#define RWNX_INVALID_VIF 0xFF
++#define RWNX_INVALID_STA 0xFF
++
++/* Structure containing channel context information */
++struct rwnx_chanctx {
++ struct cfg80211_chan_def chan_def; /* channel description */
++ u8 count; /* number of vif using this ctxt */
++};
++
++/**
++ * rwnx_phy_info - Phy information
++ *
++ * @cnt: Number of phy interfaces
++ * @cfg: Configuration sent to firmware
++ * @sec_chan: Channel configuration of the second phy interface (if cnt > 1)
++ * @limit_bw: Set to true to limit BW on the requested channel. Only set to use
++ * VHT with old radios that don't support 80MHz (deprecated)
++ */
++struct rwnx_phy_info {
++ u8 cnt;
++ struct phy_cfg_tag cfg;
++ struct rwnx_sec_phy_chan sec_chan;
++ bool limit_bw;
++};
++
++/* rwnx driver status */
++
++enum rwnx_drv_connect_status {
++ RWNX_DRV_STATUS_DISCONNECTED = 0,
++ RWNX_DRV_STATUS_DISCONNECTING,
++ RWNX_DRV_STATUS_CONNECTING,
++ RWNX_DRV_STATUS_CONNECTED,
++};
++
++
++struct sta_tx_flowctrl {
++ atomic_t tx_pending_cnt;
++ u8 flowctrl;
++};
++
++struct rwnx_hw {
++ struct rwnx_mod_params *mod_params;
++ struct device *dev;
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev;
++#endif
++ struct wiphy *wiphy;
++ struct list_head vifs;
++ struct rwnx_vif *vif_table[NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX]; /* indexed with fw id */
++ struct rwnx_sta sta_table[NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX];
++ #ifdef CONFIG_HE_FOR_OLD_KERNEL
++ struct aic_sta aic_table[NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX];
++ #endif
++ struct rwnx_survey_info survey[SCAN_CHANNEL_MAX];
++ struct cfg80211_scan_request *scan_request;
++#ifdef CONFIG_SCHED_SCAN
++ struct cfg80211_sched_scan_request *sched_scan_req;
++#endif
++ struct rwnx_chanctx chanctx_table[NX_CHAN_CTXT_CNT];
++ u8 cur_chanctx;
++
++ u8 monitor_vif; /* FW id of the monitor interface, RWNX_INVALID_VIF if no monitor vif at fw level */
++
++ /* RoC Management */
++ struct rwnx_roc_elem *roc_elem; /* Information provided by cfg80211 in its remain on channel request */
++ u32 roc_cookie_cnt; /* Counter used to identify RoC request sent by cfg80211 */
++
++ struct rwnx_cmd_mgr *cmd_mgr;
++
++ unsigned long drv_flags;
++ struct rwnx_plat *plat;
++
++ spinlock_t tx_lock;
++ spinlock_t cb_lock;
++ struct mutex mutex; /* per-device perimeter lock */
++
++ struct tasklet_struct task;
++ struct mm_version_cfm version_cfm; /* Lower layers versions - obtained via MM_VERSION_REQ */
++
++ u32 tcp_pacing_shift;
++
++ /* IPC */
++ struct ipc_host_env_tag *ipc_env;
++#ifdef AICWF_SDIO_SUPPORT
++ struct sdio_host_env_tag sdio_env;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ struct usb_host_env_tag usb_env;
++#endif
++
++ struct rwnx_ipc_elem_pool e2amsgs_pool;
++ struct rwnx_ipc_elem_pool dbgmsgs_pool;
++ struct rwnx_ipc_elem_pool e2aradars_pool;
++ struct rwnx_ipc_elem_var pattern_elem;
++ struct rwnx_ipc_dbgdump_elem dbgdump_elem;
++ struct rwnx_ipc_elem_pool e2arxdesc_pool;
++ struct rwnx_ipc_skb_elem *e2aunsuprxvec_elems;
++ //struct rwnx_ipc_rxbuf_elems rxbuf_elems;
++ struct rwnx_ipc_elem_var scan_ie;
++
++ struct kmem_cache *sw_txhdr_cache;
++
++ struct rwnx_debugfs debugfs;
++ struct rwnx_stats stats;
++
++#ifdef CONFIG_PREALLOC_TXQ
++ struct rwnx_txq *txq;
++#else
++ struct rwnx_txq txq[NX_NB_TXQ];
++#endif
++ struct rwnx_hwq hwq[NX_TXQ_CNT];
++
++ u8 avail_idx_map;
++ u8 vif_started;
++ bool adding_sta;
++ struct rwnx_phy_info phy;
++
++ struct rwnx_radar radar;
++
++ /* extended capabilities supported */
++ u8 ext_capa[8];
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ struct rwnx_mu_info mu;
++#endif
++ u8 is_p2p_alive;
++ u8 is_p2p_connected;
++ struct timer_list p2p_alive_timer;
++ struct rwnx_vif *p2p_dev_vif;
++ atomic_t p2p_alive_timer_count;
++ bool band_5g_support;
++ bool fwlog_en;
++
++ struct work_struct apmStalossWork;
++ struct workqueue_struct *apmStaloss_wq;
++ u8 apm_vif_idx;
++ u8 sta_mac_addr[6];
++#ifdef CONFIG_SCHED_SCAN
++ bool is_sched_scan;
++#endif//CONFIG_SCHED_SCAN
++
++ struct sta_tx_flowctrl sta_flowctrl[NX_REMOTE_STA_MAX];
++#if 0
++ bool he_flag;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ struct mac_chan_op ap_chan;
++ struct ieee80211_channel set_chan;
++#endif
++#ifdef CONFIG_VHT_FOR_OLD_KERNEL
++ struct ieee80211_sta_vht_cap vht_cap_2G;
++ struct ieee80211_sta_vht_cap vht_cap_5G;
++#endif
++
++#ifdef CONFIG_USE_WIRELESS_EXT
++ bool wext_scan;
++ struct completion wext_scan_com;
++ struct list_head wext_scanre_list;
++ char wext_essid[32];
++ int support_freqs[SCAN_CHANNEL_MAX];
++ int support_freqs_number;
++#endif
++};
++
++u8 *rwnx_build_bcn(struct rwnx_bcn *bcn, struct cfg80211_beacon_data *new);
++
++void rwnx_chanctx_link(struct rwnx_vif *vif, u8 idx,
++ struct cfg80211_chan_def *chandef);
++void rwnx_chanctx_unlink(struct rwnx_vif *vif);
++int rwnx_chanctx_valid(struct rwnx_hw *rwnx_hw, u8 idx);
++
++extern u8 chip_id;
++static inline bool is_multicast_sta(int sta_idx)
++{
++
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)) {
++ return (sta_idx >= NX_REMOTE_STA_MAX_FOR_OLD_IC);
++ } else {
++ return (sta_idx >= NX_REMOTE_STA_MAX);
++ }
++
++}
++struct rwnx_sta *rwnx_get_sta(struct rwnx_hw *rwnx_hw, const u8 *mac_addr);
++
++static inline uint8_t master_vif_idx(struct rwnx_vif *vif)
++{
++ if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_AP_VLAN)) {
++ return vif->ap_vlan.master->vif_index;
++ } else {
++ return vif->vif_index;
++ }
++}
++
++void rwnx_external_auth_enable(struct rwnx_vif *vif);
++void rwnx_external_auth_disable(struct rwnx_vif *vif);
++
++#endif /* _RWNX_DEFS_H_ */
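
Editorial note: rwnx_get_sta() is only declared in the header above and defined elsewhere in the driver. As a hedged illustration of how the sta_table[] declarations (the valid flag and mac_addr field) are meant to be used, a lookup consistent with them, but not necessarily the shipped implementation, could look like this:

    /* Sketch only: station lookup by MAC address over rwnx_hw->sta_table. */
    #include <linux/etherdevice.h>

    static struct rwnx_sta *demo_get_sta(struct rwnx_hw *rwnx_hw,
                                         const u8 *mac_addr)
    {
        int i;

        /* Only the first NX_REMOTE_STA_MAX entries describe real peers; the
         * remaining entries are reserved for broadcast/multicast and VIF use. */
        for (i = 0; i < NX_REMOTE_STA_MAX; i++) {
            struct rwnx_sta *sta = &rwnx_hw->sta_table[i];

            if (sta->valid && ether_addr_equal(sta->mac_addr, mac_addr))
                return sta;
        }
        return NULL;
    }
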
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_dini.c
+@@ -0,0 +1,294 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_dini.c - Add support for dini platform
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#include "rwnx_dini.h"
++#include "rwnx_defs.h"
++#include "rwnx_irqs.h"
++#include "reg_access.h"
++
++/* Config FPGA is accessed via bar0 */
++#define CFPGA_DMA0_CTRL_REG 0x02C
++#define CFPGA_DMA1_CTRL_REG 0x04C
++#define CFPGA_DMA2_CTRL_REG 0x06C
++#define CFPGA_UINTR_SRC_REG 0x0E8
++#define CFPGA_UINTR_MASK_REG 0x0EC
++#define CFPGA_BAR4_HIADDR_REG 0x100
++#define CFPGA_BAR4_LOADDR_REG 0x104
++#define CFPGA_BAR4_LOADDR_MASK_REG 0x110
++#define CFPGA_BAR_TOUT 0x120
++
++#define CFPGA_DMA_CTRL_ENABLE 0x00001400
++#define CFPGA_DMA_CTRL_DISABLE 0x00001000
++#define CFPGA_DMA_CTRL_CLEAR 0x00001800
++#define CFPGA_DMA_CTRL_REREAD_TIME_MASK (BIT(10) - 1)
++
++#define CFPGA_BAR4_LOADDR_MASK_MAX 0xFF000000
++
++#define CFPGA_PCIEX_IT 0x00000001
++#define CFPGA_ALL_ITS 0x0000000F
++
++/* Programmable BAR4 Window start address */
++#define CPU_RAM_WINDOW_HIGH 0x00000000
++#define CPU_RAM_WINDOW_LOW 0x00000000
++#define AHB_BRIDGE_WINDOW_HIGH 0x00000000
++#define AHB_BRIDGE_WINDOW_LOW 0x60000000
++
++struct rwnx_dini
++{
++ u8 *pci_bar0_vaddr;
++ u8 *pci_bar4_vaddr;
++};
++
++static const u32 mv_cfg_fpga_dma_ctrl_regs[] = {
++ CFPGA_DMA0_CTRL_REG,
++ CFPGA_DMA1_CTRL_REG,
++ CFPGA_DMA2_CTRL_REG
++};
++
++/* This also clears running transactions */
++static void dini_dma_on(struct rwnx_dini *rwnx_dini)
++{
++ int i;
++ u32 reread_time;
++ volatile void *reg;
++
++ for (i = 0; i < ARRAY_SIZE(mv_cfg_fpga_dma_ctrl_regs); i++) {
++ reg = rwnx_dini->pci_bar0_vaddr + mv_cfg_fpga_dma_ctrl_regs[i];
++ reread_time = readl(reg) & CFPGA_DMA_CTRL_REREAD_TIME_MASK;
++
++ writel(CFPGA_DMA_CTRL_CLEAR | reread_time, reg);
++ writel(CFPGA_DMA_CTRL_ENABLE | reread_time, reg);
++ }
++}
++
++/* This also clears running transactions */
++static void dini_dma_off(struct rwnx_dini *rwnx_dini)
++{
++ int i;
++ u32 reread_time;
++ volatile void *reg;
++
++ for (i = 0; i < ARRAY_SIZE(mv_cfg_fpga_dma_ctrl_regs); i++) {
++ reg = rwnx_dini->pci_bar0_vaddr + mv_cfg_fpga_dma_ctrl_regs[i];
++ reread_time = readl(reg) & CFPGA_DMA_CTRL_REREAD_TIME_MASK;
++
++ writel(CFPGA_DMA_CTRL_DISABLE | reread_time, reg);
++ writel(CFPGA_DMA_CTRL_CLEAR | reread_time, reg);
++ }
++}
++
++
++/* Configure address range for BAR4.
++ * By default the BAR4_LOADDR_MASK value is 0xFF000000, so there is no need to
++ * change it: the addresses we need to access are already covered by this mask.
++ */
++static void dini_set_bar4_win(u32 low, u32 high, struct rwnx_dini *rwnx_dini)
++{
++ writel(low, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR4_LOADDR_REG);
++ writel(high, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR4_HIADDR_REG);
++ writel(CFPGA_BAR4_LOADDR_MASK_MAX,
++ rwnx_dini->pci_bar0_vaddr + CFPGA_BAR4_LOADDR_MASK_REG);
++}
++
++
++/**
++ * Enable User Interrupts of CFPGA that trigger PCIe IRQs on PCIE_10
++ * and request the corresponding IRQ line
++ */
++int rwnx_cfpga_irq_enable(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++ unsigned int cfpga_uintr_mask;
++ volatile void *reg;
++ int ret;
++
++ /* sched_setscheduler on ONESHOT threaded irq handler for BCNs ? */
++ if ((ret = request_irq(rwnx_hw->plat->pci_dev->irq, rwnx_irq_hdlr, 0,
++ "rwnx", rwnx_hw)))
++ return ret;
++
++ reg = rwnx_dini->pci_bar0_vaddr + CFPGA_UINTR_MASK_REG;
++ cfpga_uintr_mask = readl(reg);
++ writel(cfpga_uintr_mask | CFPGA_PCIEX_IT, reg);
++
++ return ret;
++}
++
++/**
++ * Disable User Interrupts of CFPGA that trigger PCIe IRQs on PCIE_10
++ * and free the corresponding IRQ line
++ */
++int rwnx_cfpga_irq_disable(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++ unsigned int cfpga_uintr_mask;
++ volatile void *reg;
++
++ reg = rwnx_dini->pci_bar0_vaddr + CFPGA_UINTR_MASK_REG;
++ cfpga_uintr_mask = readl(reg);
++ writel(cfpga_uintr_mask & ~CFPGA_PCIEX_IT, reg);
++
++ free_irq(rwnx_hw->plat->pci_dev->irq, rwnx_hw);
++
++ return 0;
++}
++
++static int rwnx_dini_platform_enable(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++
++#ifdef CONFIG_RWNX_SDM
++ writel(0x0000FFFF, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR_TOUT);
++#endif
++
++ dini_dma_on(rwnx_dini);
++ return rwnx_cfpga_irq_enable(rwnx_hw);
++}
++
++static int rwnx_dini_platform_disable(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++ int ret;
++
++ ret = rwnx_cfpga_irq_disable(rwnx_hw);
++ dini_dma_off(rwnx_dini);
++ return ret;
++}
++
++static void rwnx_dini_platform_deinit(struct rwnx_plat *rwnx_plat)
++{
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++
++ pci_disable_device(rwnx_plat->pci_dev);
++ iounmap(rwnx_dini->pci_bar0_vaddr);
++ iounmap(rwnx_dini->pci_bar4_vaddr);
++ pci_release_regions(rwnx_plat->pci_dev);
++
++ kfree(rwnx_plat);
++}
++
++static u8* rwnx_dini_get_address(struct rwnx_plat *rwnx_plat, int addr_name,
++ unsigned int offset)
++{
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++
++ if (WARN(addr_name >= RWNX_ADDR_MAX, "Invalid address %d", addr_name))
++ return NULL;
++
++ if (addr_name == RWNX_ADDR_CPU)
++ dini_set_bar4_win(CPU_RAM_WINDOW_LOW, CPU_RAM_WINDOW_HIGH, rwnx_dini);
++ else
++ dini_set_bar4_win(AHB_BRIDGE_WINDOW_LOW, AHB_BRIDGE_WINDOW_HIGH, rwnx_dini);
++
++ return rwnx_dini->pci_bar4_vaddr + offset;
++}
++
++static void rwnx_dini_ack_irq(struct rwnx_plat *rwnx_plat)
++{
++ struct rwnx_dini *rwnx_dini = (struct rwnx_dini *)rwnx_plat->priv;
++
++ writel(CFPGA_ALL_ITS, rwnx_dini->pci_bar0_vaddr + CFPGA_UINTR_SRC_REG);
++}
++
++static const u32 rwnx_dini_config_reg[] = {
++ NXMAC_DEBUG_PORT_SEL_ADDR,
++ SYSCTRL_DIAG_CONF_ADDR,
++ RF_V6_DIAGPORT_CONF1_ADDR,
++ RF_v6_PHYDIAG_CONF1_ADDR,
++};
++
++static int rwnx_dini_get_config_reg(struct rwnx_plat *rwnx_plat, const u32 **list)
++{
++ if (!list)
++ return 0;
++
++ *list = rwnx_dini_config_reg;
++ return ARRAY_SIZE(rwnx_dini_config_reg);
++}
++
++/**
++ * rwnx_dini_platform_init - Initialize the DINI platform
++ *
++ * @pci_dev PCI device
++ * @rwnx_plat Pointer to a struct rwnx_plat * to be populated
++ *
++ * @return 0 on success, < 0 otherwise
++ *
++ * Allocate and initialize a rwnx_plat structure for the dini platform.
++ */
++int rwnx_dini_platform_init(struct pci_dev *pci_dev, struct rwnx_plat **rwnx_plat)
++{
++ struct rwnx_dini *rwnx_dini;
++ u16 pci_cmd;
++ int ret = 0;
++
++ *rwnx_plat = kzalloc(sizeof(struct rwnx_plat) + sizeof(struct rwnx_dini),
++ GFP_KERNEL);
++ if (!*rwnx_plat)
++ return -ENOMEM;
++
++ rwnx_dini = (struct rwnx_dini *)(*rwnx_plat)->priv;
++
++ /* Hotplug fixups */
++ pci_read_config_word(pci_dev, PCI_COMMAND, &pci_cmd);
++ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
++ pci_write_config_word(pci_dev, PCI_COMMAND, pci_cmd);
++ pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2);
++
++ if ((ret = pci_enable_device(pci_dev))) {
++ dev_err(&(pci_dev->dev), "pci_enable_device failed\n");
++ goto out_enable;
++ }
++
++ pci_set_master(pci_dev);
++
++ if ((ret = pci_request_regions(pci_dev, KBUILD_MODNAME))) {
++ dev_err(&(pci_dev->dev), "pci_request_regions failed\n");
++ goto out_request;
++ }
++
++ if (!(rwnx_dini->pci_bar0_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 0))) {
++ dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 0);
++ ret = -ENOMEM;
++ goto out_bar0;
++ }
++ if (!(rwnx_dini->pci_bar4_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 4))) {
++ dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 4);
++ ret = -ENOMEM;
++ goto out_bar4;
++ }
++
++ (*rwnx_plat)->enable = rwnx_dini_platform_enable;
++ (*rwnx_plat)->disable = rwnx_dini_platform_disable;
++ (*rwnx_plat)->deinit = rwnx_dini_platform_deinit;
++ (*rwnx_plat)->get_address = rwnx_dini_get_address;
++ (*rwnx_plat)->ack_irq = rwnx_dini_ack_irq;
++ (*rwnx_plat)->get_config_reg = rwnx_dini_get_config_reg;
++
++#ifdef CONFIG_RWNX_SDM
++ writel(0x0000FFFF, rwnx_dini->pci_bar0_vaddr + CFPGA_BAR_TOUT);
++#endif
++
++ return 0;
++
++ out_bar4:
++ iounmap(rwnx_dini->pci_bar0_vaddr);
++ out_bar0:
++ pci_release_regions(pci_dev);
++ out_request:
++ pci_disable_device(pci_dev);
++ out_enable:
++ kfree(*rwnx_plat);
++ return ret;
++}
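
Editorial note: rwnx_dini_platform_init() above only wires up callbacks on struct rwnx_plat; the interesting one is get_address, which re-programs the BAR4 window (CPU RAM vs. AHB bridge at 0x60000000) before handing back a BAR4-relative virtual address. As a hedged sketch of how a caller typically consumes it, the helper name and the RWNX_ADDR_SYSTEM selector below are hypothetical placeholders (only RWNX_ADDR_CPU and RWNX_ADDR_MAX appear in this file):

    /* Sketch only: read a register through the AHB-bridge BAR4 window. */
    static u32 demo_read_ahb_reg(struct rwnx_plat *rwnx_plat, unsigned int offset)
    {
        /* get_address() selects the BAR4 window for the requested address
         * space and returns BAR4 base + offset. */
        u8 *addr = rwnx_plat->get_address(rwnx_plat, RWNX_ADDR_SYSTEM, offset);

        if (!addr)
            return 0;

        return readl(addr);
    }
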
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_dini.h
+@@ -0,0 +1,20 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_dini.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_DINI_H_
++#define _RWNX_DINI_H_
++
++#include <linux/pci.h>
++#include "rwnx_platform.h"
++
++int rwnx_dini_platform_init(struct pci_dev *pci_dev,
++ struct rwnx_plat **rwnx_plat);
++
++#endif /* _RWNX_DINI_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_events.h
+@@ -0,0 +1,1243 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_events.h
++ *
++ * @brief Trace events definition
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM rwnx
++
++#if !defined(_RWNX_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _RWNX_EVENTS_H
++
++#include <linux/tracepoint.h>
++#ifndef CONFIG_RWNX_FHOST
++#include "rwnx_tx.h"
++#endif
++#include "rwnx_compat.h"
++
++/*****************************************************************************
++ * TRACE function for MGMT TX (FULLMAC)
++ ****************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++#include "linux/ieee80211.h"
++#if defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS)
++#include <linux/trace_seq.h>
++
++/* P2P Public Action Frames Definitions (see WiFi P2P Technical Specification, section 4.2.8) */
++/* IEEE 802.11 Public Action Usage Category - Define P2P public action frames */
++#define MGMT_ACTION_PUBLIC_CAT (0x04)
++/* Offset of OUI Subtype field in P2P Action Frame format */
++#define MGMT_ACTION_OUI_SUBTYPE_OFFSET (6)
++/* P2P Public Action Frame Types */
++enum p2p_action_type {
++ P2P_ACTION_GO_NEG_REQ = 0, /* GO Negotiation Request */
++ P2P_ACTION_GO_NEG_RSP, /* GO Negotiation Response */
++ P2P_ACTION_GO_NEG_CFM, /* GO Negotiation Confirmation */
++ P2P_ACTION_INVIT_REQ, /* P2P Invitation Request */
++ P2P_ACTION_INVIT_RSP, /* P2P Invitation Response */
++ P2P_ACTION_DEV_DISC_REQ, /* Device Discoverability Request */
++ P2P_ACTION_DEV_DISC_RSP, /* Device Discoverability Response */
++ P2P_ACTION_PROV_DISC_REQ, /* Provision Discovery Request */
++ P2P_ACTION_PROV_DISC_RSP, /* Provision Discovery Response */
++};
++
++const char *ftrace_print_mgmt_info(struct trace_seq *p, u16 frame_control, u8 cat, u8 type, u8 p2p) {
++ const char *ret = trace_seq_buffer_ptr(p);
++
++ switch (frame_control & IEEE80211_FCTL_STYPE) {
++ case (IEEE80211_STYPE_ASSOC_REQ): trace_seq_printf(p, "Association Request"); break;
++ case (IEEE80211_STYPE_ASSOC_RESP): trace_seq_printf(p, "Association Response"); break;
++ case (IEEE80211_STYPE_REASSOC_REQ): trace_seq_printf(p, "Reassociation Request"); break;
++ case (IEEE80211_STYPE_REASSOC_RESP): trace_seq_printf(p, "Reassociation Response"); break;
++ case (IEEE80211_STYPE_PROBE_REQ): trace_seq_printf(p, "Probe Request"); break;
++ case (IEEE80211_STYPE_PROBE_RESP): trace_seq_printf(p, "Probe Response"); break;
++ case (IEEE80211_STYPE_BEACON): trace_seq_printf(p, "Beacon"); break;
++ case (IEEE80211_STYPE_ATIM): trace_seq_printf(p, "ATIM"); break;
++ case (IEEE80211_STYPE_DISASSOC): trace_seq_printf(p, "Disassociation"); break;
++ case (IEEE80211_STYPE_AUTH): trace_seq_printf(p, "Authentication"); break;
++ case (IEEE80211_STYPE_DEAUTH): trace_seq_printf(p, "Deauthentication"); break;
++ case (IEEE80211_STYPE_ACTION):
++ trace_seq_printf(p, "Action");
++ if (cat == MGMT_ACTION_PUBLIC_CAT && type == 0x9)
++ switch (p2p) {
++ case (P2P_ACTION_GO_NEG_REQ): trace_seq_printf(p, ": GO Negotiation Request"); break;
++ case (P2P_ACTION_GO_NEG_RSP): trace_seq_printf(p, ": GO Negotiation Response"); break;
++ case (P2P_ACTION_GO_NEG_CFM): trace_seq_printf(p, ": GO Negotiation Confirmation"); break;
++ case (P2P_ACTION_INVIT_REQ): trace_seq_printf(p, ": P2P Invitation Request"); break;
++ case (P2P_ACTION_INVIT_RSP): trace_seq_printf(p, ": P2P Invitation Response"); break;
++ case (P2P_ACTION_DEV_DISC_REQ): trace_seq_printf(p, ": Device Discoverability Request"); break;
++ case (P2P_ACTION_DEV_DISC_RSP): trace_seq_printf(p, ": Device Discoverability Response"); break;
++ case (P2P_ACTION_PROV_DISC_REQ): trace_seq_printf(p, ": Provision Discovery Request"); break;
++ case (P2P_ACTION_PROV_DISC_RSP): trace_seq_printf(p, ": Provision Discovery Response"); break;
++ default: trace_seq_printf(p, "Unknown p2p %d", p2p); break;
++ }
++ else {
++ switch (cat) {
++ case 0: trace_seq_printf(p, ":Spectrum %d", type); break;
++ case 1: trace_seq_printf(p, ":QOS %d", type); break;
++ case 2: trace_seq_printf(p, ":DLS %d", type); break;
++ case 3: trace_seq_printf(p, ":BA %d", type); break;
++ case 4: trace_seq_printf(p, ":Public %d", type); break;
++ case 5: trace_seq_printf(p, ":Radio Measure %d", type); break;
++ case 6: trace_seq_printf(p, ":Fast BSS %d", type); break;
++ case 7: trace_seq_printf(p, ":HT Action %d", type); break;
++ case 8: trace_seq_printf(p, ":SA Query %d", type); break;
++ case 9: trace_seq_printf(p, ":Protected Public %d", type); break;
++ case 10: trace_seq_printf(p, ":WNM %d", type); break;
++ case 11: trace_seq_printf(p, ":Unprotected WNM %d", type); break;
++ case 12: trace_seq_printf(p, ":TDLS %d", type); break;
++ case 13: trace_seq_printf(p, ":Mesh %d", type); break;
++ case 14: trace_seq_printf(p, ":MultiHop %d", type); break;
++ case 15: trace_seq_printf(p, ":Self Protected %d", type); break;
++ case 126: trace_seq_printf(p, ":Vendor protected"); break;
++ case 127: trace_seq_printf(p, ":Vendor"); break;
++ default: trace_seq_printf(p, ":Unknown category %d", cat); break;
++ }
++ }
++ break;
++ default: trace_seq_printf(p, "Unknown subtype %d", frame_control & IEEE80211_FCTL_STYPE); break;
++ }
++
++ trace_seq_putc(p, 0);
++
++ return ret;
++}
++#endif /* defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS) */
++
++#undef __print_mgmt_info
++#define __print_mgmt_info(frame_control, cat, type, p2p) ftrace_print_mgmt_info(p, frame_control, cat, type, p2p)
++
++TRACE_EVENT(
++ roc,
++ TP_PROTO(u8 vif_idx, u16 freq, unsigned int duration),
++ TP_ARGS(vif_idx, freq, duration),
++ TP_STRUCT__entry(
++ __field(u8, vif_idx)
++ __field(u16, freq)
++ __field(unsigned int, duration)
++ ),
++ TP_fast_assign(
++ __entry->vif_idx = vif_idx;
++ __entry->freq = freq;
++ __entry->duration = duration;
++ ),
++ TP_printk("f=%d vif=%d dur=%d",
++ __entry->freq, __entry->vif_idx, __entry->duration)
++);
++
++TRACE_EVENT(
++ cancel_roc,
++ TP_PROTO(u8 vif_idx),
++ TP_ARGS(vif_idx),
++ TP_STRUCT__entry(
++ __field(u8, vif_idx)
++ ),
++ TP_fast_assign(
++ __entry->vif_idx = vif_idx;
++ ),
++ TP_printk("vif=%d", __entry->vif_idx)
++);
++
++TRACE_EVENT(
++ roc_exp,
++ TP_PROTO(u8 vif_idx),
++ TP_ARGS(vif_idx),
++ TP_STRUCT__entry(
++ __field(u8, vif_idx)
++ ),
++ TP_fast_assign(
++ __entry->vif_idx = vif_idx;
++ ),
++ TP_printk("vif=%d", __entry->vif_idx)
++);
++
++TRACE_EVENT(
++ switch_roc,
++ TP_PROTO(u8 vif_idx),
++ TP_ARGS(vif_idx),
++ TP_STRUCT__entry(
++ __field(u8, vif_idx)
++ ),
++ TP_fast_assign(
++ __entry->vif_idx = vif_idx;
++ ),
++ TP_printk("vif=%d", __entry->vif_idx)
++);
++
++DECLARE_EVENT_CLASS(
++ mgmt_template,
++ TP_PROTO(u16 freq, u8 vif_idx, u8 sta_idx, struct ieee80211_mgmt *mgmt),
++ TP_ARGS(freq, vif_idx, sta_idx, mgmt),
++ TP_STRUCT__entry(
++ __field(u16, freq)
++ __field(u8, vif_idx)
++ __field(u8, sta_idx)
++ __field(u16, frame_control)
++ __field(u8, action_cat)
++ __field(u8, action_type)
++ __field(u8, action_p2p)
++ ),
++ TP_fast_assign(
++ __entry->freq = freq;
++ __entry->vif_idx = vif_idx;
++ __entry->sta_idx = sta_idx;
++ __entry->frame_control = mgmt->frame_control;
++ __entry->action_cat = mgmt->u.action.category;
++ __entry->action_type = mgmt->u.action.u.wme_action.action_code;
++ __entry->action_p2p = *((u8 *)&mgmt->u.action.category
++ + MGMT_ACTION_OUI_SUBTYPE_OFFSET);
++ ),
++ TP_printk("f=%d vif=%d sta=%d -> %s",
++ __entry->freq, __entry->vif_idx, __entry->sta_idx,
++ __print_mgmt_info(__entry->frame_control, __entry->action_cat,
++ __entry->action_type, __entry->action_p2p))
++);
++
++DEFINE_EVENT(mgmt_template, mgmt_tx,
++ TP_PROTO(u16 freq, u8 vif_idx, u8 sta_idx, struct ieee80211_mgmt *mgmt),
++ TP_ARGS(freq, vif_idx, sta_idx, mgmt));
++
++DEFINE_EVENT(mgmt_template, mgmt_rx,
++ TP_PROTO(u16 freq, u8 vif_idx, u8 sta_idx, struct ieee80211_mgmt *mgmt),
++ TP_ARGS(freq, vif_idx, sta_idx, mgmt));
++
++TRACE_EVENT(
++ mgmt_cfm,
++ TP_PROTO(u8 vif_idx, u8 sta_idx, bool acked),
++ TP_ARGS(vif_idx, sta_idx, acked),
++ TP_STRUCT__entry(
++ __field(u8, vif_idx)
++ __field(u8, sta_idx)
++ __field(bool, acked)
++ ),
++ TP_fast_assign(
++ __entry->vif_idx = vif_idx;
++ __entry->sta_idx = sta_idx;
++ __entry->acked = acked;
++ ),
++ TP_printk("vif=%d sta=%d ack=%d",
++ __entry->vif_idx, __entry->sta_idx, __entry->acked)
++);
++#endif /* CONFIG_RWNX_FULLMAC */
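
Editorial note: the TRACE_EVENT()/DEFINE_EVENT() definitions in the section above generate trace_roc(), trace_mgmt_tx(), trace_mgmt_rx() and trace_mgmt_cfm() hooks that the driver fires from its management TX/RX and confirmation paths (they compile to no-ops when tracepoints are disabled). As an illustrative sketch only, the helper name and parameters below are hypothetical and not the driver's actual call site:

    /* Sketch only: emitting the mgmt_rx tracepoint from an RX path. */
    static void demo_trace_mgmt_rx(struct rwnx_vif *rwnx_vif, u8 sta_idx,
                                   u16 freq, struct sk_buff *skb)
    {
        trace_mgmt_rx(freq, rwnx_vif->vif_index, sta_idx,
                      (struct ieee80211_mgmt *)skb->data);
    }
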
++
++/*****************************************************************************
++ * TRACE function for TXQ
++ ****************************************************************************/
++#ifndef CONFIG_RWNX_FHOST
++#if defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS)
++
++#include <linux/trace_seq.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
++#include <linux/trace_events.h>
++#else
++#include <linux/ftrace_event.h>
++#endif
++
++const char *
++ftrace_print_txq(struct trace_seq *p, int txq_idx) {
++ const char *ret = trace_seq_buffer_ptr(p);
++
++ if (txq_idx == TXQ_INACTIVE) {
++ trace_seq_printf(p, "[INACTIVE]");
++ } else if (txq_idx < NX_FIRST_VIF_TXQ_IDX) {
++ trace_seq_printf(p, "[STA %d/%d]",
++ txq_idx / NX_NB_TXQ_PER_STA,
++ txq_idx % NX_NB_TXQ_PER_STA);
++#ifdef CONFIG_RWNX_FULLMAC
++ } else if (txq_idx < NX_FIRST_UNK_TXQ_IDX) {
++ trace_seq_printf(p, "[BC/MC %d]",
++ txq_idx - NX_FIRST_BCMC_TXQ_IDX);
++ } else if (txq_idx < NX_OFF_CHAN_TXQ_IDX) {
++ trace_seq_printf(p, "[UNKNOWN %d]",
++ txq_idx - NX_FIRST_UNK_TXQ_IDX);
++ } else if (txq_idx == NX_OFF_CHAN_TXQ_IDX) {
++ trace_seq_printf(p, "[OFFCHAN]");
++#else
++ } else if (txq_idx < NX_NB_TXQ) {
++ txq_idx -= NX_FIRST_VIF_TXQ_IDX;
++ trace_seq_printf(p, "[VIF %d/%d]",
++ txq_idx / NX_NB_TXQ_PER_VIF,
++ txq_idx % NX_NB_TXQ_PER_VIF);
++#endif
++ } else {
++ trace_seq_printf(p, "[ERROR %d]", txq_idx);
++ }
++
++ trace_seq_putc(p, 0);
++
++ return ret;
++}
++
++const char *
++ftrace_print_sta(struct trace_seq *p, int sta_idx) {
++ const char *ret = trace_seq_buffer_ptr(p);
++
++ if (sta_idx < NX_REMOTE_STA_MAX) {
++ trace_seq_printf(p, "[STA %d]", sta_idx);
++ } else {
++ trace_seq_printf(p, "[BC/MC %d]", sta_idx - NX_REMOTE_STA_MAX);
++ }
++
++ trace_seq_putc(p, 0);
++
++ return ret;
++}
++
++const char *
++ftrace_print_hwq(struct trace_seq *p, int hwq_idx) {
++
++ static const struct trace_print_flags symbols[] =
++ {{RWNX_HWQ_BK, "BK"},
++ {RWNX_HWQ_BE, "BE"},
++ {RWNX_HWQ_VI, "VI"},
++ {RWNX_HWQ_VO, "VO"},
++#ifdef CONFIG_RWNX_FULLMAC
++ {RWNX_HWQ_BCMC, "BCMC"},
++#else
++ {RWNX_HWQ_BCN, "BCN"},
++#endif
++ { -1, NULL }};
++ return trace_print_symbols_seq(p, hwq_idx, symbols);
++}
++
++const char *
++ftrace_print_hwq_cred(struct trace_seq *p, u8 *cred) {
++ const char *ret = trace_seq_buffer_ptr(p);
++
++#if CONFIG_USER_MAX == 1
++ trace_seq_printf(p, "%d", cred[0]);
++#else
++ int i;
++
++ for (i = 0; i < CONFIG_USER_MAX - 1; i++)
++ trace_seq_printf(p, "%d-", cred[i]);
++ trace_seq_printf(p, "%d", cred[i]);
++#endif
++
++ trace_seq_putc(p, 0);
++ return ret;
++}
++
++const char *
++ftrace_print_mu_info(struct trace_seq *p, u8 mu_info) {
++ const char *ret = trace_seq_buffer_ptr(p);
++
++ if (mu_info)
++ trace_seq_printf(p, "MU: %d-%d", (mu_info & 0x3f), (mu_info >> 6));
++
++ trace_seq_putc(p, 0);
++ return ret;
++}
++
++const char *
++ftrace_print_mu_group(struct trace_seq *p, int nb_user, u8 *users) {
++ const char *ret = trace_seq_buffer_ptr(p);
++ int i;
++
++ if (users[0] != 0xff)
++ trace_seq_printf(p, "(%d", users[0]);
++ else
++ trace_seq_printf(p, "(-");
++ for (i = 1; i < CONFIG_USER_MAX ; i++) {
++ if (users[i] != 0xff)
++ trace_seq_printf(p, ",%d", users[i]);
++ else
++ trace_seq_printf(p, ",-");
++ }
++
++ trace_seq_printf(p, ")");
++ trace_seq_putc(p, 0);
++ return ret;
++}
++
++const char *
++ftrace_print_amsdu(struct trace_seq *p, u16 nb_pkt) {
++ const char *ret = trace_seq_buffer_ptr(p);
++
++ if (nb_pkt > 1)
++ trace_seq_printf(p, "(AMSDU %d)", nb_pkt);
++
++ trace_seq_putc(p, 0);
++ return ret;
++}
++#endif /* defined(CONFIG_TRACEPOINTS) && defined(CREATE_TRACE_POINTS) */
++
++#undef __print_txq
++#define __print_txq(txq_idx) ftrace_print_txq(p, txq_idx)
++
++#undef __print_sta
++#define __print_sta(sta_idx) ftrace_print_sta(p, sta_idx)
++
++#undef __print_hwq
++#define __print_hwq(hwq) ftrace_print_hwq(p, hwq)
++
++#undef __print_hwq_cred
++#define __print_hwq_cred(cred) ftrace_print_hwq_cred(p, cred)
++
++#undef __print_mu_info
++#define __print_mu_info(mu_info) ftrace_print_mu_info(p, mu_info)
++
++#undef __print_mu_group
++#define __print_mu_group(nb, users) ftrace_print_mu_group(p, nb, users)
++
++#undef __print_amsdu
++#define __print_amsdu(nb_pkt) ftrace_print_amsdu(p, nb_pkt)
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++TRACE_EVENT(
++ txq_select,
++ TP_PROTO(int txq_idx, u16 pkt_ready_up, struct sk_buff *skb),
++ TP_ARGS(txq_idx, pkt_ready_up, skb),
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ __field(u16, pkt_ready)
++ __field(struct sk_buff *, skb)
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq_idx;
++ __entry->pkt_ready = pkt_ready_up;
++ __entry->skb = skb;
++ ),
++ TP_printk("%s pkt_ready_up=%d skb=%p", __print_txq(__entry->txq_idx),
++ __entry->pkt_ready, __entry->skb)
++);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++DECLARE_EVENT_CLASS(
++ hwq_template,
++ TP_PROTO(u8 hwq_idx),
++ TP_ARGS(hwq_idx),
++ TP_STRUCT__entry(
++ __field(u8, hwq_idx)
++ ),
++ TP_fast_assign(
++ __entry->hwq_idx = hwq_idx;
++ ),
++ TP_printk("%s", __print_hwq(__entry->hwq_idx))
++);
++
++DEFINE_EVENT(hwq_template, hwq_flowctrl_stop,
++ TP_PROTO(u8 hwq_idx),
++ TP_ARGS(hwq_idx));
++
++DEFINE_EVENT(hwq_template, hwq_flowctrl_start,
++ TP_PROTO(u8 hwq_idx),
++ TP_ARGS(hwq_idx));
++
++
++DECLARE_EVENT_CLASS(
++ txq_template,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq),
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ ),
++ TP_printk("%s", __print_txq(__entry->txq_idx))
++);
++
++DEFINE_EVENT(txq_template, txq_add_to_hw,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq));
++
++DEFINE_EVENT(txq_template, txq_del_from_hw,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq));
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++DEFINE_EVENT(txq_template, txq_flowctrl_stop,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq));
++
++DEFINE_EVENT(txq_template, txq_flowctrl_restart,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq));
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++TRACE_EVENT(
++ process_txq,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq),
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ __field(u16, len)
++ __field(u16, len_retry)
++ __field(s8, credit)
++ #ifdef CONFIG_RWNX_FULLMAC
++ __field(u16, limit)
++ #endif /* CONFIG_RWNX_FULLMAC*/
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ __entry->len = skb_queue_len(&txq->sk_list);
++ #ifdef CONFIG_MAC80211_TXQ
++ __entry->len += txq->nb_ready_mac80211;
++ #endif
++ __entry->len_retry = txq->nb_retry;
++ __entry->credit = txq->credits;
++ #ifdef CONFIG_RWNX_FULLMAC
++ __entry->limit = txq->push_limit;
++ #endif /* CONFIG_RWNX_FULLMAC*/
++ ),
++
++ #ifdef CONFIG_RWNX_FULLMAC
++ TP_printk("%s txq_credits=%d, len=%d, retry_len=%d, push_limit=%d",
++ __print_txq(__entry->txq_idx), __entry->credit,
++ __entry->len, __entry->len_retry, __entry->limit)
++ #else
++ TP_printk("%s txq_credits=%d, len=%d, retry_len=%d",
++ __print_txq(__entry->txq_idx), __entry->credit,
++ __entry->len, __entry->len_retry)
++ #endif /* CONFIG_RWNX_FULLMAC*/
++);
++
++DECLARE_EVENT_CLASS(
++ txq_reason_template,
++ TP_PROTO(struct rwnx_txq *txq, u16 reason),
++ TP_ARGS(txq, reason),
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ __field(u16, reason)
++ __field(u16, status)
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ __entry->reason = reason;
++ __entry->status = txq->status;
++ ),
++ TP_printk("%s reason=%s status=%s",
++ __print_txq(__entry->txq_idx),
++ __print_symbolic(__entry->reason,
++ {RWNX_TXQ_STOP_FULL, "FULL"},
++ {RWNX_TXQ_STOP_CSA, "CSA"},
++ {RWNX_TXQ_STOP_STA_PS, "PS"},
++ {RWNX_TXQ_STOP_VIF_PS, "VPS"},
++ {RWNX_TXQ_STOP_CHAN, "CHAN"},
++ {RWNX_TXQ_STOP_MU_POS, "MU"}),
++ __print_flags(__entry->status, "|",
++ {RWNX_TXQ_IN_HWQ_LIST, "IN LIST"},
++ {RWNX_TXQ_STOP_FULL, "FULL"},
++ {RWNX_TXQ_STOP_CSA, "CSA"},
++ {RWNX_TXQ_STOP_STA_PS, "PS"},
++ {RWNX_TXQ_STOP_VIF_PS, "VPS"},
++ {RWNX_TXQ_STOP_CHAN, "CHAN"},
++ {RWNX_TXQ_STOP_MU_POS, "MU"},
++ {RWNX_TXQ_NDEV_FLOW_CTRL, "FLW_CTRL"}))
++);
++
++DEFINE_EVENT(txq_reason_template, txq_start,
++ TP_PROTO(struct rwnx_txq *txq, u16 reason),
++ TP_ARGS(txq, reason));
++
++DEFINE_EVENT(txq_reason_template, txq_stop,
++ TP_PROTO(struct rwnx_txq *txq, u16 reason),
++ TP_ARGS(txq, reason));
++
++
++TRACE_EVENT(
++ push_desc,
++ TP_PROTO(struct sk_buff *skb, struct rwnx_sw_txhdr *sw_txhdr, int push_flags),
++
++ TP_ARGS(skb, sw_txhdr, push_flags),
++
++ TP_STRUCT__entry(
++ __field(struct sk_buff *, skb)
++ __field(unsigned int, len)
++ __field(u16, tx_queue)
++ __field(u8, hw_queue)
++ __field(u8, push_flag)
++ __field(u32, flag)
++ __field(s8, txq_cred)
++ __field(u8, hwq_cred)
++ __field(u16, pkt_cnt)
++ __field(u8, mu_info)
++ ),
++ TP_fast_assign(
++ __entry->skb = skb;
++ __entry->tx_queue = sw_txhdr->txq->idx;
++ __entry->push_flag = push_flags;
++ __entry->hw_queue = sw_txhdr->txq->hwq->id;
++ __entry->txq_cred = sw_txhdr->txq->credits;
++ //__entry->hwq_cred = sw_txhdr->txq->hwq->credits[RWNX_TXQ_POS_ID(sw_txhdr->txq)];
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ __entry->pkt_cnt = sw_txhdr->desc.host.packet_cnt;
++#endif
++#ifdef CONFIG_RWNX_FULLMAC
++ __entry->flag = sw_txhdr->desc.host.flags;
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ if (sw_txhdr->amsdu.len)
++ __entry->len = sw_txhdr->amsdu.len;
++ else
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++ __entry->len = sw_txhdr->desc.host.packet_len[0];
++#else
++ __entry->len = sw_txhdr->desc.host.packet_len;
++#endif /* CONFIG_RWNX_SPLIT_TX_BUF */
++
++#else /* !CONFIG_RWNX_FULLMAC */
++ __entry->flag = sw_txhdr->desc.umac.flags;
++ __entry->len = sw_txhdr->frame_len;
++ __entry->sn = sw_txhdr->sn;
++#endif /* CONFIG_RWNX_FULLMAC */
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ __entry->mu_info = sw_txhdr->desc.host.mumimo_info;
++#else
++ __entry->mu_info = 0;
++#endif
++ ),
++
++#ifdef CONFIG_RWNX_FULLMAC
++ TP_printk("%s skb=%p (len=%d) hw_queue=%s cred_txq=%d cred_hwq=%d %s flag=%s %s%s%s",
++ __print_txq(__entry->tx_queue), __entry->skb, __entry->len,
++ __print_hwq(__entry->hw_queue),
++ __entry->txq_cred, __entry->hwq_cred,
++ __print_mu_info(__entry->mu_info),
++ __print_flags(__entry->flag, "|",
++ {TXU_CNTRL_RETRY, "RETRY"},
++ {TXU_CNTRL_MORE_DATA, "MOREDATA"},
++ {TXU_CNTRL_MGMT, "MGMT"},
++ {TXU_CNTRL_MGMT_NO_CCK, "NO_CCK"},
++ {TXU_CNTRL_MGMT_ROBUST, "ROBUST"},
++ {TXU_CNTRL_AMSDU, "AMSDU"},
++ {TXU_CNTRL_USE_4ADDR, "4ADDR"},
++ {TXU_CNTRL_EOSP, "EOSP"},
++ {TXU_CNTRL_MESH_FWD, "MESH_FWD"},
++ {TXU_CNTRL_TDLS, "TDLS"}),
++ (__entry->push_flag & RWNX_PUSH_IMMEDIATE) ? "(IMMEDIATE)" : "",
++ (!(__entry->flag & TXU_CNTRL_RETRY) &&
++ (__entry->push_flag & RWNX_PUSH_RETRY)) ? "(SW_RETRY)" : "",
++ __print_amsdu(__entry->pkt_cnt))
++#else
++ TP_printk("%s skb=%p (len=%d) hw_queue=%s cred_txq=%d cred_hwq=%d %s flag=%x (%s) sn=%d %s",
++ __print_txq(__entry->tx_queue), __entry->skb, __entry->len,
++ __print_hwq(__entry->hw_queue), __entry->txq_cred, __entry->hwq_cred,
++ __print_mu_info(__entry->mu_info),
++ __entry->flag,
++ __print_flags(__entry->push_flag, "|",
++ {RWNX_PUSH_RETRY, "RETRY"},
++ {RWNX_PUSH_IMMEDIATE, "IMMEDIATE"}),
++ __entry->sn, __print_amsdu(__entry->pkt_cnt))
++#endif /* CONFIG_RWNX_FULLMAC */
++);
++
++
++TRACE_EVENT(
++ txq_queue_skb,
++ TP_PROTO(struct sk_buff *skb, struct rwnx_txq *txq, bool retry),
++ TP_ARGS(skb, txq, retry),
++ TP_STRUCT__entry(
++ __field(struct sk_buff *, skb)
++ __field(u16, txq_idx)
++ __field(s8, credit)
++ __field(u16, q_len)
++ __field(u16, q_len_retry)
++ __field(bool, retry)
++ ),
++ TP_fast_assign(
++ __entry->skb = skb;
++ __entry->txq_idx = txq->idx;
++ __entry->credit = txq->credits;
++ __entry->q_len = skb_queue_len(&txq->sk_list);
++ __entry->q_len_retry = txq->nb_retry;
++ __entry->retry = retry;
++ ),
++
++ TP_printk("%s skb=%p retry=%d txq_credits=%d queue_len=%d (retry = %d)",
++ __print_txq(__entry->txq_idx), __entry->skb, __entry->retry,
++ __entry->credit, __entry->q_len, __entry->q_len_retry)
++);
++
++#ifdef CONFIG_MAC80211_TXQ
++TRACE_EVENT(
++ txq_wake,
++ TP_PROTO(struct rwnx_txq *txq),
++ TP_ARGS(txq),
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ __field(u16, q_len)
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ __entry->q_len = txq->nb_ready_mac80211;
++ ),
++
++ TP_printk("%s mac80211_queue_len=%d", __print_txq(__entry->txq_idx), __entry->q_len)
++);
++
++TRACE_EVENT(
++ txq_drop,
++ TP_PROTO(struct rwnx_txq *txq, unsigned long nb_drop),
++ TP_ARGS(txq, nb_drop),
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ __field(u16, nb_drop)
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ __entry->nb_drop = nb_drop;
++ ),
++
++ TP_printk("%s %u pkt have been dropped by codel in mac80211 txq",
++ __print_txq(__entry->txq_idx), __entry->nb_drop)
++);
++
++#endif
++
++
++DECLARE_EVENT_CLASS(
++ idx_template,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx),
++ TP_STRUCT__entry(
++ __field(u16, idx)
++ ),
++ TP_fast_assign(
++ __entry->idx = idx;
++ ),
++ TP_printk("idx=%d", __entry->idx)
++);
++
++
++DEFINE_EVENT(idx_template, txq_vif_start,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx));
++
++DEFINE_EVENT(idx_template, txq_vif_stop,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx));
++
++TRACE_EVENT(
++ process_hw_queue,
++ TP_PROTO(struct rwnx_hwq *hwq),
++ TP_ARGS(hwq),
++ TP_STRUCT__entry(
++ __field(u16, hwq)
++ __array(u8, credits, CONFIG_USER_MAX)
++ ),
++ TP_fast_assign(
++ //int i;
++ __entry->hwq = hwq->id;
++ //for (i=0; i < CONFIG_USER_MAX; i ++)
++ // __entry->credits[i] = hwq->credits[i];
++ ),
++ TP_printk("hw_queue=%s hw_credits=%s",
++ __print_hwq(__entry->hwq), __print_hwq_cred(__entry->credits))
++);
++
++DECLARE_EVENT_CLASS(
++ sta_idx_template,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx),
++ TP_STRUCT__entry(
++ __field(u16, idx)
++ ),
++ TP_fast_assign(
++ __entry->idx = idx;
++ ),
++ TP_printk("%s", __print_sta(__entry->idx))
++);
++
++DEFINE_EVENT(sta_idx_template, txq_sta_start,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx));
++
++DEFINE_EVENT(sta_idx_template, txq_sta_stop,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx));
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++DEFINE_EVENT(sta_idx_template, ps_disable,
++ TP_PROTO(u16 idx),
++ TP_ARGS(idx));
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++TRACE_EVENT(
++ skb_confirm,
++ TP_PROTO(struct sk_buff *skb, struct rwnx_txq *txq, struct rwnx_hwq *hwq,
++#ifdef CONFIG_RWNX_FULLMAC
++ struct tx_cfm_tag *cfm
++#else
++ u8 cfm
++#endif
++ ),
++
++ TP_ARGS(skb, txq, hwq, cfm),
++
++ TP_STRUCT__entry(
++ __field(struct sk_buff *, skb)
++ __field(u16, txq_idx)
++ __field(u8, hw_queue)
++ __array(u8, hw_credit, CONFIG_USER_MAX)
++ __field(s8, sw_credit)
++ __field(s8, sw_credit_up)
++#ifdef CONFIG_RWNX_FULLMAC
++ __field(u8, ampdu_size)
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ __field(u16, amsdu)
++#endif /* CONFIG_RWNX_SPLIT_TX_BUF */
++ __field(u16, sn)
++#endif /* CONFIG_RWNX_FULLMAC*/
++ ),
++
++ TP_fast_assign(
++ //int i;
++ __entry->skb = skb;
++ __entry->txq_idx = txq->idx;
++ __entry->hw_queue = hwq->id;
++ //for (i = 0 ; i < CONFIG_USER_MAX ; i++)
++ // __entry->hw_credit[i] = hwq->credits[i];
++ __entry->sw_credit = txq->credits;
++#if defined CONFIG_RWNX_FULLMAC
++ __entry->sw_credit_up = cfm->credits;
++ __entry->ampdu_size = cfm->ampdu_size;
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ __entry->amsdu = cfm->amsdu_size;
++ __entry->sn = cfm->sn;
++#endif
++#else
++ __entry->sw_credit_up = cfm;
++#endif /* CONFIG_RWNX_FULLMAC */
++ ),
++
++ TP_printk("%s skb=%p hw_queue=%s, hw_credits=%s, txq_credits=%d (+%d)"
++#ifdef CONFIG_RWNX_FULLMAC
++ " sn=%u ampdu=%d"
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ " amsdu=%u"
++#endif
++#endif
++ , __print_txq(__entry->txq_idx), __entry->skb,
++ __print_hwq(__entry->hw_queue),
++ __print_hwq_cred(__entry->hw_credit),
++ __entry->sw_credit, __entry->sw_credit_up
++#ifdef CONFIG_RWNX_FULLMAC
++ , __entry->sn, __entry->ampdu_size
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ , __entry->amsdu
++#endif
++#endif
++ )
++);
++
++TRACE_EVENT(
++ credit_update,
++ TP_PROTO(struct rwnx_txq *txq, s8_l cred_up),
++
++ TP_ARGS(txq, cred_up),
++
++ TP_STRUCT__entry(
++ __field(struct sk_buff *, skb)
++ __field(u16, txq_idx)
++ __field(s8, sw_credit)
++ __field(s8, sw_credit_up)
++ ),
++
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ __entry->sw_credit = txq->credits;
++ __entry->sw_credit_up = cred_up;
++ ),
++
++ TP_printk("%s txq_credits=%d (%+d)", __print_txq(__entry->txq_idx),
++ __entry->sw_credit, __entry->sw_credit_up)
++);
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++DECLARE_EVENT_CLASS(
++ ps_template,
++ TP_PROTO(struct rwnx_sta *sta),
++ TP_ARGS(sta),
++ TP_STRUCT__entry(
++ __field(u16, idx)
++ __field(u16, ready_ps)
++ __field(u16, sp_ps)
++ __field(u16, ready_uapsd)
++ __field(u16, sp_uapsd)
++ ),
++ TP_fast_assign(
++ __entry->idx = sta->sta_idx;
++ __entry->ready_ps = sta->ps.pkt_ready[LEGACY_PS_ID];
++ __entry->sp_ps = sta->ps.sp_cnt[LEGACY_PS_ID];
++ __entry->ready_uapsd = sta->ps.pkt_ready[UAPSD_ID];
++ __entry->sp_uapsd = sta->ps.sp_cnt[UAPSD_ID];
++ ),
++
++ TP_printk("%s [PS] ready=%d sp=%d [UAPSD] ready=%d sp=%d",
++ __print_sta(__entry->idx), __entry->ready_ps, __entry->sp_ps,
++ __entry->ready_uapsd, __entry->sp_uapsd)
++);
++
++DEFINE_EVENT(ps_template, ps_queue,
++ TP_PROTO(struct rwnx_sta *sta),
++ TP_ARGS(sta));
++
++DEFINE_EVENT(ps_template, ps_push,
++ TP_PROTO(struct rwnx_sta *sta),
++ TP_ARGS(sta));
++
++DEFINE_EVENT(ps_template, ps_enable,
++ TP_PROTO(struct rwnx_sta *sta),
++ TP_ARGS(sta));
++
++TRACE_EVENT(
++ ps_traffic_update,
++ TP_PROTO(u16 sta_idx, u8 traffic, bool uapsd),
++
++ TP_ARGS(sta_idx, traffic, uapsd),
++
++ TP_STRUCT__entry(
++ __field(u16, sta_idx)
++ __field(u8, traffic)
++ __field(bool, uapsd)
++ ),
++
++ TP_fast_assign(
++ __entry->sta_idx = sta_idx;
++ __entry->traffic = traffic;
++ __entry->uapsd = uapsd;
++ ),
++
++ TP_printk("%s %s%s traffic available ", __print_sta(__entry->sta_idx),
++ __entry->traffic ? "" : "no more ",
++ __entry->uapsd ? "U-APSD" : "legacy PS")
++);
++
++TRACE_EVENT(
++ ps_traffic_req,
++ TP_PROTO(struct rwnx_sta *sta, u16 pkt_req, u8 ps_id),
++ TP_ARGS(sta, pkt_req, ps_id),
++ TP_STRUCT__entry(
++ __field(u16, idx)
++ __field(u16, pkt_req)
++ __field(u8, ps_id)
++ __field(u16, ready)
++ __field(u16, sp)
++ ),
++ TP_fast_assign(
++ __entry->idx = sta->sta_idx;
++ __entry->pkt_req = pkt_req;
++ __entry->ps_id = ps_id;
++ __entry->ready = sta->ps.pkt_ready[ps_id];
++ __entry->sp = sta->ps.sp_cnt[ps_id];
++ ),
++
++ TP_printk("%s %s traffic request %d pkt (ready=%d, sp=%d)",
++ __print_sta(__entry->idx),
++ __entry->ps_id == UAPSD_ID ? "U-APSD" : "legacy PS" ,
++ __entry->pkt_req, __entry->ready, __entry->sp)
++);
++
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++TRACE_EVENT(
++ amsdu_subframe,
++ TP_PROTO(struct rwnx_sw_txhdr *sw_txhdr),
++ TP_ARGS(sw_txhdr),
++ TP_STRUCT__entry(
++ __field(struct sk_buff *, skb)
++ __field(u16, txq_idx)
++ __field(u8, nb)
++ __field(u32, len)
++ ),
++ TP_fast_assign(
++ __entry->skb = sw_txhdr->skb;
++ __entry->nb = sw_txhdr->amsdu.nb;
++ __entry->len = sw_txhdr->amsdu.len;
++ __entry->txq_idx = sw_txhdr->txq->idx;
++ ),
++
++ TP_printk("%s skb=%p %s nb_subframe=%d, len=%u",
++ __print_txq(__entry->txq_idx), __entry->skb,
++ (__entry->nb == 2) ? "Start new AMSDU" : "Add subframe",
++ __entry->nb, __entry->len)
++);
++#endif
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++TRACE_EVENT(
++ mu_group_update,
++ TP_PROTO(struct rwnx_mu_group *group),
++ TP_ARGS(group),
++ TP_STRUCT__entry(
++ __field(u8, nb_user)
++ __field(u8, group_id)
++ __array(u8, users, CONFIG_USER_MAX)
++ ),
++ TP_fast_assign(
++ int i;
++ __entry->nb_user = group->user_cnt;
++ for (i = 0; i < CONFIG_USER_MAX ; i++) {
++ if (group->users[i]) {
++ __entry->users[i] = group->users[i]->sta_idx;
++ } else {
++ __entry->users[i] = 0xff;
++ }
++ }
++
++ __entry->group_id = group->group_id;
++ ),
++
++ TP_printk("Group-id = %d, Users = %s",
++ __entry->group_id,
++ __print_mu_group(__entry->nb_user, __entry->users))
++);
++
++TRACE_EVENT(
++ mu_group_delete,
++ TP_PROTO(int group_id),
++ TP_ARGS(group_id),
++ TP_STRUCT__entry(
++ __field(u8, group_id)
++ ),
++ TP_fast_assign(
++ __entry->group_id = group_id;
++ ),
++
++ TP_printk("Group-id = %d", __entry->group_id)
++);
++
++TRACE_EVENT(
++ mu_group_selection,
++ TP_PROTO(struct rwnx_sta *sta, int group_id),
++ TP_ARGS(sta, group_id),
++ TP_STRUCT__entry(
++ __field(u8, sta_idx)
++ __field(u8, group_id)
++ ),
++ TP_fast_assign(
++ __entry->sta_idx = sta->sta_idx;
++ __entry->group_id = group_id;
++ ),
++
++ TP_printk("[Sta %d] Group-id = %d", __entry->sta_idx, __entry->group_id)
++);
++
++TRACE_EVENT(
++ txq_select_mu_group,
++ TP_PROTO(struct rwnx_txq *txq, int group_id, int pos),
++
++ TP_ARGS(txq, group_id, pos),
++
++ TP_STRUCT__entry(
++ __field(u16, txq_idx)
++ __field(u8, group_id)
++ __field(u8, pos)
++ ),
++ TP_fast_assign(
++ __entry->txq_idx = txq->idx;
++ __entry->group_id = group_id;
++ __entry->pos = pos;
++ ),
++
++ TP_printk("%s: group=%d pos=%d", __print_txq(__entry->txq_idx),
++ __entry->group_id, __entry->pos)
++);
++
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++#endif /* ! CONFIG_RWNX_FHOST */
++
++/*****************************************************************************
++ * TRACE functions for MESH
++ ****************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++DECLARE_EVENT_CLASS(
++ mesh_path_template,
++ TP_PROTO(struct rwnx_mesh_path *mesh_path),
++ TP_ARGS(mesh_path),
++ TP_STRUCT__entry(
++ __field(u8, idx)
++ __field(u8, next_hop_sta)
++ __array(u8, tgt_mac, ETH_ALEN)
++ ),
++
++ TP_fast_assign(
++ __entry->idx = mesh_path->path_idx;
++ memcpy(__entry->tgt_mac, &mesh_path->tgt_mac_addr, ETH_ALEN);
++ if (mesh_path->p_nhop_sta)
++ __entry->next_hop_sta = mesh_path->p_nhop_sta->sta_idx;
++ else
++ __entry->next_hop_sta = 0xff;
++ ),
++
++ TP_printk("Mpath(%d): target=%pM next_hop=STA-%d",
++ __entry->idx, __entry->tgt_mac, __entry->next_hop_sta)
++);
++
++DEFINE_EVENT(mesh_path_template, mesh_create_path,
++ TP_PROTO(struct rwnx_mesh_path *mesh_path),
++ TP_ARGS(mesh_path));
++
++DEFINE_EVENT(mesh_path_template, mesh_delete_path,
++ TP_PROTO(struct rwnx_mesh_path *mesh_path),
++ TP_ARGS(mesh_path));
++
++DEFINE_EVENT(mesh_path_template, mesh_update_path,
++ TP_PROTO(struct rwnx_mesh_path *mesh_path),
++ TP_ARGS(mesh_path));
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/*****************************************************************************
++ * TRACE functions for RADAR
++ ****************************************************************************/
++#ifdef CONFIG_RWNX_RADAR
++TRACE_EVENT(
++ radar_pulse,
++ TP_PROTO(u8 chain, struct radar_pulse *pulse),
++ TP_ARGS(chain, pulse),
++ TP_STRUCT__entry(
++ __field(u8, chain)
++ __field(s16, freq)
++ __field(u16, pri)
++ __field(u8, len)
++ __field(u8, fom)
++ ),
++ TP_fast_assign(
++ __entry->freq = pulse->freq * 2;
++ __entry->len = pulse->len * 2;
++ __entry->fom = pulse->fom * 6;
++ __entry->pri = pulse->rep;
++ __entry->chain = chain;
++ ),
++
++ TP_printk("%s: PRI=%.5d LEN=%.3d FOM=%.2d%% freq=%dMHz ",
++ __print_symbolic(__entry->chain,
++ {RWNX_RADAR_RIU, "RIU"},
++ {RWNX_RADAR_FCU, "FCU"}),
++ __entry->pri, __entry->len, __entry->fom, __entry->freq)
++ );
++
++TRACE_EVENT(
++ radar_detected,
++ TP_PROTO(u8 chain, u8 region, s16 freq, u8 type, u16 pri),
++ TP_ARGS(chain, region, freq, type, pri),
++ TP_STRUCT__entry(
++ __field(u8, chain)
++ __field(u8, region)
++ __field(s16, freq)
++ __field(u8, type)
++ __field(u16, pri)
++ ),
++ TP_fast_assign(
++ __entry->chain = chain;
++ __entry->region = region;
++ __entry->freq = freq;
++ __entry->type = type;
++ __entry->pri = pri;
++ ),
++ TP_printk("%s: region=%s type=%d freq=%dMHz (pri=%dus)",
++ __print_symbolic(__entry->chain,
++ {RWNX_RADAR_RIU, "RIU"},
++ {RWNX_RADAR_FCU, "FCU"}),
++ __print_symbolic(__entry->region,
++ {NL80211_DFS_UNSET, "UNSET"},
++ {NL80211_DFS_FCC, "FCC"},
++ {NL80211_DFS_ETSI, "ETSI"},
++ {NL80211_DFS_JP, "JP"}),
++ __entry->type, __entry->freq, __entry->pri)
++);
++
++TRACE_EVENT(
++ radar_set_region,
++ TP_PROTO(u8 region),
++ TP_ARGS(region),
++ TP_STRUCT__entry(
++ __field(u8, region)
++ ),
++ TP_fast_assign(
++ __entry->region = region;
++ ),
++ TP_printk("region=%s",
++ __print_symbolic(__entry->region,
++ {NL80211_DFS_UNSET, "UNSET"},
++ {NL80211_DFS_FCC, "FCC"},
++ {NL80211_DFS_ETSI, "ETSI"},
++ {NL80211_DFS_JP, "JP"}))
++);
++
++TRACE_EVENT(
++ radar_enable_detection,
++ TP_PROTO(u8 region, u8 enable, u8 chain),
++ TP_ARGS(region, enable, chain),
++ TP_STRUCT__entry(
++ __field(u8, region)
++ __field(u8, chain)
++ __field(u8, enable)
++ ),
++ TP_fast_assign(
++ __entry->chain = chain;
++ __entry->enable = enable;
++ __entry->region = region;
++ ),
++ TP_printk("%s: %s radar detection %s",
++ __print_symbolic(__entry->chain,
++ {RWNX_RADAR_RIU, "RIU"},
++ {RWNX_RADAR_FCU, "FCU"}),
++ __print_symbolic(__entry->enable,
++ {RWNX_RADAR_DETECT_DISABLE, "Disable"},
++ {RWNX_RADAR_DETECT_ENABLE, "Enable (no report)"},
++ {RWNX_RADAR_DETECT_REPORT, "Enable"}),
++ __entry->enable == RWNX_RADAR_DETECT_DISABLE ? "" :
++ __print_symbolic(__entry->region,
++ {NL80211_DFS_UNSET, "UNSET"},
++ {NL80211_DFS_FCC, "FCC"},
++ {NL80211_DFS_ETSI, "ETSI"},
++ {NL80211_DFS_JP, "JP"}))
++);
++#endif /* CONFIG_RWNX_RADAR */
++
++/*****************************************************************************
++ * TRACE functions for IPC message
++ ****************************************************************************/
++#include "rwnx_strs.h"
++
++DECLARE_EVENT_CLASS(
++ ipc_msg_template,
++ TP_PROTO(u16 id),
++ TP_ARGS(id),
++ TP_STRUCT__entry(
++ __field(u16, id)
++ ),
++ TP_fast_assign(
++ __entry->id = id;
++ ),
++
++ TP_printk("%s (%d - %d)", RWNX_ID2STR(__entry->id),
++ MSG_T(__entry->id), MSG_I(__entry->id))
++);
++
++DEFINE_EVENT(ipc_msg_template, msg_send,
++ TP_PROTO(u16 id),
++ TP_ARGS(id));
++
++DEFINE_EVENT(ipc_msg_template, msg_recv,
++ TP_PROTO(u16 id),
++ TP_ARGS(id));
++
++
++
++#endif /* !defined(_RWNX_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ) */
++
++#undef TRACE_INCLUDE_PATH
++#undef TRACE_INCLUDE_FILE
++#define TRACE_INCLUDE_PATH .
++#define TRACE_INCLUDE_FILE rwnx_events
++#include <trace/define_trace.h>
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_dump.c
+@@ -0,0 +1,568 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_fw_dump.c
++ *
++ * @brief Definition of debugfs entries used to process the firmware dump
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++
++#include <linux/kmod.h>
++#include <linux/debugfs.h>
++
++#include "rwnx_defs.h"
++#include "rwnx_debugfs.h"
++
++static ssize_t rwnx_dbgfs_rhd_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->rhd_mem,
++ dump->dbg_info.rhd_len);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(rhd);
++
++static ssize_t rwnx_dbgfs_rbd_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->rbd_mem,
++ dump->dbg_info.rbd_len);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(rbd);
++
++static ssize_t rwnx_dbgfs_thdx_read(struct file *file, char __user *user_buf,
++ size_t count, loff_t *ppos, int idx)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ &dump->thd_mem[idx],
++ dump->dbg_info.thd_len[idx]);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++static ssize_t rwnx_dbgfs_thd0_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 0);
++}
++DEBUGFS_READ_FILE_OPS(thd0);
++
++static ssize_t rwnx_dbgfs_thd1_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 1);
++}
++DEBUGFS_READ_FILE_OPS(thd1);
++
++static ssize_t rwnx_dbgfs_thd2_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 2);
++}
++DEBUGFS_READ_FILE_OPS(thd2);
++
++static ssize_t rwnx_dbgfs_thd3_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 3);
++}
++DEBUGFS_READ_FILE_OPS(thd3);
++
++#if (NX_TXQ_CNT == 5)
++static ssize_t rwnx_dbgfs_thd4_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ return rwnx_dbgfs_thdx_read(file, user_buf, count, ppos, 4);
++}
++DEBUGFS_READ_FILE_OPS(thd4);
++#endif
++
++static ssize_t rwnx_dbgfs_mactrace_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ char msg[64];
++
++ scnprintf(msg, sizeof(msg), "Force trigger\n");
++ rwnx_dbgfs_trigger_fw_dump(priv, msg);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->la_mem,
++ dump->dbg_info.la_conf.trace_len);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++
++ return read;
++}
++DEBUGFS_READ_FILE_OPS(mactrace);
++
++static ssize_t rwnx_dbgfs_macdiags_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->dbg_info.diags_mac,
++ DBG_DIAGS_MAC_MAX * 2);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(macdiags);
++
++static ssize_t rwnx_dbgfs_phydiags_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->dbg_info.diags_phy,
++ DBG_DIAGS_PHY_MAX * 2);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(phydiags);
++
++static ssize_t rwnx_dbgfs_hwdiags_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ char buf[16];
++ int ret;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "%08X\n", dump->dbg_info.hw_diag);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++}
++
++DEBUGFS_READ_FILE_OPS(hwdiags);
++
++static ssize_t rwnx_dbgfs_plfdiags_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ char buf[16];
++ int ret;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "%08X\n", dump->dbg_info.la_conf.diag_conf);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++}
++
++DEBUGFS_READ_FILE_OPS(plfdiags);
++
++static ssize_t rwnx_dbgfs_swdiags_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ &dump->dbg_info.sw_diag,
++ dump->dbg_info.sw_diag_len);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(swdiags);
++
++static ssize_t rwnx_dbgfs_error_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->dbg_info.error,
++ strlen((char *)dump->dbg_info.error));
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(error);
++
++static ssize_t rwnx_dbgfs_rxdesc_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ char buf[32];
++ int ret;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "%08X\n%08X\n", dump->dbg_info.rhd,
++ dump->dbg_info.rbd);
++ read = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(rxdesc);
++
++static ssize_t rwnx_dbgfs_txdesc_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ char buf[64];
++ int len = 0;
++ int i;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ for (i = 0; i < NX_TXQ_CNT; i++) {
++ len += scnprintf(&buf[len], min_t(size_t, sizeof(buf) - len - 1, count),
++ "%08X\n", dump->dbg_info.thd[i]);
++ }
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
++}
++
++DEBUGFS_READ_FILE_OPS(txdesc);
++
++static ssize_t rwnx_dbgfs_macrxptr_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ &dump->dbg_info.rhd_hw_ptr,
++ 2 * sizeof(dump->dbg_info.rhd_hw_ptr));
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++
++DEBUGFS_READ_FILE_OPS(macrxptr);
++
++static ssize_t rwnx_dbgfs_lamacconf_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ ssize_t read;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ read = simple_read_from_buffer(user_buf, count, ppos,
++ dump->dbg_info.la_conf.conf,
++ LA_CONF_LEN * 4);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return read;
++}
++DEBUGFS_READ_FILE_OPS(lamacconf);
++
++static ssize_t rwnx_dbgfs_chaninfo_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ struct dbg_debug_dump_tag *dump = priv->dbgdump_elem.buf.addr;
++ char buf[4 * 32];
++ int ret;
++
++ mutex_lock(&priv->dbgdump_elem.mutex);
++ if (!priv->debugfs.trace_prst) {
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return 0;
++ }
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "type: %d\n"
++ "prim20_freq: %d MHz\n"
++ "center1_freq: %d MHz\n"
++ "center2_freq: %d MHz\n",
++ (dump->dbg_info.chan_info.info1 >> 8) & 0xFF,
++ (dump->dbg_info.chan_info.info1 >> 16) & 0xFFFF,
++ (dump->dbg_info.chan_info.info2 >> 0) & 0xFFFF,
++ (dump->dbg_info.chan_info.info2 >> 16) & 0xFFFF);
++
++ mutex_unlock(&priv->dbgdump_elem.mutex);
++ return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++}
++
++DEBUGFS_READ_FILE_OPS(chaninfo);
++
++static ssize_t rwnx_dbgfs_um_helper_read(struct file *file,
++ char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ char buf[sizeof(priv->debugfs.helper_cmd)];
++ int ret;
++
++ ret = scnprintf(buf, min_t(size_t, sizeof(buf) - 1, count),
++ "%s", priv->debugfs.helper_cmd);
++
++ return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++}
++
++static ssize_t rwnx_dbgfs_um_helper_write(struct file *file,
++ const char __user *user_buf,
++ size_t count, loff_t *ppos)
++{
++ struct rwnx_hw *priv = file->private_data;
++ int eobuf = min_t(size_t, sizeof(priv->debugfs.helper_cmd) - 1, count);
++
++ priv->debugfs.helper_cmd[eobuf] = '\0';
++ if (copy_from_user(priv->debugfs.helper_cmd, user_buf, eobuf))
++ return -EFAULT;
++
++ return count;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(um_helper);
++
++/*
++ * Call a userspace program (user mode helper)
++ */
++int rwnx_um_helper(struct rwnx_debugfs *rwnx_debugfs, const char *cmd)
++{
++ char *envp[] = { "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
++ char **argv;
++ int argc, ret;
++
++ if (!rwnx_debugfs->dir ||
++ !strlen((cmd = cmd ? cmd : rwnx_debugfs->helper_cmd)))
++ return 0;
++ argv = argv_split(in_interrupt() ? GFP_ATOMIC : GFP_KERNEL, cmd, &argc);
++ if (!argv)
++ return -ENOMEM;
++ if (!argc)
++ return -EINVAL;
++
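++ /* Run the helper synchronously (UMH_WAIT_PROC) and allow it to be killed. */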
++ if ((ret = call_usermodehelper(argv[0], argv, envp,
++ UMH_WAIT_PROC | UMH_KILLABLE)))
++ printk(KERN_CRIT "Failed to call %s (%s returned %d)\n",
++ argv[0], cmd, ret);
++ argv_free(argv);
++
++ return ret;
++}
++
++static void rwnx_um_helper_work(struct work_struct *ws)
++{
++ struct rwnx_debugfs *rwnx_debugfs = container_of(ws, struct rwnx_debugfs,
++ helper_work);
++ struct rwnx_hw *rwnx_hw = container_of(rwnx_debugfs, struct rwnx_hw,
++ debugfs);
++ rwnx_um_helper(rwnx_debugfs, NULL);
++ if (!rwnx_debugfs->unregistering)
++ rwnx_umh_done(rwnx_hw);
++ rwnx_debugfs->helper_scheduled = false;
++}
++
++int rwnx_trigger_um_helper(struct rwnx_debugfs *rwnx_debugfs)
++{
++ struct rwnx_hw *rwnx_hw = container_of(rwnx_debugfs, struct rwnx_hw,
++ debugfs);
++
++ if (rwnx_debugfs->helper_scheduled == true) {
++ dev_err(rwnx_hw->dev, "%s: Already scheduled\n", __func__);
++ return -EBUSY;
++ }
++
++ spin_lock_bh(&rwnx_debugfs->umh_lock);
++ if (rwnx_debugfs->unregistering) {
++ spin_unlock_bh(&rwnx_debugfs->umh_lock);
++ dev_err(rwnx_hw->dev, "%s: unregistering\n", __func__);
++ return -ENOENT;
++ }
++ rwnx_debugfs->helper_scheduled = true;
++ schedule_work(&rwnx_debugfs->helper_work);
++ spin_unlock_bh(&rwnx_debugfs->umh_lock);
++
++ return 0;
++}
++
++int rwnx_dbgfs_register_fw_dump(struct rwnx_hw *rwnx_hw,
++ struct dentry *dir_drv,
++ struct dentry *dir_diags)
++{
++
++ struct rwnx_debugfs *rwnx_debugfs = &rwnx_hw->debugfs;
++
++ BUILD_BUG_ON(sizeof(CONFIG_RWNX_UM_HELPER_DFLT) >=
++ sizeof(rwnx_debugfs->helper_cmd));
++ strncpy(rwnx_debugfs->helper_cmd,
++ CONFIG_RWNX_UM_HELPER_DFLT, sizeof(rwnx_debugfs->helper_cmd));
++ INIT_WORK(&rwnx_debugfs->helper_work, rwnx_um_helper_work);
++ DEBUGFS_ADD_FILE(um_helper, dir_drv, S_IWUSR | S_IRUSR);
++
++ rwnx_debugfs->trace_prst = rwnx_debugfs->helper_scheduled = false;
++ spin_lock_init(&rwnx_debugfs->umh_lock);
++ DEBUGFS_ADD_FILE(rhd, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(rbd, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(thd0, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(thd1, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(thd2, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(thd3, dir_diags, S_IRUSR);
++#if (NX_TXQ_CNT == 5)
++ DEBUGFS_ADD_FILE(thd4, dir_diags, S_IRUSR);
++#endif
++ DEBUGFS_ADD_FILE(mactrace, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(macdiags, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(phydiags, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(plfdiags, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(hwdiags, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(swdiags, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(error, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(rxdesc, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(txdesc, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(macrxptr, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(lamacconf, dir_diags, S_IRUSR);
++ DEBUGFS_ADD_FILE(chaninfo, dir_diags, S_IRUSR);
++
++ return 0;
++
++ err:
++ return -1;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_trace.c
+@@ -0,0 +1,47 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_fw_trace.c
++ *
++ * Copyright (C) RivieraWaves 2017-2019
++ *
++ ******************************************************************************
++ */
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/delay.h>
++#include "rwnx_fw_trace.h"
++
++int rwnx_fw_log_init(struct rwnx_fw_log *fw_log)
++{
++ u8 *buf = kmalloc(FW_LOG_SIZE, GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
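++ /*
++ * Initialise the log buffer: data/dataend delimit the allocation,
++ * while start/end/size track the currently buffered log data.
++ */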
++ fw_log->buf.data = buf;
++ fw_log->buf.start = fw_log->buf.data;
++ fw_log->buf.size = 0;
++ fw_log->buf.end = fw_log->buf.data;
++ fw_log->buf.dataend = fw_log->buf.data + FW_LOG_SIZE;
++ spin_lock_init(&fw_log->lock);
++
++ printk("fw_log_init: %lx, %lx\n", (unsigned long)fw_log->buf.start, (unsigned long)(fw_log->buf.dataend));
++ return 0;
++}
++
++void rwnx_fw_log_deinit(struct rwnx_fw_log *fw_log)
++{
++ if (!fw_log)
++ return;
++
++ kfree(fw_log->buf.data);
++ fw_log->buf.data = NULL;
++ fw_log->buf.start = NULL;
++ fw_log->buf.end = NULL;
++ fw_log->buf.size = 0;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_fw_trace.h
+@@ -0,0 +1,35 @@
++/**
++ ******************************************************************************
++ *
++ * rwnx_fw_trace.h
++ *
++ * Copyright (C) RivieraWaves 2017-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_FW_TRACE_H_
++#define _RWNX_FW_TRACE_H_
++
++#include <linux/mutex.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
++#define FW_LOG_SIZE (10240)
++
++struct rwnx_fw_log_buf {
++ uint8_t *data;
++ uint8_t *start;
++ uint8_t *end;
++ uint8_t *dataend;
++ uint32_t size;
++};
++
++struct rwnx_fw_log {
++ struct rwnx_fw_log_buf buf;
++ spinlock_t lock;
++};
++
++int rwnx_fw_log_init(struct rwnx_fw_log *fw_log);
++void rwnx_fw_log_deinit(struct rwnx_fw_log *fw_log);
++#endif /* _RWNX_FW_TRACE_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_gki.c
+@@ -0,0 +1,18 @@
++#include <linux/version.h>
++#include <linux/skbuff.h>
++
++
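++/*
++ * Open-coded equivalent of skb_append(): insert newsk after old in the list
++ * under the list lock. Used in place of skb_append() when CONFIG_GKI is set
++ * (see rwnx_gki.h), where that symbol may not be available to this module.
++ */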
++void rwnx_gki_skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
++{
++ unsigned long flags;
++ struct sk_buff *prev = old;
++ struct sk_buff *next = prev->next;
++ spin_lock_irqsave(&list->lock, flags);
++ WRITE_ONCE(newsk->next, next);
++ WRITE_ONCE(newsk->prev, prev);
++ WRITE_ONCE(next->prev, newsk);
++ WRITE_ONCE(prev->next, newsk);
++ list->qlen++;
++ spin_unlock_irqrestore(&list->lock, flags);
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_gki.h
+@@ -0,0 +1,11 @@
++#ifndef __RWNX_GKI_H
++#define __RWNX_GKI_H
++
++#ifdef CONFIG_GKI
++void rwnx_gki_skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
++#define rwnx_skb_append rwnx_gki_skb_append
++#else
++#define rwnx_skb_append skb_append
++#endif /* CONFIG_GKI */
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_irqs.c
+@@ -0,0 +1,67 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_irqs.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#include <linux/interrupt.h>
++
++#include "rwnx_defs.h"
++#include "ipc_host.h"
++#include "rwnx_prof.h"
++
++/**
++ * rwnx_irq_hdlr - IRQ handler
++ *
++ * Handler registered by the platform driver
++ */
++irqreturn_t rwnx_irq_hdlr(int irq, void *dev_id)
++{
++ struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)dev_id;
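++
++ /* Mask the interrupt line and defer all processing to the rwnx_task() tasklet. */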
++ disable_irq_nosync(irq);
++ tasklet_schedule(&rwnx_hw->task);
++ return IRQ_HANDLED;
++}
++
++/**
++ * rwnx_task - Bottom half for IRQ handler
++ *
++ * Read irq status and process accordingly
++ */
++void rwnx_task(unsigned long data)
++{
++ struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)data;
++ REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_RWNX_IPC_IRQ_HDLR);
++
++#if 0
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ u32 status, statuses = 0;
++
++ /* Ack unconditionally in case ipc_host_get_status does not see the irq */
++ rwnx_plat->ack_irq(rwnx_plat);
++
++ while ((status = ipc_host_get_status(rwnx_hw->ipc_env))) {
++ statuses |= status;
++ /* All kinds of IRQs will be handled in one shot (RX, MSG, DBG, ...)
++ * this will ack IPC irqs not the cfpga irqs */
++ ipc_host_irq(rwnx_hw->ipc_env, status);
++
++ rwnx_plat->ack_irq(rwnx_plat);
++ }
++#endif
++ //if (statuses & IPC_IRQ_E2A_RXDESC)
++ // rwnx_hw->stats.last_rx = now;
++ //if (statuses & IPC_IRQ_E2A_TXCFM)
++ // rwnx_hw->stats.last_tx = now;
++ AICWFDBG(LOGTRACE, "rwnx_task\n");
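++ /* With the IPC interrupt path compiled out above, the bottom half only
++ * needs to flush the TX queues. */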
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ rwnx_hwq_process_all(rwnx_hw);
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++#if 0
++ enable_irq(rwnx_platform_get_irq(rwnx_plat));
++#endif
++ REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_RWNX_IPC_IRQ_HDLR);
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_irqs.h
+@@ -0,0 +1,20 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_irqs.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#ifndef _RWNX_IRQS_H_
++#define _RWNX_IRQS_H_
++
++#include <linux/interrupt.h>
++
++/* IRQ handler to be registered by platform driver */
++irqreturn_t rwnx_irq_hdlr(int irq, void *dev_id);
++
++void rwnx_task(unsigned long data);
++
++#endif /* _RWNX_IRQS_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_main.c
+@@ -0,0 +1,9564 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_main.c
++ *
++ * @brief Entry point of the RWNX driver
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/inetdevice.h>
++#include <net/cfg80211.h>
++#include <net/ip.h>
++#include <linux/etherdevice.h>
++#include <linux/netdevice.h>
++#include <net/netlink.h>
++#include <linux/wireless.h>
++#include <linux/if_arp.h>
++#include <linux/ctype.h>
++#include <linux/random.h>
++#include "rwnx_defs.h"
++#include "rwnx_dini.h"
++#include "rwnx_msg_tx.h"
++#include "reg_access.h"
++#include "hal_desc.h"
++#include "rwnx_debugfs.h"
++#include "rwnx_cfgfile.h"
++#include "rwnx_irqs.h"
++#include "rwnx_radar.h"
++#include "rwnx_version.h"
++#ifdef CONFIG_RWNX_BFMER
++#include "rwnx_bfmer.h"
++#endif //(CONFIG_RWNX_BFMER)
++#include "rwnx_tdls.h"
++#include "rwnx_events.h"
++#include "rwnx_compat.h"
++#include "rwnx_version.h"
++#include "rwnx_main.h"
++#include "aicwf_txrxif.h"
++#include "aicwf_compat_8800dc.h"
++#include "aicwf_compat_8800d80.h"
++
++
++#ifdef CONFIG_USE_WIRELESS_EXT
++#include "aicwf_wext_linux.h"
++#endif
++
++#ifdef AICWF_SDIO_SUPPORT
++#include "aicwf_sdio.h"
++#endif
++#ifdef AICWF_USB_SUPPORT
++#include "aicwf_usb.h"
++#endif
++#include <linux/semaphore.h>
++
++#define RW_DRV_DESCRIPTION "RivieraWaves 11nac driver for Linux cfg80211"
++#define RW_DRV_COPYRIGHT "Copyright(c) 2015-2017 RivieraWaves"
++#define RW_DRV_AUTHOR "RivieraWaves S.A.S"
++
++#define RWNX_PRINT_CFM_ERR(req) \
++ printk(KERN_CRIT "%s: Status Error(%d)\n", #req, (&req##_cfm)->status)
++
++#define RWNX_HT_CAPABILITIES \
++{ \
++ .ht_supported = true, \
++ .cap = 0, \
++ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
++ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, \
++ .mcs = { \
++ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
++ .rx_highest = cpu_to_le16(65), \
++ .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
++ }, \
++}
++
++#define RWNX_VHT_CAPABILITIES \
++{ \
++ .vht_supported = false, \
++ .cap = \
++ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT),\
++ .vht_mcs = { \
++ .rx_mcs_map = cpu_to_le16( \
++ IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14), \
++ .tx_mcs_map = cpu_to_le16( \
++ IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | \
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14), \
++ } \
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) || defined(CONFIG_HE_FOR_OLD_KERNEL)
++#define RWNX_HE_CAPABILITIES \
++{ \
++ .has_he = false, \
++ .he_cap_elem = { \
++ .mac_cap_info[0] = 0, \
++ .mac_cap_info[1] = 0, \
++ .mac_cap_info[2] = 0, \
++ .mac_cap_info[3] = 0, \
++ .mac_cap_info[4] = 0, \
++ .mac_cap_info[5] = 0, \
++ .phy_cap_info[0] = 0, \
++ .phy_cap_info[1] = 0, \
++ .phy_cap_info[2] = 0, \
++ .phy_cap_info[3] = 0, \
++ .phy_cap_info[4] = 0, \
++ .phy_cap_info[5] = 0, \
++ .phy_cap_info[6] = 0, \
++ .phy_cap_info[7] = 0, \
++ .phy_cap_info[8] = 0, \
++ .phy_cap_info[9] = 0, \
++ .phy_cap_info[10] = 0, \
++ }, \
++ .he_mcs_nss_supp = { \
++ .rx_mcs_80 = cpu_to_le16(0xfffa), \
++ .tx_mcs_80 = cpu_to_le16(0xfffa), \
++ .rx_mcs_160 = cpu_to_le16(0xffff), \
++ .tx_mcs_160 = cpu_to_le16(0xffff), \
++ .rx_mcs_80p80 = cpu_to_le16(0xffff), \
++ .tx_mcs_80p80 = cpu_to_le16(0xffff), \
++ }, \
++ .ppe_thres = {0x08, 0x1c, 0x07}, \
++}
++#else
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++#define RWNX_HE_CAPABILITIES \
++{ \
++ .has_he = false, \
++ .he_cap_elem = { \
++ .mac_cap_info[0] = 0, \
++ .mac_cap_info[1] = 0, \
++ .mac_cap_info[2] = 0, \
++ .mac_cap_info[3] = 0, \
++ .mac_cap_info[4] = 0, \
++ .phy_cap_info[0] = 0, \
++ .phy_cap_info[1] = 0, \
++ .phy_cap_info[2] = 0, \
++ .phy_cap_info[3] = 0, \
++ .phy_cap_info[4] = 0, \
++ .phy_cap_info[5] = 0, \
++ .phy_cap_info[6] = 0, \
++ .phy_cap_info[7] = 0, \
++ .phy_cap_info[8] = 0, \
++ }, \
++ .he_mcs_nss_supp = { \
++ .rx_mcs_80 = cpu_to_le16(0xfffa), \
++ .tx_mcs_80 = cpu_to_le16(0xfffa), \
++ .rx_mcs_160 = cpu_to_le16(0xffff), \
++ .tx_mcs_160 = cpu_to_le16(0xffff), \
++ .rx_mcs_80p80 = cpu_to_le16(0xffff), \
++ .tx_mcs_80p80 = cpu_to_le16(0xffff), \
++ }, \
++ .ppe_thres = {0x08, 0x1c, 0x07}, \
++}
++#endif
++#endif
++
++#define RATE(_bitrate, _hw_rate, _flags) { \
++ .bitrate = (_bitrate), \
++ .flags = (_flags), \
++ .hw_value = (_hw_rate), \
++}
++
++#define CHAN(_freq) { \
++ .center_freq = (_freq), \
++ .max_power = 30, /* FIXME */ \
++}
++
++#ifdef ANDROID_PLATFORM
++#define HIGH_KERNEL_VERSION KERNEL_VERSION(5, 15, 41)
++#else
++#define HIGH_KERNEL_VERSION KERNEL_VERSION(5, 0, 0)
++#endif
++
++
++static struct ieee80211_rate rwnx_ratetable[] = {
++ RATE(10, 0x00, 0),
++ RATE(20, 0x01, IEEE80211_RATE_SHORT_PREAMBLE),
++ RATE(55, 0x02, IEEE80211_RATE_SHORT_PREAMBLE),
++ RATE(110, 0x03, IEEE80211_RATE_SHORT_PREAMBLE),
++ RATE(60, 0x04, 0),
++ RATE(90, 0x05, 0),
++ RATE(120, 0x06, 0),
++ RATE(180, 0x07, 0),
++ RATE(240, 0x08, 0),
++ RATE(360, 0x09, 0),
++ RATE(480, 0x0A, 0),
++ RATE(540, 0x0B, 0),
++};
++
++/* The channels indexes here are not used anymore */
++static struct ieee80211_channel rwnx_2ghz_channels[] = {
++ CHAN(2412),
++ CHAN(2417),
++ CHAN(2422),
++ CHAN(2427),
++ CHAN(2432),
++ CHAN(2437),
++ CHAN(2442),
++ CHAN(2447),
++ CHAN(2452),
++ CHAN(2457),
++ CHAN(2462),
++ CHAN(2467),
++ CHAN(2472),
++ CHAN(2484),
++ // Extra channels defined only to be used for PHY measures.
++ // Enabled only if custregd and custchan parameters are set
++ CHAN(2390),
++ CHAN(2400),
++ CHAN(2410),
++ CHAN(2420),
++ CHAN(2430),
++ CHAN(2440),
++ CHAN(2450),
++ CHAN(2460),
++ CHAN(2470),
++ CHAN(2480),
++ CHAN(2490),
++ CHAN(2500),
++ CHAN(2510),
++};
++
++//#ifdef USE_5G
++static struct ieee80211_channel rwnx_5ghz_channels[] = {
++ CHAN(5180), // 36 - 20MHz
++ CHAN(5200), // 40 - 20MHz
++ CHAN(5220), // 44 - 20MHz
++ CHAN(5240), // 48 - 20MHz
++ CHAN(5260), // 52 - 20MHz
++ CHAN(5280), // 56 - 20MHz
++ CHAN(5300), // 60 - 20MHz
++ CHAN(5320), // 64 - 20MHz
++ CHAN(5500), // 100 - 20MHz
++ CHAN(5520), // 104 - 20MHz
++ CHAN(5540), // 108 - 20MHz
++ CHAN(5560), // 112 - 20MHz
++ CHAN(5580), // 116 - 20MHz
++ CHAN(5600), // 120 - 20MHz
++ CHAN(5620), // 124 - 20MHz
++ CHAN(5640), // 128 - 20MHz
++ CHAN(5660), // 132 - 20MHz
++ CHAN(5680), // 136 - 20MHz
++ CHAN(5700), // 140 - 20MHz
++ CHAN(5720), // 144 - 20MHz
++ CHAN(5745), // 149 - 20MHz
++ CHAN(5765), // 153 - 20MHz
++ CHAN(5785), // 157 - 20MHz
++ CHAN(5805), // 161 - 20MHz
++ CHAN(5825), // 165 - 20MHz
++ // Extra channels defined only to be used for PHY measures.
++ // Enabled only if custregd and custchan parameters are set
++ CHAN(5190),
++ CHAN(5210),
++ CHAN(5230),
++ CHAN(5250),
++ CHAN(5270),
++ CHAN(5290),
++ CHAN(5310),
++ CHAN(5330),
++ CHAN(5340),
++ CHAN(5350),
++ CHAN(5360),
++ CHAN(5370),
++ CHAN(5380),
++ CHAN(5390),
++ CHAN(5400),
++ CHAN(5410),
++ CHAN(5420),
++ CHAN(5430),
++ CHAN(5440),
++ CHAN(5450),
++ CHAN(5460),
++ CHAN(5470),
++ CHAN(5480),
++ CHAN(5490),
++ CHAN(5510),
++ CHAN(5530),
++ CHAN(5550),
++ CHAN(5570),
++ CHAN(5590),
++ CHAN(5610),
++ CHAN(5630),
++ CHAN(5650),
++ CHAN(5670),
++ CHAN(5690),
++ CHAN(5710),
++ CHAN(5730),
++ CHAN(5750),
++ CHAN(5760),
++ CHAN(5770),
++ CHAN(5780),
++ CHAN(5790),
++ CHAN(5800),
++ CHAN(5810),
++ CHAN(5820),
++ CHAN(5830),
++ CHAN(5840),
++ CHAN(5850),
++ CHAN(5860),
++ CHAN(5870),
++ CHAN(5880),
++ CHAN(5890),
++ CHAN(5900),
++ CHAN(5910),
++ CHAN(5920),
++ CHAN(5930),
++ CHAN(5940),
++ CHAN(5950),
++ CHAN(5960),
++ CHAN(5970),
++};
++//#endif
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) || defined(CONFIG_HE_FOR_OLD_KERNEL)
++struct ieee80211_sband_iftype_data rwnx_he_capa = {
++ .types_mask = BIT(NL80211_IFTYPE_STATION)|BIT(NL80211_IFTYPE_AP),
++ .he_cap = RWNX_HE_CAPABILITIES,
++};
++#endif
++
++static struct ieee80211_supported_band rwnx_band_2GHz = {
++ .channels = rwnx_2ghz_channels,
++ .n_channels = ARRAY_SIZE(rwnx_2ghz_channels) - 13, // -13 to exclude extra channels
++ .bitrates = rwnx_ratetable,
++ .n_bitrates = ARRAY_SIZE(rwnx_ratetable),
++ .ht_cap = RWNX_HT_CAPABILITIES,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ .vht_cap = RWNX_VHT_CAPABILITIES,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ .iftype_data = &rwnx_he_capa,
++ .n_iftype_data = 1,
++#endif
++};
++
++//#ifdef USE_5G
++static struct ieee80211_supported_band rwnx_band_5GHz = {
++ .channels = rwnx_5ghz_channels,
++ .n_channels = ARRAY_SIZE(rwnx_5ghz_channels) - 59, // -59 to exclude extra channels
++ .bitrates = &rwnx_ratetable[4],
++ .n_bitrates = ARRAY_SIZE(rwnx_ratetable) - 4,
++ .ht_cap = RWNX_HT_CAPABILITIES,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ .vht_cap = RWNX_VHT_CAPABILITIES,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ .iftype_data = &rwnx_he_capa,
++ .n_iftype_data = 1,
++#endif
++};
++//#endif
++
++static struct ieee80211_iface_limit rwnx_limits[] = {
++ { .max = 1,
++ .types = BIT(NL80211_IFTYPE_STATION)},
++ { .max = 1,
++ .types = BIT(NL80211_IFTYPE_AP)},
++#ifdef CONFIG_USE_P2P0
++ { .max = 2,
++#else
++ { .max = 1,
++#endif
++ .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)},
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
++#ifndef CONFIG_USE_P2P0
++ { .max = 1,
++ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
++ }
++#endif
++#endif
++};
++
++static struct ieee80211_iface_limit rwnx_limits_dfs[] = {
++ { .max = NX_VIRT_DEV_MAX, .types = BIT(NL80211_IFTYPE_AP)}
++};
++
++static const struct ieee80211_iface_combination rwnx_combinations[] = {
++ {
++ .limits = rwnx_limits,
++ .n_limits = ARRAY_SIZE(rwnx_limits),
++ .num_different_channels = NX_CHAN_CTXT_CNT,
++ .max_interfaces = NX_VIRT_DEV_MAX,
++ },
++ /* Keep this combination as the last one */
++ {
++ .limits = rwnx_limits_dfs,
++ .n_limits = ARRAY_SIZE(rwnx_limits_dfs),
++ .num_different_channels = 1,
++ .max_interfaces = NX_VIRT_DEV_MAX,
++ .radar_detect_widths = (BIT(NL80211_CHAN_WIDTH_20_NOHT) |
++ BIT(NL80211_CHAN_WIDTH_20) |
++ BIT(NL80211_CHAN_WIDTH_40) |
++ BIT(NL80211_CHAN_WIDTH_80)),
++ }
++};
++
++/* There isn't a lot of sense in it, but you can transmit anything you like */
++static struct ieee80211_txrx_stypes
++rwnx_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
++ [NL80211_IFTYPE_STATION] = {
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) |
++ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
++ BIT(IEEE80211_STYPE_AUTH >> 4)),
++ },
++ [NL80211_IFTYPE_AP] = {
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
++ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
++ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
++ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
++ BIT(IEEE80211_STYPE_AUTH >> 4) |
++ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
++ BIT(IEEE80211_STYPE_ACTION >> 4)),
++ },
++ [NL80211_IFTYPE_AP_VLAN] = {
++ /* copy AP */
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
++ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
++ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
++ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
++ BIT(IEEE80211_STYPE_AUTH >> 4) |
++ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
++ BIT(IEEE80211_STYPE_ACTION >> 4)),
++ },
++ [NL80211_IFTYPE_P2P_CLIENT] = {
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) |
++ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)),
++ },
++ [NL80211_IFTYPE_P2P_GO] = {
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
++ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
++ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
++ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
++ BIT(IEEE80211_STYPE_AUTH >> 4) |
++ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
++ BIT(IEEE80211_STYPE_ACTION >> 4)),
++ },
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
++ [NL80211_IFTYPE_P2P_DEVICE] = {
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) |
++ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)),
++ },
++#endif
++ [NL80211_IFTYPE_MESH_POINT] = {
++ .tx = 0xffff,
++ .rx = (BIT(IEEE80211_STYPE_ACTION >> 4) |
++ BIT(IEEE80211_STYPE_AUTH >> 4) |
++ BIT(IEEE80211_STYPE_DEAUTH >> 4)),
++ },
++};
++
++
++static u32 cipher_suites[] = {
++ WLAN_CIPHER_SUITE_WEP40,
++ WLAN_CIPHER_SUITE_WEP104,
++ WLAN_CIPHER_SUITE_TKIP,
++ WLAN_CIPHER_SUITE_CCMP,
++ WLAN_CIPHER_SUITE_AES_CMAC, // reserved entries to enable AES-CMAC and/or SMS4
++ WLAN_CIPHER_SUITE_SMS4,
++ 0,
++};
++#define NB_RESERVED_CIPHER 1
++
++static const int rwnx_ac2hwq[1][NL80211_NUM_ACS] = {
++ {
++ [NL80211_TXQ_Q_VO] = RWNX_HWQ_VO,
++ [NL80211_TXQ_Q_VI] = RWNX_HWQ_VI,
++ [NL80211_TXQ_Q_BE] = RWNX_HWQ_BE,
++ [NL80211_TXQ_Q_BK] = RWNX_HWQ_BK
++ }
++};
++
++const int rwnx_tid2hwq[IEEE80211_NUM_TIDS] = {
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BK,
++ RWNX_HWQ_BK,
++ RWNX_HWQ_BE,
++ RWNX_HWQ_VI,
++ RWNX_HWQ_VI,
++ RWNX_HWQ_VO,
++ RWNX_HWQ_VO,
++ /* TID_8 is used for management frames */
++ RWNX_HWQ_VO,
++ /* At the moment, all other TIDs are mapped to BE */
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BE,
++ RWNX_HWQ_BE,
++};
++
++static const int rwnx_hwq2uapsd[NL80211_NUM_ACS] = {
++ [RWNX_HWQ_VO] = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO,
++ [RWNX_HWQ_VI] = IEEE80211_WMM_IE_STA_QOSINFO_AC_VI,
++ [RWNX_HWQ_BE] = IEEE80211_WMM_IE_STA_QOSINFO_AC_BE,
++ [RWNX_HWQ_BK] = IEEE80211_WMM_IE_STA_QOSINFO_AC_BK,
++};
++
++#define P2P_ALIVE_TIME_MS (1*1000)
++#define P2P_ALIVE_TIME_COUNT 200
++
++extern uint8_t scanning;
++extern uint8_t p2p_working;
++struct semaphore aicwf_deinit_sem;
++atomic_t aicwf_deinit_atomic;
++
++int aicwf_dbg_level = LOGERROR|LOGINFO;
++module_param(aicwf_dbg_level, int, 0660);
++
++int testmode = 0;
++char aic_fw_path[200];
++
++extern void set_testmode(int);
++
++
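++/*
++ * Align the IP header (skb->data + 14) on an 8-byte boundary by shifting the
++ * payload towards the headroom (only when CONFIG_ALIGN_8BYTES is set). If the
++ * headroom is too small the skb is freed, so it must not be used afterwards.
++ */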
++void rwnx_skb_align_8bytes(struct sk_buff *skb){
++#ifdef CONFIG_ALIGN_8BYTES
++ int align __maybe_unused;
++ u8 *data;
++ size_t len = 0;
++
++ align = ((unsigned long)(skb->data + 14)) & 7;
++ if (align) {
++ if (WARN_ON(skb_headroom(skb) < 7)) {
++ dev_kfree_skb(skb);
++ skb = NULL;
++ } else {
++ //printk("AIDEN align1:%d rx_skb->data + 14:%p\r\n", align, skb->data + 14);
++ data = skb->data;
++ len = skb_headlen(skb);
++ skb->data -= align;
++ memmove(skb->data, data, len);
++ skb_set_tail_pointer(skb, len);
++ //printk("AIDEN align2:%d rx_skb->data + 14:%p\r\n", align, skb->data + 14);
++ }
++ }
++#endif
++}
++
++int rwnx_init_cmd_array(void);
++void rwnx_free_cmd_array(void);
++
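++/* Hex-dump "len" bytes of "data" at LOGDATA level, 16 bytes per line. The
++ * last line always prints a full 16 bytes, so "len" is expected to be a
++ * multiple of 16. */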
++void rwnx_data_dump(char* tag, void* data, unsigned long len){
++ unsigned long i = 0;
++ char* data_ = (char* )data;
++
++ AICWFDBG(LOGDATA, "%s %s len:(%lu)\r\n", __func__, tag, len);
++
++ for (i = 0; i < len; i += 16){
++ AICWFDBG(LOGDATA, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\r\n",
++ data_[0 + i],
++ data_[1 + i],
++ data_[2 + i],
++ data_[3 + i],
++ data_[4 + i],
++ data_[5 + i],
++ data_[6 + i],
++ data_[7 + i],
++ data_[8 + i],
++ data_[9 + i],
++ data_[10 + i],
++ data_[11 + i],
++ data_[12 + i],
++ data_[13 + i],
++ data_[14 + i],
++ data_[15 + i]);
++ }
++
++}
++
++#define ASSOC_REQ 0x00
++#define ASSOC_RSP 0x10
++#define PROBE_REQ 0x40
++#define PROBE_RSP 0x50
++#define ACTION 0xD0
++#define AUTH 0xB0
++#define DEAUTH 0xC0
++#define QOS 0x88
++
++#define ACTION_MAC_HDR_LEN 24
++#define QOS_MAC_HDR_LEN 26
++
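++/* Log a short classification of a frame for debugging: the management frame
++ * subtype, the P2P public action subtype (GO negotiation, invitation, device
++ * discoverability, provision discovery) or, for QoS data frames carrying
++ * 802.1X (ethertype 0x888E), the EAPOL/EAP packet type. */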
++void rwnx_frame_parser(char* tag, char* data, unsigned long len){
++ char print_data[100];
++ int print_index = 0;
++
++ memset(print_data, 0, 100);
++
++ if(data[0] == ASSOC_REQ){
++ sprintf(&print_data[print_index], "%s", "ASSOC_REQ \r\n");
++ print_index = strlen(print_data);
++ }else if(data[0] == ASSOC_RSP){
++ sprintf(&print_data[print_index], "%s", "ASSOC_RSP \r\n");
++ print_index = strlen(print_data);
++ }else if(data[0] == PROBE_REQ){
++ sprintf(&print_data[print_index], "%s", "PROBE_REQ \r\n");
++ print_index = strlen(print_data);
++ }else if(data[0] == PROBE_RSP){
++ sprintf(&print_data[print_index], "%s", "PROBE_RSP \r\n");
++ print_index = strlen(print_data);
++ }else if(data[0] == ACTION){
++ sprintf(&print_data[print_index], "%s", "ACTION ");
++ print_index = strlen(print_data);
++ if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x00){
++ sprintf(&print_data[print_index], "%s", "GO_NEG_REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x01){
++ sprintf(&print_data[print_index], "%s", "GO_NEG_RSP \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x02){
++ sprintf(&print_data[print_index], "%s", "GO_NEG_CFM \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x03){
++ sprintf(&print_data[print_index], "%s", "P2P_INV_REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x04){
++ sprintf(&print_data[print_index], "%s", "P2P_INV_RSP \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x05){
++ sprintf(&print_data[print_index], "%s", "DD_REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x06){
++ sprintf(&print_data[print_index], "%s", "DD_RSP \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x07){
++ sprintf(&print_data[print_index], "%s", "PD_REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x08){
++ sprintf(&print_data[print_index], "%s", "PD_RSP \r\n");
++ }else{
++ sprintf(&print_data[print_index], "%s(0x%x 0x%x) \r\n", "UNKNOW",
++ data[ACTION_MAC_HDR_LEN], data[ACTION_MAC_HDR_LEN + 6]);
++ }
++ print_index = strlen(print_data);
++ }else if(data[0] == AUTH){
++ sprintf(&print_data[print_index], "%s", "AUTH \r\n");
++ print_index = strlen(print_data);
++ }else if(data[0] == DEAUTH){
++ sprintf(&print_data[print_index], "%s", "DEAUTH \r\n");
++ print_index = strlen(print_data);
++ }else if(data[0] == QOS){
++ if(data[QOS_MAC_HDR_LEN + 6] == 0x88 && data[QOS_MAC_HDR_LEN + 7] == 0x8E){
++ sprintf(&print_data[print_index], "%s", "QOS_802.1X ");
++ if(data[QOS_MAC_HDR_LEN + 9] == 0x03){
++ sprintf(&print_data[print_index], "%s", "EAPOL \r\n");
++ }else if(data[QOS_MAC_HDR_LEN + 9] == 0x00){
++ sprintf(&print_data[print_index], "%s", "EAP_PACKAGE ");
++ print_index = strlen(print_data);
++ if(data[QOS_MAC_HDR_LEN + 12] == 0x01){
++ sprintf(&print_data[print_index], "%s", "EAP_REQ \r\n");
++ }else if(data[QOS_MAC_HDR_LEN + 12] == 0x02){
++ sprintf(&print_data[print_index], "%s", "EAP_RSP \r\n");
++ }else if(data[QOS_MAC_HDR_LEN + 12] == 0x04){
++ sprintf(&print_data[print_index], "%s", "EAP_FAIL \r\n");
++ }else{
++ sprintf(&print_data[print_index], "%s", "UNKNOW \r\n");
++ }
++ }else if(data[QOS_MAC_HDR_LEN + 9] == 0x01){
++ sprintf(&print_data[print_index], "%s","EAP_START \r\n");
++ }
++ print_index = strlen(print_data);
++ }
++ }
++
++ if(print_index > 0){
++ AICWFDBG(LOGDATA, "%s %s", tag, print_data);
++ }
++
++#if 0
++ if(data[0] == ASSOC_REQ){
++ printk("%s %s ASSOC_REQ \r\n", __func__, tag);
++ }else if(data[0] == ASSOC_RSP){
++ printk("%s %s ASSOC_RSP \r\n", __func__, tag);
++ }else if(data[0] == PROBE_REQ){
++ printk("%s %s PROBE_REQ \r\n", __func__, tag);
++ }else if(data[0] == PROBE_RSP){
++ printk("%s %s PROBE_RSP \r\n", __func__, tag);
++ }else if(data[0] == ACTION){
++ printk("%s %s ACTION ", __func__, tag);
++ if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x00){
++ printk("GO NEG REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x01){
++ printk("GO NEG RSP \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x02){
++ printk("GO NEG CFM \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x03){
++ printk("P2P INV REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x04){
++ printk("P2P INV RSP \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x05){
++ printk("DD REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x06){
++ printk("DD RSP \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x07){
++ printk("PD REQ \r\n");
++ }else if(data[ACTION_MAC_HDR_LEN] == 0x04 && data[ACTION_MAC_HDR_LEN + 6] == 0x08){
++ printk("PD RSP \r\n");
++ }else{
++ printk("\r\n");
++ }
++
++ }else if(data[0] == AUTH){
++ printk("%s %s AUTH \r\n", __func__, tag);
++ }else if(data[0] == DEAUTH){
++ printk("%s %s DEAUTH \r\n", __func__, tag);
++ }else if(data[0] == QOS){
++ if(data[QOS_MAC_HDR_LEN + 6] == 0x88 && data[QOS_MAC_HDR_LEN + 7] == 0x8E){
++ printk("%s %s QOS 802.1X ", __func__, tag);
++ if(data[QOS_MAC_HDR_LEN + 9] == 0x03){
++ printk("EAPOL");
++ }else if(data[QOS_MAC_HDR_LEN + 9] == 0x00){
++ printk("EAP PACKAGE ");
++ if(data[QOS_MAC_HDR_LEN + 12] == 0x01){
++ printk("EAP REQ\r\n");
++ }else if(data[QOS_MAC_HDR_LEN + 12] == 0x02){
++ printk("EAP RSP\r\n");
++ }else if(data[QOS_MAC_HDR_LEN + 12] == 0x04){
++ printk("EAP FAIL\r\n");
++ }else{
++ printk("\r\n");
++ }
++ }else if(data[QOS_MAC_HDR_LEN + 9] == 0x01){
++ printk("EAP START \r\n");
++
++ }
++ }
++ }
++#endif
++}
++
++/*********************************************************************
++ * helper
++ *********************************************************************/
++struct rwnx_sta *rwnx_get_sta(struct rwnx_hw *rwnx_hw, const u8 *mac_addr)
++{
++ int i;
++
++ for (i = 0; i < NX_REMOTE_STA_MAX; i++) {
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[i];
++ if (sta->valid && (memcmp(mac_addr, &sta->mac_addr, 6) == 0))
++ return sta;
++ }
++
++ return NULL;
++}
++
++void rwnx_enable_wapi(struct rwnx_hw *rwnx_hw)
++{
++ //cipher_suites[rwnx_hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_SMS4;
++ rwnx_hw->wiphy->n_cipher_suites ++;
++ rwnx_hw->wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
++}
++
++void rwnx_enable_mfp(struct rwnx_hw *rwnx_hw)
++{
++ cipher_suites[rwnx_hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC;
++ rwnx_hw->wiphy->n_cipher_suites ++;
++}
++
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++extern u8_l vendor_extension_data[256];
++extern u8_l vendor_extension_len;
++
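++/* Insert the externally provided vendor_extension_data (WPS attribute ID
++ * 0x1049) into the beacon tail, right after the existing Vendor Extension
++ * subelement (OUI 00:37:2A), and grow the enclosing WPS IE length to match. */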
++void rwnx_insert_vendor_extension_in_bcn(struct rwnx_bcn *bcn){
++ u8_l temp_ie[256];
++ u8_l vendor_extension_subelement[3] = {0x00,0x37,0x2A};
++ u8_l vendor_extension_id[2] = {0x10,0x49};
++ u8_l wps_ie[1] = {0xDD};
++ u8_l wps_len_index = 0;
++ int index = 0;
++ int vendor_extension_subelement_len = 0;
++ int find_wps_ie = 0;
++
++ memset(temp_ie, 0, 256);
++
++ //find wps_ie
++ for(index = 0; index < bcn->tail_len; index++){
++ if(bcn->tail[index] == wps_ie[0]){
++ find_wps_ie = 1;
++ wps_len_index = index + 1;
++ }
++
++ if(find_wps_ie && bcn->tail[index] == vendor_extension_id[0]){
++ if(bcn->tail[index + 1] == vendor_extension_id[1]){
++ break;
++ }
++ }
++ }
++
++
++ //find vendor_extension_subelement
++ for(index = 0; index < bcn->tail_len; index++){
++ if(bcn->tail[index] == vendor_extension_id[0]){
++ index++;
++ if(index == bcn->tail_len){
++ return;
++ }
++ if(bcn->tail[index] == vendor_extension_id[1] &&
++ bcn->tail[index + 3] == vendor_extension_subelement[0]&&
++ bcn->tail[index + 4] == vendor_extension_subelement[1]&&
++ bcn->tail[index + 5] == vendor_extension_subelement[2]){
++ index = index + 2;
++ vendor_extension_subelement_len = bcn->tail[index];
++ printk("%s find vendor_extension_subelement,index:%d len:%d\r\n", __func__, index, bcn->tail[index]);
++ break;
++ }
++ }
++ }
++ index = index + vendor_extension_subelement_len;
++
++ //insert vendor extension
++ memcpy(&temp_ie[0], bcn->tail, index + 1);
++ memcpy(&temp_ie[index + 1], vendor_extension_data, vendor_extension_len/*sizeof(vendor_extension_data)*/);//insert vendor extension data
++ memcpy(&temp_ie[index + 1 + vendor_extension_len/*sizeof(vendor_extension_data)*/], &bcn->tail[index + 1], bcn->tail_len - index);
++
++ memcpy(bcn->tail, temp_ie, bcn->tail_len + vendor_extension_len/*sizeof(vendor_extension_data)*/);
++ bcn->tail_len = bcn->tail_len + vendor_extension_len/*sizeof(vendor_extension_data)*/;
++
++ bcn->tail[wps_len_index] = bcn->tail[wps_len_index] + vendor_extension_len;
++ //rwnx_data_dump((char*)__func__, (void*)ie_req->ie, ie_req->add_ie_len);
++}
++#endif
++
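++/* Rebuild the beacon from the cfg80211 beacon data: head and tail are copied
++ * into the rwnx_bcn descriptor and a freshly allocated buffer laid out as
++ * head | TIM element (6 bytes) | tail | extra IEs is returned to the caller. */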
++u8 *rwnx_build_bcn(struct rwnx_bcn *bcn, struct cfg80211_beacon_data *new)
++{
++ u8 *buf, *pos;
++
++ if (new->head) {
++ u8 *head = kmalloc(new->head_len, GFP_KERNEL);
++
++ if (!head)
++ return NULL;
++
++ if (bcn->head)
++ kfree(bcn->head);
++
++ bcn->head = head;
++ bcn->head_len = new->head_len;
++ memcpy(bcn->head, new->head, new->head_len);
++ }
++ if (new->tail) {
++ u8 *tail = kmalloc(new->tail_len, GFP_KERNEL);
++
++ if (!tail)
++ return NULL;
++
++ if (bcn->tail)
++ kfree(bcn->tail);
++
++ bcn->tail = tail;
++ bcn->tail_len = new->tail_len;
++ memcpy(bcn->tail, new->tail, new->tail_len);
++ }
++
++ if (!bcn->head)
++ return NULL;
++
++ bcn->tim_len = 6;
++ bcn->len = bcn->head_len + bcn->tail_len + bcn->ies_len + bcn->tim_len;
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++ buf = kmalloc(bcn->len + vendor_extension_len, GFP_KERNEL);
++#else
++ buf = kmalloc(bcn->len, GFP_KERNEL);
++#endif
++ if (!buf)
++ return NULL;
++
++ // Build the beacon buffer
++ pos = buf;
++ memcpy(pos, bcn->head, bcn->head_len);
++ pos += bcn->head_len;
++ *pos++ = WLAN_EID_TIM;
++ *pos++ = 4;
++ *pos++ = 0;
++ *pos++ = bcn->dtim;
++ *pos++ = 0;
++ *pos++ = 0;
++ if (bcn->tail) {
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++ rwnx_insert_vendor_extension_in_bcn(bcn);
++#endif
++ memcpy(pos, bcn->tail, bcn->tail_len);
++ pos += bcn->tail_len;
++ }
++ if (bcn->ies) {
++ memcpy(pos, bcn->ies, bcn->ies_len);
++ }
++
++ return buf;
++}
++
++
++static void rwnx_del_bcn(struct rwnx_bcn *bcn)
++{
++ if (bcn->head) {
++ kfree(bcn->head);
++ bcn->head = NULL;
++ }
++ bcn->head_len = 0;
++
++ if (bcn->tail) {
++ kfree(bcn->tail);
++ bcn->tail = NULL;
++ }
++ bcn->tail_len = 0;
++
++ if (bcn->ies) {
++ kfree(bcn->ies);
++ bcn->ies = NULL;
++ }
++ bcn->ies_len = 0;
++ bcn->tim_len = 0;
++ bcn->dtim = 0;
++ bcn->len = 0;
++}
++
++/**
++ * Link a channel ctxt to a vif, incrementing the refcount for this context.
++ */
++void rwnx_chanctx_link(struct rwnx_vif *vif, u8 ch_idx,
++ struct cfg80211_chan_def *chandef)
++{
++ struct rwnx_chanctx *ctxt;
++
++ if (ch_idx >= NX_CHAN_CTXT_CNT) {
++ WARN(1, "Invalid channel ctxt id %d", ch_idx);
++ return;
++ }
++
++ vif->ch_index = ch_idx;
++ ctxt = &vif->rwnx_hw->chanctx_table[ch_idx];
++ ctxt->count++;
++
++ // For now chandef is NULL for STATION interface
++ if (chandef) {
++ if (!ctxt->chan_def.chan)
++ ctxt->chan_def = *chandef;
++ else {
++ // TODO. check that chandef is the same as the one already
++ // set for this ctxt
++ }
++ }
++}
++
++/**
++ * Unlink the channel ctxt from a vif, decrementing the refcount for this context.
++ */
++void rwnx_chanctx_unlink(struct rwnx_vif *vif)
++{
++ struct rwnx_chanctx *ctxt;
++
++ if (vif->ch_index == RWNX_CH_NOT_SET)
++ return;
++
++ ctxt = &vif->rwnx_hw->chanctx_table[vif->ch_index];
++
++ if (ctxt->count == 0) {
++ WARN(1, "Chan ctxt ref count is already 0");
++ } else {
++ ctxt->count--;
++ }
++
++ if (ctxt->count == 0) {
++ if (vif->ch_index == vif->rwnx_hw->cur_chanctx) {
++ /* If the current chan ctxt is no longer linked to a vif,
++ disable radar detection (no need to check whether it was activated) */
++ rwnx_radar_detection_enable(&vif->rwnx_hw->radar,
++ RWNX_RADAR_DETECT_DISABLE,
++ RWNX_RADAR_RIU);
++ }
++ /* Set chan to NULL so that, if this ctxt is relinked to a vif that has
++ no channel information, stale information is not reused */
++ ctxt->chan_def.chan = NULL;
++ }
++ vif->ch_index = RWNX_CH_NOT_SET;
++}
++
++int rwnx_chanctx_valid(struct rwnx_hw *rwnx_hw, u8 ch_idx)
++{
++ if (ch_idx >= NX_CHAN_CTXT_CNT ||
++ rwnx_hw->chanctx_table[ch_idx].chan_def.chan == NULL) {
++ return 0;
++ }
++
++ return 1;
++}
++
++static void rwnx_del_csa(struct rwnx_vif *vif)
++{
++ //struct rwnx_hw *rwnx_hw = vif->rwnx_hw;
++ struct rwnx_csa *csa = vif->ap.csa;
++
++ if (!csa)
++ return;
++
++ rwnx_del_bcn(&csa->bcn);
++ kfree(csa);
++ vif->ap.csa = NULL;
++}
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
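++/* Work item run to complete a channel switch: rebuild the beacon, push it to
++ * the firmware, relink the vif to the new channel context and notify
++ * cfg80211, or stop the interface if the switch failed. */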
++static void rwnx_csa_finish(struct work_struct *ws)
++{
++
++ struct rwnx_csa *csa = container_of(ws, struct rwnx_csa, work);
++ struct rwnx_vif *vif = csa->vif;
++ struct rwnx_hw *rwnx_hw = vif->rwnx_hw;
++ int error = csa->status;
++ u8 *buf, *pos;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ buf = kmalloc(csa->bcn.len, GFP_KERNEL);
++ if (!buf) {
++ printk ("%s buf fail\n", __func__);
++ return;
++ }
++ pos = buf;
++
++ memcpy(pos, csa->bcn.head, csa->bcn.head_len);
++ pos += csa->bcn.head_len;
++ *pos++ = WLAN_EID_TIM;
++ *pos++ = 4;
++ *pos++ = 0;
++ *pos++ = csa->bcn.dtim;
++ *pos++ = 0;
++ *pos++ = 0;
++ if (csa->bcn.tail) {
++ memcpy(pos, csa->bcn.tail, csa->bcn.tail_len);
++ pos += csa->bcn.tail_len;
++ }
++ if (csa->bcn.ies) {
++ memcpy(pos, csa->bcn.ies, csa->bcn.ies_len);
++ }
++
++ if (!error) {
++ error = rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, csa->bcn.len);
++ if (error)
++ return;
++ error = rwnx_send_bcn_change(rwnx_hw, vif->vif_index, csa->elem.dma_addr,
++ csa->bcn.len, csa->bcn.head_len,
++ csa->bcn.tim_len, NULL);
++ }
++
++ if (error) {
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
++ cfg80211_stop_iface(rwnx_hw->wiphy, &vif->wdev, GFP_KERNEL);
++ #else
++ cfg80211_disconnected(vif->ndev, 0, NULL, 0, 0, GFP_KERNEL);
++ #endif
++ } else {
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_chanctx_unlink(vif);
++ rwnx_chanctx_link(vif, csa->ch_idx, &csa->chandef);
++ if (rwnx_hw->cur_chanctx == csa->ch_idx) {
++ rwnx_radar_detection_enable_on_cur_channel(rwnx_hw);
++ rwnx_txq_vif_start(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ } else
++ rwnx_txq_vif_stop(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 9, 0))
++ cfg80211_ch_switch_notify(vif->ndev, &csa->chandef, 0);
++#elif (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ cfg80211_ch_switch_notify(vif->ndev, &csa->chandef, 0, 0);
++#else
++ cfg80211_ch_switch_notify(vif->ndev, &csa->chandef);
++#endif
++ }
++ rwnx_del_csa(vif);
++}
++#endif
++
++/**
++ * rwnx_external_auth_enable - Enable external authentication on a vif
++ *
++ * @vif: VIF on which external authentication must be enabled
++ *
++ * External authentication requires starting the TXQ for unknown STAs in
++ * order to send the auth frames pushed by user space.
++ * Note: It is assumed that fw is on the correct channel.
++ */
++void rwnx_external_auth_enable(struct rwnx_vif *vif)
++{
++ vif->sta.external_auth = true;
++ rwnx_txq_unk_vif_init(vif);
++ rwnx_txq_start(rwnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE), 0);
++}
++
++/**
++ * rwnx_external_auth_disable - Disable external authentication on a vif
++ *
++ * @vif: VIF on which external authentication must be disabled
++ */
++void rwnx_external_auth_disable(struct rwnx_vif *vif)
++{
++ if (!vif->sta.external_auth)
++ return;
++
++ vif->sta.external_auth = false;
++ rwnx_txq_unk_vif_deinit(vif);
++}
++
++/**
++ * rwnx_update_mesh_power_mode - Update the power save mode of a mesh interface
++ *
++ * @vif: mesh VIF for which power mode is updated
++ *
++ * Does nothing if vif is not a mesh point interface.
++ * Since the firmware doesn't support one power save mode per link, select the
++ * most "active" power mode among all mesh links.
++ * Indeed as soon as we have to be active on one link we might as well be
++ * active on all links.
++ *
++ * If there is no link then the power mode for next peer is used;
++ */
++void rwnx_update_mesh_power_mode(struct rwnx_vif *vif)
++{
++ enum nl80211_mesh_power_mode mesh_pm;
++ struct rwnx_sta *sta;
++ struct mesh_config mesh_conf;
++ struct mesh_update_cfm cfm;
++ u32 mask;
++
++ if (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_MESH_POINT)
++ return;
++
++ if (list_empty(&vif->ap.sta_list)) {
++ mesh_pm = vif->ap.next_mesh_pm;
++ } else {
++ mesh_pm = NL80211_MESH_POWER_DEEP_SLEEP;
++ list_for_each_entry(sta, &vif->ap.sta_list, list) {
++ if (sta->valid && (sta->mesh_pm < mesh_pm)) {
++ mesh_pm = sta->mesh_pm;
++ }
++ }
++ }
++
++ if (mesh_pm == vif->ap.mesh_pm)
++ return;
++
++ mask = BIT(NL80211_MESHCONF_POWER_MODE - 1);
++ mesh_conf.power_mode = mesh_pm;
++ if (rwnx_send_mesh_update_req(vif->rwnx_hw, vif, mask, &mesh_conf, &cfm) ||
++ cfm.status)
++ return;
++
++ vif->ap.mesh_pm = mesh_pm;
++}
++
++#ifdef CONFIG_BR_SUPPORT
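++/* Record the bridge this interface is enslaved to (if any): look the bridge
++ * device up by CONFIG_BR_SUPPORT_BRNAME, save its MAC address in the vif and
++ * enable PPPoE tag handling for the bridge extension. */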
++void netdev_br_init(struct net_device *netdev)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(netdev);
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++ rcu_read_lock();
++#endif
++
++ /* if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) */
++ {
++ /* struct net_bridge *br = netdev->br_port->br; */ /* ->dev->dev_addr; */
++ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
++ if (netdev->br_port)
++ #else
++ if (rcu_dereference(rwnx_vif->ndev->rx_handler_data))
++ #endif
++ {
++ struct net_device *br_netdev;
++
++ br_netdev = dev_get_by_name(&init_net, CONFIG_BR_SUPPORT_BRNAME);
++ if (br_netdev) {
++ memcpy(rwnx_vif->br_mac, br_netdev->dev_addr, ETH_ALEN);
++ dev_put(br_netdev);
++ AICWFDBG(LOGINFO, FUNC_NDEV_FMT" bind bridge dev "NDEV_FMT"("MAC_FMT")\n"
++ , FUNC_NDEV_ARG(netdev), NDEV_ARG(br_netdev), MAC_ARG(br_netdev->dev_addr));
++ } else {
++ AICWFDBG(LOGINFO, FUNC_NDEV_FMT" can't get bridge dev by name \"%s\"\n"
++ , FUNC_NDEV_ARG(netdev), CONFIG_BR_SUPPORT_BRNAME);
++ }
++ }
++
++ rwnx_vif->ethBrExtInfo.addPPPoETag = 1;
++ }
++
++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
++ rcu_read_unlock();
++#endif
++}
++#endif /* CONFIG_BR_SUPPORT */
++
++
++/*********************************************************************
++ * netdev callbacks
++ ********************************************************************/
++/**
++ * int (*ndo_open)(struct net_device *dev);
++ * This function is called when the network device transitions to the up
++ * state.
++ *
++ * - Start FW if this is the first interface opened
++ * - Add interface at fw level
++ */
++static int rwnx_open(struct net_device *dev)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct mm_add_if_cfm add_if_cfm;
++ int error = 0;
++ u8 rwnx_rx_gain = 0x0E;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ // Check if it is the first opened VIF
++ if (rwnx_hw->vif_started == 0)
++ {
++ // Start the FW
++ if ((error = rwnx_send_start(rwnx_hw)))
++ return error;
++
++ if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC || rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW) {
++ error = rwnx_send_dbg_mem_mask_write_req(rwnx_hw, 0x4033b300, 0xFF, rwnx_rx_gain);
++ if(error){
++ return error;
++ }
++ }
++
++ /* Device is now started */
++ set_bit(RWNX_DEV_STARTED, &rwnx_hw->drv_flags);
++ atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED);
++ }
++
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN) {
++ /* For AP_vlan use same fw and drv indexes. We ensure that this index
++ will not be used by fw for another vif by taking index >= NX_VIRT_DEV_MAX */
++ add_if_cfm.inst_nbr = rwnx_vif->drv_vif_index;
++ netif_tx_stop_all_queues(dev);
++
++ /* Save the index retrieved from LMAC */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_vif->vif_index = add_if_cfm.inst_nbr;
++ rwnx_vif->up = true;
++ rwnx_hw->vif_started++;
++ rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ } else {
++ /* Forward the information to the LMAC,
++ * p2p value not used in FMAC configuration, iftype is sufficient */
++
++ if ((error = rwnx_send_add_if(rwnx_hw, rwnx_vif->wdev.address,
++ RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm))) {
++ AICWFDBG(LOGERROR, "add if fail\n");
++ return error;
++ }
++
++ if (add_if_cfm.status != 0) {
++ RWNX_PRINT_CFM_ERR(add_if);
++ return -EIO;
++ }
++
++ /* Save the index retrieved from LMAC */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_vif->vif_index = add_if_cfm.inst_nbr;
++ rwnx_vif->up = true;
++ rwnx_hw->vif_started++;
++ rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++#ifdef CONFIG_USE_P2P0
++ if(rwnx_vif->is_p2p_vif){
++ rwnx_hw->p2p_dev_vif = rwnx_vif;
++ rwnx_hw->is_p2p_alive = 1;
++ }
++#endif
++
++ }
++
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO)
++ {
++ #ifdef CONFIG_COEX
++ rwnx_send_coex_req(rwnx_hw, 1, 0);
++ #endif
++ }
++
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MONITOR){
++ rwnx_hw->monitor_vif = rwnx_vif->vif_index;
++ if (rwnx_vif->ch_index != RWNX_CH_NOT_SET){
++ //Configure the monitor channel
++ error = rwnx_send_config_monitor_req(rwnx_hw, &rwnx_hw->chanctx_table[rwnx_vif->ch_index].chan_def, NULL);
++ }
++ #if defined(CONFIG_RWNX_MON_XMIT)
++ rwnx_txq_unk_vif_init(rwnx_vif);
++ #endif
++ #if defined(CONFIG_RWNX_MON_RXFILTER)
++ rwnx_send_set_filter(rwnx_hw, (FIF_BCN_PRBRESP_PROMISC | FIF_OTHER_BSS | FIF_PSPOLL | FIF_PROBE_REQ));
++ #endif
++ }
++
++ //netif_carrier_off(dev);
++ #if defined(CONFIG_RWNX_MON_XMIT)
++ netif_carrier_on(dev);
++ AICWFDBG(LOGINFO, "monitor xmit: netif_carrier_on\n");
++ #endif
++
++#ifdef CONFIG_BR_SUPPORT
++ netdev_br_init(dev);
++#endif /* CONFIG_BR_SUPPORT */
++
++ //netif_carrier_off(dev);
++ netif_start_queue(dev);
++
++ return error;
++}
++
++/**
++ * int (*ndo_stop)(struct net_device *dev);
++ * This function is called when the network device transitions to the down
++ * state.
++ *
++ * - Remove interface at fw level
++ * - Reset FW if this is the last interface opened
++ */
++static int rwnx_close(struct net_device *dev)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct aicwf_bus *bus_if = NULL;
++ int ret = 0;
++#if defined(AICWF_USB_SUPPORT)
++ struct aic_usb_dev *usbdev = NULL;
++ bus_if = dev_get_drvdata(rwnx_hw->dev);
++ if (bus_if)
++ usbdev = bus_if->bus_priv.usb;
++#endif
++#if defined(AICWF_SDIO_SUPPORT)
++ struct aic_sdio_dev *sdiodev = NULL;
++ bus_if = dev_get_drvdata(rwnx_hw->dev);
++ if (bus_if)
++ sdiodev = bus_if->bus_priv.sdio;
++#endif
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#if defined(AICWF_USB_SUPPORT) || defined(AICWF_SDIO_SUPPORT)
++ if (scanning){
++ scanning = false;
++ }
++
++ if(p2p_working){
++ p2p_working = false;
++ }
++#endif
++#if 0
++ netdev_info(dev, "CLOSE");
++#endif
++ AICWFDBG(LOGINFO, "%s %s Enter\n", __func__, dev->name);
++
++#ifdef CONFIG_USE_P2P0
++ if(rwnx_hw->p2p_dev_vif){
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, P2P_ALIVE_TIME_MS);
++ rwnx_hw->is_p2p_alive = 0;
++ }
++#endif
++
++ rwnx_radar_cancel_cac(&rwnx_hw->radar);
++
++ /* Abort scan request on the vif */
++ if (rwnx_hw->scan_request &&
++ rwnx_hw->scan_request->wdev == &rwnx_vif->wdev) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ struct cfg80211_scan_info info = {
++ .aborted = true,
++ };
++
++ cfg80211_scan_done(rwnx_hw->scan_request, &info);
++#else
++ cfg80211_scan_done(rwnx_hw->scan_request, true);
++#endif
++ rwnx_hw->scan_request = NULL;
++ ret = rwnx_send_scanu_cancel_req(rwnx_hw, NULL);
++ mdelay(35);//make sure the firmware change takes effect
++ if (ret) {
++ AICWFDBG(LOGERROR, "scanu_cancel fail\n");
++ return ret;
++ }
++ }
++
++#if defined(AICWF_USB_SUPPORT)
++ if (usbdev != NULL) {
++ if (usbdev->bus_if->state != BUS_DOWN_ST && usbdev->state != USB_DOWN_ST)
++ AICWFDBG(LOGINFO, "state: %d %d \r\n", (int)usbdev->bus_if->state, (int)usbdev->state);
++
++ if(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION ||
++ RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT){
++ if(atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTING){
++ rwnx_send_sm_disconnect_req(rwnx_hw, rwnx_vif, 3);
++ atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED);
++ }
++ }
++#ifdef CONFIG_USE_P2P0
++ if(!rwnx_vif->is_p2p_vif || ( rwnx_vif->is_p2p_vif && rwnx_hw->is_p2p_alive)){
++#endif
++ rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, false);
++#ifdef CONFIG_USE_P2P0
++ }
++#endif
++
++ }
++#endif
++#if defined(AICWF_SDIO_SUPPORT)
++ if (sdiodev != NULL ) {
++ if (sdiodev->bus_if->state != BUS_DOWN_ST)
++ rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, false);
++ }
++#endif
++
++ if (rwnx_hw->roc_elem && (rwnx_hw->roc_elem->wdev == &rwnx_vif->wdev)) {
++ AICWFDBG(LOGINFO, "%s clear roc\n", __func__);
++ /* Initialize RoC element pointer to NULL, indicating that RoC can be started */
++ rwnx_hw->roc_elem = NULL;
++ }
++
++ /* Ensure that we won't process disconnect ind */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++
++ rwnx_vif->up = false;
++ if (netif_carrier_ok(dev)) {
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION ||
++ RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) {
++ cfg80211_disconnected(dev, WLAN_REASON_DEAUTH_LEAVING,
++ NULL, 0, true, GFP_ATOMIC);
++ netif_tx_stop_all_queues(dev);
++ netif_carrier_off(dev);
++ } else if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN) {
++ netif_carrier_off(dev);
++ } else {
++ netdev_warn(dev, "AP not stopped when disabling interface");
++ }
++#ifdef CONFIG_BR_SUPPORT
++ /* if (OPMODE & (WIFI_STATION_STATE | WIFI_ADHOC_STATE)) */
++ {
++ /* void nat25_db_cleanup(_adapter *priv); */
++ nat25_db_cleanup(rwnx_vif);
++ }
++#endif /* CONFIG_BR_SUPPORT */
++ }
++
++
++ rwnx_hw->vif_table[rwnx_vif->vif_index] = NULL;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ rwnx_chanctx_unlink(rwnx_vif);
++
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MONITOR)
++ rwnx_hw->monitor_vif = RWNX_INVALID_VIF;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO)
++ {
++ #ifdef CONFIG_COEX
++ rwnx_send_coex_req(rwnx_hw, 0, 1);
++ #endif
++ }
++
++ rwnx_hw->vif_started--;
++ if (rwnx_hw->vif_started == 0) {
++ /* This also lets both ipc sides remain in sync before resetting */
++ #if 0
++ rwnx_ipc_tx_drain(rwnx_hw);
++ #else
++ #if defined(AICWF_USB_SUPPORT)
++ if (usbdev->bus_if->state != BUS_DOWN_ST && usbdev->state != USB_DOWN_ST) {
++ #else
++ if (sdiodev->bus_if->state != BUS_DOWN_ST) {
++ #endif
++ rwnx_send_reset(rwnx_hw);
++ #if defined(AICWF_USB_SUPPORT)
++ if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 ||
++ ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81)&& testmode == 0)) {
++ #endif
++ // Set parameters to firmware
++ rwnx_send_me_config_req(rwnx_hw);
++ // Set channel parameters to firmware
++ rwnx_send_me_chan_config_req(rwnx_hw);
++ #if defined(AICWF_USB_SUPPORT)
++ }
++ #endif
++ }
++ #endif
++ clear_bit(RWNX_DEV_STARTED, &rwnx_hw->drv_flags);
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_RFTEST
++enum {
++ SET_TX,
++ SET_TXSTOP,
++ SET_TXTONE,
++ SET_RX,
++ GET_RX_RESULT,
++ SET_RXSTOP,
++ SET_RX_METER,
++ SET_POWER,
++ SET_XTAL_CAP,
++ SET_XTAL_CAP_FINE,
++ GET_EFUSE_BLOCK,
++ SET_FREQ_CAL,
++ SET_FREQ_CAL_FINE,
++ GET_FREQ_CAL,
++ SET_MAC_ADDR,
++ GET_MAC_ADDR,
++ SET_BT_MAC_ADDR,
++ GET_BT_MAC_ADDR,
++ SET_VENDOR_INFO,
++ GET_VENDOR_INFO,
++ RDWR_PWRMM,
++ RDWR_PWRIDX,
++ RDWR_PWRLVL = RDWR_PWRIDX,
++ RDWR_PWROFST,
++ RDWR_DRVIBIT,
++ RDWR_EFUSE_PWROFST,
++ RDWR_EFUSE_DRVIBIT,
++ SET_PAPR,
++ SET_CAL_XTAL,
++ GET_CAL_XTAL_RES,
++ SET_COB_CAL,
++ GET_COB_CAL_RES,
++ RDWR_EFUSE_USRDATA,
++ SET_NOTCH,
++ RDWR_PWROFSTFINE,
++ RDWR_EFUSE_PWROFSTFINE,
++ RDWR_EFUSE_SDIOCFG,
++
++ #ifdef CONFIG_USB_BT
++ BT_CMD_BASE = 0x100,
++ BT_RESET,
++ BT_TXDH,
++ BT_RXDH,
++ BT_STOP,
++ GET_BT_RX_RESULT,
++ #endif
++};
++
++typedef struct
++{
++ u8_l chan;
++ u8_l bw;
++ u8_l mode;
++ u8_l rate;
++ u16_l length;
++}cmd_rf_settx_t;
++
++typedef struct
++{
++ u8_l val;
++}cmd_rf_setfreq_t;
++
++typedef struct
++{
++ u8_l chan;
++ u8_l bw;
++}cmd_rf_rx_t;
++
++typedef struct
++{
++ u8_l block;
++}cmd_rf_getefuse_t;
++typedef struct
++{
++ u8_l dutid;
++ u8_l chip_num;
++ u8_l dis_xtal;
++}cmd_rf_setcobcal_t;
++typedef struct
++{
++ u16_l dut_rcv_golden_num;
++ u8_l golden_rcv_dut_num;
++ s8_l rssi_static;
++ s8_l snr_static;
++ s8_l dut_rssi_static;
++ u16_l reserved;
++}cob_result_ptr_t;
++
++typedef struct
++{
++ u8_l func;
++ u8_l cnt;
++ u8_l reserved[2];
++ u32_l usrdata[3]; // 3 words in total
++} cmd_ef_usrdata_t;
++
++#endif
++
++#define CMD_MAXARGS 30
++
++#if 0
++#define isblank(c) ((c) == ' ' || (c) == '\t')
++#define isascii(c) (((unsigned char)(c)) <= 0x7F)
++
++static int isdigit(unsigned char c)
++{
++ return ((c >= '0') && (c <='9'));
++}
++
++static int isxdigit(unsigned char c)
++{
++ if ((c >= '0') && (c <='9'))
++ return 1;
++ if ((c >= 'a') && (c <='f'))
++ return 1;
++ if ((c >= 'A') && (c <='F'))
++ return 1;
++ return 0;
++}
++
++static int islower(unsigned char c)
++{
++ return ((c >= 'a') && (c <='z'));
++}
++
++static unsigned char toupper(unsigned char c)
++{
++ if (islower(c))
++ c -= 'a'-'A';
++ return c;
++}
++#endif
++
++
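++/* Split a command line into whitespace separated arguments; double quoted
++ * arguments may contain spaces. The line is modified in place, argv[] is
++ * filled with pointers into it and the number of arguments is returned. */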
++static int parse_line (char *line, char *argv[])
++{
++ int nargs = 0;
++
++ while (nargs < CMD_MAXARGS) {
++ /* skip any white space */
++ while ((*line == ' ') || (*line == '\t')) {
++ ++line;
++ }
++
++ if (*line == '\0') { /* end of line, no more args */
++ argv[nargs] = 0;
++ return (nargs);
++ }
++
++ /* Arguments that include spaces must be enclosed in quotation marks */
++ if (*line == '\"') {
++ /* Skip quotation mark */
++ line++;
++
++ /* Begin of argument string */
++ argv[nargs++] = line;
++
++ /* Until end of argument */
++ while(*line && (*line != '\"')) {
++ ++line;
++ }
++ } else {
++ argv[nargs++] = line; /* begin of argument string */
++
++ /* find end of string */
++ while(*line && (*line != ' ') && (*line != '\t')) {
++ ++line;
++ }
++ }
++
++ if (*line == '\0') { /* end of line, no more args */
++ argv[nargs] = 0;
++ return (nargs);
++ }
++
++ *line++ = '\0'; /* terminate current arg */
++ }
++
++ printk("** Too many args (max. %d) **\n", CMD_MAXARGS);
++
++ return (nargs);
++}
++
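++/* Minimal strtoul for the private commands: honours an optional leading '-',
++ * a "0x" hex prefix and a leading '0' octal prefix, and otherwise falls back
++ * to base 10. If endp is non-NULL it is set to the first unparsed character. */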
++unsigned int command_strtoul(const char *cp, char **endp, unsigned int base)
++{
++ unsigned int result = 0, value, is_neg=0;
++
++ if (*cp == '0') {
++ cp++;
++ if ((*cp == 'x') && isxdigit(cp[1])) {
++ base = 16;
++ cp++;
++ }
++ if (!base) {
++ base = 8;
++ }
++ }
++ if (!base) {
++ base = 10;
++ }
++ if (*cp == '-') {
++ is_neg = 1;
++ cp++;
++ }
++ while (isxdigit(*cp) && (value = isdigit(*cp) ? *cp - '0' : (islower(*cp) ? toupper(*cp) : *cp) - 'A' + 10) < base) {
++ result = result * base + value;
++ cp++;
++ }
++ if (is_neg)
++ result = (unsigned int)((int)result * (-1));
++
++ if (endp)
++ *endp = (char *)cp;
++ return result;
++}
++
++
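++/* Handle a driver private command string: tokenise it with parse_line(), send
++ * the matching RF-test request to the firmware via rwnx_send_rftest_req() and
++ * copy any confirm payload back into "command". Returns the number of bytes
++ * written there, or a negative error code. */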
++int handle_private_cmd(struct net_device *net, char *command, u32 cmd_len)
++{
++ int bytes_written = 0;
++ char* para = NULL;
++ char* cmd = NULL;
++ char *argv[CMD_MAXARGS + 1];
++ int argc;
++ #ifdef CONFIG_RFTEST
++ struct dbg_rftest_cmd_cfm cfm = {{0,}};
++ u8_l mac_addr[6];
++ cmd_rf_settx_t settx_param;
++ cmd_rf_rx_t setrx_param;
++ int freq;
++ cmd_rf_getefuse_t getefuse_param;
++ cmd_rf_setfreq_t cmd_setfreq;
++ cmd_rf_setcobcal_t setcob_cal;
++ cob_result_ptr_t *cob_result_ptr;
++ cmd_ef_usrdata_t cmd_ef_usrdata;
++ u8_l ana_pwr;
++ u8_l dig_pwr;
++ u8_l pwr;
++ u8_l papr;
++ u8_l xtal_cap;
++ u8_l xtal_cap_fine;
++ u8_l vendor_info;
++ #ifdef CONFIG_USB_BT
++ int bt_index;
++ u8_l dh_cmd_reset[4];
++ u8_l dh_cmd_txdh[18];
++ u8_l dh_cmd_rxdh[17];
++ u8_l dh_cmd_stop[5];
++ #endif
++ #endif
++ u8_l buf[2];
++ s8_l freq_ = 0;
++ u8_l func = 0;
++ u8_l state = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if ((argc = parse_line(command, argv)) == 0) {
++ return -1;
++ }
++
++ do {
++ #ifdef AICWF_SDIO_SUPPORT
++ struct rwnx_hw *p_rwnx_hw = g_rwnx_plat->sdiodev->rwnx_hw;
++ #endif
++ #ifdef AICWF_USB_SUPPORT
++ #ifdef CONFIG_RFTEST
++ struct rwnx_hw *p_rwnx_hw = g_rwnx_plat->usbdev->rwnx_hw;
++ #endif
++ #endif
++ #ifdef CONFIG_RFTEST
++ if (strcasecmp(argv[0], "GET_RX_RESULT") ==0) {
++ AICWFDBG(LOGINFO, "get_rx_result\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_RX_RESULT, 0, NULL, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 8);
++ bytes_written = 8;
++ } else if (strcasecmp(argv[0], "SET_TX") == 0) {
++ AICWFDBG(LOGINFO, "set_tx\n");
++ if (argc < 6) {
++ printk("wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ settx_param.chan = command_strtoul(argv[1], NULL, 10);
++ settx_param.bw = command_strtoul(argv[2], NULL, 10);
++ settx_param.mode = command_strtoul(argv[3], NULL, 10);
++ settx_param.rate = command_strtoul(argv[4], NULL, 10);
++ settx_param.length = command_strtoul(argv[5], NULL, 10);
++ AICWFDBG(LOGINFO, "txparam:%d,%d,%d,%d,%d\n", settx_param.chan, settx_param.bw,
++ settx_param.mode, settx_param.rate, settx_param.length);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_TX, sizeof(cmd_rf_settx_t), (u8_l *)&settx_param, NULL);
++ } else if (strcasecmp(argv[0], "SET_TXSTOP") == 0) {
++ AICWFDBG(LOGINFO, "settx_stop\n");
++ rwnx_send_rftest_req(p_rwnx_hw, SET_TXSTOP, 0, NULL, NULL);
++ } else if (strcasecmp(argv[0], "SET_TXTONE") == 0) {
++ AICWFDBG(LOGINFO, "set_tx_tone,argc:%d\n",argc);
++ if ((argc == 2) || (argc == 3)) {
++ AICWFDBG(LOGINFO, "argv 1:%s\n",argv[1]);
++ //u8_l func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ //s8_l freq;
++ if (argc == 3) {
++ AICWFDBG(LOGINFO, "argv 2:%s\n",argv[2]);
++ freq_ = (u8_l)command_strtoul(argv[2], NULL, 10);
++ } else {
++ freq_ = 0;
++ };
++ //u8_l buf[2] = {func, (u8_l)freq};
++ buf[0] = func;
++ buf[1] = (u8_l)freq_;
++ rwnx_send_rftest_req(p_rwnx_hw, SET_TXTONE, argc - 1, buf, NULL);
++ } else {
++ AICWFDBG(LOGINFO, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else if (strcasecmp(argv[0], "SET_RX") == 0) {
++ AICWFDBG(LOGINFO, "set_rx\n");
++ if (argc < 3) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ setrx_param.chan = command_strtoul(argv[1], NULL, 10);
++ setrx_param.bw = command_strtoul(argv[2], NULL, 10);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_RX, sizeof(cmd_rf_rx_t), (u8_l *)&setrx_param, NULL);
++ } else if (strcasecmp(argv[0], "SET_RXSTOP") == 0) {
++ AICWFDBG(LOGINFO, "set_rxstop\n");
++ rwnx_send_rftest_req(p_rwnx_hw, SET_RXSTOP, 0, NULL, NULL);
++ } else if (strcasecmp(argv[0], "SET_RX_METER") == 0) {
++ AICWFDBG(LOGINFO, "set_rx_meter\n");
++ freq = (int)command_strtoul(argv[1], NULL, 10);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_RX_METER, sizeof(freq), (u8_l *)&freq, NULL);
++ } else if (strcasecmp(argv[0], "SET_FREQ_CAL") == 0) {
++ AICWFDBG(LOGINFO, "set_freq_cal\n");
++ if (argc < 2) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ cmd_setfreq.val = command_strtoul(argv[1], NULL, 16);
++ AICWFDBG(LOGINFO, "param:%x\r\n", cmd_setfreq.val);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_FREQ_CAL, sizeof(cmd_rf_setfreq_t), (u8_l *)&cmd_setfreq, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "SET_FREQ_CAL_FINE") == 0) {
++ AICWFDBG(LOGINFO, "set_freq_cal_fine\n");
++ if (argc < 2) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ cmd_setfreq.val = command_strtoul(argv[1], NULL, 16);
++ AICWFDBG(LOGINFO, "param:%x\r\n", cmd_setfreq.val);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_FREQ_CAL_FINE, sizeof(cmd_rf_setfreq_t), (u8_l *)&cmd_setfreq, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "GET_EFUSE_BLOCK") == 0) {
++ AICWFDBG(LOGINFO, "get_efuse_block\n");
++ if (argc < 2) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ getefuse_param.block = command_strtoul(argv[1], NULL, 10);
++ rwnx_send_rftest_req(p_rwnx_hw, GET_EFUSE_BLOCK, sizeof(cmd_rf_getefuse_t), (u8_l *)&getefuse_param, &cfm);
++ AICWFDBG(LOGINFO, "get val=%x\r\n", cfm.rftest_result[0]);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "SET_POWER") == 0) {
++ AICWFDBG(LOGINFO, "set_power\n");
++ if (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) {
++ ana_pwr = command_strtoul(argv[1], NULL, 16);
++ dig_pwr = command_strtoul(argv[2], NULL, 16);
++ pwr = (ana_pwr << 4 | dig_pwr);
++ if (ana_pwr > 0xf || dig_pwr > 0xf) {
++ AICWFDBG(LOGERROR, "invalid param\r\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ ana_pwr = command_strtoul(argv[1], NULL, 10);
++ pwr = ana_pwr;
++ if (ana_pwr > 0x1e) {
++ AICWFDBG(LOGERROR, "invalid param\r\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ }
++ AICWFDBG(LOGINFO, "pwr =%x\r\n", pwr);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_POWER, sizeof(pwr), (u8_l *)&pwr, NULL);
++ } else if (strcasecmp(argv[0], "SET_PAPR") == 0) {
++ printk("set papr\n");
++ if (argc > 1) {
++ papr = command_strtoul(argv[1], NULL, 10);
++ printk("papr %d\r\n", papr);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_PAPR, sizeof(papr), (u8_l *)&papr, NULL);
++ } else {
++ printk("wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else if (strcasecmp(argv[0], "SET_NOTCH") == 0) {
++ if (argc > 1) {
++ func = command_strtoul(argv[1], NULL, 10);
++ printk("set notch: %d\n", func);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_NOTCH, sizeof(func), (u8_l *)&func, NULL);
++ } else {
++ printk("wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else if (strcasecmp(argv[0], "SET_XTAL_CAP")==0) {
++ AICWFDBG(LOGINFO, "set_xtal_cap\n");
++ if (argc < 2) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ xtal_cap = command_strtoul(argv[1], NULL, 10);
++ AICWFDBG(LOGINFO, "xtal_cap =%x\r\n", xtal_cap);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_XTAL_CAP, sizeof(xtal_cap), (u8_l *)&xtal_cap, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "SET_XTAL_CAP_FINE")==0) {
++ AICWFDBG(LOGINFO, "set_xtal_cap_fine\n");
++ if (argc < 2) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ xtal_cap_fine = command_strtoul(argv[1], NULL, 10);
++ AICWFDBG(LOGINFO, "xtal_cap_fine =%x\r\n", xtal_cap_fine);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_XTAL_CAP_FINE, sizeof(xtal_cap_fine), (u8_l *)&xtal_cap_fine, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "SET_MAC_ADDR")==0) {
++ AICWFDBG(LOGINFO, "set_mac_addr\n");
++ if (argc < 7) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ mac_addr[5] = command_strtoul(argv[1], NULL, 16);
++ mac_addr[4] = command_strtoul(argv[2], NULL, 16);
++ mac_addr[3] = command_strtoul(argv[3], NULL, 16);
++ mac_addr[2] = command_strtoul(argv[4], NULL, 16);
++ mac_addr[1] = command_strtoul(argv[5], NULL, 16);
++ mac_addr[0] = command_strtoul(argv[6], NULL, 16);
++ AICWFDBG(LOGINFO, "set macaddr:%x,%x,%x,%x,%x,%x\n", mac_addr[5], mac_addr[4], mac_addr[3], mac_addr[2], mac_addr[1], mac_addr[0]);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_MAC_ADDR, sizeof(mac_addr), (u8_l *)&mac_addr, NULL);
++ } else if (strcasecmp(argv[0], "GET_MAC_ADDR")==0) {
++ u32_l addr0, addr1;
++ AICWFDBG(LOGINFO, "get mac addr\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_MAC_ADDR, 0, NULL, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 8);
++ bytes_written = 8;
++ addr0 = cfm.rftest_result[0];
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) {
++ int rem_cnt = (cfm.rftest_result[1] >> 16) & 0x00FF;
++ addr1 = cfm.rftest_result[1] & 0x0000FFFF;
++ AICWFDBG(LOGINFO, "0x%x,0x%x (remain:%x)\n", addr0, addr1, rem_cnt);
++ } else {
++ addr1 = cfm.rftest_result[1];
++ AICWFDBG(LOGINFO, "0x%x,0x%x\n", addr0, addr1);
++ }
++ } else if (strcasecmp(argv[0], "SET_BT_MAC_ADDR") == 0) {
++ AICWFDBG(LOGINFO, "set_bt_mac_addr\n");
++ if (argc < 7) {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ mac_addr[5] = command_strtoul(argv[1], NULL, 16);
++ mac_addr[4] = command_strtoul(argv[2], NULL, 16);
++ mac_addr[3] = command_strtoul(argv[3], NULL, 16);
++ mac_addr[2] = command_strtoul(argv[4], NULL, 16);
++ mac_addr[1] = command_strtoul(argv[5], NULL, 16);
++ mac_addr[0] = command_strtoul(argv[6], NULL, 16);
++ AICWFDBG(LOGINFO, "set bt macaddr:%x,%x,%x,%x,%x,%x\n", mac_addr[5], mac_addr[4], mac_addr[3], mac_addr[2], mac_addr[1], mac_addr[0]);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_BT_MAC_ADDR, sizeof(mac_addr), (u8_l *)&mac_addr, NULL);
++ } else if (strcasecmp(argv[0], "GET_BT_MAC_ADDR")==0) {
++ u32_l addr0, addr1;
++ AICWFDBG(LOGINFO, "get bt mac addr\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_BT_MAC_ADDR, 0, NULL, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 8);
++ bytes_written = 8;
++ addr0 = cfm.rftest_result[0];
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) {
++ int rem_cnt = (cfm.rftest_result[1] >> 16) & 0x00FF;
++ addr1 = cfm.rftest_result[1] & 0x0000FFFF;
++ AICWFDBG(LOGINFO, "0x%x,0x%x (remain:%x)\n", addr0, addr1, rem_cnt);
++ } else {
++ addr1 = cfm.rftest_result[1];
++ AICWFDBG(LOGINFO, "0x%x,0x%x\n", addr0, addr1);
++ }
++ } else if (strcasecmp(argv[0], "SET_VENDOR_INFO")==0) {
++ vendor_info = command_strtoul(argv[1], NULL, 16);
++ AICWFDBG(LOGINFO, "set vendor info:%x\n", vendor_info);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_VENDOR_INFO, 1, &vendor_info, &cfm);
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) {
++ memcpy(command, &cfm.rftest_result[0], 2);
++ bytes_written = 2;
++ } else {
++ memcpy(command, &cfm.rftest_result[0], 1);
++ bytes_written = 1;
++ }
++ AICWFDBG(LOGINFO, "0x%x\n", cfm.rftest_result[0]);
++ } else if (strcasecmp(argv[0], "GET_VENDOR_INFO")==0) {
++ AICWFDBG(LOGINFO, "get vendor info\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_VENDOR_INFO, 0, NULL, &cfm);
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) {
++ memcpy(command, &cfm.rftest_result[0], 2);
++ bytes_written = 2;
++ } else {
++ memcpy(command, &cfm.rftest_result[0], 1);
++ bytes_written = 1;
++ }
++ AICWFDBG(LOGINFO, "0x%x\n", cfm.rftest_result[0]);
++ } else if (strcasecmp(argv[0], "GET_FREQ_CAL") == 0) {
++ unsigned int val;
++ AICWFDBG(LOGINFO, "get freq cal\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_FREQ_CAL, 0, NULL, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ val = cfm.rftest_result[0];
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) {
++ AICWFDBG(LOGINFO, "cap=0x%x (remain:%x), cap_fine=%x (remain:%x)\n",
++ val & 0xff, (val >> 8) & 0xff, (val >> 16) & 0xff, (val >> 24) & 0xff);
++ } else {
++ AICWFDBG(LOGINFO, "cap=0x%x, cap_fine=0x%x\n", val & 0xff, (val >> 8) & 0xff);
++ }
++ } else if (strcasecmp(argv[0], "RDWR_PWRMM") == 0) {
++ AICWFDBG(LOGINFO, "read/write txpwr manul mode\n");
++ if (argc <= 1) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWRMM, 0, NULL, &cfm);
++ } else { // write
++ u8_l pwrmm = (u8_l)command_strtoul(argv[1], NULL, 16);
++ pwrmm = (pwrmm) ? 1 : 0;
++ AICWFDBG(LOGINFO, "set pwrmm = %x\r\n", pwrmm);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWRMM, sizeof(pwrmm), (u8_l *)&pwrmm, &cfm);
++ }
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "RDWR_PWRIDX") == 0) {
++ u8_l func = 0;
++ #ifdef AICWF_USB_SUPPORT
++ if (g_rwnx_plat->usbdev->chipid != PRODUCT_ID_AIC8801) {
++ AICWFDBG(LOGERROR, "unsupported cmd\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ #endif
++ AICWFDBG(LOGINFO, "read/write txpwr index\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWRIDX, 0, NULL, &cfm);
++ } else if (func <= 2) { // write 2.4g/5g pwr idx
++ if (argc > 3) {
++ u8_l type = (u8_l)command_strtoul(argv[2], NULL, 16);
++ u8_l pwridx = (u8_l)command_strtoul(argv[3], NULL, 10);
++ u8_l buf[3] = {func, type, pwridx};
++ AICWFDBG(LOGINFO, "set pwridx:[%x][%x]=%x\r\n", func, type, pwridx);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWRIDX, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 9);
++ bytes_written = 9;
++ } else if (strcasecmp(argv[0], "RDWR_PWRLVL") == 0) {
++ u8_l func = 0;
++ #ifdef AICWF_USB_SUPPORT
++ if ((g_rwnx_plat->usbdev->chipid != PRODUCT_ID_AIC8800DC)
++ && (g_rwnx_plat->usbdev->chipid != PRODUCT_ID_AIC8800DW)
++ && (g_rwnx_plat->usbdev->chipid != PRODUCT_ID_AIC8800D81)) {
++ AICWFDBG(LOGERROR, "unsupported cmd\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ #endif
++ printk("read/write txpwr level\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWRLVL, 0, NULL, &cfm);
++ } else if (func <= 2) { // write 2.4g/5g pwr lvl
++ if (argc > 4) {
++ u8_l grp = (u8_l)command_strtoul(argv[2], NULL, 16);
++ u8_l idx, size;
++ u8_l buf[14] = {func, grp,};
++ if (argc > 12) { // set all grp
++ printk("set pwrlvl %s:\n"
++ " [%x] =", (func == 1) ? "2.4g" : "5g", grp);
++ if (grp == 1) { // TXPWR_LVL_GRP_11N_11AC
++ size = 10;
++ } else {
++ size = 12;
++ }
++ for (idx = 0; idx < size; idx++) {
++ s8_l pwrlvl = (s8_l)command_strtoul(argv[3 + idx], NULL, 10);
++ buf[2 + idx] = (u8_l)pwrlvl;
++ if (idx && !(idx & 0x3)) {
++ printk(" ");
++ }
++ printk(" %2d", pwrlvl);
++ }
++ printk("\n");
++ size += 2;
++ } else { // set grp[idx]
++ u8_l idx = (u8_l)command_strtoul(argv[3], NULL, 10);
++ s8_l pwrlvl = (s8_l)command_strtoul(argv[4], NULL, 10);
++ buf[2] = idx;
++ buf[3] = (u8_l)pwrlvl;
++ size = 4;
++ printk("set pwrlvl %s:\n"
++ " [%x][%d] = %d\n", (func == 1) ? "2.4g" : "5g", grp, idx, pwrlvl);
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWRLVL, size, buf, &cfm);
++ } else {
++ printk("wrong args\n");
++ }
++ } else {
++ printk("wrong func: %x\n", func);
++ }
++ if(g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800D81){
++ memcpy(command, &cfm.rftest_result[0], 6 * 12);
++ bytes_written = 6 * 12;
++ } else {
++ memcpy(command, &cfm.rftest_result[0], 3 * 12);
++ bytes_written = 3 * 12;
++ }
++ } else if (strcasecmp(argv[0], "RDWR_PWROFST") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write txpwr offset\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWROFST, 0, NULL, &cfm);
++ } else if (func <= 2) { // write 2.4g/5g pwr ofst
++ if (argc > 3) {
++ u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16);
++ s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10);
++ u8_l buf[3] = {func, chgrp, (u8_l)pwrofst};
++ AICWFDBG(LOGINFO, "set pwrofst:[%x][%x]=%d\r\n", func, chgrp, pwrofst);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWROFST, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 7);
++ bytes_written = 7;
++ } else if (strcasecmp(argv[0], "RDWR_PWROFSTFINE") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write txpwr offset fine\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWROFSTFINE, 0, NULL, &cfm);
++ } else if (func <= 2) { // write 2.4g/5g pwr ofst
++ if (argc > 3) {
++ u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16);
++ s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10);
++ u8_l buf[3] = {func, chgrp, (u8_l)pwrofst};
++ AICWFDBG(LOGINFO, "set pwrofstfine:[%x][%x]=%d\r\n", func, chgrp, pwrofst);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_PWROFSTFINE, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 7);
++ bytes_written = 7;
++ } else if (strcasecmp(argv[0], "RDWR_DRVIBIT") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write pa drv_ibit\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_DRVIBIT, 0, NULL, &cfm);
++ } else if (func == 1) { // write 2.4g pa drv_ibit
++ if (argc > 2) {
++ u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16);
++ u8_l buf[2] = {func, ibit};
++ AICWFDBG(LOGINFO, "set drvibit:[%x]=%x\r\n", func, ibit);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_DRVIBIT, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 16);
++ bytes_written = 16;
++ } else if (strcasecmp(argv[0], "RDWR_EFUSE_PWROFST") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write txpwr offset into efuse\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_PWROFST, 0, NULL, &cfm);
++ } else if (func <= 2) { // write 2.4g/5g pwr ofst
++ if (argc > 3) {
++ u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16);
++ s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10);
++ u8_l buf[3] = {func, chgrp, (u8_l)pwrofst};
++ AICWFDBG(LOGINFO, "set efuse pwrofst:[%x][%x]=%d\r\n", func, chgrp, pwrofst);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_PWROFST, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) { // 6 = 3 (2.4g) * 2
++ memcpy(command, &cfm.rftest_result[0], 6);
++ bytes_written = 6;
++ } else { // 7 = 3(2.4g) + 4(5g)
++ memcpy(command, &cfm.rftest_result[0], 7);
++ bytes_written = 7;
++ }
++ } else if (strcasecmp(argv[0], "RDWR_EFUSE_DRVIBIT") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write pa drv_ibit into efuse\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_DRVIBIT, 0, NULL, &cfm);
++ } else if (func == 1) { // write 2.4g pa drv_ibit
++ if (argc > 2) {
++ u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16);
++ u8_l buf[2] = {func, ibit};
++ AICWFDBG(LOGINFO, "set efuse drvibit:[%x]=%x\r\n", func, ibit);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_DRVIBIT, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "RDWR_EFUSE_PWROFSTFINE") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write txpwr offset fine into efuse\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_PWROFSTFINE, 0, NULL, &cfm);
++ } else if (func <= 2) { // write 2.4g/5g pwr ofst
++ if (argc > 3) {
++ u8_l chgrp = (u8_l)command_strtoul(argv[2], NULL, 16);
++ s8_l pwrofst = (u8_l)command_strtoul(argv[3], NULL, 10);
++ u8_l buf[3] = {func, chgrp, (u8_l)pwrofst};
++ AICWFDBG(LOGINFO, "set efuse pwrofstfine:[%x][%x]=%d\r\n", func, chgrp, pwrofst);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_PWROFSTFINE, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ if ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC) ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW)) { // 6 = 3 (2.4g) * 2
++ memcpy(command, &cfm.rftest_result[0], 6);
++ bytes_written = 6;
++ } else { // 7 = 3(2.4g) + 4(5g)
++ memcpy(command, &cfm.rftest_result[0], 7);
++ bytes_written = 7;
++ }
++ } else if (strcasecmp(argv[0], "RDWR_EFUSE_DRVIBIT") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write pa drv_ibit into efuse\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_DRVIBIT, 0, NULL, &cfm);
++ } else if (func == 1) { // write 2.4g pa drv_ibit
++ if (argc > 2) {
++ u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16);
++ u8_l buf[2] = {func, ibit};
++ AICWFDBG(LOGINFO, "set efuse drvibit:[%x]=%x\r\n", func, ibit);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_DRVIBIT, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ } else if (strcasecmp(argv[0], "SET_CAL_XTAL") == 0) {
++ AICWFDBG(LOGINFO, "set_cal_xtal\n");
++ rwnx_send_rftest_req(p_rwnx_hw, SET_CAL_XTAL, 0, NULL, NULL);
++ } else if (strcasecmp(argv[0], "GET_CAL_XTAL_RES") == 0) {
++ AICWFDBG(LOGINFO, "get_cal_xtal_res\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_CAL_XTAL_RES, 0, NULL, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ AICWFDBG(LOGINFO, "cap=0x%x, cap_fine=0x%x\n", cfm.rftest_result[0] & 0x0000ffff, (cfm.rftest_result[0] >> 16) & 0x0000ffff);
++ } else if (strcasecmp(argv[0], "SET_COB_CAL") == 0) {
++ AICWFDBG(LOGINFO, "set_cob_cal\n");
++ if (argc < 4) { // dutid, chip_num and dis_xtal are all read below
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ setcob_cal.dutid = command_strtoul(argv[1], NULL, 10);
++ setcob_cal.chip_num = command_strtoul(argv[2], NULL, 10);
++ setcob_cal.dis_xtal = command_strtoul(argv[3], NULL, 10);
++ rwnx_send_rftest_req(p_rwnx_hw, SET_COB_CAL, sizeof(cmd_rf_setcobcal_t), (u8_l *)&setcob_cal, NULL);
++ } else if (strcasecmp(argv[0], "GET_COB_CAL_RES")==0) {
++ AICWFDBG(LOGINFO, "get_cob_cal_res\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_COB_CAL_RES, 0, NULL, &cfm);
++ state = (cfm.rftest_result[0] >> 16) & 0x000000ff;
++ if (!state){
++ AICWFDBG(LOGINFO, "cap= 0x%x, cap_fine= 0x%x, freq_ofst= %d Hz\n",
++ cfm.rftest_result[0] & 0x000000ff, (cfm.rftest_result[0] >> 8) & 0x000000ff, cfm.rftest_result[1]);
++ cob_result_ptr = (cob_result_ptr_t *) & (cfm.rftest_result[2]);
++ AICWFDBG(LOGINFO, "golden_rcv_dut= %d , tx_rssi= %d dBm, snr = %d dB\ndut_rcv_golden= %d , rx_rssi= %d dBm",
++ cob_result_ptr->golden_rcv_dut_num, cob_result_ptr->rssi_static, cob_result_ptr->snr_static,
++ cob_result_ptr->dut_rcv_golden_num, cob_result_ptr->dut_rssi_static);
++ memcpy(command, &cfm.rftest_result, 16);
++ bytes_written = 16;
++ } else {
++ AICWFDBG(LOGERROR, "cob not idle\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else if (strcasecmp(argv[0], "DO_COB_TEST") == 0) {
++ AICWFDBG(LOGINFO, "do_cob_test\n");
++ setcob_cal.dutid = 1;
++ setcob_cal.chip_num = 1;
++ setcob_cal.dis_xtal = 0;
++ if (argc > 1 ) {
++ setcob_cal.dis_xtal = command_strtoul(argv[1], NULL, 10);
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, SET_COB_CAL, sizeof(cmd_rf_setcobcal_t), (u8_l *)&setcob_cal, NULL);
++ msleep(2000);
++ rwnx_send_rftest_req(p_rwnx_hw, GET_COB_CAL_RES, 0, NULL, &cfm);
++ state = (cfm.rftest_result[0] >> 16) & 0x000000ff;
++ if (!state){
++ AICWFDBG(LOGINFO, "cap= 0x%x, cap_fine= 0x%x, freq_ofst= %d Hz\n",
++ cfm.rftest_result[0] & 0x000000ff, (cfm.rftest_result[0] >> 8) & 0x000000ff, cfm.rftest_result[1]);
++ cob_result_ptr = (cob_result_ptr_t *) & (cfm.rftest_result[2]);
++ AICWFDBG(LOGINFO, "golden_rcv_dut= %d , tx_rssi= %d dBm, snr = %d dB\ndut_rcv_golden= %d , rx_rssi= %d dBm",
++ cob_result_ptr->golden_rcv_dut_num, cob_result_ptr->rssi_static, cob_result_ptr->snr_static,
++ cob_result_ptr->dut_rcv_golden_num, cob_result_ptr->dut_rssi_static);
++ memcpy(command, &cfm.rftest_result, 16);
++ bytes_written = 16;
++ } else {
++ AICWFDBG(LOGERROR, "cob not idle\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else if (strcasecmp(argv[0], "RDWR_EFUSE_USRDATA") == 0) {
++ AICWFDBG(LOGINFO, "read/write efuse usrdata\n");
++ if (argc <= 1) { // read all
++ cmd_ef_usrdata.func = 0;
++ cmd_ef_usrdata.cnt = 3;
++ } else if (argc >= 3) { // read/write: func and cnt are both required
++ cmd_ef_usrdata.func = (u8_l)command_strtoul(argv[1], NULL, 10);
++ cmd_ef_usrdata.cnt = (u8_l)command_strtoul(argv[2], NULL, 10);
++ if (cmd_ef_usrdata.func == 1) {
++ int idx;
++ for (idx = 0; idx < cmd_ef_usrdata.cnt; idx++) {
++ cmd_ef_usrdata.usrdata[idx] = (u32_l)command_strtoul(argv[3 + idx], NULL, 16);
++ }
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong argc: %x\n", argc);
++ bytes_written = -EINVAL;
++ break;
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_USRDATA, sizeof(cmd_ef_usrdata), (u8_l *)&cmd_ef_usrdata, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 12);
++ bytes_written = 12;
++ } else if (strcasecmp(argv[0], "RDWR_EFUSE_SDIOCFG") == 0) {
++ u8_l func = 0;
++ AICWFDBG(LOGINFO, "read/write sdiocfg_bit into efuse\n");
++ if (argc > 1) {
++ func = (u8_l)command_strtoul(argv[1], NULL, 16);
++ }
++ if (func == 0) { // read cur
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_SDIOCFG, 0, NULL, &cfm);
++ } else if (func == 1) { // write sdiocfg
++ if (argc > 2) {
++ u8_l ibit = (u8_l)command_strtoul(argv[2], NULL, 16);
++ u8_l buf[2] = {func, ibit};
++ AICWFDBG(LOGINFO, "set efuse sdiocfg:[%x]=%x\r\n", func, ibit);
++ rwnx_send_rftest_req(p_rwnx_hw, RDWR_EFUSE_SDIOCFG, sizeof(buf), buf, &cfm);
++ } else {
++ AICWFDBG(LOGERROR, "wrong args\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ } else {
++ AICWFDBG(LOGERROR, "wrong func: %x\n", func);
++ bytes_written = -EINVAL;
++ break;
++ }
++ memcpy(command, &cfm.rftest_result[0], 4);
++ bytes_written = 4;
++ }
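++ /* BT RF test commands: each takes a fixed number of hex words that are
++ * forwarded unchanged to the firmware via rwnx_send_rftest_req(). */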
++ #ifdef CONFIG_USB_BT
++ else if (strcasecmp(argv[0], "BT_RESET") == 0) {
++ if (argc == 5) {
++ AICWFDBG(LOGINFO, "btrf reset\n");
++ for (bt_index = 0; bt_index < 4; bt_index++) {
++ dh_cmd_reset[bt_index] = command_strtoul(argv[bt_index+1], NULL, 16);
++ AICWFDBG(LOGINFO, "0x%x ",dh_cmd_reset[bt_index]);
++ }
++ AICWFDBG(LOGINFO, "\n");
++ } else {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, BT_RESET, sizeof(dh_cmd_reset), (u8_l *)&dh_cmd_reset, NULL);
++ } else if (strcasecmp(argv[0], "BT_TXDH") == 0) {
++ if (argc == 19) {
++ AICWFDBG(LOGINFO, "btrf txdh\n");
++ for (bt_index = 0; bt_index < 18; bt_index++) {
++ dh_cmd_txdh[bt_index] = command_strtoul(argv[bt_index+1], NULL, 16);
++ AICWFDBG(LOGINFO, "0x%x ", dh_cmd_txdh[bt_index]);
++ }
++ AICWFDBG(LOGINFO, "\n");
++ } else {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, BT_TXDH, sizeof(dh_cmd_txdh), (u8_l *)&dh_cmd_txdh, NULL);
++ } else if (strcasecmp(argv[0], "BT_RXDH") == 0) {
++ if (argc == 18) {
++ AICWFDBG(LOGINFO, "btrf rxdh\n");
++ for (bt_index = 0; bt_index < 17; bt_index++) {
++ dh_cmd_rxdh[bt_index] = command_strtoul(argv[bt_index+1], NULL, 16);
++ AICWFDBG(LOGINFO, "0x%x ", dh_cmd_rxdh[bt_index]);
++ }
++ AICWFDBG(LOGINFO, "\n");
++ } else {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, BT_RXDH, sizeof(dh_cmd_rxdh), (u8_l *)&dh_cmd_rxdh, NULL);
++ } else if (strcasecmp(argv[0], "BT_STOP") == 0) {
++ if (argc == 6) {
++ AICWFDBG(LOGINFO, "btrf stop\n");
++ for (bt_index = 0; bt_index < 5; bt_index++) {
++ dh_cmd_stop[bt_index] = command_strtoul(argv[bt_index+1], NULL, 16);
++ AICWFDBG(LOGINFO, "0x%x ", dh_cmd_stop[bt_index]);
++ }
++ AICWFDBG(LOGINFO, "\n");
++ } else {
++ AICWFDBG(LOGERROR, "wrong param\n");
++ bytes_written = -EINVAL;
++ break;
++ }
++ rwnx_send_rftest_req(p_rwnx_hw, BT_STOP, sizeof(dh_cmd_stop), (u8_l *)&dh_cmd_stop, NULL);
++ } else if (strcasecmp(argv[0], "GET_BT_RX_RESULT") ==0) {
++ AICWFDBG(LOGINFO, "get_bt_rx_result\n");
++ rwnx_send_rftest_req(p_rwnx_hw, GET_BT_RX_RESULT, 0, NULL, &cfm);
++ memcpy(command, &cfm.rftest_result[0], 12);
++ bytes_written = 12;
++ }
++ #endif
++ else {
++ AICWFDBG(LOGERROR, "wrong cmd:%s in %s\n", cmd, __func__);
++ bytes_written = -EINVAL;
++ break;
++ }
++ #endif
++ } while(0);
++ kfree(cmd);
++ kfree(para);
++ return bytes_written;
++}
++
++//Android private command
++
++#define RWNX_COUNTRY_CODE_LEN 2
++#define CMD_SET_COUNTRY "COUNTRY"
++#define CMD_SET_VENDOR_EX_IE "SET_VENDOR_EX_IE"
++#define CMD_SET_AP_WPS_P2P_IE "SET_AP_WPS_P2P_IE"
++#define CMD_SET_TESTMODE "SET_TESTMODE"
++
++
++struct ieee80211_regdomain *getRegdomainFromRwnxDB(struct wiphy *wiphy, char *alpha2);
++struct ieee80211_regdomain *getRegdomainFromRwnxDBIndex(struct wiphy *wiphy, int index);
++extern int reg_regdb_size;
++
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++extern u8_l vendor_extension_data[256];
++extern u8_l vendor_extension_len;
++
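++/*
++ * Parse a "SET_VENDOR_EX_IE <len> <b0> <b1> ..." command string: the first
++ * two hex characters give the payload length, then each following
++ * space-separated hex byte is stored in vendor_extension_data[].
++ */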
++void set_vendor_extension_ie(char *command){
++
++ char databyte[3]={0x00, 0x00, 0x00};
++ int skip = strlen(CMD_SET_VENDOR_EX_IE) + 1;
++ int command_index = skip;
++ int data_index = 0;
++
++ memset(vendor_extension_data, 0, 256);
++ vendor_extension_len = 0;
++ memcpy(databyte, command + command_index, 2);
++ vendor_extension_len = command_strtoul(databyte, NULL, 16);
++ AICWFDBG(LOGINFO, "%s len:%d \r\n", __func__, vendor_extension_len);
++
++ //parse the command and save the data in vendor_extension_data
++ for(data_index = 0;data_index < vendor_extension_len; data_index++){
++ command_index = command_index + 3;
++ memcpy(databyte, command + command_index, 2);
++ vendor_extension_data[data_index] = command_strtoul(databyte, NULL, 16);
++ }
++
++}
++#endif//CONFIG_SET_VENDOR_EXTENSION_IE
++
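++/*
++ * android_priv_cmd - service the Android private-command ioctl: copy the
++ * command buffer from user space (with 32-bit compat handling), handle the
++ * COUNTRY/SET_VENDOR_EX_IE/SET_AP_WPS_P2P_IE/SET_TESTMODE keywords, hand
++ * anything else to handle_private_cmd() and copy the reply back to the
++ * caller's buffer.
++ */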
++int android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
++{
++#define PRIVATE_COMMAND_MAX_LEN 8192
++#define PRIVATE_COMMAND_DEF_LEN 4096
++
++ struct rwnx_vif *vif = netdev_priv(net);
++ int ret = 0;
++ char *command = NULL;
++ int bytes_written = 0;
++ android_wifi_priv_cmd priv_cmd;
++ int buf_size = 0;
++ int skip = 0;
++ char *country = NULL;
++ struct ieee80211_regdomain *regdomain;
++ //int index = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ ///todo: add our lock
++ //net_os_wake_lock(net);
++
++
++/* if (!capable(CAP_NET_ADMIN)) {
++ ret = -EPERM;
++ goto exit;
++ }*/
++ if (!ifr->ifr_data) {
++ ret = -EINVAL;
++ goto exit;
++ }
++
++#ifdef CONFIG_COMPAT
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0))
++ if (in_compat_syscall())
++#else
++ if (is_compat_task())
++#endif
++ {
++ compat_android_wifi_priv_cmd compat_priv_cmd;
++ if (copy_from_user(&compat_priv_cmd, ifr->ifr_data, sizeof(compat_android_wifi_priv_cmd))) {
++ ret = -EFAULT;
++ goto exit;
++ }
++ priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
++ priv_cmd.used_len = compat_priv_cmd.used_len;
++ priv_cmd.total_len = compat_priv_cmd.total_len;
++ } else
++#endif /* CONFIG_COMPAT */
++ {
++ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
++ ret = -EFAULT;
++ goto exit;
++ }
++ }
++ if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
++ AICWFDBG(LOGERROR, "%s: buf length invalid:%d\n", __FUNCTION__, priv_cmd.total_len);
++ ret = -EINVAL;
++ goto exit;
++ }
++
++ buf_size = max(priv_cmd.total_len, PRIVATE_COMMAND_DEF_LEN);
++ command = kmalloc((buf_size + 1), GFP_KERNEL);
++
++ if (!command)
++ {
++ AICWFDBG(LOGERROR, "%s: failed to allocate memory\n", __FUNCTION__);
++ ret = -ENOMEM;
++ goto exit;
++ }
++ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
++ ret = -EFAULT;
++ goto exit;
++ }
++ command[priv_cmd.total_len] = '\0';
++
++ /* outputs */
++ AICWFDBG(LOGINFO, "%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name);
++ AICWFDBG(LOGINFO, "cmd = %d\n", cmd);
++ AICWFDBG(LOGINFO, "buf_size=%d\n", buf_size);
++
++
++#if 1//Handle Android command
++ if(!strncasecmp(command, CMD_SET_COUNTRY, strlen(CMD_SET_COUNTRY))) {
++ skip = strlen(CMD_SET_COUNTRY) + 1;
++ country = command + skip;
++ if (!country || strlen(country) < RWNX_COUNTRY_CODE_LEN) {
++ AICWFDBG(LOGERROR, "%s: invalid country code\n", __func__);
++ ret = -EINVAL;
++ goto exit;
++ }
++#if 0
++ for(index = 0; index < reg_regdb_size; index++){
++ regdomain = getRegdomainFromRwnxDBIndex(vif->rwnx_hw->wiphy, index);
++ if((ret = regulatory_set_wiphy_regd(vif->rwnx_hw->wiphy, regdomain))){
++ AICWFDBG(LOGERROR, "regulatory_set_wiphy_regd fail \r\n");
++ }else{
++ AICWFDBG(LOGINFO, "regulatory_set_wiphy_regd ok \r\n");
++ }
++ }
++#endif
++ AICWFDBG(LOGINFO, "%s country code:%c%c\n", __func__, toupper(country[0]), toupper(country[1]));
++ regdomain = getRegdomainFromRwnxDB(vif->rwnx_hw->wiphy, country);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
++ if((ret = regulatory_set_wiphy_regd(vif->rwnx_hw->wiphy, regdomain))){
++ AICWFDBG(LOGERROR, "regulatory_set_wiphy_regd fail \r\n");
++ }
++#else
++ wiphy_apply_custom_regulatory(vif->rwnx_hw->wiphy, regdomain);
++#endif
++ }
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++ else if(!strncasecmp(command, CMD_SET_VENDOR_EX_IE, strlen(CMD_SET_VENDOR_EX_IE))){
++ set_vendor_extension_ie(command);
++ }
++#endif//CONFIG_SET_VENDOR_EXTENSION_IE
++ else if(!strncasecmp(command, CMD_SET_AP_WPS_P2P_IE, strlen(CMD_SET_AP_WPS_P2P_IE))){
++ ret = 0;
++ goto exit;
++ }else if(!strncasecmp(command, CMD_SET_TESTMODE, strlen(CMD_SET_TESTMODE))){
++ if(g_rwnx_plat && g_rwnx_plat->usbdev->rwnx_hw){
++ if (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW ||
++ (g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC)){
++ set_testmode(!testmode);
++ rwnx_send_reboot(g_rwnx_plat->usbdev->rwnx_hw);
++ }
++ }
++ ret = 0;
++ goto exit;
++ }
++#endif//Handle Android command
++
++
++ bytes_written = handle_private_cmd(net, command, priv_cmd.total_len);
++ if (bytes_written >= 0) {
++ if ((bytes_written == 0) && (priv_cmd.total_len > 0)) {
++ command[0] = '\0';
++ }
++ if (bytes_written >= priv_cmd.total_len) {
++ AICWFDBG(LOGINFO, "%s: err. bytes_written:%d >= buf_size:%d \n",
++ __FUNCTION__, bytes_written, buf_size);
++ goto exit;
++ }
++ bytes_written++;
++ priv_cmd.used_len = bytes_written;
++ if (copy_to_user(priv_cmd.buf, command, bytes_written)) {
++ AICWFDBG(LOGERROR, "%s: failed to copy data to user buffer\n", __FUNCTION__);
++ ret = -EFAULT;
++ }
++ }
++ else {
++ /* Propagate the error */
++ ret = bytes_written;
++ }
++
++exit:
++ ///todo: add our unlock
++ //net_os_wake_unlock(net);
++ kfree(command);
++ return ret;
++}
++
++#define IOCTL_HOSTAPD (SIOCIWFIRSTPRIV+28)
++#define IOCTL_WPAS (SIOCIWFIRSTPRIV+30)
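++
++/* ioctl entry point (ndo_do_ioctl / ndo_siocdevprivate): only the Android
++ * private command ioctl (SIOCDEVPRIVATE + 1) is actually processed. */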
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
++static int rwnx_do_ioctl(struct net_device *net, struct ifreq *req, void __user *data, int cmd)
++#else
++static int rwnx_do_ioctl(struct net_device *net, struct ifreq *req, int cmd)
++#endif
++{
++ int ret = 0;
++ ///TODO: add ioctl command handler later
++ switch(cmd)
++ {
++ case IOCTL_HOSTAPD:
++ AICWFDBG(LOGINFO, "IOCTL_HOSTAPD\n");
++ break;
++ case IOCTL_WPAS:
++ AICWFDBG(LOGINFO, "IOCTL_WPAS\n");
++ break;
++ case SIOCDEVPRIVATE:
++ AICWFDBG(LOGINFO, "IOCTL SIOCDEVPRIVATE\n");
++ break;
++ case (SIOCDEVPRIVATE+1):
++ AICWFDBG(LOGINFO, "IOCTL PRIVATE\n");
++ ret = android_priv_cmd(net, req, cmd);
++ break;
++ default:
++ ret = -EOPNOTSUPP;
++ }
++ return ret;
++}
++
++/**
++ * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
++ * Called when a user wants to get the network device usage
++ * statistics. Drivers must do one of the following:
++ * 1. Define @ndo_get_stats64 to fill in a zero-initialised
++ * rtnl_link_stats64 structure passed by the caller.
++ * 2. Define @ndo_get_stats to update a net_device_stats structure
++ * (which should normally be dev->stats) and return a pointer to
++ * it. The structure may be changed asynchronously only if each
++ * field is written atomically.
++ * 3. Update dev->stats asynchronously and atomically, and define
++ * neither operation.
++ */
++static struct net_device_stats *rwnx_get_stats(struct net_device *dev)
++{
++ struct rwnx_vif *vif = netdev_priv(dev);
++
++ return &vif->net_stats;
++}
++
++/**
++ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
++ * struct net_device *sb_dev);
++ * Called to decide which queue to use when the device supports multiple
++ * transmit queues.
++ */
++u16 rwnx_select_queue(struct net_device *dev, struct sk_buff *skb,
++ struct net_device *sb_dev)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ return rwnx_select_txq(rwnx_vif, skb);
++}
++
++/**
++ * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
++ * This function is called when the Media Access Control address
++ * needs to be changed. If this interface is not defined, the
++ * mac address can not be changed.
++ */
++static int rwnx_set_mac_address(struct net_device *dev, void *addr)
++{
++ struct sockaddr *sa = addr;
++ int ret;
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ printk("%s enter \r\n", __func__);
++
++ ret = eth_mac_addr(dev, sa);
++ memcpy(rwnx_vif->wdev.address, dev->dev_addr, 6);
++
++ return ret;
++}
++
++static const struct net_device_ops rwnx_netdev_ops = {
++ .ndo_open = rwnx_open,
++ .ndo_stop = rwnx_close,
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
++ .ndo_siocdevprivate = rwnx_do_ioctl,
++ #else
++ .ndo_do_ioctl = rwnx_do_ioctl,
++ #endif
++ .ndo_start_xmit = rwnx_start_xmit,
++ .ndo_get_stats = rwnx_get_stats,
++#ifndef CONFIG_ONE_TXQ
++ .ndo_select_queue = rwnx_select_queue,
++#endif
++#ifdef CONFIG_SUPPORT_REALTIME_CHANGE_MAC
++ .ndo_set_mac_address = rwnx_set_mac_address
++#endif
++// .ndo_set_features = rwnx_set_features,
++// .ndo_set_rx_mode = rwnx_set_multicast_list,
++};
++
++static const struct net_device_ops rwnx_netdev_monitor_ops = {
++ .ndo_open = rwnx_open,
++ .ndo_stop = rwnx_close,
++ #ifdef CONFIG_RWNX_MON_XMIT
++ .ndo_start_xmit = rwnx_start_monitor_if_xmit,
++ .ndo_select_queue = rwnx_select_queue,
++ #endif
++ .ndo_get_stats = rwnx_get_stats,
++ .ndo_set_mac_address = rwnx_set_mac_address,
++};
++
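++/*
++ * Net device setup callback: ethernet-style defaults, rwnx netdev ops and
++ * enough headroom in front of each skb for the rwnx TX header (plus the
++ * A-MSDU header when CONFIG_RWNX_AMSDUS_TX is enabled).
++ */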
++static void rwnx_netdev_setup(struct net_device *dev)
++{
++ ether_setup(dev);
++ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
++ dev->netdev_ops = &rwnx_netdev_ops;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
++ dev->destructor = free_netdev;
++#else
++ dev->needs_free_netdev = true;
++#endif
++ dev->watchdog_timeo = RWNX_TX_LIFETIME_MS;
++
++ dev->needed_headroom = sizeof(struct rwnx_txhdr) + RWNX_SWTXHDR_ALIGN_SZ - 14;
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ dev->needed_headroom = max(dev->needed_headroom,
++ (unsigned short)(sizeof(struct rwnx_amsdu_txhdr)
++ + sizeof(struct ethhdr) + 4
++ + sizeof(rfc1042_header) + 2));
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++
++ dev->hw_features = 0;
++}
++
++#ifndef CONFIG_USE_WIRELESS_EXT
++#ifdef CONFIG_WIRELESS_EXT
++ #include <net/iw_handler.h>
++ struct iw_handler_def aic_handlers_def;
++#endif
++#endif
++
++
++/*********************************************************************
++ * Cfg80211 callbacks (and helper)
++ *********************************************************************/
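++/* Allocate and register a net_device for a new virtual interface: pick a free
++ * index from avail_idx_map, derive the MAC address from the wiphy permanent
++ * address (last byte XORed with the index) and add the vif to the vifs list. */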
++static struct rwnx_vif *rwnx_interface_add(struct rwnx_hw *rwnx_hw,
++ const char *name,
++ unsigned char name_assign_type,
++ enum nl80211_iftype type,
++ struct vif_params *params)
++{
++ struct net_device *ndev;
++ struct rwnx_vif *vif;
++ int min_idx, max_idx;
++ int vif_idx = -1;
++ int i;
++ int nx_nb_ndev_txq = NX_NB_NDEV_TXQ;
++
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_nb_ndev_txq = NX_NB_NDEV_TXQ_FOR_OLD_IC;
++ }
++
++ AICWFDBG(LOGINFO, "rwnx_interface_add: %s, %d, %d\r\n", name, type, NL80211_IFTYPE_P2P_DEVICE);
++ // Look for an available VIF
++ if (type == NL80211_IFTYPE_AP_VLAN) {
++ min_idx = NX_VIRT_DEV_MAX;
++ max_idx = NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX;
++ } else {
++ min_idx = 0;
++ max_idx = NX_VIRT_DEV_MAX;
++ }
++
++ for (i = min_idx; i < max_idx; i++) {
++ if ((rwnx_hw->avail_idx_map) & BIT(i)) {
++ vif_idx = i;
++ break;
++ }
++ }
++ if (vif_idx < 0)
++ return NULL;
++
++ #ifndef CONFIG_RWNX_MON_DATA
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++ // Check if monitor interface already exists or type is monitor
++ if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MONITOR) ||
++ (type == NL80211_IFTYPE_MONITOR)) {
++ wiphy_err(rwnx_hw->wiphy,
++ "Monitor+Data interface support (MON_DATA) disabled\n");
++ return NULL;
++ }
++ }
++ #endif
++
++#ifndef CONFIG_ONE_TXQ
++ ndev = alloc_netdev_mqs(sizeof(*vif), name, name_assign_type,
++ rwnx_netdev_setup, nx_nb_ndev_txq, 1);
++#else
++ ndev = alloc_netdev_mqs(sizeof(*vif), name, name_assign_type,
++ rwnx_netdev_setup, 1, 1);
++#endif
++
++ if (!ndev)
++ return NULL;
++
++ vif = netdev_priv(ndev);
++ vif->key_has_add = 0;
++ ndev->ieee80211_ptr = &vif->wdev;
++ vif->wdev.wiphy = rwnx_hw->wiphy;
++ vif->rwnx_hw = rwnx_hw;
++ vif->ndev = ndev;
++ vif->drv_vif_index = vif_idx;
++ SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
++ vif->wdev.netdev = ndev;
++ vif->wdev.iftype = type;
++ vif->up = false;
++ vif->ch_index = RWNX_CH_NOT_SET;
++ memset(&vif->net_stats, 0, sizeof(vif->net_stats));
++ vif->is_p2p_vif = 0;
++#ifdef CONFIG_BR_SUPPORT
++ spin_lock_init(&vif->br_ext_lock);
++#endif /* CONFIG_BR_SUPPORT */
++
++ switch (type) {
++ case NL80211_IFTYPE_STATION:
++ vif->sta.ap = NULL;
++ vif->sta.tdls_sta = NULL;
++ vif->sta.external_auth = false;
++ break;
++ case NL80211_IFTYPE_P2P_CLIENT:
++ vif->sta.ap = NULL;
++ vif->sta.tdls_sta = NULL;
++ vif->sta.external_auth = false;
++ vif->is_p2p_vif = 1;
++ break;
++ case NL80211_IFTYPE_MESH_POINT:
++ INIT_LIST_HEAD(&vif->ap.mpath_list);
++ INIT_LIST_HEAD(&vif->ap.proxy_list);
++ vif->ap.create_path = false;
++ vif->ap.generation = 0;
++ vif->ap.mesh_pm = NL80211_MESH_POWER_ACTIVE;
++ vif->ap.next_mesh_pm = NL80211_MESH_POWER_ACTIVE;
++ // no break
++ case NL80211_IFTYPE_AP:
++ INIT_LIST_HEAD(&vif->ap.sta_list);
++ memset(&vif->ap.bcn, 0, sizeof(vif->ap.bcn));
++ break;
++ case NL80211_IFTYPE_P2P_GO:
++ INIT_LIST_HEAD(&vif->ap.sta_list);
++ memset(&vif->ap.bcn, 0, sizeof(vif->ap.bcn));
++ vif->is_p2p_vif = 1;
++ break;
++ case NL80211_IFTYPE_AP_VLAN:
++ {
++ struct rwnx_vif *master_vif;
++ bool found = false;
++ list_for_each_entry(master_vif, &rwnx_hw->vifs, list) {
++ if ((RWNX_VIF_TYPE(master_vif) == NL80211_IFTYPE_AP) &&
++ !(!memcmp(master_vif->ndev->dev_addr, params->macaddr,
++ ETH_ALEN))) {
++ found=true;
++ break;
++ }
++ }
++
++ if (!found)
++ goto err;
++
++ vif->ap_vlan.master = master_vif;
++ vif->ap_vlan.sta_4a = NULL;
++ break;
++ }
++ case NL80211_IFTYPE_MONITOR:
++ ndev->type = ARPHRD_IEEE80211_RADIOTAP;
++ ndev->netdev_ops = &rwnx_netdev_monitor_ops;
++ break;
++ default:
++ break;
++ }
++
++ if (type == NL80211_IFTYPE_AP_VLAN) {
++ memcpy(ndev->dev_addr, params->macaddr, ETH_ALEN);
++ memcpy(vif->wdev.address, params->macaddr, ETH_ALEN);
++ }
++ else {
++#if 1
++ unsigned char mac_addr[6];
++ memcpy(mac_addr, rwnx_hw->wiphy->perm_addr, ETH_ALEN);
++ mac_addr[5] ^= vif_idx;
++// memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
++ eth_hw_addr_set(ndev, mac_addr);
++ memcpy(vif->wdev.address, ndev->dev_addr, ETH_ALEN);
++#else
++ memcpy(ndev->dev_addr, rwnx_hw->wiphy->perm_addr, ETH_ALEN);
++ ndev->dev_addr[5] ^= vif_idx;
++ memcpy(vif->wdev.address, ndev->dev_addr, ETH_ALEN);
++#endif
++ }
++
++ AICWFDBG(LOGINFO, "interface add:%x %x %x %x %x %x\n", vif->wdev.address[0], vif->wdev.address[1],
++ vif->wdev.address[2], vif->wdev.address[3], vif->wdev.address[4], vif->wdev.address[5]);
++
++ if (params) {
++ vif->use_4addr = params->use_4addr;
++ ndev->ieee80211_ptr->use_4addr = params->use_4addr;
++ } else
++ vif->use_4addr = false;
++
++#ifdef CONFIG_USE_WIRELESS_EXT
++ aicwf_set_wireless_ext(ndev, rwnx_hw);
++#else
++#ifdef CONFIG_WIRELESS_EXT
++ memset(&aic_handlers_def, 0,sizeof(struct iw_handler_def));
++ ndev->wireless_handlers = (struct iw_handler_def *)&aic_handlers_def;
++#endif
++#endif
++
++
++ if (register_netdevice(ndev))
++ goto err;
++
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ list_add_tail(&vif->list, &rwnx_hw->vifs);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ rwnx_hw->avail_idx_map &= ~BIT(vif_idx);
++
++ return vif;
++
++err:
++ free_netdev(ndev);
++ return NULL;
++}
++
++
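++/*
++ * P2P keep-alive timer: counts timer expirations during which no P2P client
++ * or GO interface is instantiated; after P2P_ALIVE_TIME_COUNT consecutive
++ * expirations the P2P device interface is removed from the firmware,
++ * otherwise the timer is re-armed for another P2P_ALIVE_TIME_MS.
++ */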
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
++void aicwf_p2p_alive_timeout(ulong data)
++#else
++void aicwf_p2p_alive_timeout(struct timer_list *t)
++#endif
++{
++ struct rwnx_hw *rwnx_hw;
++ struct rwnx_vif *rwnx_vif;
++ struct rwnx_vif *rwnx_vif1, *tmp;
++ u8_l p2p = 0;
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
++ rwnx_vif = (struct rwnx_vif *)data;
++ rwnx_hw = rwnx_vif->rwnx_hw;
++ #else
++ rwnx_hw = from_timer(rwnx_hw, t, p2p_alive_timer);
++ rwnx_vif = rwnx_hw->p2p_dev_vif;
++ #endif
++
++ //printk("%s enter %d \r\n", __func__, atomic_read(&rwnx_hw->p2p_alive_timer_count));
++
++#if 1 //AIDEN workaround
++ if(atomic_read(&rwnx_hw->p2p_alive_timer_count) > 2){
++ p2p_working = 0;
++ }
++#endif
++
++ list_for_each_entry_safe(rwnx_vif1, tmp, &rwnx_hw->vifs, list) {
++ if ((rwnx_hw->avail_idx_map & BIT(rwnx_vif1->drv_vif_index)) == 0) {
++ switch(RWNX_VIF_TYPE(rwnx_vif1)) {
++ case NL80211_IFTYPE_P2P_CLIENT:
++ case NL80211_IFTYPE_P2P_GO:
++ rwnx_hw->is_p2p_alive = 1;
++ p2p = 1;
++ break;
++ default:
++ break;
++ }
++ }
++ }
++
++ if (p2p){
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++ }else{
++ atomic_inc(&rwnx_hw->p2p_alive_timer_count);
++ }
++
++ if (atomic_read(&rwnx_hw->p2p_alive_timer_count) < P2P_ALIVE_TIME_COUNT) {
++ mod_timer(&rwnx_hw->p2p_alive_timer,
++ jiffies + msecs_to_jiffies(P2P_ALIVE_TIME_MS));
++ return;
++ } else
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++
++ rwnx_hw->is_p2p_alive = 0;
++ rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, true);
++
++ /* Ensure that we won't process disconnect ind */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++
++ rwnx_vif->up = false;
++ rwnx_hw->vif_table[rwnx_vif->vif_index] = NULL;
++ rwnx_hw->vif_started--;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++}
++
++
++/*********************************************************************
++ * Cfg80211 callbacks (and helper)
++ *********************************************************************/
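++/* Add a P2P_DEVICE wireless_dev (no net_device is created): allocate the vif,
++ * derive its address from the wiphy permanent address and set up the P2P
++ * keep-alive timer the first time the P2P device is created. */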
++static struct wireless_dev *rwnx_virtual_interface_add(struct rwnx_hw *rwnx_hw,
++ const char *name,
++ unsigned char name_assign_type,
++ enum nl80211_iftype type,
++ struct vif_params *params)
++{
++ struct wireless_dev *wdev = NULL;
++ struct rwnx_vif *vif;
++ int min_idx, max_idx;
++ int vif_idx = -1;
++ int i;
++
++ AICWFDBG(LOGINFO, "rwnx_virtual_interface_add: %d, %s\n", type, name);
++
++ if (type == NL80211_IFTYPE_AP_VLAN) {
++ min_idx = NX_VIRT_DEV_MAX;
++ max_idx = NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX;
++ } else {
++ min_idx = 0;
++ max_idx = NX_VIRT_DEV_MAX;
++ }
++
++ for (i = min_idx; i < max_idx; i++) {
++ if ((rwnx_hw->avail_idx_map) & BIT(i)) {
++ vif_idx = i;
++ break;
++ }
++ }
++
++ if (vif_idx < 0) {
++ AICWFDBG(LOGERROR, "virtual_interface_add %s fail\n", name);
++ return NULL;
++ }
++
++ vif = kzalloc(sizeof(struct rwnx_vif), GFP_KERNEL);
++ if (unlikely(!vif)) {
++ AICWFDBG(LOGERROR, "Could not allocate wireless device\n");
++ return NULL;
++ }
++ wdev = &vif->wdev;
++ wdev->wiphy = rwnx_hw->wiphy;
++ wdev->iftype = type;
++
++ AICWFDBG(LOGINFO, "rwnx_virtual_interface_add, ifname=%s, wdev=%p, vif_idx=%d\n", name, wdev, vif_idx);
++
++ #ifndef CONFIG_USE_P2P0
++ vif->is_p2p_vif = 1;
++ vif->rwnx_hw = rwnx_hw;
++ vif->vif_index = vif_idx;
++ vif->wdev.wiphy = rwnx_hw->wiphy;
++ vif->drv_vif_index = vif_idx;
++ vif->up = false;
++ vif->ch_index = RWNX_CH_NOT_SET;
++ memset(&vif->net_stats, 0, sizeof(vif->net_stats));
++ vif->use_4addr = false;
++
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ list_add_tail(&vif->list, &rwnx_hw->vifs);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ if (rwnx_hw->is_p2p_alive == 0) {
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
++ init_timer(&rwnx_hw->p2p_alive_timer);
++ rwnx_hw->p2p_alive_timer.data = (unsigned long)vif;
++ rwnx_hw->p2p_alive_timer.function = aicwf_p2p_alive_timeout;
++ #else
++ timer_setup(&rwnx_hw->p2p_alive_timer, aicwf_p2p_alive_timeout, 0);
++ #endif
++ rwnx_hw->is_p2p_alive = 0;
++ rwnx_hw->is_p2p_connected = 0;
++ rwnx_hw->p2p_dev_vif = vif;
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++ }
++ #endif
++ rwnx_hw->avail_idx_map &= ~BIT(vif_idx);
++
++ memcpy(vif->wdev.address, rwnx_hw->wiphy->perm_addr, ETH_ALEN);
++ vif->wdev.address[5] ^= vif_idx;
++ AICWFDBG(LOGERROR, "p2p dev addr=%x %x %x %x %x %x\n", vif->wdev.address[0], vif->wdev.address[1], \
++ vif->wdev.address[2], vif->wdev.address[3], vif->wdev.address[4], vif->wdev.address[5]);
++
++ return wdev;
++}
++
++/*
++ * @brief Retrieve the rwnx_sta object allocated for a given MAC address
++ * and a given role.
++ */
++
++static struct rwnx_sta *rwnx_retrieve_sta(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif, u8 *addr,
++ __le16 fc, bool ap)
++{
++ if (ap) {
++ /* only deauth, disassoc and action are bufferable MMPDUs */
++ bool bufferable = ieee80211_is_deauth(fc) ||
++ ieee80211_is_disassoc(fc) ||
++ ieee80211_is_action(fc);
++
++ /* Check if the packet is bufferable or not */
++ if (bufferable)
++ {
++ /* Check if address is a broadcast or a multicast address */
++ if (is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) {
++ /* Returned STA pointer */
++ struct rwnx_sta *rwnx_sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index];
++
++ if (rwnx_sta->valid)
++ return rwnx_sta;
++ } else {
++ /* Returned STA pointer */
++ struct rwnx_sta *rwnx_sta;
++
++ /* Go through list of STAs linked with the provided VIF */
++ spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++ list_for_each_entry(rwnx_sta, &rwnx_vif->ap.sta_list, list) {
++ AICWFDBG(LOGDEBUG, "%s mac_addr:%x %x %x %x %x %x addr:%x %x %x %x %x %x \r\n", __func__,
++ rwnx_sta->mac_addr[0],rwnx_sta->mac_addr[1],rwnx_sta->mac_addr[2],
++ rwnx_sta->mac_addr[3],rwnx_sta->mac_addr[4],rwnx_sta->mac_addr[5],
++ addr[0],addr[1],addr[2],addr[3],addr[4],addr[5]);
++ if (rwnx_sta->valid &&
++ ether_addr_equal(rwnx_sta->mac_addr, addr)) {
++ /* Return the found STA */
++ spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++ return rwnx_sta;
++ }
++ }
++ spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++ }
++ }
++ } else {
++ return rwnx_vif->sta.ap;
++ }
++
++ return NULL;
++}
++
++/**
++ * @add_virtual_intf: create a new virtual interface with the given name,
++ * must set the struct wireless_dev's iftype. Beware: You must create
++ * the new netdev in the wiphy's network namespace! Returns the struct
++ * wireless_dev, or an ERR_PTR. For P2P device wdevs, the driver must
++ * also set the address member in the wdev.
++ */
++static struct wireless_dev *rwnx_cfg80211_add_iface(struct wiphy *wiphy,
++ const char *name,
++ unsigned char name_assign_type,
++ enum nl80211_iftype type,
++ struct vif_params *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct wireless_dev *wdev;
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0))
++ unsigned char name_assign_type = NET_NAME_UNKNOWN;
++#endif
++
++ if (type != NL80211_IFTYPE_P2P_DEVICE) {
++ struct rwnx_vif *vif= rwnx_interface_add(rwnx_hw, name, name_assign_type, type, params);
++ if (!vif)
++ return ERR_PTR(-EINVAL);
++ return &vif->wdev;
++
++ } else {
++ wdev = rwnx_virtual_interface_add(rwnx_hw, name, name_assign_type, type, params);
++ if (!wdev)
++ return ERR_PTR(-EINVAL);
++ return wdev;
++ }
++}
++
++/**
++ * @del_virtual_intf: remove the virtual interface
++ */
++static int rwnx_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
++{
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++ struct net_device *dev = wdev->netdev;
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++#else
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++#endif
++ // printk("del_iface: %p\n",wdev);
++
++ AICWFDBG(LOGINFO, "del_iface: %p, %x\n",wdev, wdev->address[5]);
++
++ if (!dev || !rwnx_vif->ndev) {
++#if 0
++ if (rwnx_vif == rwnx_hw->p2p_dev_vif) {
++ if (timer_pending(&rwnx_hw->p2p_alive_timer)) {
++ del_timer_sync(&rwnx_hw->p2p_alive_timer);
++ }
++ }
++#endif
++ cfg80211_unregister_wdev(wdev);
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ list_del(&rwnx_vif->list);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ rwnx_hw->avail_idx_map |= BIT(rwnx_vif->drv_vif_index);
++ rwnx_vif->ndev = NULL;
++ kfree(rwnx_vif);
++ return 0;
++ }
++#if 0
++ netdev_info(dev, "Remove Interface");
++#endif
++
++ AICWFDBG(LOGINFO, "%s Remove Interface \r\n", dev->name);
++ if (dev->reg_state == NETREG_REGISTERED) {
++ /* Will call rwnx_close if interface is UP */
++ unregister_netdevice(dev);
++ }
++
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ list_del(&rwnx_vif->list);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ rwnx_hw->avail_idx_map |= BIT(rwnx_vif->drv_vif_index);
++ rwnx_vif->ndev = NULL;
++
++ /* Clear the priv in adapter */
++ dev->ieee80211_ptr = NULL;
++
++ return 0;
++}
++
++/**
++ * @change_virtual_intf: change type/configuration of virtual interface,
++ * keep the struct wireless_dev's iftype updated.
++ */
++static int rwnx_cfg80211_change_iface(struct wiphy *wiphy,
++ struct net_device *dev,
++ enum nl80211_iftype type,
++ struct vif_params *params)
++{
++#ifndef CONFIG_RWNX_MON_DATA
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++#endif
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct mm_add_if_cfm add_if_cfm;
++ bool_l p2p = false;
++ int ret;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ AICWFDBG(LOGINFO, "change_if: %d to %d, %d, %d", vif->wdev.iftype, type, NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_STATION);
++
++#ifdef CONFIG_COEX
++ if (type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_P2P_GO)
++ rwnx_send_coex_req(vif->rwnx_hw, 1, 0);
++ if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP || RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_P2P_GO)
++ rwnx_send_coex_req(vif->rwnx_hw, 0, 1);
++#endif
++#ifndef CONFIG_RWNX_MON_DATA
++ if ((type == NL80211_IFTYPE_MONITOR) &&
++ (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_MONITOR)) {
++ struct rwnx_vif *vif_el;
++ list_for_each_entry(vif_el, &rwnx_hw->vifs, list) {
++ // Check if data interface already exists
++ if ((vif_el != vif) &&
++ (RWNX_VIF_TYPE(vif_el) != NL80211_IFTYPE_MONITOR)) {
++ wiphy_err(rwnx_hw->wiphy,
++ "Monitor+Data interface support (MON_DATA) disabled\n");
++ return -EIO;
++ }
++ }
++ }
++#endif
++
++ // Reset to default case (i.e. not monitor)
++ dev->type = ARPHRD_ETHER;
++ dev->netdev_ops = &rwnx_netdev_ops;
++
++ switch (type) {
++ case NL80211_IFTYPE_STATION:
++ case NL80211_IFTYPE_P2P_CLIENT:
++ vif->sta.ap = NULL;
++ vif->sta.tdls_sta = NULL;
++ vif->sta.external_auth = false;
++ break;
++ case NL80211_IFTYPE_MESH_POINT:
++ INIT_LIST_HEAD(&vif->ap.mpath_list);
++ INIT_LIST_HEAD(&vif->ap.proxy_list);
++ vif->ap.create_path = false;
++ vif->ap.generation = 0;
++ // no break
++ case NL80211_IFTYPE_AP:
++ case NL80211_IFTYPE_P2P_GO:
++ INIT_LIST_HEAD(&vif->ap.sta_list);
++ memset(&vif->ap.bcn, 0, sizeof(vif->ap.bcn));
++ break;
++ case NL80211_IFTYPE_AP_VLAN:
++ return -EPERM;
++ case NL80211_IFTYPE_MONITOR:
++ dev->type = ARPHRD_IEEE80211_RADIOTAP;
++ dev->netdev_ops = &rwnx_netdev_monitor_ops;
++ break;
++ default:
++ break;
++ }
++
++ vif->wdev.iftype = type;
++ if (params->use_4addr != -1)
++ vif->use_4addr = params->use_4addr;
++ if (type == NL80211_IFTYPE_P2P_CLIENT || type == NL80211_IFTYPE_P2P_GO){
++ p2p = true;
++ }
++ if (vif->up) {
++ /* Abort scan request on the vif */
++ if (vif->rwnx_hw->scan_request &&
++ vif->rwnx_hw->scan_request->wdev == &vif->wdev) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ struct cfg80211_scan_info info = {
++ .aborted = true,
++ };
++ cfg80211_scan_done(vif->rwnx_hw->scan_request, &info);
++#else
++ cfg80211_scan_done(vif->rwnx_hw->scan_request, true);
++#endif
++ if ((ret = rwnx_send_scanu_cancel_req(vif->rwnx_hw, NULL))) {
++ AICWFDBG(LOGERROR, "scanu_cancel fail\n");
++ return ret;
++ }
++ vif->rwnx_hw->scan_request = NULL;
++ }
++ if ((ret = rwnx_send_remove_if(vif->rwnx_hw, vif->vif_index, false))) {
++ AICWFDBG(LOGERROR, "remove_if fail\n");
++ return ret;
++ }
++ vif->rwnx_hw->vif_table[vif->vif_index] = NULL;
++ AICWFDBG(LOGINFO, "change_if from %d \n", vif->vif_index);
++ if ((ret = rwnx_send_add_if(vif->rwnx_hw, vif->wdev.address,
++ RWNX_VIF_TYPE(vif), p2p, &add_if_cfm))) {
++ AICWFDBG(LOGERROR, "add if fail\n");
++ return ret;
++ }
++ if (add_if_cfm.status != 0) {
++ AICWFDBG(LOGERROR, "add if status fail\n");
++ return -EIO;
++ }
++
++ AICWFDBG(LOGINFO, "change_if to %d \n", add_if_cfm.inst_nbr);
++ /* Save the index retrieved from LMAC */
++ spin_lock_bh(&vif->rwnx_hw->cb_lock);
++ vif->vif_index = add_if_cfm.inst_nbr;
++ vif->rwnx_hw->vif_table[add_if_cfm.inst_nbr] = vif;
++ spin_unlock_bh(&vif->rwnx_hw->cb_lock);
++ }
++
++ if (type == NL80211_IFTYPE_MONITOR) {
++ vif->rwnx_hw->monitor_vif = vif->vif_index;
++ #if defined(CONFIG_RWNX_MON_XMIT)
++ rwnx_txq_unk_vif_init(vif);
++ #endif
++ #if defined(CONFIG_RWNX_MON_RXFILTER)
++ rwnx_send_set_filter(vif->rwnx_hw, (FIF_BCN_PRBRESP_PROMISC | FIF_OTHER_BSS | FIF_PSPOLL | FIF_PROBE_REQ));
++ #endif
++ } else {
++ vif->rwnx_hw->monitor_vif = RWNX_INVALID_VIF;
++ }
++
++ return 0;
++}
++
++static int rwnx_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
++{
++ int ret = 0;
++
++ //do nothing
++ AICWFDBG(LOGINFO, "P2P interface started\n");
++
++ return ret;
++}
++
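++/* Stop the P2P device: abort any scan running on its wdev, stop the
++ * keep-alive timer and, if the vif is still up, remove it from the firmware. */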
++static void rwnx_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
++{
++#if 0
++ int ret = 0;
++ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
++
++ if (!cfg)
++ return;
++
++ CFGP2P_DBG(("Enter\n"));
++
++ ret = wl_cfg80211_scan_stop(cfg, wdev);
++ if (unlikely(ret < 0)) {
++ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
++ }
++
++ if (!cfg->p2p)
++ return;
++
++ /* Cancel any on-going listen */
++ wl_cfgp2p_cancel_listen(cfg, bcmcfg_to_prmry_ndev(cfg), wdev, TRUE);
++
++ ret = wl_cfgp2p_disable_discovery(cfg);
++ if (unlikely(ret < 0)) {
++ CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
++ }
++
++ p2p_on(cfg) = false;
++#endif
++ int ret = 0;
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++ /* Abort scan request on the vif */
++ if (rwnx_hw->scan_request &&
++ rwnx_hw->scan_request->wdev == &rwnx_vif->wdev) {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ struct cfg80211_scan_info info = {
++ .aborted = true,
++ };
++
++ cfg80211_scan_done(rwnx_hw->scan_request, &info);
++#else
++ cfg80211_scan_done(rwnx_hw->scan_request, true);
++#endif
++ rwnx_hw->scan_request = NULL;
++ ret = rwnx_send_scanu_cancel_req(rwnx_hw, NULL);
++ if (ret){
++ AICWFDBG(LOGERROR, "scanu_cancel fail\n");
++ }
++ }
++
++ if (rwnx_vif == rwnx_hw->p2p_dev_vif) {
++ rwnx_hw->is_p2p_alive = 0;
++ if (timer_pending(&rwnx_hw->p2p_alive_timer)) {
++ del_timer_sync(&rwnx_hw->p2p_alive_timer);
++ }
++ if (rwnx_vif->up) {
++ rwnx_send_remove_if(rwnx_hw, rwnx_vif->vif_index, true);
++ /* Ensure that we won't process disconnect ind */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_vif->up = false;
++ rwnx_hw->vif_table[rwnx_vif->vif_index] = NULL;
++ rwnx_hw->vif_started--;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ }
++ }
++
++ AICWFDBG(LOGINFO, "Exit. P2P interface stopped\n");
++
++ return;
++}
++
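++/* Return 1 if any requested SSID starts with the P2P wildcard prefix
++ * "DIRECT-", i.e. the scan was issued on behalf of a P2P search. */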
++int rwnx_send_check_p2p(struct cfg80211_scan_request *param){
++ int index = (u8)min_t(int, SCAN_SSID_MAX, param->n_ssids);
++ int i = 0;
++
++ for(i = 0;i < index;i++){
++ if (!memcmp("DIRECT-", param->ssids[i].ssid,
++ (sizeof("DIRECT-") - 1))) {
++ //printk("AIDEN rwnx_send_check_p2p!!\r\n");
++ return 1;
++ }
++ }
++ return 0;
++}
++
++/**
++ * @scan: Request to do a scan. If returning zero, the scan request is given
++ * to the driver, and will be valid until passed to cfg80211_scan_done().
++ * For scan results, call cfg80211_inform_bss(); you can call this outside
++ * the scan/scan_done bracket too.
++ */
++static int rwnx_cfg80211_scan(struct wiphy *wiphy,
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ struct net_device *dev,
++ #endif
++ struct cfg80211_scan_request *request)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ struct rwnx_vif *rwnx_vif = container_of(request->wdev, struct rwnx_vif,
++ wdev);
++#else
++ struct rwnx_vif* rwnx_vif = netdev_priv(request->dev);
++#endif
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if(testmode){
++ AICWFDBG(LOGERROR, "%s in testmode return busy\r\n", __func__);
++ return -EBUSY;
++ }
++
++ if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTING){
++ AICWFDBG(LOGERROR, "%s in connecting return busy\r\n", __func__);
++ return -EBUSY;
++ }
++
++#ifndef CONFIG_STA_SCAN_WHEN_P2P_WORKING
++ if (p2p_working && RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_P2P_CLIENT &&
++ !rwnx_send_check_p2p(request)) {
++ AICWFDBG(LOGINFO, "p2p is working, scan abort\n");
++ return -EBUSY;
++ }
++#endif
++
++ if (scanning) {
++ AICWFDBG(LOGERROR, "%s is scanning, abort\n", __func__);
++ #if 0//AIDEN test
++ error = rwnx_send_scanu_cancel_req(rwnx_hw, NULL);
++ if (error)
++ return error;
++ msleep(150);
++ #endif
++ return -EBUSY;
++ }
++
++ if((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION ||RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) && rwnx_vif->sta.external_auth){
++ AICWFDBG(LOGINFO, "scan abort: external auth\r\n");
++ return -EBUSY;
++ }
++
++ rwnx_hw->scan_request = request;
++ if ((error = rwnx_send_scanu_req(rwnx_hw, rwnx_vif, request)))
++ return error;
++
++ return 0;
++}
++
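++/* key_flag is cleared whenever a key is installed and set again on disconnect:
++ * del_key on a station interface only removes hardware keys after an explicit
++ * disconnect (see rwnx_cfg80211_del_key() and rwnx_cfg80211_disconnect()). */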
++bool key_flag = false;
++/**
++ * @add_key: add a key with the given parameters. @mac_addr will be %NULL
++ * when adding a group key.
++ */
++static int rwnx_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ int link_id,
++#endif
++ u8 key_index, bool pairwise, const u8 *mac_addr,
++ struct key_params *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif = netdev_priv(netdev);
++ int i, error = 0;
++ struct mm_key_add_cfm key_add_cfm;
++ u8_l cipher = 0;
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_key *rwnx_key;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (mac_addr) {
++ sta = rwnx_get_sta(rwnx_hw, mac_addr);
++ if (!sta)
++ return -EINVAL;
++ rwnx_key = &sta->key;
++ if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
++ vif->sta.paired_cipher_type = params->cipher;
++ }
++ else {
++ rwnx_key = &vif->key[key_index];
++ vif->key_has_add = 1;
++ if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
++ vif->sta.group_cipher_type = params->cipher;
++ }
++
++ /* Retrieve the cipher suite selector */
++ switch (params->cipher) {
++ case WLAN_CIPHER_SUITE_WEP40:
++ cipher = MAC_CIPHER_WEP40;
++ break;
++ case WLAN_CIPHER_SUITE_WEP104:
++ cipher = MAC_CIPHER_WEP104;
++ break;
++ case WLAN_CIPHER_SUITE_TKIP:
++ cipher = MAC_CIPHER_TKIP;
++ break;
++ case WLAN_CIPHER_SUITE_CCMP:
++ cipher = MAC_CIPHER_CCMP;
++ break;
++ case WLAN_CIPHER_SUITE_AES_CMAC:
++ cipher = MAC_CIPHER_BIP_CMAC_128;
++ break;
++ case WLAN_CIPHER_SUITE_SMS4:
++ {
++ // Need to reverse key order
++ u8 tmp, *key = (u8 *)params->key;
++ cipher = MAC_CIPHER_WPI_SMS4;
++ for (i = 0; i < WPI_SUBKEY_LEN/2; i++) {
++ tmp = key[i];
++ key[i] = key[WPI_SUBKEY_LEN - 1 - i];
++ key[WPI_SUBKEY_LEN - 1 - i] = tmp;
++ }
++ for (i = 0; i < WPI_SUBKEY_LEN/2; i++) {
++ tmp = key[i + WPI_SUBKEY_LEN];
++ key[i + WPI_SUBKEY_LEN] = key[WPI_KEY_LEN - 1 - i];
++ key[WPI_KEY_LEN - 1 - i] = tmp;
++ }
++ break;
++ }
++ default:
++ return -EINVAL;
++ }
++
++ key_flag = false;
++ if ((error = rwnx_send_key_add(rwnx_hw, vif->vif_index,
++ (sta ? sta->sta_idx : 0xFF), pairwise,
++ (u8 *)params->key, params->key_len,
++ key_index, cipher, &key_add_cfm)))
++ return error;
++
++ if (key_add_cfm.status != 0) {
++ RWNX_PRINT_CFM_ERR(key_add);
++ return -EIO;
++ }
++
++ /* Save the index retrieved from LMAC */
++ rwnx_key->hw_idx = key_add_cfm.hw_key_idx;
++
++ return 0;
++}
++
++/**
++ * @get_key: get information about the key with the given parameters.
++ * @mac_addr will be %NULL when requesting information for a group
++ * key. All pointers given to the @callback function need not be valid
++ * after it returns. This function should return an error if it is
++ * not possible to retrieve the key, -ENOENT if it doesn't exist.
++ *
++ */
++static int rwnx_cfg80211_get_key(struct wiphy *wiphy, struct net_device *netdev,
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ int link_id,
++#endif
++
++ u8 key_index, bool pairwise, const u8 *mac_addr,
++ void *cookie,
++ void (*callback)(void *cookie, struct key_params*))
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ return -1;
++}
++
++
++/**
++ * @del_key: remove a key given the @mac_addr (%NULL for a group key)
++ * and @key_index, return -ENOENT if the key doesn't exist.
++ */
++static int rwnx_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ int link_id,
++#endif
++
++ u8 key_index, bool pairwise, const u8 *mac_addr)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif = netdev_priv(netdev);
++ int error;
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_key *rwnx_key;
++ if (!key_flag && vif->wdev.iftype == NL80211_IFTYPE_STATION)
++ return 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ if (mac_addr) {
++ sta = rwnx_get_sta(rwnx_hw, mac_addr);
++ if (!sta)
++ return -EINVAL;
++ rwnx_key = &sta->key;
++ if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
++ vif->sta.paired_cipher_type = 0xff;
++ }
++ else {
++ rwnx_key = &vif->key[key_index];
++ vif->key_has_add = 0;
++ if (vif->wdev.iftype == NL80211_IFTYPE_STATION || vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
++ vif->sta.group_cipher_type = 0xff;
++ }
++
++ error = rwnx_send_key_del(rwnx_hw, rwnx_key->hw_idx);
++
++ rwnx_key->hw_idx = 0;
++ return error;
++}
++
++/**
++ * @set_default_key: set the default key on an interface
++ */
++static int rwnx_cfg80211_set_default_key(struct wiphy *wiphy,
++ struct net_device *netdev,
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ int link_id,
++#endif
++ u8 key_index, bool unicast, bool multicast)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ return 0;
++}
++
++/**
++ * @set_default_mgmt_key: set the default management frame key on an interface
++ */
++static int rwnx_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
++ struct net_device *netdev,
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ int link_id,
++#endif
++ u8 key_index)
++{
++ return 0;
++}
++
++/**
++ * @connect: Connect to the ESS with the specified parameters. When connected,
++ * call cfg80211_connect_result() with status code %WLAN_STATUS_SUCCESS.
++ * If the connection fails for some reason, call cfg80211_connect_result()
++ * with the status from the AP.
++ * (invoked with the wireless_dev mutex held)
++ */
++
++static int rwnx_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
++ struct cfg80211_connect_params *sme)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct sm_connect_cfm sm_connect_cfm;
++ int error = 0;
++ int is_wep = ((sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) ||
++ (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104) ||
++ (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP40) ||
++ (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP104));
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#if 1
++#if 0
++ if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_CONNECTED){
++ AICWFDBG(LOGERROR, "%s driver was connected return it \r\n", __func__);
++ return -EALREADY;
++ }
++#endif
++ if((int)atomic_read(&rwnx_vif->drv_conn_state) == (int)RWNX_DRV_STATUS_DISCONNECTING){
++ AICWFDBG(LOGERROR, "%s driver is disconnecting return it \r\n", __func__);
++ return -EALREADY;
++ }
++#endif
++
++ atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_CONNECTING);
++
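++ /* WEP: if the previous attempt with this auth type failed, toggle between
++ * shared-key and open-system authentication before retrying. */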
++ if(is_wep) {
++ if(sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
++ if(rwnx_vif->wep_enabled && rwnx_vif->wep_auth_err) {
++ if(rwnx_vif->last_auth_type == NL80211_AUTHTYPE_SHARED_KEY)
++ sme->auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
++ else
++ sme->auth_type = NL80211_AUTHTYPE_SHARED_KEY;
++ } else {
++ if((rwnx_vif->wep_enabled && !rwnx_vif->wep_auth_err))
++ sme->auth_type = rwnx_vif->last_auth_type;
++ else
++ sme->auth_type = NL80211_AUTHTYPE_SHARED_KEY;
++ }
++ AICWFDBG(LOGINFO, "auto: use sme->auth_type = %d\r\n", sme->auth_type);
++ } else {
++ if (rwnx_vif->wep_enabled && rwnx_vif->wep_auth_err && (sme->auth_type == rwnx_vif->last_auth_type)) {
++ if(sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
++ sme->auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
++ AICWFDBG(LOGINFO, "start connect, auth_type changed, shared --> open\n");
++ } else if(sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) {
++ sme->auth_type = NL80211_AUTHTYPE_SHARED_KEY;
++ AICWFDBG(LOGINFO, "start connect, auth_type changed, open --> shared\n");
++ }
++ }
++ }
++ }
++
++ /* For SHARED-KEY authentication, must install key first */
++ if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY && sme->key)
++ {
++ struct key_params key_params;
++ key_params.key = (u8*)sme->key;
++ key_params.seq = NULL;
++ key_params.key_len = sme->key_len;
++ key_params.seq_len = 0;
++ key_params.cipher = sme->crypto.cipher_group;
++ rwnx_cfg80211_add_key(wiphy, dev,
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++ 0,
++#endif
++ sme->key_idx, false, NULL, &key_params);
++ }
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL)
++ else if ((sme->auth_type == NL80211_AUTHTYPE_SAE) &&
++ !(sme->flags & CONNECT_REQ_EXTERNAL_AUTH_SUPPORT)) {
++ netdev_err(dev, "Doesn't support SAE without external authentication\n");
++ return -EINVAL;
++ }
++#endif
++
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) {
++ rwnx_hw->is_p2p_connected = 1;
++ }
++
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) {
++ rwnx_vif->sta.paired_cipher_type = 0xff;
++ rwnx_vif->sta.group_cipher_type = 0xff;
++ }
++
++
++ /* Forward the information to the LMAC */
++ if ((error = rwnx_send_sm_connect_req(rwnx_hw, rwnx_vif, sme, &sm_connect_cfm)))
++ return error;
++
++ // Check the status
++ switch (sm_connect_cfm.status)
++ {
++ case CO_OK:
++ error = 0;
++ break;
++ case CO_BUSY:
++ error = -EINPROGRESS;
++ break;
++ case CO_OP_IN_PROGRESS:
++ error = -EALREADY;
++ break;
++ default:
++ error = -EIO;
++ break;
++ }
++
++ return error;
++}
++
++/**
++ * @disconnect: Disconnect from the BSS/ESS.
++ * (invoked with the wireless_dev mutex held)
++ */
++static int rwnx_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
++ u16 reason_code)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ AICWFDBG(LOGINFO, "%s drv_vif_index:%d disconnect reason:%d \r\n",
++ __func__, rwnx_vif->drv_vif_index, reason_code);
++
++#if 0
++ while(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_CONNECTING){
++ AICWFDBG(LOGERROR, "%s driver connecting waiting 100ms \r\n", __func__);
++ msleep(100);
++ retry--;
++ if(retry == 0){
++ break;
++ }
++ if(rwnx_hw->usbdev->state == USB_DOWN_ST){
++ break;
++ }
++ }
++ if(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_CONNECTED){
++
++ atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTING);
++ }
++#endif
++ if(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_CONNECTING){
++ AICWFDBG(LOGINFO, "%s call cfg80211_connect_result reason:%d \r\n",
++ __func__, reason_code);
++ msleep(500);
++ }
++
++ if(atomic_read(&rwnx_vif->drv_conn_state) == RWNX_DRV_STATUS_CONNECTED){
++ atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTING);
++
++ #ifdef CONFIG_USE_WIRELESS_EXT
++ memset(rwnx_hw->wext_essid, 0, 32);
++ #endif
++ key_flag = true;
++ return(rwnx_send_sm_disconnect_req(rwnx_hw, rwnx_vif, reason_code));
++ }else{
++ cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0,
++ reason_code?reason_code:WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_ATOMIC);
++ atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED);
++ return 0;
++ }
++
++}
++
++#ifdef CONFIG_SCHED_SCAN
++
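++/* Scheduled scan stop: if a scan issued on behalf of the scheduled-scan
++ * request is still pending, cancel it in the firmware. */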
++static int rwnx_cfg80211_sched_scan_stop(struct wiphy *wiphy,
++ struct net_device *ndev
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
++ ,u64 reqid)
++#else
++ )
++#endif
++{
++
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ //struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ AICWFDBG(LOGINFO, "%s enter wiphy:%p\r\n", __func__, wiphy);
++
++ if(rwnx_hw->scan_request){
++ AICWFDBG(LOGINFO, "%s rwnx_send_scanu_cancel_req\r\n", __func__);
++ return rwnx_send_scanu_cancel_req(rwnx_hw, NULL);
++ }else{
++ return 0;
++ }
++}
++
++
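++/*
++ * Scheduled scan start: build a one-shot cfg80211_scan_request from the
++ * scheduled-scan parameters (each match set becomes an SSID to probe for),
++ * remember the original request in sched_scan_req and hand the scan over to
++ * rwnx_cfg80211_scan().
++ */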
++static int rwnx_cfg80211_sched_scan_start(struct wiphy *wiphy,
++ struct net_device *dev,
++ struct cfg80211_sched_scan_request *request)
++
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct cfg80211_scan_request *scan_request = NULL;
++
++ int ret = 0;
++ int index = 0;
++
++ AICWFDBG(LOGINFO, "%s enter wiphy:%p\r\n", __func__, wiphy);
++
++ if(rwnx_hw->is_sched_scan || scanning){
++ AICWFDBG(LOGERROR, "%s is_sched_scanning and scanning, busy", __func__);
++ return -EBUSY;
++ }
++
++ scan_request = (struct cfg80211_scan_request *)kzalloc(sizeof(struct cfg80211_scan_request) +
++ request->n_channels * sizeof(request->channels[0]), GFP_KERNEL); // include the trailing channels[] array, zero-init
++ if (!scan_request)
++ return -ENOMEM;
++
++ scan_request->ssids = request->ssids;
++ scan_request->n_channels = request->n_channels;
++ scan_request->n_ssids = request->n_match_sets;
++ scan_request->no_cck = false;
++ scan_request->ie = request->ie;
++ scan_request->ie_len = request->ie_len;
++ scan_request->flags = request->flags;
++ scan_request->wiphy = wiphy;
++ scan_request->scan_start = request->scan_start;
++ memcpy(scan_request->mac_addr, request->mac_addr, ETH_ALEN);
++ memcpy(scan_request->mac_addr_mask, request->mac_addr_mask, ETH_ALEN);
++ rwnx_hw->sched_scan_req = request;
++ scan_request->wdev = &rwnx_vif->wdev;
++ AICWFDBG(LOGDEBUG, "%s scan_request->n_channels:%d \r\n", __func__, scan_request->n_channels);
++ AICWFDBG(LOGDEBUG, "%s scan_request->n_ssids:%d \r\n", __func__, scan_request->n_ssids);
++
++ for(index = 0; index < scan_request->n_ssids; index++){
++ memset(scan_request->ssids[index].ssid, 0, IEEE80211_MAX_SSID_LEN);
++
++ memcpy(scan_request->ssids[index].ssid,
++ request->match_sets[index].ssid.ssid,
++ IEEE80211_MAX_SSID_LEN);
++
++ scan_request->ssids[index].ssid_len = request->match_sets[index].ssid.ssid_len;
++
++ AICWFDBG(LOGDEBUG, "%s request ssid:%s len:%d \r\n", __func__,
++ scan_request->ssids[index].ssid, scan_request->ssids[index].ssid_len);
++ }
++
++ for(index = 0;index < scan_request->n_channels; index++){
++ scan_request->channels[index] = request->channels[index];
++
++ if(scan_request->channels[index] == NULL){
++ AICWFDBG(LOGERROR, "%s ERROR!!! channels is NULL", __func__);
++ continue;
++ }
++
++ AICWFDBG(LOGDEBUG, "%s scan_request->channels[%d]:%d \r\n", __func__, index,
++ scan_request->channels[index]->center_freq);
++ }
++
++ rwnx_hw->is_sched_scan = true;
++
++ if(scanning){
++ AICWFDBG(LOGERROR, "%s already scanning, abort", __func__);
++ kfree(scan_request);
++ return -EBUSY;
++ }else{
++ ret = rwnx_cfg80211_scan(wiphy, scan_request);
++ }
++
++ return ret;
++}
++#endif //CONFIG_SCHED_SCAN
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL)
++/**
++ * @external_auth: indicates result of offloaded authentication processing from
++ * user space
++ */
++static int rwnx_cfg80211_external_auth(struct wiphy *wiphy, struct net_device *dev,
++ struct cfg80211_external_auth_params *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++
++ //printk("%s Enter \r\n");
++ if (!rwnx_vif->sta.external_auth)
++ return -EINVAL;
++
++ rwnx_external_auth_disable(rwnx_vif);
++ return rwnx_send_sm_external_auth_required_rsp(rwnx_hw, rwnx_vif,
++ params->status);
++}
++#endif
++
++/**
++ * @add_station: Add a new station.
++ */
++static int rwnx_cfg80211_add_station(struct wiphy *wiphy,
++ struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *mac,
++#else
++ const u8 *mac,
++#endif
++ struct station_parameters *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct me_sta_add_cfm me_sta_add_cfm;
++ int error = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ WARN_ON(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN);
++
++ /* Do not add TDLS station */
++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
++ return 0;
++
++ /* Indicate we are in a STA addition process - This will allow handling
++ * potential PS mode change indications correctly
++ */
++ rwnx_hw->adding_sta = true;
++
++ /* Forward the information to the LMAC */
++ if ((error = rwnx_send_me_sta_add(rwnx_hw, params, mac, rwnx_vif->vif_index,
++ &me_sta_add_cfm))) {
++ rwnx_hw->adding_sta = false; // do not leave the STA-addition flag set on failure
++ return error;
++ }
++
++ // Check the status
++ switch (me_sta_add_cfm.status)
++ {
++ case CO_OK:
++ {
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[me_sta_add_cfm.sta_idx];
++ int tid;
++ sta->aid = params->aid;
++
++ sta->sta_idx = me_sta_add_cfm.sta_idx;
++ sta->ch_idx = rwnx_vif->ch_index;
++ sta->vif_idx = rwnx_vif->vif_index;
++ sta->vlan_idx = sta->vif_idx;
++ sta->qos = (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME)) != 0;
++#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION
++ sta->ht = params->link_sta_params.ht_capa ? 1 : 0;
++ sta->vht = params->link_sta_params.vht_capa ? 1 : 0;
++#else
++ sta->ht = params->ht_capa ? 1 : 0;
++ sta->vht = params->vht_capa ? 1 : 0;
++#endif
++
++ sta->acm = 0;
++ sta->key.hw_idx = 0;
++
++ if (params->local_pm != NL80211_MESH_POWER_UNKNOWN)
++ sta->mesh_pm = params->local_pm;
++ else
++ sta->mesh_pm = rwnx_vif->ap.next_mesh_pm;
++ rwnx_update_mesh_power_mode(rwnx_vif);
++
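++ /* Translate the per-AC uapsd_queues bitmap from cfg80211 into the driver's
++ * per-TID uapsd_tids bitmap via the TID -> HW queue -> UAPSD bit tables. */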
++ for (tid = 0; tid < NX_NB_TXQ_PER_STA; tid++) {
++ int uapsd_bit = rwnx_hwq2uapsd[rwnx_tid2hwq[tid]];
++ if (params->uapsd_queues & uapsd_bit)
++ sta->uapsd_tids |= 1 << tid;
++ else
++ sta->uapsd_tids &= ~(1 << tid);
++ }
++ memcpy(sta->mac_addr, mac, ETH_ALEN);
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_register_rc_stat(rwnx_hw, sta);
++#endif
++ /* Ensure that we won't process PS change or channel switch ind*/
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_txq_sta_init(rwnx_hw, sta, rwnx_txq_vif_get_status(rwnx_vif));
++ list_add_tail(&sta->list, &rwnx_vif->ap.sta_list);
++ sta->valid = true;
++ rwnx_ps_bh_enable(rwnx_hw, sta, sta->ps.active || me_sta_add_cfm.pm_state);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ error = 0;
++ if(atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt) > 0)
++ AICWFDBG(LOGDEBUG, "sta idx %d fc error %d.\n", sta->sta_idx, atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt));
++
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
++ struct station_info sinfo;
++ memset(&sinfo, 0, sizeof(struct station_info));
++ sinfo.assoc_req_ies = NULL;
++ sinfo.assoc_req_ies_len = 0;
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
++ sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
++ #endif
++ cfg80211_new_sta(rwnx_vif->ndev, sta->mac_addr, &sinfo, GFP_KERNEL);
++ }
++#ifdef CONFIG_RWNX_BFMER
++ if (rwnx_hw->mod_params->bfmer)
++ rwnx_send_bfmer_enable(rwnx_hw, sta, params->vht_capa);
++
++ rwnx_mu_group_sta_init(sta, params->vht_capa);
++#endif /* CONFIG_RWNX_BFMER */
++
++ #define PRINT_STA_FLAG(f) \
++ (params->sta_flags_set & BIT(NL80211_STA_FLAG_##f) ? "["#f"]" : "")
++
++ netdev_info(dev, "Add sta %d (%pM) flags=%s%s%s%s%s%s%s",
++ sta->sta_idx, mac,
++ PRINT_STA_FLAG(AUTHORIZED),
++ PRINT_STA_FLAG(SHORT_PREAMBLE),
++ PRINT_STA_FLAG(WME),
++ PRINT_STA_FLAG(MFP),
++ PRINT_STA_FLAG(AUTHENTICATED),
++ PRINT_STA_FLAG(TDLS_PEER),
++ PRINT_STA_FLAG(ASSOCIATED));
++ #undef PRINT_STA_FLAG
++ break;
++ }
++ default:
++ error = -EBUSY;
++ break;
++ }
++
++ rwnx_hw->adding_sta = false;
++
++ return error;
++}
++
++/**
++ * @del_station: Remove a station
++ */
++static int rwnx_cfg80211_del_station_compat(struct wiphy *wiphy,
++ struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *mac
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
++ const u8 *mac
++#else
++ struct station_del_parameters *params
++#endif
++)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_sta *cur, *tmp;
++ int error = 0, found = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
++ const u8 *mac = NULL;
++#endif
++#ifdef AICWF_RX_REORDER
++ struct reord_ctrl_info *reord_info, *reord_tmp;
++ u8 *macaddr;
++ struct aicwf_rx_priv *rx_priv;
++#endif
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
++ if (params)
++ mac = params->mac;
++#endif
++
++ AICWFDBG(LOGDEBUG, "%s: %pM\n", __func__, mac);
++
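++ /* With a NULL MAC (e.g. when called from stop_ap) keep looping until the
++ * STA list is empty; with a specific MAC remove only that entry and stop
++ * after one pass. */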
++ do {
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ if(list_empty(&rwnx_vif->ap.sta_list)) {
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ break;
++ }
++
++ list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) {
++ if ((!mac) || (!memcmp(cur->mac_addr, mac, ETH_ALEN))) {
++ found = 1;
++ break;
++ }
++ }
++
++ if(found) {
++ cur->ps.active = false;
++ cur->valid = false;
++ list_del(&cur->list);
++ }
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ if(found) {
++ netdev_info(dev, "Del sta %d (%pM)", cur->sta_idx, cur->mac_addr);
++ if (cur->vif_idx != cur->vlan_idx) {
++ struct rwnx_vif *vlan_vif;
++ vlan_vif = rwnx_hw->vif_table[cur->vlan_idx];
++ if (vlan_vif->up) {
++ if ((RWNX_VIF_TYPE(vlan_vif) == NL80211_IFTYPE_AP_VLAN) &&
++ (vlan_vif->use_4addr)) {
++ vlan_vif->ap_vlan.sta_4a = NULL;
++ } else {
++ WARN(1, "Deleting sta belonging to VLAN other than AP_VLAN 4A");
++ }
++ }
++ }
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
++ cfg80211_del_sta(rwnx_vif->ndev, cur->mac_addr, GFP_KERNEL);
++ }
++
++#ifdef AICWF_RX_REORDER
++#ifdef AICWF_SDIO_SUPPORT
++ rx_priv = rwnx_hw->sdiodev->rx_priv;
++#else
++ rx_priv = rwnx_hw->usbdev->rx_priv;
++#endif
++ if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
++ BUG();//should be other function
++ }
++ else if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)){
++ macaddr = cur->mac_addr;
++ AICWFDBG(LOGINFO, "deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0],macaddr[1],macaddr[2], \
++ macaddr[3],macaddr[4],macaddr[5]);
++ list_for_each_entry_safe(reord_info, reord_tmp,
++ &rx_priv->stas_reord_list, list) {
++ AICWFDBG(LOGINFO, "reord_mac:%x,%x,%x,%x,%x,%x\r\n", reord_info->mac_addr[0],reord_info->mac_addr[1],reord_info->mac_addr[2], \
++ reord_info->mac_addr[3],reord_info->mac_addr[4],reord_info->mac_addr[5]);
++ if (!memcmp(reord_info->mac_addr, macaddr, 6)) {
++ reord_deinit_sta(rx_priv, reord_info);
++ break;
++ }
++ }
++ }
++#endif
++
++ rwnx_txq_sta_deinit(rwnx_hw, cur);
++ error = rwnx_send_me_sta_del(rwnx_hw, cur->sta_idx, false);
++ if ((error != 0) && (error != -EPIPE))
++ return error;
++
++#ifdef CONFIG_RWNX_BFMER
++ // Disable Beamformer if supported
++ rwnx_bfmer_report_del(rwnx_hw, cur);
++ rwnx_mu_group_sta_del(rwnx_hw, cur);
++#endif /* CONFIG_RWNX_BFMER */
++
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister_rc_stat(rwnx_hw, cur);
++#endif
++ }
++
++ if(mac)
++ break;
++ } while (1);
++
++ rwnx_update_mesh_power_mode(rwnx_vif);
++
++ if(!found && mac != NULL)
++ return -ENOENT;
++ else
++ return 0;
++}
++
++
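++/* Deferred work that removes the station recorded in rwnx_hw->sta_mac_addr
++ * from the AP/P2P-GO VIF identified by apm_vif_idx, presumably scheduled when
++ * the firmware reports a station loss. */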
++void apm_staloss_work_process(struct work_struct *work)
++{
++ struct rwnx_hw *rwnx_hw = container_of(work, struct rwnx_hw, apmStalossWork);
++ struct rwnx_sta *cur, *tmp;
++ int error = 0;
++
++#ifdef AICWF_RX_REORDER
++ struct reord_ctrl_info *reord_info, *reord_tmp;
++ u8 *macaddr;
++ struct aicwf_rx_priv *rx_priv;
++#endif
++ struct rwnx_vif *rwnx_vif;
++ bool_l found = false;
++ const u8 *mac = rwnx_hw->sta_mac_addr;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ // Look for VIF entry
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (rwnx_vif->vif_index == rwnx_hw->apm_vif_idx) {
++ found = true;
++ break;
++ }
++ }
++
++ AICWFDBG(LOGINFO, "apm vif idx=%d, found=%d, mac addr=%pM\n", rwnx_hw->apm_vif_idx, found, mac);
++ if (!found || !rwnx_vif || (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_AP && RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_P2P_GO))
++ {
++ return;
++ }
++
++ found = false;
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) {
++ if ((mac) && (!memcmp(cur->mac_addr, mac, ETH_ALEN))) {
++ found = true;
++ break;
++ }
++ }
++ if(found) {
++ cur->ps.active = false;
++ cur->valid = false;
++ list_del(&cur->list);
++ }
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ if(found) {
++ netdev_info(rwnx_vif->ndev, "Del sta %d (%pM)", cur->sta_idx, cur->mac_addr);
++ if (cur->vif_idx != cur->vlan_idx) {
++ struct rwnx_vif *vlan_vif;
++ vlan_vif = rwnx_hw->vif_table[cur->vlan_idx];
++ if (vlan_vif->up) {
++ if ((RWNX_VIF_TYPE(vlan_vif) == NL80211_IFTYPE_AP_VLAN) &&
++ (vlan_vif->use_4addr)) {
++ vlan_vif->ap_vlan.sta_4a = NULL;
++ } else {
++ WARN(1, "Deleting sta belonging to VLAN other than AP_VLAN 4A");
++ }
++ }
++ }
++ // if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP || rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
++ // cfg80211_del_sta(rwnx_vif->ndev, cur->mac_addr, GFP_KERNEL);
++ // }
++
++#ifdef AICWF_RX_REORDER
++#ifdef AICWF_SDIO_SUPPORT
++ rx_priv = rwnx_hw->sdiodev->rx_priv;
++#else
++ rx_priv = rwnx_hw->usbdev->rx_priv;
++#endif
++ if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
++ BUG();//should be other function
++ } else if ((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)) {
++ macaddr = cur->mac_addr;
++ AICWFDBG(LOGINFO, "deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0], macaddr[1], macaddr[2], \
++ macaddr[3], macaddr[4], macaddr[5]);
++ //spin_lock_bh(&rx_priv->stas_reord_lock);
++ list_for_each_entry_safe(reord_info, reord_tmp,
++ &rx_priv->stas_reord_list, list) {
++ AICWFDBG(LOGINFO, "reord_mac:%x,%x,%x,%x,%x,%x\r\n", reord_info->mac_addr[0], reord_info->mac_addr[1], reord_info->mac_addr[2], \
++ reord_info->mac_addr[3], reord_info->mac_addr[4], reord_info->mac_addr[5]);
++ if (!memcmp(reord_info->mac_addr, macaddr, 6)) {
++ reord_deinit_sta(rx_priv, reord_info);
++ break;
++ }
++ }
++ //spin_unlock_bh(&rx_priv->stas_reord_lock);
++ }
++#endif
++
++ rwnx_txq_sta_deinit(rwnx_hw, cur);
++ error = rwnx_send_me_sta_del(rwnx_hw, cur->sta_idx, false);
++ if ((error != 0) && (error != -EPIPE))
++ return;
++
++#ifdef CONFIG_RWNX_BFMER
++ // Disable Beamformer if supported
++ rwnx_bfmer_report_del(rwnx_hw, cur);
++ rwnx_mu_group_sta_del(rwnx_hw, cur);
++#endif /* CONFIG_RWNX_BFMER */
++
++
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister_rc_stat(rwnx_hw, cur);
++#endif
++ }else {
++ printk("sta not found: %pM\n", mac);
++ return;
++ }
++
++ rwnx_update_mesh_power_mode(rwnx_vif);
++}
++
++
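++/* Deferred work for probe_client: check whether the probed MAC is still in the
++ * AP's STA list and report the outcome through cfg80211_probe_status(). */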
++void apm_probe_sta_work_process(struct work_struct *work)
++{
++ struct apm_probe_sta *probe_sta = container_of(work, struct apm_probe_sta, apmprobestaWork);
++ struct rwnx_vif *rwnx_vif = container_of(probe_sta, struct rwnx_vif, sta_probe);
++ bool found = false;
++ struct rwnx_sta *cur, *tmp;
++
++ u8 *mac = rwnx_vif->sta_probe.sta_mac_addr;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++ list_for_each_entry_safe(cur, tmp, &rwnx_vif->ap.sta_list, list) {
++ if (!memcmp(cur->mac_addr, mac, ETH_ALEN)) {
++ found = true;
++ break;
++ }
++ }
++ spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++
++ printk("sta %pM found = %d\n", mac, found);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
++ if(found)
++ cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 1, 0, false, GFP_ATOMIC);
++ else
++ cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 0, 0, false, GFP_ATOMIC);
++#else
++ if(found)
++ cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 1, GFP_ATOMIC);
++ else
++ cfg80211_probe_status(rwnx_vif->ndev, mac, (u64)rwnx_vif->sta_probe.probe_id, 0, GFP_ATOMIC);
++#endif
++ rwnx_vif->sta_probe.probe_id ++;
++}
++
++/**
++ * @change_station: Modify a given station. Note that flags changes are not much
++ * validated in cfg80211, in particular the auth/assoc/authorized flags
++ * might come to the driver in invalid combinations -- make sure to check
++ * them, also against the existing state! Drivers must call
++ * cfg80211_check_station_change() to validate the information.
++ */
++static int rwnx_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *mac,
++#else
++ const u8 *mac,
++#endif
++ struct station_parameters *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct rwnx_sta *sta;
++
++ sta = rwnx_get_sta(rwnx_hw, mac);
++ if (!sta)
++ {
++ /* Add the TDLS station */
++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
++ {
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct me_sta_add_cfm me_sta_add_cfm;
++ int error = 0;
++
++ /* Indicate we are in a STA addition process - This will allow handling
++ * potential PS mode change indications correctly
++ */
++ rwnx_hw->adding_sta = true;
++
++ /* Forward the information to the LMAC */
++ if ((error = rwnx_send_me_sta_add(rwnx_hw, params, mac, rwnx_vif->vif_index,
++ &me_sta_add_cfm))) {
++ rwnx_hw->adding_sta = false;
++ return error;
++ }
++
++ // Check the status
++ switch (me_sta_add_cfm.status)
++ {
++ case CO_OK:
++ {
++ int tid;
++ sta = &rwnx_hw->sta_table[me_sta_add_cfm.sta_idx];
++ sta->aid = params->aid;
++ sta->sta_idx = me_sta_add_cfm.sta_idx;
++ sta->ch_idx = rwnx_vif->ch_index;
++ sta->vif_idx = rwnx_vif->vif_index;
++ sta->vlan_idx = sta->vif_idx;
++ sta->qos = (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME)) != 0;
++#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION
++ sta->ht = params->link_sta_params.ht_capa ? 1 : 0;
++ sta->vht = params->link_sta_params.vht_capa ? 1 : 0;
++#else
++ sta->ht = params->ht_capa ? 1 : 0;
++ sta->vht = params->vht_capa ? 1 : 0;
++#endif
++ sta->acm = 0;
++ for (tid = 0; tid < NX_NB_TXQ_PER_STA; tid++) {
++ int uapsd_bit = rwnx_hwq2uapsd[rwnx_tid2hwq[tid]];
++ if (params->uapsd_queues & uapsd_bit)
++ sta->uapsd_tids |= 1 << tid;
++ else
++ sta->uapsd_tids &= ~(1 << tid);
++ }
++ memcpy(sta->mac_addr, mac, ETH_ALEN);
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_register_rc_stat(rwnx_hw, sta);
++#endif
++ /* Ensure that we won't process PS change or channel switch ind*/
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_txq_sta_init(rwnx_hw, sta, rwnx_txq_vif_get_status(rwnx_vif));
++ if (rwnx_vif->tdls_status == TDLS_SETUP_RSP_TX) {
++ rwnx_vif->tdls_status = TDLS_LINK_ACTIVE;
++ sta->tdls.initiator = true;
++ sta->tdls.active = true;
++ }
++ /* Set TDLS channel switch capability */
++ if ((params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) &&
++ !rwnx_vif->tdls_chsw_prohibited)
++ sta->tdls.chsw_allowed = true;
++ rwnx_vif->sta.tdls_sta = sta;
++ sta->valid = true;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++#ifdef CONFIG_RWNX_BFMER
++ if (rwnx_hw->mod_params->bfmer)
++ rwnx_send_bfmer_enable(rwnx_hw, sta, params->vht_capa);
++
++ rwnx_mu_group_sta_init(sta, NULL);
++#endif /* CONFIG_RWNX_BFMER */
++
++ #define PRINT_STA_FLAG(f) \
++ (params->sta_flags_set & BIT(NL80211_STA_FLAG_##f) ? "["#f"]" : "")
++
++ netdev_info(dev, "Add %s TDLS sta %d (%pM) flags=%s%s%s%s%s%s%s",
++ sta->tdls.initiator ? "initiator" : "responder",
++ sta->sta_idx, mac,
++ PRINT_STA_FLAG(AUTHORIZED),
++ PRINT_STA_FLAG(SHORT_PREAMBLE),
++ PRINT_STA_FLAG(WME),
++ PRINT_STA_FLAG(MFP),
++ PRINT_STA_FLAG(AUTHENTICATED),
++ PRINT_STA_FLAG(TDLS_PEER),
++ PRINT_STA_FLAG(ASSOCIATED));
++ #undef PRINT_STA_FLAG
++
++ break;
++ }
++ default:
++ error = -EBUSY;
++ break;
++ }
++
++ rwnx_hw->adding_sta = false;
++ } else {
++ return -EINVAL;
++ }
++ }
++
++ if (params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))
++ rwnx_send_me_set_control_port_req(rwnx_hw,
++ (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) != 0,
++ sta->sta_idx);
++
++ if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MESH_POINT) {
++ if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
++ if (params->plink_state < NUM_NL80211_PLINK_STATES) {
++ rwnx_send_mesh_peer_update_ntf(rwnx_hw, vif, sta->sta_idx, params->plink_state);
++ }
++ }
++
++ if (params->local_pm != NL80211_MESH_POWER_UNKNOWN) {
++ sta->mesh_pm = params->local_pm;
++ rwnx_update_mesh_power_mode(vif);
++ }
++ }
++
++ if (params->vlan) {
++ uint8_t vlan_idx;
++
++ vif = netdev_priv(params->vlan);
++ vlan_idx = vif->vif_index;
++
++ if (sta->vlan_idx != vlan_idx) {
++ struct rwnx_vif *old_vif;
++ old_vif = rwnx_hw->vif_table[sta->vlan_idx];
++ rwnx_txq_sta_switch_vif(sta, old_vif, vif);
++ sta->vlan_idx = vlan_idx;
++
++ if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP_VLAN) &&
++ (vif->use_4addr)) {
++ WARN((vif->ap_vlan.sta_4a),
++ "4A AP_VLAN interface with more than one sta");
++ vif->ap_vlan.sta_4a = sta;
++ }
++
++ if ((RWNX_VIF_TYPE(old_vif) == NL80211_IFTYPE_AP_VLAN) &&
++ (old_vif->use_4addr)) {
++ old_vif->ap_vlan.sta_4a = NULL;
++ }
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * @start_ap: Start acting in AP mode defined by the parameters.
++ */
++static int rwnx_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
++ struct cfg80211_ap_settings *settings)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct apm_start_cfm apm_start_cfm;
++ struct rwnx_ipc_elem_var elem;
++ struct rwnx_sta *sta;
++ int error = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ INIT_WORK(&rwnx_vif->sta_probe.apmprobestaWork, apm_probe_sta_work_process);
++ rwnx_vif->sta_probe.apmprobesta_wq = create_singlethread_workqueue("apmprobe_wq");
++ if (!rwnx_vif->sta_probe.apmprobesta_wq) {
++ txrx_err("insufficient memory to create apmprobe_wq.\n");
++ return -ENOBUFS;
++ }
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)
++ rwnx_hw->is_p2p_connected = 1;
++ /* Forward the information to the LMAC */
++ if ((error = rwnx_send_apm_start_req(rwnx_hw, rwnx_vif, settings,
++ &apm_start_cfm, &elem)))
++ goto end;
++
++ // Check the status
++ switch (apm_start_cfm.status)
++ {
++ case CO_OK:
++ {
++ u8 txq_status = 0;
++ rwnx_vif->ap.bcmc_index = apm_start_cfm.bcmc_idx;
++ rwnx_vif->ap.flags = 0;
++ #if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL)
++ rwnx_vif->ap.aic_index = 0;
++ #endif
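++ /* The confirm returns the index of the broadcast/multicast "station" used
++ * for BC/MC traffic on this BSS; initialise its entry in the STA table. */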
++ sta = &rwnx_hw->sta_table[apm_start_cfm.bcmc_idx];
++ sta->valid = true;
++ sta->aid = 0;
++ sta->sta_idx = apm_start_cfm.bcmc_idx;
++ sta->ch_idx = apm_start_cfm.ch_idx;
++ sta->vif_idx = rwnx_vif->vif_index;
++ sta->qos = false;
++ sta->acm = 0;
++ sta->ps.active = false;
++ rwnx_mu_group_sta_init(sta, NULL);
++ spin_lock_bh(&rwnx_hw->cb_lock);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
++ rwnx_chanctx_link(rwnx_vif, apm_start_cfm.ch_idx,
++ NULL);
++#else
++ rwnx_chanctx_link(rwnx_vif, apm_start_cfm.ch_idx,
++ &settings->chandef);
++#endif
++ if (rwnx_hw->cur_chanctx != apm_start_cfm.ch_idx) {
++ txq_status = RWNX_TXQ_STOP_CHAN;
++ }
++ rwnx_txq_vif_init(rwnx_hw, rwnx_vif, txq_status);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ netif_tx_start_all_queues(dev);
++ netif_carrier_on(dev);
++ error = 0;
++ /* If the AP channel is already the active one, radar activation is
++ probably skipped on MM_CHANNEL_SWITCH_IND (unless another vif uses
++ this ctxt). In any case, retest whether radar detection must be
++ activated. */
++ if (txq_status == 0) {
++ rwnx_radar_detection_enable_on_cur_channel(rwnx_hw);
++ }
++ break;
++ }
++ case CO_BUSY:
++ error = -EINPROGRESS;
++ break;
++ case CO_OP_IN_PROGRESS:
++ error = -EALREADY;
++ break;
++ default:
++ error = -EIO;
++ break;
++ }
++
++ if (error) {
++ netdev_info(dev, "Failed to start AP (%d)", error);
++ } else {
++ netdev_info(dev, "AP started: ch=%d, bcmc_idx=%d channel=%d bw=%d",
++ rwnx_vif->ch_index, rwnx_vif->ap.bcmc_index,
++ ((settings->chandef).chan)->center_freq,
++ ((settings->chandef).width));
++ }
++
++ end:
++ //rwnx_ipc_elem_var_deallocs(rwnx_hw, &elem);
++
++ return error;
++}
++
++
++/**
++ * @change_beacon: Change the beacon parameters for an access point mode
++ * interface. This should reject the call when AP mode wasn't started.
++ */
++static int rwnx_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
++ struct cfg80211_ap_update *info)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct rwnx_bcn *bcn = &vif->ap.bcn;
++ struct rwnx_ipc_elem_var elem;
++ u8 *buf;
++ int error = 0;
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ //elem init
++ elem.dma_addr = 0;
++
++ // Build the beacon
++ buf = rwnx_build_bcn(bcn, &info->beacon);
++ if (!buf)
++ return -ENOMEM;
++
++ rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, bcn->len);
++
++#if 0
++ // Sync buffer for FW
++ if ((error = rwnx_ipc_elem_var_allocs(rwnx_hw, &elem, bcn->len, DMA_TO_DEVICE,
++ buf, NULL, NULL)))
++ return error;
++#endif
++ // Forward the information to the LMAC
++ error = rwnx_send_bcn_change(rwnx_hw, vif->vif_index, elem.dma_addr,
++ bcn->len, bcn->head_len, bcn->tim_len, NULL);
++
++#if 0
++ rwnx_ipc_elem_var_deallocs(rwnx_hw, &elem);
++#else
++
++
++#endif
++ return error;
++}
++
++/**
++ * @stop_ap: Stop being an AP, including stopping beaconing.
++ */
++#if (LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION)
++static int rwnx_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id)
++#else
++static int rwnx_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
++#endif
++
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_sta *sta;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ netif_tx_stop_all_queues(dev);
++ netif_carrier_off(dev);
++
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO)
++ rwnx_hw->is_p2p_connected = 0;
++ rwnx_radar_cancel_cac(&rwnx_hw->radar);
++ rwnx_send_apm_stop_req(rwnx_hw, rwnx_vif);
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_chanctx_unlink(rwnx_vif);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ /* delete any remaining STA*/
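++ /* Passing NULL parameters makes rwnx_cfg80211_del_station_compat() delete
++ entries unconditionally until the list is empty. */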
++ while (!list_empty(&rwnx_vif->ap.sta_list)) {
++ rwnx_cfg80211_del_station_compat(wiphy, dev, NULL);
++ }
++
++ /* delete BC/MC STA */
++ sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index];
++ rwnx_txq_vif_deinit(rwnx_hw, rwnx_vif);
++ rwnx_del_bcn(&rwnx_vif->ap.bcn);
++ rwnx_del_csa(rwnx_vif);
++
++ flush_workqueue(rwnx_vif->sta_probe.apmprobesta_wq);
++ destroy_workqueue(rwnx_vif->sta_probe.apmprobesta_wq);
++
++ netdev_info(dev, "AP Stopped");
++
++ return 0;
++}
++
++/**
++ * @set_monitor_channel: Set the monitor mode channel for the device. If other
++ * interfaces are active this callback should reject the configuration.
++ * If no interfaces are active or the device is down, the channel should
++ * be stored for when a monitor interface becomes active.
++ *
++ * Also called internally with chandef set to NULL simply to retrieve the channel
++ * configured at firmware level.
++ */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++static inline bool
++cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
++ const struct cfg80211_chan_def *chandef2)
++{
++ return (chandef1->chan == chandef2->chan &&
++ chandef1->width == chandef2->width &&
++ chandef1->center_freq1 == chandef2->center_freq1 &&
++ chandef1->center_freq2 == chandef2->center_freq2);
++}
++#endif
++
++static int rwnx_cfg80211_set_monitor_channel(struct wiphy *wiphy,
++ struct cfg80211_chan_def *chandef)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif;
++ struct me_config_monitor_cfm cfm;
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (rwnx_hw->monitor_vif == RWNX_INVALID_VIF)
++ return -EINVAL;
++
++ rwnx_vif = rwnx_hw->vif_table[rwnx_hw->monitor_vif];
++
++ // Do nothing if monitor interface is already configured with the requested channel
++ if (rwnx_chanctx_valid(rwnx_hw, rwnx_vif->ch_index)) {
++ struct rwnx_chanctx *ctxt;
++ ctxt = &rwnx_vif->rwnx_hw->chanctx_table[rwnx_vif->ch_index];
++ if (chandef && cfg80211_chandef_identical(&ctxt->chan_def, chandef))
++ return 0;
++ }
++
++ // Always send command to firmware. It allows to retrieve channel context index
++ // and its configuration.
++ if (rwnx_send_config_monitor_req(rwnx_hw, chandef, &cfm))
++ return -EIO;
++
++ // Always re-set channel context info
++ rwnx_chanctx_unlink(rwnx_vif);
++
++
++
++ // If there is also a STA interface not yet connected then monitor interface
++ // will only have a channel context after the connection of the STA interface.
++ if (cfm.chan_index != RWNX_CH_NOT_SET)
++ {
++ struct cfg80211_chan_def mon_chandef;
++
++ if (rwnx_hw->vif_started > 1) {
++ // In this case we just want to update the channel context index not
++ // the channel configuration
++ rwnx_chanctx_link(rwnx_vif, cfm.chan_index, NULL);
++ return -EBUSY;
++ }
++
++ mon_chandef.chan = ieee80211_get_channel(wiphy, cfm.chan.prim20_freq);
++ mon_chandef.center_freq1 = cfm.chan.center1_freq;
++ mon_chandef.center_freq2 = cfm.chan.center2_freq;
++ mon_chandef.width = chnl2bw[cfm.chan.type];
++ rwnx_chanctx_link(rwnx_vif, cfm.chan_index, &mon_chandef);
++ }
++
++ return 0;
++}
++
++/**
++ * @probe_client: probe an associated client, must return a cookie that it
++ * later passes to cfg80211_probe_status().
++ */
++int rwnx_cfg80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
++ const u8 *peer, u64 *cookie)
++{
++ //struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct rwnx_sta *sta = NULL;
++ bool found = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if((RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP) && (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_P2P_GO) &&
++ (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP_VLAN))
++ return -EINVAL;
++
++ spin_lock_bh(&vif->rwnx_hw->cb_lock);
++ list_for_each_entry(sta, &vif->ap.sta_list, list) {
++ if (sta->valid && ether_addr_equal(sta->mac_addr, peer)) {
++ found = true;
++ break;
++ }
++ }
++ spin_unlock_bh(&vif->rwnx_hw->cb_lock);
++
++ /* list_for_each_entry() never leaves sta NULL, so track the match explicitly */
++ if (!found)
++ return -ENOENT;
++
++
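++ /* Defer the actual probe to the apmprobe workqueue; the current probe_id is
++ * handed back as the cookie that the work handler reports via
++ * cfg80211_probe_status(). */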
++ memcpy(vif->sta_probe.sta_mac_addr, peer, 6);
++ queue_work(vif->sta_probe.apmprobesta_wq, &vif->sta_probe.apmprobestaWork);
++
++ *cookie = vif->sta_probe.probe_id;
++
++ return 0;
++}
++
++/**
++ * @mgmt_frame_register: Notify driver that a management frame type was
++ * registered. Note that this callback may not sleep, and cannot run
++ * concurrently with itself.
++ */
++void rwnx_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0))
++ struct net_device *dev,
++#else
++ struct wireless_dev *wdev,
++#endif
++ u16 frame_type, bool reg)
++{
++}
++
++/**
++ * @set_wiphy_params: Notify that wiphy parameters have changed;
++ * @changed bitfield (see &enum wiphy_params_flags) describes which values
++ * have changed. The actual parameter values are available in
++ * struct wiphy. If returning an error, no value should be changed.
++ */
++static int rwnx_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
++{
++ return 0;
++}
++
++
++/**
++ * @set_tx_power: set the transmit power according to the parameters,
++ * the power passed is in mBm, to get dBm use MBM_TO_DBM(). The
++ * wdev may be %NULL if power was set for the wiphy, and will
++ * always be %NULL unless the driver supports per-vif TX power
++ * (as advertised by the nl80211 feature flag.)
++ */
++static int rwnx_cfg80211_set_tx_power(struct wiphy *wiphy,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ struct wireless_dev *wdev,
++#endif
++ enum nl80211_tx_power_setting type, int mbm)
++{
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++ struct wireless_dev *wdev = NULL;
++ #endif
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif;
++ s8 pwr;
++ int res = 0;
++
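++ /* 0x7f presumably tells the firmware to pick the TX power itself; any other
++ * request is converted from mBm to dBm and applied either to the single wdev
++ * or to every VIF. */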
++ if (type == NL80211_TX_POWER_AUTOMATIC) {
++ pwr = 0x7f;
++ } else {
++ pwr = MBM_TO_DBM(mbm);
++ }
++
++ if (wdev) {
++ vif = container_of(wdev, struct rwnx_vif, wdev);
++ res = rwnx_send_set_power(rwnx_hw, vif->vif_index, pwr, NULL);
++ } else {
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++ res = rwnx_send_set_power(rwnx_hw, vif->vif_index, pwr, NULL);
++ if (res)
++ break;
++ }
++ }
++
++ return res;
++}
++
++
++/**
++ * @set_power_mgmt: set the power save to one of those two modes:
++ * Power-save off
++ * Power-save on - Dynamic mode
++ */
++static int rwnx_cfg80211_set_power_mgmt(struct wiphy *wiphy,
++ struct net_device *dev,
++ bool enabled, int timeout)
++{
++#if 0
++
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ u8 ps_mode;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ if (timeout >= 0)
++ netdev_info(dev, "Ignore timeout value %d", timeout);
++
++ if (!(rwnx_hw->version_cfm.features & BIT(MM_FEAT_PS_BIT)))
++ enabled = false;
++
++ if (enabled) {
++ /* Switch to Dynamic Power Save */
++ ps_mode = MM_PS_MODE_ON_DYN;
++ } else {
++ /* Exit Power Save */
++ ps_mode = MM_PS_MODE_OFF;
++ }
++
++ return rwnx_send_me_set_ps_mode(rwnx_hw, ps_mode);
++#else
++ return 0;
++#endif
++}
++
++
++static int rwnx_cfg80211_set_txq_params(struct wiphy *wiphy, struct net_device *dev,
++ struct ieee80211_txq_params *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ u8 hw_queue, aifs, cwmin, cwmax;
++ u32 param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ hw_queue = rwnx_ac2hwq[0][params->queue];
++#else
++ hw_queue = rwnx_ac2hwq[0][params->ac];
++#endif
++
++ aifs = params->aifs;
++ cwmin = fls(params->cwmin);
++ cwmax = fls(params->cwmax);
++
++ /* Store queue information in general structure */
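++ /* Packing layout: AIFS in bits 0-3, the CWmin and CWmax exponents (fls of
++ * the requested values) in bits 4-7 and 8-11, TXOP limit from bit 12 up. */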
++ param = (u32) (aifs << 0);
++ param |= (u32) (cwmin << 4);
++ param |= (u32) (cwmax << 8);
++ param |= (u32) (params->txop) << 12;
++
++ /* Send the MM_SET_EDCA_REQ message to the FW */
++ return rwnx_send_set_edca(rwnx_hw, hw_queue, param, false, rwnx_vif->vif_index);
++}
++
++
++/**
++ * @remain_on_channel: Request the driver to remain awake on the specified
++ * channel for the specified duration to complete an off-channel
++ * operation (e.g., public action frame exchange). When the driver is
++ * ready on the requested channel, it must indicate this with an event
++ * notification by calling cfg80211_ready_on_channel().
++ */
++
++static int
++rwnx_cfg80211_remain_on_channel(struct wiphy *wiphy,
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ struct wireless_dev *wdev,
++ #else
++ struct net_device *dev,
++ #endif
++ struct ieee80211_channel *chan,
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++ enum nl80211_channel_type channel_type,
++ #endif
++ unsigned int duration, u64 *cookie)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++#else
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct wireless_dev *wdev = &rwnx_vif->wdev;
++#endif
++ struct rwnx_roc_elem *roc_elem;
++ struct mm_add_if_cfm add_if_cfm;
++ struct mm_remain_on_channel_cfm roc_cfm;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* For debug purpose (use ftrace kernel option) */
++#ifdef CREATE_TRACE_POINTS
++ trace_roc(rwnx_vif->vif_index, chan->center_freq, duration);
++#endif
++
++#if 1 // Workaround: the upper layer may request durations as short as 50ms.
++ if (duration < 100) {
++ AICWFDBG(LOGINFO, "%s duration changed to 200ms \r\n", __func__);
++ duration = 200;
++ }
++#endif
++
++ /* Check that no other RoC procedure has been launched */
++ if (rwnx_hw->roc_elem) {
++ msleep(2);
++ if (rwnx_hw->roc_elem) {
++ AICWFDBG(LOGERROR, "remain_on_channel fail\n");
++#if 0//AIDEN test
++ roc_elem = rwnx_hw->roc_elem;
++ kfree(roc_elem);
++ rwnx_hw->roc_elem = NULL;
++ //msleep(500);
++#else
++ return -EBUSY;
++#endif
++ }
++ }
++ //msleep(500);
++ AICWFDBG(LOGINFO, "remain:%d,%d,%d,duration:%d\n",rwnx_vif->vif_index, rwnx_vif->is_p2p_vif, rwnx_hw->is_p2p_alive, duration);
++ //if (rwnx_vif->is_p2p_vif) {
++ AICWFDBG(LOGINFO, "rwnx_vif:%p rwnx_hw->p2p_dev_vif:%p rwnx_vif->up:%d\r\n",
++ rwnx_vif, rwnx_hw->p2p_dev_vif, rwnx_vif->up);
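++ /* For the P2P device interface, make sure a firmware VIF exists before
++ * requesting the RoC (adding one if needed) and re-arm the P2P keep-alive
++ * timer. */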
++#ifdef CONFIG_USE_P2P0
++ if (rwnx_vif->is_p2p_vif) {
++#else
++ if (rwnx_vif == rwnx_hw->p2p_dev_vif && !rwnx_vif->up) {
++#endif
++ if (!rwnx_hw->is_p2p_alive) {
++ error = rwnx_send_add_if (rwnx_hw, rwnx_vif->wdev.address, //wdev->netdev->dev_addr,
++ RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm);
++ if (error)
++ return -EIO;
++
++ if (add_if_cfm.status != 0) {
++ return -EIO;
++ }
++ /* Save the index retrieved from LMAC */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_vif->vif_index = add_if_cfm.inst_nbr;
++ rwnx_vif->up = true;
++ rwnx_hw->vif_started++;
++ rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ rwnx_hw->is_p2p_alive = 1;
++ mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000));
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++ }
++ else {
++ mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000));
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++ }
++ }
++
++ /* Allocate a temporary RoC element */
++ roc_elem = kmalloc(sizeof(struct rwnx_roc_elem), GFP_KERNEL);
++
++ /* Verify that element has well been allocated */
++ if (!roc_elem) {
++ return -ENOMEM;
++ }
++
++ /* Initialize the RoC information element */
++ roc_elem->wdev = wdev;
++ roc_elem->chan = chan;
++ roc_elem->duration = duration;
++ roc_elem->mgmt_roc = false;
++ roc_elem->on_chan = false;
++
++ /* Initialize the OFFCHAN TX queue to allow off-channel transmissions */
++ rwnx_txq_offchan_init(rwnx_vif);
++
++ /* Forward the information to the FMAC */
++ rwnx_hw->roc_elem = roc_elem;
++ error = rwnx_send_roc(rwnx_hw, rwnx_vif, chan, duration, &roc_cfm);
++
++ /* If no error, keep all the information for handling of end of procedure */
++ if (error == 0) {
++
++ /* Set the cookie value */
++ *cookie = (u64)(rwnx_hw->roc_cookie_cnt);
++ if(roc_cfm.status) {
++ // failed to roc
++ rwnx_hw->roc_elem = NULL;
++ kfree(roc_elem);
++ rwnx_txq_offchan_deinit(rwnx_vif);
++ return -EBUSY;
++ }
++ } else {
++ /* Free the allocated element */
++ rwnx_hw->roc_elem = NULL;
++ kfree(roc_elem);
++ rwnx_txq_offchan_deinit(rwnx_vif);
++ }
++
++ return error;
++}
++
++/**
++ * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
++ * This allows the operation to be terminated prior to timeout based on
++ * the duration value.
++ */
++static int rwnx_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ struct net_device *dev,
++ #else
++ struct wireless_dev *wdev,
++ #endif
++ u64 cookie)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++#ifdef CREATE_TRACE_POINTS
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++#else
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++#endif
++#endif
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifdef CREATE_TRACE_POINTS
++ /* For debug purpose (use ftrace kernel option) */
++ trace_cancel_roc(rwnx_vif->vif_index);
++#endif
++ /* Check if a RoC procedure is pending */
++ if (!rwnx_hw->roc_elem) {
++ return 0;
++ }
++
++ /* Forward the information to the FMAC */
++ return rwnx_send_cancel_roc(rwnx_hw);
++}
++
++/**
++ * @dump_survey: get site survey information.
++ */
++static int rwnx_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *netdev,
++ int idx, struct survey_info *info)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct ieee80211_supported_band *sband;
++ struct rwnx_survey_info *rwnx_survey;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (idx >= ARRAY_SIZE(rwnx_hw->survey))
++ return -ENOENT;
++
++ rwnx_survey = &rwnx_hw->survey[idx];
++
++ // Check if provided index matches with a supported 2.4GHz channel
++ sband = wiphy->bands[NL80211_BAND_2GHZ];
++ if (sband && idx >= sband->n_channels) {
++ idx -= sband->n_channels;
++ sband = NULL;
++ }
++
++ //#ifdef USE_5G
++ if (rwnx_hw->band_5g_support) {
++ if (!sband) {
++ // Check if provided index matches with a supported 5GHz channel
++ sband = wiphy->bands[NL80211_BAND_5GHZ];
++
++ if (!sband || idx >= sband->n_channels)
++ return -ENOENT;
++ }
++ }else{
++ //#else
++ if (!sband || idx >= sband->n_channels)
++ return -ENOENT;
++ }
++ //#endif
++
++ // Fill the survey
++ info->channel = &sband->channels[idx];
++ info->filled = rwnx_survey->filled;
++
++ if (rwnx_survey->filled != 0) {
++ SURVEY_TIME(info) = (u64)rwnx_survey->chan_time_ms;
++ SURVEY_TIME_BUSY(info) = (u64)rwnx_survey->chan_time_busy_ms;
++ info->noise = rwnx_survey->noise_dbm;
++
++ // Set the survey report as not used
++ if(info->noise == 0){
++ rwnx_survey->filled = 0;
++ }else{
++ rwnx_survey->filled |= SURVEY_INFO_NOISE_DBM;
++ }
++
++ }
++
++ return 0;
++}
++
++/**
++ * @get_channel: Get the current operating channel for the virtual interface.
++ * For monitor interfaces, it should return %NULL unless there's a single
++ * current monitoring channel.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++static int rwnx_cfg80211_get_channel(struct wiphy *wiphy,
++ struct wireless_dev *wdev,
++#if LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION
++ unsigned int link_id,
++#endif
++ struct cfg80211_chan_def *chandef)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++ struct rwnx_chanctx *ctxt;
++
++ if (!rwnx_vif->up) {
++ return -ENODATA;
++ }
++
++ if (rwnx_vif->vif_index == rwnx_hw->monitor_vif)
++ {
++ //retrieve channel from firmware
++ rwnx_cfg80211_set_monitor_channel(wiphy, NULL);
++ }
++
++ //Check if channel context is valid
++ if (!rwnx_chanctx_valid(rwnx_hw, rwnx_vif->ch_index)){
++ return -ENODATA;
++ }
++
++ ctxt = &rwnx_hw->chanctx_table[rwnx_vif->ch_index];
++ *chandef = ctxt->chan_def;
++
++ return 0;
++}
++#else
++struct ieee80211_channel *rwnx_cfg80211_get_channel(struct wiphy *wiphy)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct ieee80211_channel *chan = NULL;
++ struct rwnx_vif *vif;
++ bool_l found = false;
++ //may TBD
++
++ list_for_each_entry(vif, &rwnx_hw->vifs, list)
++ {
++ if(vif->wdev.iftype == NL80211_IFTYPE_AP)
++ {
++ found = true;
++ break;
++ }
++ }
++
++ if(found && rwnx_hw->set_chan.center_freq) {
++ chan = kzalloc(sizeof(struct ieee80211_channel), GFP_KERNEL);
++ memcpy((u8 *)chan, (u8 *)&rwnx_hw->set_chan, sizeof(struct ieee80211_channel));
++ }
++
++ return chan;
++}
++#endif
++
++
++/**
++ * @mgmt_tx: Transmit a management frame.
++ */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++static int rwnx_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
++ struct cfg80211_mgmt_tx_params *params,
++ u64 *cookie)
++#else
++static int rwnx_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
++ struct ieee80211_channel *channel, bool offchan,
++ unsigned int wait, const u8* buf, size_t len,
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
++ bool no_cck,
++ #endif
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++ bool dont_wait_for_ack,
++ #endif
++ u64 *cookie)
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct wireless_dev *wdev = &rwnx_vif->wdev;
++#else
++ struct rwnx_vif *rwnx_vif = container_of(wdev, struct rwnx_vif, wdev);
++#endif
++ struct rwnx_sta *rwnx_sta = NULL;
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ struct ieee80211_channel *channel = params->chan;
++ const u8 *buf = params->buf;
++ //size_t len = params->len;
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
++ struct ieee80211_mgmt *mgmt = (void *)buf;
++ bool ap = false;
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ bool offchan = false;
++ #endif
++
++ /* Check if provided VIF is an AP or a STA one */
++ switch (RWNX_VIF_TYPE(rwnx_vif)) {
++ case NL80211_IFTYPE_AP_VLAN:
++ rwnx_vif = rwnx_vif->ap_vlan.master;
++ case NL80211_IFTYPE_AP:
++ case NL80211_IFTYPE_P2P_GO:
++ case NL80211_IFTYPE_MESH_POINT:
++ ap = true;
++ break;
++ case NL80211_IFTYPE_STATION:
++ case NL80211_IFTYPE_P2P_CLIENT:
++ default:
++ break;
++ }
++
++
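++ /* Frames from an AP-type interface or addressed to a known STA (and external
++ * authentication frames) are transmitted directly; anything else first needs
++ * an off-channel (RoC) window on the target channel. */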
++ /* Get STA on which management frame has to be sent */
++ rwnx_sta = rwnx_retrieve_sta(rwnx_hw, rwnx_vif, mgmt->da,
++ mgmt->frame_control, ap);
++
++ AICWFDBG(LOGDEBUG, "%s rwnx_sta:%p RWNX_VIF_TYPE(rwnx_vif):%d \r\n", __func__, rwnx_sta, RWNX_VIF_TYPE(rwnx_vif));
++
++#ifdef CREATE_TRACE_POINTS
++ trace_mgmt_tx((channel) ? channel->center_freq : 0,
++ rwnx_vif->vif_index, (rwnx_sta) ? rwnx_sta->sta_idx : 0xFF,
++ mgmt);
++#endif
++ if (ap || rwnx_sta)
++ goto send_frame;
++
++ /* Not an AP interface sending frame to unknown STA:
++ * This is allowed for external authentication */
++ if (rwnx_vif->sta.external_auth && ieee80211_is_auth(mgmt->frame_control))
++ goto send_frame;
++
++ /* Otherwise ROC is needed */
++ if (!channel) {
++ AICWFDBG(LOGERROR, "mgmt_tx fail since channel\n");
++ return -EINVAL;
++ }
++
++ /* Check that a RoC is already pending */
++ if (rwnx_hw->roc_elem) {
++ /* Get VIF used for current ROC */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
++ struct rwnx_vif *rwnx_roc_vif = netdev_priv(rwnx_hw->roc_elem->wdev->netdev);
++#else
++ struct rwnx_vif *rwnx_roc_vif = container_of(rwnx_hw->roc_elem->wdev, struct rwnx_vif, wdev);//netdev_priv(rwnx_hw->roc_elem->wdev->netdev);
++#endif
++
++ /* Check if RoC channel is the same than the required one */
++ if ((rwnx_hw->roc_elem->chan->center_freq != channel->center_freq)
++ || (rwnx_vif->vif_index != rwnx_roc_vif->vif_index)) {
++ AICWFDBG(LOGERROR, "mgmt rx chan invalid: %d, %d", rwnx_hw->roc_elem->chan->center_freq, channel->center_freq);
++ return -EINVAL;
++ }
++ } else {
++ u64 cookie;
++ int error;
++
++ AICWFDBG(LOGINFO, "mgmt rx remain on chan\n");
++
++ /* Start a ROC procedure for 30ms */
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
++ error = rwnx_cfg80211_remain_on_channel(wiphy, wdev, channel,
++ 30, &cookie);
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
++ error = rwnx_cfg80211_remain_on_channel(wiphy, wdev, channel, NL80211_CHAN_NO_HT,
++ 30, &cookie);
++#else
++ error = rwnx_cfg80211_remain_on_channel(wiphy, dev, channel, NL80211_CHAN_NO_HT,
++ 30, &cookie);
++#endif
++
++ if (error) {
++ AICWFDBG(LOGERROR, "mgmt rx chan err\n");
++ return error;
++ }
++ /* Need to keep in mind that RoC has been launched internally in order to
++ * avoid to call the cfg80211 callback once expired */
++ rwnx_hw->roc_elem->mgmt_roc = true;
++ }
++
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ offchan = true;
++ #endif
++
++send_frame:
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ return rwnx_start_mgmt_xmit(rwnx_vif, rwnx_sta, params, offchan, cookie);
++ #else
++ return rwnx_start_mgmt_xmit(rwnx_vif, rwnx_sta, channel, offchan, wait, buf, len, no_cck, dont_wait_for_ack, cookie);
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
++}
++
++/**
++ * @start_radar_detection: Start radar detection in the driver.
++ */
++static
++int rwnx_cfg80211_start_radar_detection(struct wiphy *wiphy,
++ struct net_device *dev,
++ struct cfg80211_chan_def *chandef
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
++ , u32 cac_time_ms, int link_id
++ #endif
++ )
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct apm_start_cac_cfm cfm;
++
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
++ rwnx_radar_start_cac(&rwnx_hw->radar, cac_time_ms, rwnx_vif);
++ #endif
++ rwnx_send_apm_start_cac_req(rwnx_hw, rwnx_vif, chandef, &cfm);
++
++ if (cfm.status == CO_OK) {
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_chanctx_link(rwnx_vif, cfm.ch_idx, chandef);
++ if (rwnx_hw->cur_chanctx == rwnx_vif->ch_index)
++ rwnx_radar_detection_enable(&rwnx_hw->radar,
++ RWNX_RADAR_DETECT_REPORT,
++ RWNX_RADAR_RIU);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ } else {
++ return -EIO;
++ }
++
++ return 0;
++}
++
++/**
++ * @update_ft_ies: Provide updated Fast BSS Transition information to the
++ * driver. If the SME is in the driver/firmware, this information can be
++ * used in building Authentication and Reassociation Request frames.
++ */
++static
++int rwnx_cfg80211_update_ft_ies(struct wiphy *wiphy,
++ struct net_device *dev,
++ struct cfg80211_update_ft_ies_params *ftie)
++{
++ return 0;
++}
++
++/**
++ * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
++ */
++static
++int rwnx_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
++ struct net_device *dev,
++ int32_t rssi_thold, uint32_t rssi_hyst)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++
++ return rwnx_send_cfg_rssi_req(rwnx_hw, rwnx_vif->vif_index, rssi_thold, rssi_hyst);
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
++/**
++ *
++ * @channel_switch: initiate channel-switch procedure (with CSA). Driver is
++ * responsible for verifying if the switch is possible. Since this is
++ * inherently tricky, the driver may decide to disconnect an interface later
++ * with cfg80211_stop_iface(). This doesn't mean the driver can accept
++ * everything. It should do its best to verify requests and reject them
++ * as soon as possible.
++ */
++int rwnx_cfg80211_channel_switch(struct wiphy *wiphy,
++ struct net_device *dev,
++ struct cfg80211_csa_settings *params)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct rwnx_ipc_elem_var elem;
++ struct rwnx_bcn *bcn, *bcn_after;
++ struct rwnx_csa *csa;
++ u16 csa_oft[BCN_MAX_CSA_CPT];
++ u8 *buf;
++ int i, error = 0;
++
++ //elem init
++ elem.dma_addr = 0;
++
++ if (vif->ap.csa)
++ return -EBUSY;
++
++ if (params->n_counter_offsets_beacon > BCN_MAX_CSA_CPT)
++ return -EINVAL;
++
++ /* Build the new beacon with CSA IE */
++ bcn = &vif->ap.bcn;
++ buf = rwnx_build_bcn(bcn, &params->beacon_csa);
++ if (!buf)
++ return -ENOMEM;
++
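++ /* The CSA counter offsets from cfg80211 appear to be relative to the beacon
++ * tail; shift them by head_len + tim_len so they index the full beacon. */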
++ memset(csa_oft, 0, sizeof(csa_oft));
++ for (i = 0; i < params->n_counter_offsets_beacon; i++)
++ {
++ csa_oft[i] = params->counter_offsets_beacon[i] + bcn->head_len +
++ bcn->tim_len;
++ }
++
++ /* If count is set to 0 (i.e anytime after this beacon) force it to 2 */
++ if (params->count == 0) {
++ params->count = 2;
++ for (i = 0; i < params->n_counter_offsets_beacon; i++)
++ {
++ buf[csa_oft[i]] = 2;
++ }
++ }
++
++ #if 0
++ if ((error = rwnx_ipc_elem_var_allocs(rwnx_hw, &elem, bcn->len,
++ DMA_TO_DEVICE, buf, NULL, NULL))) {
++ goto end;
++ }
++ #else
++ error = rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, bcn->len);
++ if (error) {
++ goto end;
++ }
++ #endif
++
++ /* Build the beacon to use after CSA. It will only be sent to fw once
++ CSA is over, but do it before sending the beacon as it must be ready
++ when CSA is finished. */
++ csa = kzalloc(sizeof(struct rwnx_csa), GFP_KERNEL);
++ if (!csa) {
++ error = -ENOMEM;
++ goto end;
++ }
++
++ bcn_after = &csa->bcn;
++ buf = rwnx_build_bcn(bcn_after, &params->beacon_after);
++ if (!buf) {
++ error = -ENOMEM;
++ rwnx_del_csa(vif);
++ goto end;
++ }
++
++ vif->ap.csa = csa;
++ csa->vif = vif;
++ csa->chandef = params->chandef;
++
++ /* Send new Beacon. FW will extract channel and count from the beacon */
++ error = rwnx_send_bcn_change(rwnx_hw, vif->vif_index, elem.dma_addr,
++ bcn->len, bcn->head_len, bcn->tim_len, csa_oft);
++
++ if (error) {
++ rwnx_del_csa(vif);
++ goto end;
++ } else {
++ INIT_WORK(&csa->work, rwnx_csa_finish);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 9, 0)
++ cfg80211_ch_switch_started_notify(dev, &csa->chandef, 0, params->count, false);
++#elif LINUX_VERSION_CODE >= HIGH_KERNEL_VERSION
++ cfg80211_ch_switch_started_notify(dev, &csa->chandef, 0, params->count, false, 0);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
++ cfg80211_ch_switch_started_notify(dev, &csa->chandef, params->count, params->block_tx);
++#else
++ cfg80211_ch_switch_started_notify(dev, &csa->chandef, params->count);
++#endif
++
++ }
++
++ end:
++ return error;
++}
++#endif
++
++#if 0
++/*
++ * @tdls_mgmt: prepare TDLS action frame packets and forward them to FW
++ */
++
++static int
++rwnx_cfg80211_tdls_mgmt(struct wiphy *wiphy,
++ struct net_device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
++ const u8 *peer,
++#else
++ u8 *peer,
++#endif
++ u8 action_code,
++ u8 dialog_token,
++ u16 status_code,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
++ u32 peer_capability,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
++ bool initiator,
++#endif
++ const u8 *buf,
++ size_t len)
++{
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
++ u32 peer_capability = 0;
++ #endif
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++ bool initiator = false;
++ #endif
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ int ret = 0;
++
++ /* make sure we support TDLS */
++ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
++ return -ENOTSUPP;
++
++ /* make sure we are in station mode (and connected) */
++ if ((RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_STATION) ||
++ (!rwnx_vif->up) || (!rwnx_vif->sta.ap))
++ return -ENOTSUPP;
++
++ /* only one TDLS link is supported */
++ if ((action_code == WLAN_TDLS_SETUP_REQUEST) &&
++ (rwnx_vif->sta.tdls_sta) &&
++ (rwnx_vif->tdls_status == TDLS_LINK_ACTIVE)) {
++ printk("%s: only one TDLS link is supported!\n", __func__);
++ return -ENOTSUPP;
++ }
++
++ if ((action_code == WLAN_TDLS_DISCOVERY_REQUEST) &&
++ (rwnx_hw->mod_params->ps_on)) {
++ printk("%s: discovery request is not supported when "
++ "power-save is enabled!\n", __func__);
++ return -ENOTSUPP;
++ }
++
++ switch (action_code) {
++ case WLAN_TDLS_SETUP_RESPONSE:
++ /* only one TDLS link is supported */
++ if ((status_code == 0) &&
++ (rwnx_vif->sta.tdls_sta) &&
++ (rwnx_vif->tdls_status == TDLS_LINK_ACTIVE)) {
++ printk("%s: only one TDLS link is supported!\n", __func__);
++ status_code = WLAN_STATUS_REQUEST_DECLINED;
++ }
++ /* fall-through */
++ case WLAN_TDLS_SETUP_REQUEST:
++ case WLAN_TDLS_TEARDOWN:
++ case WLAN_TDLS_DISCOVERY_REQUEST:
++ case WLAN_TDLS_SETUP_CONFIRM:
++ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
++ ret = rwnx_tdls_send_mgmt_packet_data(rwnx_hw, rwnx_vif, peer, action_code,
++ dialog_token, status_code, peer_capability, initiator, buf, len, 0, NULL);
++ break;
++
++ default:
++ printk("%s: Unknown TDLS mgmt/action frame %pM\n",
++ __func__, peer);
++ ret = -EOPNOTSUPP;
++ break;
++ }
++
++ if (action_code == WLAN_TDLS_SETUP_REQUEST) {
++ rwnx_vif->tdls_status = TDLS_SETUP_REQ_TX;
++ } else if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
++ rwnx_vif->tdls_status = TDLS_SETUP_RSP_TX;
++ } else if ((action_code == WLAN_TDLS_SETUP_CONFIRM) && (ret == CO_OK)) {
++ rwnx_vif->tdls_status = TDLS_LINK_ACTIVE;
++ /* Set TDLS active */
++ rwnx_vif->sta.tdls_sta->tdls.active = true;
++ }
++
++ return ret;
++}
++#endif
++#if 0
++/*
++ * @tdls_oper: execute TDLS operation
++ */
++static int
++rwnx_cfg80211_tdls_oper(struct wiphy *wiphy,
++ struct net_device *dev,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
++ const u8 *peer,
++#else
++ u8 *peer,
++#endif
++ enum nl80211_tdls_operation oper
++)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ int error;
++
++ if (oper != NL80211_TDLS_DISABLE_LINK)
++ return 0;
++
++ if (!rwnx_vif->sta.tdls_sta) {
++ printk("%s: TDLS station %pM does not exist\n", __func__, peer);
++ return -ENOLINK;
++ }
++
++ if (memcmp(rwnx_vif->sta.tdls_sta->mac_addr, peer, ETH_ALEN) == 0) {
++ /* Disable Channel Switch */
++ if (!rwnx_send_tdls_cancel_chan_switch_req(rwnx_hw, rwnx_vif,
++ rwnx_vif->sta.tdls_sta,
++ NULL))
++ rwnx_vif->sta.tdls_sta->tdls.chsw_en = false;
++
++ netdev_info(dev, "Del TDLS sta %d (%pM)",
++ rwnx_vif->sta.tdls_sta->sta_idx,
++ rwnx_vif->sta.tdls_sta->mac_addr);
++ /* Ensure that we won't process PS change ind */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_vif->sta.tdls_sta->ps.active = false;
++ rwnx_vif->sta.tdls_sta->valid = false;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ rwnx_txq_sta_deinit(rwnx_hw, rwnx_vif->sta.tdls_sta);
++ error = rwnx_send_me_sta_del(rwnx_hw, rwnx_vif->sta.tdls_sta->sta_idx, true);
++ if ((error != 0) && (error != -EPIPE))
++ return error;
++
++#ifdef CONFIG_RWNX_BFMER
++ // Disable Beamformer if supported
++ rwnx_bfmer_report_del(rwnx_hw, rwnx_vif->sta.tdls_sta);
++ rwnx_mu_group_sta_del(rwnx_hw, rwnx_vif->sta.tdls_sta);
++#endif /* CONFIG_RWNX_BFMER */
++
++ /* Set TDLS not active */
++ rwnx_vif->sta.tdls_sta->tdls.active = false;
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_vif->sta.tdls_sta);
++#endif
++ // Remove TDLS station
++ rwnx_vif->tdls_status = TDLS_LINK_IDLE;
++ rwnx_vif->sta.tdls_sta = NULL;
++ }
++
++ return 0;
++}
++#endif
++#if 0
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
++/*
++ * @tdls_channel_switch: enable TDLS channel switch
++ */
++static int
++rwnx_cfg80211_tdls_channel_switch(struct wiphy *wiphy,
++ struct net_device *dev,
++ const u8 *addr, u8 oper_class,
++ struct cfg80211_chan_def *chandef)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_sta *rwnx_sta = rwnx_vif->sta.tdls_sta;
++ struct tdls_chan_switch_cfm cfm;
++ int error;
++
++ if ((!rwnx_sta) || (memcmp(addr, rwnx_sta->mac_addr, ETH_ALEN))) {
++ printk("%s: TDLS station %pM doesn't exist\n", __func__, addr);
++ return -ENOLINK;
++ }
++
++ if (!rwnx_sta->tdls.chsw_allowed) {
++ printk("%s: TDLS station %pM does not support TDLS channel switch\n", __func__, addr);
++ return -ENOTSUPP;
++ }
++
++ error = rwnx_send_tdls_chan_switch_req(rwnx_hw, rwnx_vif, rwnx_sta,
++ rwnx_sta->tdls.initiator,
++ oper_class, chandef, &cfm);
++ if (error)
++ return error;
++
++ if (!cfm.status) {
++ rwnx_sta->tdls.chsw_en = true;
++ return 0;
++ } else {
++ printk("%s: TDLS channel switch already enabled and only one is supported\n", __func__);
++ return -EALREADY;
++ }
++}
++#endif
++#if 0
++/*
++ * @tdls_cancel_channel_switch: disable TDLS channel switch
++ */
++static void
++rwnx_cfg80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
++ struct net_device *dev,
++ const u8 *addr)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_sta *rwnx_sta = rwnx_vif->sta.tdls_sta;
++ struct tdls_cancel_chan_switch_cfm cfm;
++
++ if (!rwnx_sta)
++ return;
++
++ if (!rwnx_send_tdls_cancel_chan_switch_req(rwnx_hw, rwnx_vif,
++ rwnx_sta, &cfm))
++ rwnx_sta->tdls.chsw_en = false;
++}
++#endif /* version >= 3.19 */
++#endif
++/**
++ * @change_bss: Modify parameters for a given BSS (mainly for AP mode).
++ */
++int rwnx_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev,
++ struct bss_parameters *params)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ int res = -EOPNOTSUPP;
++
++ if (((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP) ||
++ (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO)) &&
++ (params->ap_isolate > -1)) {
++
++ if (params->ap_isolate)
++ rwnx_vif->ap.flags |= RWNX_AP_ISOLATE;
++ else
++ rwnx_vif->ap.flags &= ~RWNX_AP_ISOLATE;
++
++ res = 0;
++ }
++
++ return res;
++}
++
++static int rwnx_fill_station_info(struct rwnx_sta *sta, struct rwnx_vif *vif,
++ struct station_info *sinfo)
++{
++
++ struct rwnx_sta_stats *stats = &sta->stats;
++ struct rx_vector_1 *rx_vect1 = &stats->last_rx.rx_vect1;
++ union rwnx_rate_ctrl_info *rate_info;
++ struct mm_get_sta_info_cfm cfm;
++
++ rwnx_send_get_sta_info_req(vif->rwnx_hw, sta->sta_idx, &cfm);
++ sinfo->tx_failed = cfm.txfailed;
++ rate_info = (union rwnx_rate_ctrl_info *)&cfm.rate_info;
++
++
++ AICWFDBG(LOGDEBUG, "%s ModTx:%d TxIndex:%d ModRx:%d RxHTIndex:%d RxVHTIndex:%d RxHEIndex:%d RSSI:%d \r\n", __func__,
++ rate_info->formatModTx, rate_info->mcsIndexTx, rx_vect1->format_mod,
++ rx_vect1->ht.mcs,
++ rx_vect1->vht.mcs,
++ rx_vect1->he.mcs,
++ (s8)cfm.rssi);
++
++
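++ /* Map the firmware TX rate info onto cfg80211 rate_info: legacy rates via
++ * the lookup table, HT/VHT/HE via the MCS index (HE falls back to VHT flags
++ * on kernels without RATE_INFO_FLAGS_HE_MCS). */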
++ switch (rate_info->formatModTx) {
++ case FORMATMOD_NON_HT:
++ case FORMATMOD_NON_HT_DUP_OFDM:
++ sinfo->txrate.flags = 0;
++ sinfo->txrate.legacy = tx_legrates_lut_rate[rate_info->mcsIndexTx];
++ break;
++ case FORMATMOD_HT_MF:
++ case FORMATMOD_HT_GF:
++ sinfo->txrate.flags = RATE_INFO_FLAGS_MCS;
++ sinfo->txrate.mcs = rate_info->mcsIndexTx;
++ break;
++ case FORMATMOD_VHT:
++ sinfo->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
++ sinfo->txrate.mcs = rate_info->mcsIndexTx;
++ break;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ case FORMATMOD_HE_MU:
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ sinfo->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
++ sinfo->txrate.mcs = rate_info->mcsIndexTx;
++ break;
++#else
++ //kernel not support he
++ case FORMATMOD_HE_MU:
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ sinfo->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
++ sinfo->txrate.mcs = rate_info->mcsIndexTx;
++ break;
++#endif
++ default:
++ return -EINVAL;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
++ switch (rate_info->bwTx) {
++ case PHY_CHNL_BW_20:
++ sinfo->txrate.bw = RATE_INFO_BW_20;
++ break;
++ case PHY_CHNL_BW_40:
++ sinfo->txrate.bw = RATE_INFO_BW_40;
++ break;
++ case PHY_CHNL_BW_80:
++ sinfo->txrate.bw = RATE_INFO_BW_80;
++ break;
++ case PHY_CHNL_BW_160:
++ sinfo->txrate.bw = RATE_INFO_BW_160;
++ break;
++ default:
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ sinfo->txrate.bw = RATE_INFO_BW_HE_RU;
++#else
++ sinfo->txrate.bw = RATE_INFO_BW_20;
++#endif
++ break;
++ }
++#endif
++
++ sinfo->txrate.nss = 1;
++ sinfo->filled |= (BIT(NL80211_STA_INFO_TX_BITRATE) | BIT(NL80211_STA_INFO_TX_FAILED));
++
++ sinfo->inactive_time = jiffies_to_msecs(jiffies - vif->rwnx_hw->stats.last_tx);
++ sinfo->rx_bytes = vif->net_stats.rx_bytes;
++ sinfo->tx_bytes = vif->net_stats.tx_bytes;
++ sinfo->tx_packets = vif->net_stats.tx_packets;
++ sinfo->rx_packets = vif->net_stats.rx_packets;
++ sinfo->signal = (s8)cfm.rssi;
++ sinfo->rxrate.nss = 1;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
++ switch (rx_vect1->ch_bw) {
++ case PHY_CHNL_BW_20:
++ sinfo->rxrate.bw = RATE_INFO_BW_20;
++ break;
++ case PHY_CHNL_BW_40:
++ sinfo->rxrate.bw = RATE_INFO_BW_40;
++ break;
++ case PHY_CHNL_BW_80:
++ sinfo->rxrate.bw = RATE_INFO_BW_80;
++ break;
++ case PHY_CHNL_BW_160:
++ sinfo->rxrate.bw = RATE_INFO_BW_160;
++ break;
++ default:
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ sinfo->rxrate.bw = RATE_INFO_BW_HE_RU;
++#else
++ sinfo->rxrate.bw = RATE_INFO_BW_20;
++#endif
++ break;
++ }
++#endif
++
++ switch (rx_vect1->format_mod) {
++ case FORMATMOD_NON_HT:
++ case FORMATMOD_NON_HT_DUP_OFDM:
++ sinfo->rxrate.flags = 0;
++ sinfo->rxrate.legacy = legrates_lut_rate[legrates_lut[rx_vect1->leg_rate]];
++ break;
++ case FORMATMOD_HT_MF:
++ case FORMATMOD_HT_GF:
++ sinfo->rxrate.flags = RATE_INFO_FLAGS_MCS;
++ if (rx_vect1->ht.short_gi)
++ sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
++ sinfo->rxrate.mcs = rx_vect1->ht.mcs;
++ break;
++ case FORMATMOD_VHT:
++ sinfo->rxrate.flags = RATE_INFO_FLAGS_VHT_MCS;
++ if (rx_vect1->vht.short_gi)
++ sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
++ sinfo->rxrate.mcs = rx_vect1->vht.mcs;
++ break;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ case FORMATMOD_HE_MU:
++ sinfo->rxrate.he_ru_alloc = rx_vect1->he.ru_size;
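++ /* fall through: HE_MU also uses the common HE fields set below */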
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ sinfo->rxrate.flags = RATE_INFO_FLAGS_HE_MCS;
++ sinfo->rxrate.mcs = rx_vect1->he.mcs;
++ sinfo->rxrate.he_gi = rx_vect1->he.gi_type;
++ sinfo->rxrate.he_dcm = rx_vect1->he.dcm;
++ break;
++#else
++ // kernel does not support HE rates; report them as VHT
++ case FORMATMOD_HE_MU:
++ case FORMATMOD_HE_SU:
++ case FORMATMOD_HE_ER:
++ sinfo->rxrate.flags = RATE_INFO_FLAGS_VHT_MCS;
++ sinfo->rxrate.mcs = rx_vect1->he.mcs;
++ break;
++#endif
++ default:
++ return -EINVAL;
++ }
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
++ sinfo->filled |= (STATION_INFO_INACTIVE_TIME |
++ STATION_INFO_RX_BYTES64 |
++ STATION_INFO_TX_BYTES64 |
++ STATION_INFO_RX_PACKETS |
++ STATION_INFO_TX_PACKETS |
++ STATION_INFO_SIGNAL |
++ STATION_INFO_RX_BITRATE);
++#else
++ sinfo->filled |= (BIT(NL80211_STA_INFO_INACTIVE_TIME) |
++ BIT(NL80211_STA_INFO_RX_BYTES64) |
++ BIT(NL80211_STA_INFO_TX_BYTES64) |
++ BIT(NL80211_STA_INFO_RX_PACKETS) |
++ BIT(NL80211_STA_INFO_TX_PACKETS) |
++ BIT(NL80211_STA_INFO_SIGNAL) |
++ BIT(NL80211_STA_INFO_RX_BITRATE));
++#endif
++
++ return 0;
++}
++
++
++/**
++ * @get_station: get station information for the station identified by @mac
++ */
++static int rwnx_cfg80211_get_station(struct wiphy *wiphy,
++ struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *mac,
++#else
++ const u8 *mac,
++#endif
++ struct station_info *sinfo)
++{
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct rwnx_sta *sta = NULL;
++
++ if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_MONITOR)
++ return -EINVAL;
++ else if ((RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_STATION) ||
++ (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_P2P_CLIENT)) {
++ if (vif->sta.ap && ether_addr_equal(vif->sta.ap->mac_addr, mac))
++ sta = vif->sta.ap;
++ } else {
++ struct rwnx_sta *sta_iter;
++ spin_lock_bh(&vif->rwnx_hw->cb_lock);
++ list_for_each_entry(sta_iter, &vif->ap.sta_list, list) {
++ if (sta_iter->valid && ether_addr_equal(sta_iter->mac_addr, mac)) {
++ sta = sta_iter;
++ break;
++ }
++ }
++ spin_unlock_bh(&vif->rwnx_hw->cb_lock);
++ }
++
++ if (sta)
++ return rwnx_fill_station_info(sta, vif, sinfo);
++
++ return -ENOENT;
++}
++
++
++/**
++ * @dump_station: dump station callback -- resume dump at index @idx
++ */
++static int rwnx_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
++ int idx, u8 *mac, struct station_info *sinfo)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_sta *sta_iter, *sta = NULL;
++ struct mesh_peer_info_cfm peer_info_cfm;
++ int i = 0;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ list_for_each_entry(sta_iter, &rwnx_vif->ap.sta_list, list) {
++ if (i < idx) {
++ i++;
++ continue;
++ }
++
++ sta = sta_iter;
++ break;
++ }
++
++ if (sta == NULL)
++ return -ENOENT;
++
++ /* Forward the information to the UMAC */
++ if (rwnx_send_mesh_peer_info_req(rwnx_hw, rwnx_vif, sta->sta_idx,
++ &peer_info_cfm))
++ return -ENOMEM;
++
++ /* Copy peer MAC address */
++ memcpy(mac, &sta->mac_addr, ETH_ALEN);
++
++ /* Fill station information */
++ sinfo->llid = peer_info_cfm.local_link_id;
++ sinfo->plid = peer_info_cfm.peer_link_id;
++ sinfo->plink_state = peer_info_cfm.link_state;
++ sinfo->local_pm = peer_info_cfm.local_ps_mode;
++ sinfo->peer_pm = peer_info_cfm.peer_ps_mode;
++ sinfo->nonpeer_pm = peer_info_cfm.non_peer_ps_mode;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
++ sinfo->filled = (STATION_INFO_LLID |
++ STATION_INFO_PLID |
++ STATION_INFO_PLINK_STATE |
++ STATION_INFO_LOCAL_PM |
++ STATION_INFO_PEER_PM |
++ STATION_INFO_NONPEER_PM);
++#else
++ sinfo->filled = (BIT(NL80211_STA_INFO_LLID) |
++ BIT(NL80211_STA_INFO_PLID) |
++ BIT(NL80211_STA_INFO_PLINK_STATE) |
++ BIT(NL80211_STA_INFO_LOCAL_PM) |
++ BIT(NL80211_STA_INFO_PEER_PM) |
++ BIT(NL80211_STA_INFO_NONPEER_PM));
++#endif
++
++ return 0;
++}
++
++/**
++ * @add_mpath: add a fixed mesh path
++ */
++static int rwnx_cfg80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *dst,
++ u8 *next_hop
++#else
++ const u8 *dst,
++ const u8 *next_hop
++#endif
++)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct mesh_path_update_cfm cfm;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ return rwnx_send_mesh_path_update_req(rwnx_hw, rwnx_vif, dst, next_hop, &cfm);
++}
++
++/**
++ * @del_mpath: delete a given mesh path
++ */
++static int rwnx_cfg80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *dst
++#else
++ const u8 *dst
++#endif
++)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct mesh_path_update_cfm cfm;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ return rwnx_send_mesh_path_update_req(rwnx_hw, rwnx_vif, dst, NULL, &cfm);
++}
++
++/**
++ * @change_mpath: change a given mesh path
++ */
++static int rwnx_cfg80211_change_mpath(struct wiphy *wiphy, struct net_device *dev,
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
++ u8 *dst,
++ u8 *next_hop
++#else
++ const u8 *dst,
++ const u8 *next_hop
++#endif
++)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct mesh_path_update_cfm cfm;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ return rwnx_send_mesh_path_update_req(rwnx_hw, rwnx_vif, dst, next_hop, &cfm);
++}
++
++/**
++ * @get_mpath: get a mesh path for the given parameters
++ */
++static int rwnx_cfg80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
++ u8 *dst, u8 *next_hop, struct mpath_info *pinfo)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_mesh_path *mesh_path = NULL;
++ struct rwnx_mesh_path *cur;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ list_for_each_entry(cur, &rwnx_vif->ap.mpath_list, list) {
++ /* Compare the path target address and the provided destination address */
++ if (memcmp(dst, &cur->tgt_mac_addr, ETH_ALEN)) {
++ continue;
++ }
++
++ mesh_path = cur;
++ break;
++ }
++
++ if (mesh_path == NULL)
++ return -ENOENT;
++
++ /* Copy next HOP MAC address */
++ if (mesh_path->p_nhop_sta)
++ memcpy(next_hop, &mesh_path->p_nhop_sta->mac_addr, ETH_ALEN);
++
++ /* Fill path information */
++ pinfo->filled = 0;
++ pinfo->generation = rwnx_vif->ap.generation;
++
++ return 0;
++}
++
++/**
++ * @dump_mpath: dump mesh path callback -- resume dump at index @idx
++ */
++static int rwnx_cfg80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
++ int idx, u8 *dst, u8 *next_hop,
++ struct mpath_info *pinfo)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_mesh_path *mesh_path = NULL;
++ struct rwnx_mesh_path *cur;
++ int i = 0;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ list_for_each_entry(cur, &rwnx_vif->ap.mpath_list, list) {
++ if (i < idx) {
++ i++;
++ continue;
++ }
++
++ mesh_path = cur;
++ break;
++ }
++
++ if (mesh_path == NULL)
++ return -ENOENT;
++
++ /* Copy target and next hop MAC address */
++ memcpy(dst, &mesh_path->tgt_mac_addr, ETH_ALEN);
++ if (mesh_path->p_nhop_sta)
++ memcpy(next_hop, &mesh_path->p_nhop_sta->mac_addr, ETH_ALEN);
++
++ /* Fill path information */
++ pinfo->filled = 0;
++ pinfo->generation = rwnx_vif->ap.generation;
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
++/**
++ * @get_mpp: get a mesh proxy path for the given parameters
++ */
++static int rwnx_cfg80211_get_mpp(struct wiphy *wiphy, struct net_device *dev,
++ u8 *dst, u8 *mpp, struct mpath_info *pinfo)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_mesh_proxy *mesh_proxy = NULL;
++ struct rwnx_mesh_proxy *cur;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ list_for_each_entry(cur, &rwnx_vif->ap.proxy_list, list) {
++ if (cur->local) {
++ continue;
++ }
++
++ /* Compare the path target address and the provided destination address */
++ if (memcmp(dst, &cur->ext_sta_addr, ETH_ALEN)) {
++ continue;
++ }
++
++ mesh_proxy = cur;
++ break;
++ }
++
++ if (mesh_proxy == NULL)
++ return -ENOENT;
++
++ memcpy(mpp, &mesh_proxy->proxy_addr, ETH_ALEN);
++
++ /* Fill path information */
++ pinfo->filled = 0;
++ pinfo->generation = rwnx_vif->ap.generation;
++
++ return 0;
++}
++
++/**
++ * @dump_mpp: dump mesh proxy path callback -- resume dump at index @idx
++ */
++static int rwnx_cfg80211_dump_mpp(struct wiphy *wiphy, struct net_device *dev,
++ int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_mesh_proxy *mesh_proxy = NULL;
++ struct rwnx_mesh_proxy *cur;
++ int i = 0;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ list_for_each_entry(cur, &rwnx_vif->ap.proxy_list, list) {
++ if (cur->local) {
++ continue;
++ }
++
++ if (i < idx) {
++ i++;
++ continue;
++ }
++
++ mesh_proxy = cur;
++ break;
++ }
++
++ if (mesh_proxy == NULL)
++ return -ENOENT;
++
++ /* Copy target and proxy MAC addresses */
++ memcpy(dst, &mesh_proxy->ext_sta_addr, ETH_ALEN);
++ memcpy(mpp, &mesh_proxy->proxy_addr, ETH_ALEN);
++
++ /* Fill path information */
++ pinfo->filled = 0;
++ pinfo->generation = rwnx_vif->ap.generation;
++
++ return 0;
++}
++#endif /* version >= 3.19 */
++
++/**
++ * @get_mesh_config: Get the current mesh configuration
++ */
++static int rwnx_cfg80211_get_mesh_config(struct wiphy *wiphy, struct net_device *dev,
++ struct mesh_config *conf)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ return 0;
++}
++
++/**
++ * @update_mesh_config: Update mesh parameters on a running mesh.
++ */
++static int rwnx_cfg80211_update_mesh_config(struct wiphy *wiphy, struct net_device *dev,
++ u32 mask, const struct mesh_config *nconf)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct mesh_update_cfm cfm;
++ int status;
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ if (mask & CO_BIT(NL80211_MESHCONF_POWER_MODE - 1)) {
++ rwnx_vif->ap.next_mesh_pm = nconf->power_mode;
++
++ if (!list_empty(&rwnx_vif->ap.sta_list)) {
++ // If there are mesh links we don't want to update the power mode
++ // It will be updated with rwnx_update_mesh_power_mode() when the
++ // ps mode of a link is updated or when a new link is added/removed
++ mask &= ~BIT(NL80211_MESHCONF_POWER_MODE - 1);
++
++ if (!mask)
++ return 0;
++ }
++ }
++
++ status = rwnx_send_mesh_update_req(rwnx_hw, rwnx_vif, mask, nconf, &cfm);
++
++ if (!status && (cfm.status != 0))
++ status = -EINVAL;
++
++ return status;
++}
++
++/**
++ * @join_mesh: join the mesh network with the specified parameters
++ * (invoked with the wireless_dev mutex held)
++ */
++static int rwnx_cfg80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
++ const struct mesh_config *conf, const struct mesh_setup *setup)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct mesh_start_cfm mesh_start_cfm;
++ int error = 0;
++ u8 txq_status = 0;
++ /* STA for BC/MC traffic */
++ struct rwnx_sta *sta;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)
++ return -ENOTSUPP;
++
++ /* Forward the information to the UMAC */
++ if ((error = rwnx_send_mesh_start_req(rwnx_hw, rwnx_vif, conf, setup, &mesh_start_cfm))) {
++ return error;
++ }
++
++ /* Check the status */
++ switch (mesh_start_cfm.status) {
++ case CO_OK:
++ rwnx_vif->ap.bcmc_index = mesh_start_cfm.bcmc_idx;
++ rwnx_vif->ap.flags = 0;
++ rwnx_vif->use_4addr = true;
++ rwnx_vif->user_mpm = setup->user_mpm;
++
++ sta = &rwnx_hw->sta_table[mesh_start_cfm.bcmc_idx];
++ sta->valid = true;
++ sta->aid = 0;
++ sta->sta_idx = mesh_start_cfm.bcmc_idx;
++ sta->ch_idx = mesh_start_cfm.ch_idx;
++ sta->vif_idx = rwnx_vif->vif_index;
++ sta->qos = true;
++ sta->acm = 0;
++ sta->ps.active = false;
++ rwnx_mu_group_sta_init(sta, NULL);
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_chanctx_link(rwnx_vif, mesh_start_cfm.ch_idx,
++ (struct cfg80211_chan_def *)(&setup->chandef));
++ if (rwnx_hw->cur_chanctx != mesh_start_cfm.ch_idx) {
++ txq_status = RWNX_TXQ_STOP_CHAN;
++ }
++ rwnx_txq_vif_init(rwnx_hw, rwnx_vif, txq_status);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++
++ netif_tx_start_all_queues(dev);
++ netif_carrier_on(dev);
++
++ /* If the AP channel is already the active one, radar activation was
++ probably skipped on MM_CHANNEL_SWITCH_IND (unless another vif uses this
++ ctxt). In any case, retest whether radar detection must be activated.
++ */
++ if (rwnx_hw->cur_chanctx == mesh_start_cfm.ch_idx) {
++ rwnx_radar_detection_enable_on_cur_channel(rwnx_hw);
++ }
++ break;
++
++ case CO_BUSY:
++ error = -EINPROGRESS;
++ break;
++
++ default:
++ error = -EIO;
++ break;
++ }
++
++ /* Print information about the operation */
++ if (error) {
++ netdev_info(dev, "Failed to start MP (%d)", error);
++ } else {
++ netdev_info(dev, "MP started: ch=%d, bcmc_idx=%d",
++ rwnx_vif->ch_index, rwnx_vif->ap.bcmc_index);
++ }
++
++ return error;
++}
++
++/**
++ * @leave_mesh: leave the current mesh network
++ * (invoked with the wireless_dev mutex held)
++ */
++static int rwnx_cfg80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct mesh_stop_cfm mesh_stop_cfm;
++ int error = 0;
++
++ error = rwnx_send_mesh_stop_req(rwnx_hw, rwnx_vif, &mesh_stop_cfm);
++
++ if (error == 0) {
++ /* Check the status */
++ switch (mesh_stop_cfm.status) {
++ case CO_OK:
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_chanctx_unlink(rwnx_vif);
++ rwnx_radar_cancel_cac(&rwnx_hw->radar);
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ /* delete BC/MC STA */
++ rwnx_txq_vif_deinit(rwnx_hw, rwnx_vif);
++ rwnx_del_bcn(&rwnx_vif->ap.bcn);
++
++ netif_tx_stop_all_queues(dev);
++ netif_carrier_off(dev);
++
++ break;
++
++ default:
++ error = -EIO;
++ break;
++ }
++ }
++
++ if (error) {
++ netdev_info(dev, "Failed to stop MP");
++ } else {
++ netdev_info(dev, "MP Stopped");
++ }
++
++ return 0;
++}
++
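++/*
++ * cfg80211 callback table registered with wiphy_new(); the mesh-related
++ * handlers are only installed later by rwnx_enable_mesh() when the mesh
++ * module parameter is enabled.
++ */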
++static struct cfg80211_ops rwnx_cfg80211_ops = {
++ .add_virtual_intf = rwnx_cfg80211_add_iface,
++ .del_virtual_intf = rwnx_cfg80211_del_iface,
++ .change_virtual_intf = rwnx_cfg80211_change_iface,
++ .start_p2p_device = rwnx_cfgp2p_start_p2p_device,
++ .stop_p2p_device = rwnx_cfgp2p_stop_p2p_device,
++ .scan = rwnx_cfg80211_scan,
++ .connect = rwnx_cfg80211_connect,
++ .disconnect = rwnx_cfg80211_disconnect,
++ .add_key = rwnx_cfg80211_add_key,
++ .get_key = rwnx_cfg80211_get_key,
++ .del_key = rwnx_cfg80211_del_key,
++ .set_default_key = rwnx_cfg80211_set_default_key,
++ .set_default_mgmt_key = rwnx_cfg80211_set_default_mgmt_key,
++ .add_station = rwnx_cfg80211_add_station,
++ .del_station = rwnx_cfg80211_del_station_compat,
++ .change_station = rwnx_cfg80211_change_station,
++ .mgmt_tx = rwnx_cfg80211_mgmt_tx,
++ .start_ap = rwnx_cfg80211_start_ap,
++ .change_beacon = rwnx_cfg80211_change_beacon,
++ .stop_ap = rwnx_cfg80211_stop_ap,
++ .set_monitor_channel = rwnx_cfg80211_set_monitor_channel,
++ .probe_client = rwnx_cfg80211_probe_client,
++// .mgmt_frame_register = rwnx_cfg80211_mgmt_frame_register,
++ .set_wiphy_params = rwnx_cfg80211_set_wiphy_params,
++ .set_txq_params = rwnx_cfg80211_set_txq_params,
++ .set_tx_power = rwnx_cfg80211_set_tx_power,
++// .get_tx_power = rwnx_cfg80211_get_tx_power,
++ .set_power_mgmt = rwnx_cfg80211_set_power_mgmt,
++ .get_station = rwnx_cfg80211_get_station,
++ .remain_on_channel = rwnx_cfg80211_remain_on_channel,
++ .cancel_remain_on_channel = rwnx_cfg80211_cancel_remain_on_channel,
++ .dump_survey = rwnx_cfg80211_dump_survey,
++ .get_channel = rwnx_cfg80211_get_channel,
++ .start_radar_detection = rwnx_cfg80211_start_radar_detection,
++ .update_ft_ies = rwnx_cfg80211_update_ft_ies,
++ .set_cqm_rssi_config = rwnx_cfg80211_set_cqm_rssi_config,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)
++ .channel_switch = rwnx_cfg80211_channel_switch,
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
++ //.tdls_channel_switch = rwnx_cfg80211_tdls_channel_switch,
++ //.tdls_cancel_channel_switch = rwnx_cfg80211_tdls_cancel_channel_switch,
++#endif
++ //.tdls_mgmt = rwnx_cfg80211_tdls_mgmt,
++ //.tdls_oper = rwnx_cfg80211_tdls_oper,
++ .change_bss = rwnx_cfg80211_change_bss,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL)
++ .external_auth = rwnx_cfg80211_external_auth,
++#endif
++#ifdef CONFIG_SCHED_SCAN
++ .sched_scan_start = rwnx_cfg80211_sched_scan_start,
++ .sched_scan_stop = rwnx_cfg80211_sched_scan_stop,
++#endif
++
++};
++
++
++/*********************************************************************
++ * Init/Exit functions
++ *********************************************************************/
++static void rwnx_wdev_unregister(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_vif *rwnx_vif, *tmp;
++
++ rtnl_lock();
++ list_for_each_entry_safe(rwnx_vif, tmp, &rwnx_hw->vifs, list) {
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ rwnx_cfg80211_del_iface(rwnx_hw->wiphy, &rwnx_vif->wdev);
++ #else
++ rwnx_cfg80211_del_iface(rwnx_hw->wiphy, rwnx_vif->ndev);
++ #endif
++ }
++ rtnl_unlock();
++}
++
++static void rwnx_set_vers(struct rwnx_hw *rwnx_hw)
++{
++ u32 vers = rwnx_hw->version_cfm.version_lmac;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ snprintf(rwnx_hw->wiphy->fw_version,
++ sizeof(rwnx_hw->wiphy->fw_version), "%d.%d.%d.%d",
++ (vers & (0xff << 24)) >> 24, (vers & (0xff << 16)) >> 16,
++ (vers & (0xff << 8)) >> 8, (vers & (0xff << 0)) >> 0);
++}
++
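++/*
++ * Regulatory notifier: update the radar/DFS domain and, except in RF test
++ * mode on the 8800DC/DW/D81 chips, push the new channel configuration to
++ * the firmware.
++ */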
++static void rwnx_reg_notifier(struct wiphy *wiphy,
++ struct regulatory_request *request)
++{
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++
++ printk("%s Enter\r\n", __func__);
++
++ // For now trust all initiator
++ rwnx_radar_set_domain(&rwnx_hw->radar, request->dfs_region);
++
++ if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 ||
++ ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81) && testmode == 0)){
++ rwnx_send_me_chan_config_req(rwnx_hw);
++ }
++}
++
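++/*
++ * Install the mesh-related cfg80211 callbacks and advertise mesh point
++ * support, but only when the "mesh" module parameter is set.
++ */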
++static void rwnx_enable_mesh(struct rwnx_hw *rwnx_hw)
++{
++ struct wiphy *wiphy = rwnx_hw->wiphy;
++
++ if (!rwnx_mod_params.mesh)
++ return;
++
++ rwnx_cfg80211_ops.get_station = rwnx_cfg80211_get_station;
++ rwnx_cfg80211_ops.dump_station = rwnx_cfg80211_dump_station;
++ rwnx_cfg80211_ops.add_mpath = rwnx_cfg80211_add_mpath;
++ rwnx_cfg80211_ops.del_mpath = rwnx_cfg80211_del_mpath;
++ rwnx_cfg80211_ops.change_mpath = rwnx_cfg80211_change_mpath;
++ rwnx_cfg80211_ops.get_mpath = rwnx_cfg80211_get_mpath;
++ rwnx_cfg80211_ops.dump_mpath = rwnx_cfg80211_dump_mpath;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
++ rwnx_cfg80211_ops.get_mpp = rwnx_cfg80211_get_mpp;
++ rwnx_cfg80211_ops.dump_mpp = rwnx_cfg80211_dump_mpp;
++#endif
++ rwnx_cfg80211_ops.get_mesh_config = rwnx_cfg80211_get_mesh_config;
++ rwnx_cfg80211_ops.update_mesh_config = rwnx_cfg80211_update_mesh_config;
++ rwnx_cfg80211_ops.join_mesh = rwnx_cfg80211_join_mesh;
++ rwnx_cfg80211_ops.leave_mesh = rwnx_cfg80211_leave_mesh;
++
++ wiphy->flags |= (WIPHY_FLAG_MESH_AUTH | WIPHY_FLAG_IBSS_RSN);
++ wiphy->features |= NL80211_FEATURE_USERSPACE_MPM;
++ wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
++
++ rwnx_limits[0].types |= BIT(NL80211_IFTYPE_MESH_POINT);
++ rwnx_limits_dfs[0].types |= BIT(NL80211_IFTYPE_MESH_POINT);
++}
++
++extern int rwnx_init_aic(struct rwnx_hw *rwnx_hw);
++#ifdef AICWF_USB_SUPPORT
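++/*
++ * Firmware boot parameters: each row is an {offset, value} pair written to
++ * the chip before starting the firmware. The offsets appear to index a
++ * shared configuration area (see the inline notes such as "ipc base",
++ * "buf base" and "pkt base").
++ */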
++u32 patch_tbl[][2] =
++{
++ {0x0044, 0x00000002}, //hosttype
++ {0x0048, 0x00000060},
++ {0x004c, 0x00000046},
++ {0x0050, 0x00000000}, //ipc base
++ {0x0054, 0x001a0000}, //buf base
++ {0x0058, 0x001a0140}, //desc base
++ {0x005c, 0x00001020}, //desc size
++ {0x0060, 0x001a1020}, //pkt base
++ {0x0064, 0x000207e0}, //pkt size
++ {0x0068, 0x00000008},
++ {0x006c, 0x00000040},
++ {0x0070, 0x00000040},
++ {0x0074, 0x00000020},
++ {0x0078, 0x00000000},
++ {0x007c, 0x00000040},
++ {0x0080, 0x00190000},
++ {0x0084, 0x0000fc00},//63kB
++ {0x0088, 0x0019fc00},
++ #ifdef USE_5G
++ {0x00b4, 0xf3010001},
++ #else
++ {0x00b4, 0xf3010000},
++ #endif
++ //{0x00b8, 0x0f010a01}, //tx enhanced en, tx enhanced lo rate
++ //{0x00bc, 0x00000008}, //tx enhanced hi rate
++};
++#else
++//#ifdef CONFIG_ROM_PATCH_EN
++#if 0
++u32 patch_tbl[][2] =
++{
++ {0x0044, 0x02000001},
++ {0x0058, 0x001a0000},
++ {0x005c, 0x00002020},
++ {0x0060, 0x001a2020}, //pkt base
++ {0x0064, 0x00021820}, //pkt size
++ {0x0080, 0x00190000},
++ {0x0084, 0x0000fc00},//63kB
++ {0x0088, 0x0019fc00}
++};
++#else
++u32 patch_tbl[][2] =
++{
++ #ifdef USE_5G
++ {0x00b4, 0xf3010001},
++ #else
++ {0x00b4, 0xf3010000},
++ #endif
++};
++#endif
++#endif
++
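++/*
++ * ROM patch table: {RAM address, value} pairs that appear to redirect
++ * entries of a firmware handler table (base around 0x171a30, per the inline
++ * notes) to the patched routines located in the 0x1c4xxx region.
++ */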
++u32 patch_tbl_1[14][2] =
++{
++ {0x171b24, 0x1c4021}, //61
++ {0x171c00, 0x1c40b1}, //116
++ {0x172124, 0x1c43ed}, //12*8 + 1720c4
++ {0x171bfc, 0x1c4849}, //115, 171a30 + 115 * 4
++ {0x171ee4, 0x1c4941}, //301
++ {0x171ee8, 0x1c4b09}, //302
++ {0x172134, 0x1c4d65}, //14/15/16/17/18 * 8 + 1720c4
++ {0x17213c, 0x1c4d65},
++ {0x172144, 0x1c4d65},
++ {0x17214c, 0x1c4d65},
++ {0x172154, 0x1c4d65},
++ {0x1721d0, 0x1c53dd}, // 1721c4 + 1*8 + 4
++ {0x1721f0, 0x1c5415}, // 1721c4 + 5*8 + 4
++ {0x171eb0, 0x1c54a1}, // 288
++};
++
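++/*
++ * Patch code image downloaded to chip RAM: raw ARM Thumb-2 machine code and
++ * literal-pool constants for the patched firmware routines. The tail of the
++ * table appears to hold ASCII assert/log strings packed as little-endian
++ * words.
++ */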
++u32 func_tbl[1721] =
++{
++ 0x8cc88cc3,
++ 0xd8084283,
++ 0x1ac0d205,
++ 0xbfcc283f,
++ 0x20012000,
++ 0x20014770,
++ 0x20004770,
++ 0xbf004770,
++ 0x481d4a1c,
++ 0xb538491d,
++ 0x68056913,
++ 0xf8b16914,
++ 0xf8b100b0,
++ 0xf5a310b2,
++ 0xeb0363fa,
++ 0x1b1b1345,
++ 0x1a5b1a1b,
++ 0xdb1a2b00,
++ 0x681b4b15,
++ 0x68dcb143,
++ 0x1ae36913,
++ 0x63faf5a3,
++ 0x1a5b1a1b,
++ 0xdb012b00,
++ 0xbd382001,
++ 0x681b4b0f,
++ 0x3000f9b3,
++ 0xda062b00,
++ 0x1ae46913,
++ 0x549cf504,
++ 0x2c003408,
++ 0x2000db01,
++ 0x4909bd38,
++ 0xf44f4809,
++ 0xf66b7202,
++ 0x2000fad9,
++ 0xbf00bd38,
++ 0x40501000,
++ 0x40328040,
++ 0x0017192c,
++ 0x00178bf0,
++ 0x00173250,
++ 0x001c5a3c,
++ 0x001c56b4,
++ 0x4ff0e92d,
++ 0x8c05461c,
++ 0x3062f893,
++ 0x9020f8d2,
++ 0xf8d06987,
++ 0xb089b01c,
++ 0x02ad4616,
++ 0x2012e9dd,
++ 0xf8b4b9d3,
++ 0xf1bcc068,
++ 0xd0150f00,
++ 0x6ab38b52,
++ 0xf202fb0c,
++ 0x07189205,
++ 0xf20cfb05,
++ 0xd0199206,
++ 0xebb72300,
++ 0xf44f0a09,
++ 0x930776fa,
++ 0xf5039b05,
++ 0xe9c473c8,
++ 0xe071a31f,
++ 0xf0402800,
++ 0x950680ba,
++ 0x0a01f04f,
++ 0x6ab28b50,
++ 0xf000fb0a,
++ 0x90050712,
++ 0x8122f040,
++ 0xf8df6af2,
++ 0xf8dfc2bc,
++ 0xf8dc82bc,
++ 0x48983000,
++ 0x1203f3c2,
++ 0x037ff023,
++ 0x2002f818,
++ 0xf8cc4313,
++ 0x6ab33000,
++ 0xf3c34a93,
++ 0xea4113c0,
++ 0xf04f5103,
++ 0x60014300,
++ 0xf3bf6013,
++ 0xbf008f4f,
++ 0x00586813,
++ 0x4b8dd5fc,
++ 0xf9b16819,
++ 0x29001000,
++ 0x80aef2c0,
++ 0x68124a88,
++ 0xfa82fa1f,
++ 0x07126ab2,
++ 0x809df040,
++ 0xf8df6af2,
++ 0x4882c25c,
++ 0x1000f8dc,
++ 0x1203f3c2,
++ 0x017ff021,
++ 0x2002f818,
++ 0xf8cc430a,
++ 0x6ab22000,
++ 0xf3c2497c,
++ 0x051212c0,
++ 0x0218f042,
++ 0x4600f04f,
++ 0x600e6002,
++ 0x8f4ff3bf,
++ 0x680abf00,
++ 0xd5fc0056,
++ 0xf9b3681b,
++ 0x2b003000,
++ 0x4a72db6e,
++ 0x3062f894,
++ 0x22006816,
++ 0xebaab2b6,
++ 0x92070a06,
++ 0x0909ebb7,
++ 0x0a0aeb19,
++ 0xd0872b00,
++ 0x79e5ea4f,
++ 0x464b462a,
++ 0x46594638,
++ 0xfc50f679,
++ 0x92021bba,
++ 0xfb009a07,
++ 0xeb6bf309,
++ 0xfb050202,
++ 0x92033301,
++ 0x0105fba0,
++ 0xe9dd4419,
++ 0x42992302,
++ 0x4290bf08,
++ 0xe9cdbf38,
++ 0x4b5e0102,
++ 0x6702e9dd,
++ 0x9b066819,
++ 0x018918f6,
++ 0x9905d430,
++ 0xf5a11a71,
++ 0x4b597ac8,
++ 0x69194a59,
++ 0x691b6812,
++ 0x44511a89,
++ 0xf5a31acb,
++ 0x3b1453a5,
++ 0x6a632b00,
++ 0x1949bfb8,
++ 0xd022428b,
++ 0x6a1a4b52,
++ 0xd04142a2,
++ 0xf1044d51,
++ 0xf8d50018,
++ 0x479831e0,
++ 0x30a8f8d5,
++ 0xb0094620,
++ 0x4ff0e8bd,
++ 0xf8904718,
++ 0xf1baa002,
++ 0xd1010f00,
++ 0xa003f890,
++ 0xf00afb05,
++ 0xe73d9006,
++ 0x98054946,
++ 0xeba11a09,
++ 0x44b20a0a,
++ 0xb009e7cb,
++ 0x8ff0e8bd,
++ 0x0058680b,
++ 0x4941d48d,
++ 0xf44f4841,
++ 0xf66b72ae,
++ 0x2200f98f,
++ 0x3062f894,
++ 0xf5aa9207,
++ 0xf44f7afa,
++ 0xe78776fa,
++ 0x00516812,
++ 0xaf4ef53f,
++ 0x48384937,
++ 0x72aef44f,
++ 0xf97cf66b,
++ 0x7afaf44f,
++ 0xe7474b2c,
++ 0x62614a2c,
++ 0x01926812,
++ 0x4a32d4b8,
++ 0x68124d32,
++ 0x1024f893,
++ 0x4207f3c2,
++ 0x2d23f885,
++ 0x4a2fbb61,
++ 0x6812492f,
++ 0x20016809,
++ 0xf885b2d2,
++ 0xf8832d24,
++ 0x780b0024,
++ 0xd1054283,
++ 0x4a2b492a,
++ 0x6813600b,
++ 0x60134303,
++ 0x2d23f895,
++ 0x3d24f895,
++ 0xd21b429a,
++ 0x681b4b17,
++ 0x3000f9b3,
++ 0xda0d2b00,
++ 0x2d23f895,
++ 0x3d24f895,
++ 0xd307429a,
++ 0x48214920,
++ 0xf44f4d15,
++ 0xf66b7221,
++ 0xe787f96f,
++ 0xe7854d12,
++ 0xf44f2200,
++ 0x920776fa,
++ 0xe7354692,
++ 0x4b124814,
++ 0x1d23f895,
++ 0x2d24f895,
++ 0x6806681b,
++ 0xb2f64816,
++ 0x96000c1b,
++ 0xff82f66a,
++ 0xbf00e7d4,
++ 0x40328160,
++ 0x4032816c,
++ 0x00173250,
++ 0x4032004c,
++ 0x40501000,
++ 0x403280a4,
++ 0x00178d3c,
++ 0x00171a30,
++ 0xfffffe70,
++ 0x001c5a20,
++ 0x001c56e0,
++ 0x40328044,
++ 0x001e4000,
++ 0x40320090,
++ 0x00173270,
++ 0x40328070,
++ 0x40328074,
++ 0x001c5a58,
++ 0x001c5718,
++ 0x001c5704,
++ 0x40328164,
++ 0x00041830,
++ 0x4ff0e92d,
++ 0x9354f8df,
++ 0xb020f8d9,
++ 0xf44fb083,
++ 0xf6692000,
++ 0xf1bbfd9b,
++ 0xf0000f00,
++ 0x4cb480f1,
++ 0xa33cf8df,
++ 0x4fb37fa1,
++ 0x8338f8df,
++ 0x29004eb2,
++ 0x80e6f000,
++ 0xd50e0708,
++ 0xf8db4bb0,
++ 0x681b0070,
++ 0x2010f8da,
++ 0x4403685b,
++ 0x2b001a9b,
++ 0x80c7f2c0,
++ 0x01f7f001,
++ 0x074a77a1,
++ 0x8095f100,
++ 0xd515078b,
++ 0x3004f8db,
++ 0x0302f023,
++ 0x3004f8cb,
++ 0x301df899,
++ 0xd1082b05,
++ 0x48a34da2,
++ 0x31d8f8d5,
++ 0x23004798,
++ 0xf8897fa1,
++ 0xf001301d,
++ 0x77a101fd,
++ 0xd52d07cd,
++ 0x01586833,
++ 0x809ff140,
++ 0x0c096831,
++ 0x7f7cf411,
++ 0x80a6f000,
++ 0x4b983910,
++ 0x01fff001,
++ 0x1281eb01,
++ 0x03c2eb03,
++ 0x2021f893,
++ 0xf0002a00,
++ 0x4a93809d,
++ 0x68137f99,
++ 0xf9b34a92,
++ 0xf44f3000,
++ 0xfb0060a4,
++ 0x2b002201,
++ 0xf2c09200,
++ 0x683b809c,
++ 0x2fe0f413,
++ 0x80a5f000,
++ 0xf0017fa1,
++ 0x77a101fe,
++ 0xd59e068a,
++ 0x49894e88,
++ 0xf3c56835,
++ 0x463a1741,
++ 0xf66a2002,
++ 0x2300ff23,
++ 0x1547f3c5,
++ 0x3078f88b,
++ 0xf0402f00,
++ 0x4e7b80af,
++ 0x4d824a81,
++ 0xf4236813,
++ 0x60137300,
++ 0x3004f8db,
++ 0x21d8f8d6,
++ 0x0301f023,
++ 0x3004f8cb,
++ 0x0028f10b,
++ 0x682a4790,
++ 0x2b027813,
++ 0x8114f000,
++ 0x680b4974,
++ 0x0301f023,
++ 0x7813600b,
++ 0xd1122b01,
++ 0x41fff501,
++ 0x4a733134,
++ 0x3010f8da,
++ 0xf8b26808,
++ 0xf8d610b2,
++ 0xeb0321e0,
++ 0x1a591340,
++ 0x0018f10b,
++ 0x682b4790,
++ 0x2b02781b,
++ 0x80b4f000,
++ 0xf0017fa1,
++ 0x77a101df,
++ 0x4968e74f,
++ 0x20024d5d,
++ 0xfedcf66a,
++ 0x68634966,
++ 0xf022680a,
++ 0x600a0204,
++ 0xf4436822,
++ 0x431a7300,
++ 0xf8d5614a,
++ 0x60631498,
++ 0x0063f89b,
++ 0x3494f8d5,
++ 0x4798465a,
++ 0x7fa1683b,
++ 0x737cf023,
++ 0xf8d8603b,
++ 0xf0013000,
++ 0xf44301fb,
++ 0xf8c80380,
++ 0x77a13000,
++ 0x4856e742,
++ 0xfe66f66a,
++ 0x2200e782,
++ 0x006cf89b,
++ 0xf6584611,
++ 0xb160fd17,
++ 0xe72f7fa1,
++ 0xf66a4850,
++ 0xe775fe59,
++ 0xf66a484f,
++ 0xe771fe55,
++ 0xe8bdb003,
++ 0xf8da8ff0,
++ 0x7fa13010,
++ 0x3070f8cb,
++ 0x4593e71e,
++ 0xaf61f43f,
++ 0x48494948,
++ 0x22e6f240,
++ 0xf818f66b,
++ 0xf413683b,
++ 0xf47f2fe0,
++ 0x6832af5b,
++ 0x49446833,
++ 0xf3c30fd0,
++ 0x46057380,
++ 0x20024602,
++ 0xf66a9301,
++ 0x9b01fe81,
++ 0x0205ea53,
++ 0xf43f4628,
++ 0x4d2baf49,
++ 0x46199a00,
++ 0x323cf8d5,
++ 0x68334798,
++ 0x4300f023,
++ 0x68336033,
++ 0x4380f023,
++ 0xe7396033,
++ 0x2010f8da,
++ 0x529cf502,
++ 0x32084631,
++ 0xf8dae005,
++ 0x1ad33010,
++ 0xf2c02b00,
++ 0x680b80b2,
++ 0xd5f6071b,
++ 0x8080f8df,
++ 0x3000f8d8,
++ 0x0308f023,
++ 0x3000f8c8,
++ 0x3000f8d8,
++ 0xd51206de,
++ 0x49274e15,
++ 0xf66a2002,
++ 0xf8d6fe4b,
++ 0xf005323c,
++ 0x08780101,
++ 0x4798465a,
++ 0x3000f8d8,
++ 0x0310f023,
++ 0x3000f8c8,
++ 0x4b1fe722,
++ 0x4e0b491f,
++ 0x601d2501,
++ 0xf66a2002,
++ 0x4b1dfe35,
++ 0xe717601d,
++ 0x22304b1c,
++ 0xf657601a,
++ 0xe745fddb,
++ 0x00178bb8,
++ 0x40320074,
++ 0x40320070,
++ 0x00173244,
++ 0x00171a30,
++ 0x00178d48,
++ 0x00175aa8,
++ 0x00173250,
++ 0x00177738,
++ 0x4032008c,
++ 0x001c57ec,
++ 0x4033b390,
++ 0x00173270,
++ 0x0017192c,
++ 0x001c578c,
++ 0x4032004c,
++ 0x001c5790,
++ 0x001c57a8,
++ 0x001c57bc,
++ 0x001c5a70,
++ 0x001c57d0,
++ 0x001c57e0,
++ 0x001c580c,
++ 0x40328564,
++ 0x001c5818,
++ 0x40328568,
++ 0x40320038,
++ 0x00178d3c,
++ 0x40501000,
++ 0x4032006c,
++ 0x8310f3ef,
++ 0xd40307d8,
++ 0x4b31b672,
++ 0x601a2201,
++ 0x4f314b30,
++ 0x3201681a,
++ 0x2100601a,
++ 0x6039683a,
++ 0x080ff002,
++ 0x2010f8da,
++ 0x46164631,
++ 0xf247460a,
++ 0xe0045030,
++ 0x1010f8da,
++ 0x42811b89,
++ 0x6839d81b,
++ 0xd1f70709,
++ 0x49264825,
++ 0xc000f8d0,
++ 0x4616680f,
++ 0x2010f8da,
++ 0x0f00f1b8,
++ 0x4a22d11a,
++ 0x60112104,
++ 0xb132681a,
++ 0x3a01491a,
++ 0x680b601a,
++ 0xb103b90a,
++ 0x682ab662,
++ 0x491ce6b0,
++ 0x20029200,
++ 0xfdb0f66a,
++ 0x9a004b14,
++ 0x4919e7d3,
++ 0xf66a2002,
++ 0xe74bfda9,
++ 0x070cea07,
++ 0xd4e0077f,
++ 0x1600e9cd,
++ 0x46164680,
++ 0x077ae001,
++ 0x9a00d412,
++ 0x7000f8d8,
++ 0xf8da6812,
++ 0xf2471010,
++ 0x1b895030,
++ 0xea074281,
++ 0xd9f00702,
++ 0x2002490b,
++ 0xfd8cf66a,
++ 0xe7ea4b02,
++ 0xe7c49e01,
++ 0x0017569c,
++ 0x00172600,
++ 0x40320038,
++ 0x4032806c,
++ 0x40328074,
++ 0x40328070,
++ 0x001c5828,
++ 0x001c57f8,
++ 0x001c5834,
++ 0xf890b5f8,
++ 0x2b003064,
++ 0xf890d03a,
++ 0x4604308a,
++ 0xd1362b00,
++ 0xf8944e32,
++ 0x4a32306c,
++ 0x6a674932,
++ 0xeb036a09,
++ 0xeb021383,
++ 0x42a103c3,
++ 0x443d685d,
++ 0xf8d6d044,
++ 0x462931e0,
++ 0x0018f104,
++ 0x46204798,
++ 0xfafaf65e,
++ 0x1080f8d4,
++ 0x3214f8d6,
++ 0x46204439,
++ 0xf8d64798,
++ 0x462a30a4,
++ 0x46204639,
++ 0xb9784798,
++ 0x3078f894,
++ 0x49216862,
++ 0xb2db3301,
++ 0x0201f042,
++ 0xf8846809,
++ 0x60623078,
++ 0x4293780a,
++ 0xd029d811,
++ 0x3b01bdf8,
++ 0x2b01b2db,
++ 0x308af880,
++ 0x2b02d912,
++ 0xd1c04e13,
++ 0x0063f890,
++ 0x31c0f8d6,
++ 0x47982100,
++ 0xf8d6e7b9,
++ 0xf8941164,
++ 0x4622006c,
++ 0x40f8e8bd,
++ 0xbb84f658,
++ 0x40f8e8bd,
++ 0xbb3af65e,
++ 0x62654b0c,
++ 0x30b5f893,
++ 0xd1ba2b00,
++ 0x681b4b0a,
++ 0x2b02781b,
++ 0xe7b4d1af,
++ 0xe8bd4620,
++ 0xf66540f8,
++ 0xbf00b8d1,
++ 0x00171a30,
++ 0x00175aa8,
++ 0x00178d3c,
++ 0x00173244,
++ 0x0017192c,
++ 0x00173270,
++ 0x4c5cb538,
++ 0x68224b5c,
++ 0xf002495c,
++ 0x701a020f,
++ 0xf66a2002,
++ 0x6823fcef,
++ 0xd02e0718,
++ 0x4a594958,
++ 0x2000680b,
++ 0x4300f023,
++ 0x6020600b,
++ 0x07596813,
++ 0x4b55d5fc,
++ 0x4a554952,
++ 0x60182004,
++ 0xf043680b,
++ 0x600b4300,
++ 0xf0436813,
++ 0x60136300,
++ 0x20024950,
++ 0xfcd0f66a,
++ 0x4a4f4b47,
++ 0x494f4847,
++ 0x601c2420,
++ 0x24016813,
++ 0x0310f043,
++ 0x70446013,
++ 0x781b680b,
++ 0xd0382b03,
++ 0xd0062b01,
++ 0x4a44bd38,
++ 0xf0236813,
++ 0x60136300,
++ 0x4a45e7e2,
++ 0x035b6813,
++ 0x4a44d5fc,
++ 0x311c6911,
++ 0x1acb6913,
++ 0xdafb2b00,
++ 0x681c4b41,
++ 0x4b41b174,
++ 0xf9b3681b,
++ 0x2b003000,
++ 0x4a3fdb4e,
++ 0xf8b268e3,
++ 0x4a3a10b2,
++ 0x21041a5b,
++ 0x60916313,
++ 0x4a3c493b,
++ 0xf443680b,
++ 0x600b4300,
++ 0xf4436d13,
++ 0x65134300,
++ 0xf0236dd3,
++ 0xf0430303,
++ 0xf0434300,
++ 0x65d30301,
++ 0x4b2fbd38,
++ 0xb174681c,
++ 0x681b4b2e,
++ 0x3000f9b3,
++ 0xdb152b00,
++ 0x68e34a2c,
++ 0x10b2f8b2,
++ 0x1a5b4a27,
++ 0x63132104,
++ 0x4b2b6091,
++ 0xf042681a,
++ 0x601a0201,
++ 0x075a681b,
++ 0x4b28d5ae,
++ 0x7200f44f,
++ 0xbd38601a,
++ 0x4d214b1e,
++ 0x68e3691a,
++ 0x10b2f8b5,
++ 0x1a521a9a,
++ 0xdae32a00,
++ 0x48224921,
++ 0x32dbf240,
++ 0xfddef66a,
++ 0xf8b568e3,
++ 0xe7d910b2,
++ 0x69124d17,
++ 0xf8b568e3,
++ 0x1a9a10b2,
++ 0x2a001a52,
++ 0x4918daab,
++ 0xf2404818,
++ 0xf66a32e9,
++ 0x68e3fdcb,
++ 0x10b2f8b5,
++ 0xbf00e7a1,
++ 0x40320038,
++ 0x00172604,
++ 0x001c5844,
++ 0x40328074,
++ 0x4032806c,
++ 0x40328070,
++ 0x40328048,
++ 0x001c584c,
++ 0x40580010,
++ 0x00173274,
++ 0x40041020,
++ 0x40501000,
++ 0x00178bf0,
++ 0x00173250,
++ 0x0017192c,
++ 0x40240014,
++ 0x40506000,
++ 0x40044084,
++ 0x40044100,
++ 0x001c5a8c,
++ 0x001c5854,
++ 0xf890b380,
++ 0xb36b3064,
++ 0x47f0e92d,
++ 0x4a7b4b7a,
++ 0xf8df4e7b,
++ 0xf8df920c,
++ 0x4d7a8240,
++ 0x460c487a,
++ 0x2702497a,
++ 0xc470f8d1,
++ 0x7180f8c3,
++ 0x49786892,
++ 0xc044f8c2,
++ 0x601f2201,
++ 0xf8d97032,
++ 0x680b2010,
++ 0xf0236072,
++ 0x600b0302,
++ 0x1000f8d8,
++ 0xc0b2f8b5,
++ 0x68018f8b,
++ 0xebb34463,
++ 0xea4f1f41,
++ 0xd3021a41,
++ 0x87f0e8bd,
++ 0x496b4770,
++ 0xf66a4638,
++ 0xf8d9fbdf,
++ 0x68f32010,
++ 0x2b001a9b,
++ 0xf2804b67,
++ 0xf89380a2,
++ 0xf8932d23,
++ 0x2a012d24,
++ 0xf893d90a,
++ 0x68612d23,
++ 0xf0002a00,
++ 0xf893809e,
++ 0x3a012d23,
++ 0xaa02fb01,
++ 0x3d23f893,
++ 0x4652495d,
++ 0xf66a2002,
++ 0xf8d8fbbf,
++ 0xf8b53000,
++ 0x8f9a00b2,
++ 0x4b594955,
++ 0xc178f8df,
++ 0x680b691c,
++ 0xebaa4402,
++ 0xf0220202,
++ 0xf0030703,
++ 0x433b0303,
++ 0x600b4f53,
++ 0x60b2683b,
++ 0x0301f043,
++ 0xf8dc603b,
++ 0xf8dc3000,
++ 0xf3c31000,
++ 0xf4210309,
++ 0xf043717f,
++ 0xf0210301,
++ 0x430b0103,
++ 0x3000f8cc,
++ 0x4422683b,
++ 0x60f2075b,
++ 0x4b47d503,
++ 0x7200f44f,
++ 0x4a46601a,
++ 0x68134c46,
++ 0xf043493d,
++ 0x60130308,
++ 0xf0436823,
++ 0x60230302,
++ 0xf5a2680b,
++ 0x3a1022fe,
++ 0x0301f043,
++ 0x6911600b,
++ 0x7196f501,
++ 0x1a5b6913,
++ 0xdbfb2b00,
++ 0x681b4b3b,
++ 0x2b01781b,
++ 0x493ad188,
++ 0x6a4b4c3a,
++ 0xf0236824,
++ 0xf04303ff,
++ 0x624b03df,
++ 0xf4236a4b,
++ 0xf443437f,
++ 0x624b435f,
++ 0x4b34b194,
++ 0xf9b3681b,
++ 0x2b003000,
++ 0x68e2db31,
++ 0x48316861,
++ 0xfb04f66a,
++ 0x10b2f8b5,
++ 0x4a2568e3,
++ 0x21041a5b,
++ 0x60916313,
++ 0x4a28492c,
++ 0xf443680b,
++ 0x600b4300,
++ 0xf4436d13,
++ 0x65134300,
++ 0xf0236dd3,
++ 0xf0430303,
++ 0xf0434300,
++ 0x65d30301,
++ 0xf4436813,
++ 0x60135380,
++ 0x4922e74e,
++ 0x3d23f893,
++ 0x46524638,
++ 0xfb2ef66a,
++ 0xf893e76d,
++ 0x3a012d24,
++ 0xaa02fb01,
++ 0x6913e760,
++ 0x1ad368e2,
++ 0x28001a18,
++ 0x4919dac8,
++ 0xf2404819,
++ 0xf66a4231,
++ 0xe7c0fca1,
++ 0xe000e100,
++ 0xe000ed00,
++ 0x00173254,
++ 0x0017192c,
++ 0x40328040,
++ 0x00171a30,
++ 0x40320084,
++ 0x001c589c,
++ 0x001e4000,
++ 0x001c58b8,
++ 0x40501000,
++ 0x40044084,
++ 0x40044100,
++ 0x40580010,
++ 0x40580018,
++ 0x00173274,
++ 0x40506000,
++ 0x00178bf0,
++ 0x00173250,
++ 0x001c58c8,
++ 0x40240014,
++ 0x001c58a4,
++ 0x001c5aac,
++ 0x001c5854,
++ 0x00173244,
++ 0x4ff0e92d,
++ 0x4abd4bbc,
++ 0xf852681b,
++ 0xf9b34020,
++ 0x4abb3000,
++ 0x8b02ed2d,
++ 0x02c0eb02,
++ 0xee082b00,
++ 0xb08b2a10,
++ 0xea4f4683,
++ 0xf2c005c0,
++ 0x462082c4,
++ 0xf8d0f669,
++ 0xf8534bb2,
++ 0xf1b9903b,
++ 0xf0000f00,
++ 0x230080e5,
++ 0x3303e9cd,
++ 0xf5059302,
++ 0x9301629e,
++ 0xf10b469a,
++ 0x9208039e,
++ 0x464c9305,
++ 0xf898e079,
++ 0x06df3004,
++ 0x80d2f140,
++ 0xf4226932,
++ 0x9a010900,
++ 0x9010f8c6,
++ 0x065d3201,
++ 0xf1009201,
++ 0x4fa180e0,
++ 0xf8d74620,
++ 0x47983418,
++ 0xf4036b63,
++ 0xf5b31360,
++ 0xbf081f20,
++ 0xf3ef46a2,
++ 0x07d98310,
++ 0xb672d403,
++ 0x22014b99,
++ 0x4d99601a,
++ 0xee18682b,
++ 0x33010a10,
++ 0xf669602b,
++ 0x6b63f951,
++ 0x1360f403,
++ 0x1f60f5b3,
++ 0x80a9f000,
++ 0xb133682b,
++ 0x3b014a8f,
++ 0x602b6812,
++ 0xb102b90b,
++ 0xf8dfb662,
++ 0xf8d88240,
++ 0x78193000,
++ 0xf419b391,
++ 0xd1050300,
++ 0x002ef9b4,
++ 0x28008de2,
++ 0x817ef2c0,
++ 0x302cf894,
++ 0x202af894,
++ 0xf8b44884,
++ 0xeb03c008,
++ 0x441303c3,
++ 0x530ef203,
++ 0xf8502901,
++ 0xeba22023,
++ 0xf840020c,
++ 0xf0002023,
++ 0x6ce080d5,
++ 0xf650b138,
++ 0xf8d8fd4f,
++ 0x781e3000,
++ 0xf0002e01,
++ 0x4621810f,
++ 0xf66a4658,
++ 0x6b63fc95,
++ 0x1360f403,
++ 0x1f60f5b3,
++ 0x80eaf000,
++ 0xf8534b6d,
++ 0x2c00403b,
++ 0xf8d4d05c,
++ 0x6d268048,
++ 0x0f00f1b8,
++ 0xaf7ff47f,
++ 0x8310f3ef,
++ 0xd40307d9,
++ 0x4b67b672,
++ 0x601a2201,
++ 0x682b4d66,
++ 0x0a10ee18,
++ 0x602b3301,
++ 0xf8ecf669,
++ 0xb133682b,
++ 0x3b014a60,
++ 0x602b6812,
++ 0xb102b90b,
++ 0x4f5cb662,
++ 0x9178f8df,
++ 0x817cf8df,
++ 0xf6764620,
++ 0xf8d7ff21,
++ 0x46203418,
++ 0xf8d74798,
++ 0x220033ac,
++ 0xf18bfa5f,
++ 0x47984620,
++ 0x302cf894,
++ 0x102af894,
++ 0xeb038922,
++ 0x440b03c3,
++ 0x530ef203,
++ 0x1023f859,
++ 0xf8491a8a,
++ 0xf8d82023,
++ 0x781b3000,
++ 0xd0642b01,
++ 0xd0532b02,
++ 0xd1082b03,
++ 0x3054f894,
++ 0xf0002b01,
++ 0x8de381db,
++ 0xf10007d9,
++ 0x4621814c,
++ 0xf66a4658,
++ 0x4b3ffc31,
++ 0x403bf853,
++ 0xd1a22c00,
++ 0xecbdb00b,
++ 0xe8bd8b02,
++ 0x4b388ff0,
++ 0xf9b3681b,
++ 0x2b003000,
++ 0x808bf2c0,
++ 0xe9dd4650,
++ 0xf6761201,
++ 0x4640fee7,
++ 0xfbeef659,
++ 0xe9cd2300,
++ 0x469a3301,
++ 0xf8d8e742,
++ 0xb1b220dc,
++ 0x8ce38811,
++ 0x1311eba3,
++ 0x030bf3c3,
++ 0x71fef240,
++ 0xd816428b,
++ 0xea4f2b3f,
++ 0xd8121113,
++ 0x0241eb02,
++ 0x030ff003,
++ 0xfa428852,
++ 0x07d8f303,
++ 0x9b02d509,
++ 0x93023301,
++ 0x0304f44f,
++ 0x0903ea49,
++ 0x9010f8c6,
++ 0xf44fe6fb,
++ 0xe7f72380,
++ 0x3054f894,
++ 0xf0002b01,
++ 0x8de38117,
++ 0xd5ae07da,
++ 0xf6682080,
++ 0xf8d8ff75,
++ 0x781b3000,
++ 0xf894e79c,
++ 0x2b013054,
++ 0x80f7f000,
++ 0x07db8de3,
++ 0x2080d59f,
++ 0xff66f668,
++ 0x3000f8d8,
++ 0xe78b781b,
++ 0x3054f894,
++ 0xf47f2b01,
++ 0xf890af26,
++ 0xf0433f20,
++ 0xf1060302,
++ 0xf8800110,
++ 0x22043f20,
++ 0xf6512012,
++ 0x8ba1fd2b,
++ 0x46224809,
++ 0xf91ef66a,
++ 0xbf00e713,
++ 0x00173250,
++ 0x0003fc40,
++ 0x00175980,
++ 0x00171a30,
++ 0x0017569c,
++ 0x00172600,
++ 0x00173278,
++ 0x001c5904,
++ 0x00173274,
++ 0x2b009b03,
++ 0x808bf040,
++ 0xf48bfa5f,
++ 0xf6682080,
++ 0x4620ff2f,
++ 0xfebef659,
++ 0x93032300,
++ 0xf1bae706,
++ 0xf47f0f00,
++ 0x49aaaf71,
++ 0xf24048aa,
++ 0xf66a42e7,
++ 0xe769fac7,
++ 0xe9d34ba8,
++ 0x69130201,
++ 0x46804798,
++ 0xf43f2800,
++ 0xf650aee8,
++ 0x4603fd49,
++ 0xf0002800,
++ 0x22008096,
++ 0x600249a1,
++ 0x605a6808,
++ 0x609a4440,
++ 0xf3ef6018,
++ 0x07d28210,
++ 0x812ef140,
++ 0x6829489c,
++ 0x31016802,
++ 0x0201f042,
++ 0x60026029,
++ 0x68124a98,
++ 0xd4fb0790,
++ 0x68124a97,
++ 0xf0002a00,
++ 0x4e968116,
++ 0x2a006872,
++ 0x8149f000,
++ 0x4a926053,
++ 0x681289b0,
++ 0x4b906073,
++ 0x30013201,
++ 0x601a81b0,
++ 0x68134a8c,
++ 0x0301f023,
++ 0x29006013,
++ 0xaeadf43f,
++ 0x39014b8b,
++ 0x6029681b,
++ 0xf47f2900,
++ 0x2b00aea6,
++ 0xaea3f43f,
++ 0xe6a0b662,
++ 0x49866d20,
++ 0x64a36503,
++ 0xf8946103,
++ 0x63e3502b,
++ 0xf3c29b05,
++ 0x20a4020e,
++ 0x0201f042,
++ 0x3005fb10,
++ 0x63a4f44f,
++ 0x1305fb03,
++ 0xeb0185e2,
++ 0x4a7c00c0,
++ 0x46219304,
++ 0xffe2f668,
++ 0xf4036b63,
++ 0xf5b31360,
++ 0xd0021f60,
++ 0x93032301,
++ 0x9804e686,
++ 0xfd60f656,
++ 0xf43f2800,
++ 0x4d73af6f,
++ 0x31fff895,
++ 0xf47f2b00,
++ 0x9b04af69,
++ 0xf8cd9a08,
++ 0xfa5fb00c,
++ 0xf8ddf48b,
++ 0x46d39014,
++ 0x4698189e,
++ 0xe00946ba,
++ 0xff74f668,
++ 0x3424f8da,
++ 0x46214638,
++ 0xf8954798,
++ 0xb92331ff,
++ 0x7039f858,
++ 0x2f004630,
++ 0x46dad1f0,
++ 0xb00cf8dd,
++ 0x2080e74a,
++ 0xfe7af668,
++ 0x4640e6af,
++ 0xfb92f650,
++ 0xf899e647,
++ 0x22043f20,
++ 0x0302f043,
++ 0x0110f106,
++ 0xf8892012,
++ 0xf6513f20,
++ 0x8de3fc43,
++ 0xf53f07da,
++ 0xe6fdaefc,
++ 0xf6539306,
++ 0x9b06f95d,
++ 0x28009007,
++ 0x80c0f000,
++ 0x4b509309,
++ 0x2a00681a,
++ 0x80c2f000,
++ 0xf6684618,
++ 0x9b07ff39,
++ 0xf04f2204,
++ 0x21120e00,
++ 0x70994684,
++ 0xf883701a,
++ 0xf883e001,
++ 0xf106e003,
++ 0x18980110,
++ 0xc018f8cd,
++ 0xfdc4f678,
++ 0x8a194b42,
++ 0xf5b19b09,
++ 0xd85a7fc3,
++ 0xb29b1c4b,
++ 0x00ca9309,
++ 0x4b3e9806,
++ 0x68188181,
++ 0x4b3d9907,
++ 0x0c02eb00,
++ 0x0e01f04f,
++ 0x1004f8cc,
++ 0x400b5881,
++ 0x6380f043,
++ 0x0308f043,
++ 0xf8995083,
++ 0x4a333782,
++ 0x82119909,
++ 0xf8894473,
++ 0x9b063782,
++ 0x22082100,
++ 0xc004f8c3,
++ 0xe00ef883,
++ 0x609a6019,
++ 0x8310f3ef,
++ 0xd40307db,
++ 0x4b25b672,
++ 0xe000f8c3,
++ 0x482a682b,
++ 0x33019906,
++ 0xf668602b,
++ 0xf8d7fea5,
++ 0x47983444,
++ 0xb133682b,
++ 0x3b014a1d,
++ 0x602b6812,
++ 0xb102b90b,
++ 0x8de3b662,
++ 0xf53f07d8,
++ 0xe67cae7b,
++ 0x69324b1f,
++ 0x601a681b,
++ 0xffaef64e,
++ 0x4b1de61d,
++ 0x421c681b,
++ 0xad37f47f,
++ 0x481b490a,
++ 0x6293f44f,
++ 0xf988f66a,
++ 0x2200e52f,
++ 0x46119309,
++ 0x4a17e7a4,
++ 0xbb6a6812,
++ 0x4e094a15,
++ 0xe6e86013,
++ 0x4a08b672,
++ 0xe6cd6016,
++ 0x001c5ad0,
++ 0x001c58ec,
++ 0x001755b4,
++ 0x001719e4,
++ 0x40240060,
++ 0x40240064,
++ 0x0017559c,
++ 0x0017569c,
++ 0x00177738,
++ 0x001c4001,
++ 0x00175780,
++ 0x0017469c,
++ 0x001755d4,
++ 0x31ff0000,
++ 0x001746a4,
++ 0x00180000,
++ 0x0017a5a0,
++ 0x001c58d4,
++ 0x40240068,
++ 0x9306480b,
++ 0xff78f669,
++ 0x9b066829,
++ 0x4809e7ca,
++ 0xf6699306,
++ 0x6829ff71,
++ 0xe6b09b06,
++ 0xb00b4806,
++ 0x8b02ecbd,
++ 0x4ff0e8bd,
++ 0xbcdef64f,
++ 0xe7f64803,
++ 0x001c591c,
++ 0x001c5924,
++ 0x001c5938,
++ 0x001c594c,
++ 0xf240b530,
++ 0xb0834003,
++ 0x4619460d,
++ 0xf6682308,
++ 0xe9d5f98d,
++ 0x46043200,
++ 0x3200e9c0,
++ 0xe9cd4611,
++ 0x48042200,
++ 0xff48f669,
++ 0xf6684620,
++ 0x2000f9af,
++ 0xbd30b003,
++ 0x001c5970,
++ 0x460cb570,
++ 0x4619b084,
++ 0x4012f240,
++ 0xf6682308,
++ 0x6822f971,
++ 0x429a4b12,
++ 0xd0104605,
++ 0x686168a3,
++ 0xe9c54810,
++ 0xe9cd2300,
++ 0x92003301,
++ 0xf669461a,
++ 0x4628ff27,
++ 0xf98ef668,
++ 0xb0042000,
++ 0xe9d4bd70,
++ 0x68116301,
++ 0x404b4808,
++ 0x404b4033,
++ 0xe9d46013,
++ 0x680a1300,
++ 0x68a29200,
++ 0xff12f669,
++ 0xe7dd6822,
++ 0x40344058,
++ 0x001c59cc,
++ 0x001c59b0,
++ 0xbf002332,
++ 0xf0133b01,
++ 0xd1fa03ff,
++ 0xbf004770,
++ 0xbf0023c8,
++ 0xf0133b01,
++ 0xd1fa03ff,
++ 0xbf004770,
++ 0x49724a71,
++ 0xf8df6813,
++ 0xf023c208,
++ 0xb5f00302,
++ 0x680b6013,
++ 0x4c6f4a6e,
++ 0xf4234f6f,
++ 0x600b6300,
++ 0x496e6813,
++ 0xf4232800,
++ 0xbf1c7380,
++ 0x468c4627,
++ 0x20326013,
++ 0x3801bf00,
++ 0x00fff010,
++ 0xf8dfd1fa,
++ 0x4a63e18c,
++ 0x3000f8de,
++ 0xf4234e65,
++ 0xf8ce5380,
++ 0x68133000,
++ 0x7300f423,
++ 0xf8de6013,
++ 0xf4433000,
++ 0xf8ce6380,
++ 0xf8de3000,
++ 0xf4433000,
++ 0xf8ce6300,
++ 0x46723000,
++ 0x1cc125ff,
++ 0x4664b2c9,
++ 0xf0236813,
++ 0x430b03ff,
++ 0xf8546013,
++ 0x60333b04,
++ 0xf4436813,
++ 0x60137380,
++ 0xbf00bf00,
++ 0xbf00bf00,
++ 0x049b6813,
++ 0x3901d5fc,
++ 0x428db2c9,
++ 0x3004d1e8,
++ 0x28803504,
++ 0xf10cb2ed,
++ 0xd1de0c10,
++ 0x3000f8de,
++ 0x6380f423,
++ 0x3000f8ce,
++ 0xbf0023c8,
++ 0xf0133b01,
++ 0xd1fa03ff,
++ 0x493f4c3e,
++ 0x48436822,
++ 0x6200f422,
++ 0x680a6022,
++ 0x0280f042,
++ 0x680a600a,
++ 0x7280f442,
++ 0x600a3f04,
++ 0xf022680a,
++ 0x431a021f,
++ 0xf857600a,
++ 0x60022f04,
++ 0xf042680a,
++ 0x600a0220,
++ 0xbf00bf00,
++ 0xbf00bf00,
++ 0x0552680a,
++ 0x3301d5fc,
++ 0xd1e92b10,
++ 0xf023680b,
++ 0x600b0380,
++ 0xbf0023c8,
++ 0xf0133b01,
++ 0xd1fa03ff,
++ 0x4a2d4927,
++ 0xf423680b,
++ 0x600b7380,
++ 0xf4436813,
++ 0x60131300,
++ 0xbf002332,
++ 0xf0133b01,
++ 0xd1fa03ff,
++ 0x4a1f4b1e,
++ 0x4d256819,
++ 0x48264c25,
++ 0xf4414e26,
++ 0x60195180,
++ 0xf4416811,
++ 0x60117100,
++ 0xf4416819,
++ 0x60196100,
++ 0x4b216811,
++ 0x7180f441,
++ 0x682a6011,
++ 0xf442491f,
++ 0x602a5280,
++ 0xf4226822,
++ 0x60222280,
++ 0xf0226802,
++ 0x60025200,
++ 0x60306818,
++ 0x45bbf5a5,
++ 0x685c3d78,
++ 0x689d602c,
++ 0x4a16600d,
++ 0x601568dd,
++ 0x691d4815,
++ 0xe9d36005,
++ 0x4c145005,
++ 0x602569db,
++ 0x61136108,
++ 0xbf00bdf0,
++ 0x40580018,
++ 0x40344060,
++ 0x4034406c,
++ 0x001718a4,
++ 0x00171824,
++ 0x00171624,
++ 0x40344064,
++ 0x40344070,
++ 0x40344058,
++ 0x40342014,
++ 0x40342018,
++ 0x4034201c,
++ 0x4033c218,
++ 0x001718e4,
++ 0x4033c220,
++ 0x4033c224,
++ 0x4033c228,
++ 0x4033c22c,
++ 0x00171324,
++ 0x6c616821,
++ 0x6e6f615f,
++ 0x656d6974,
++ 0x69745f72,
++ 0x705f656d,
++ 0x28747361,
++ 0x656d6974,
++ 0x743e2d72,
++ 0x20656d69,
++ 0x3035202b,
++ 0x00293030,
++ 0x616d786e,
++ 0x69745f63,
++ 0x6f5f656d,
++ 0x69615f6e,
++ 0x61765f72,
++ 0x5f64696c,
++ 0x66746567,
++ 0x21202928,
++ 0x0030203d,
++ 0x6d697464,
++ 0x2c64253a,
++ 0x252c6425,
++ 0x64252c64,
++ 0x00000a0d,
++ 0x6f76282a,
++ 0x6974616c,
++ 0x7520656c,
++ 0x38746e69,
++ 0x2a20745f,
++ 0x5f672629,
++ 0x5f6e6f61,
++ 0x72616873,
++ 0x642e6465,
++ 0x5f6d6974,
++ 0x5f746e63,
++ 0x736e6f61,
++ 0x65726168,
++ 0x203c2064,
++ 0x6f76282a,
++ 0x6974616c,
++ 0x7520656c,
++ 0x38746e69,
++ 0x2a20745f,
++ 0x5f672629,
++ 0x5f6e6f61,
++ 0x72616873,
++ 0x642e6465,
++ 0x5f6d6974,
++ 0x69726570,
++ 0x615f646f,
++ 0x68736e6f,
++ 0x64657261,
++ 0x00000000,
++ 0x00002c4c,
++ 0x654b849b,
++ 0x78646979,
++ 0x766e6920,
++ 0x64696c61,
++ 0x3230252c,
++ 0x00000a58,
++ 0x6e49849b,
++ 0x696c6176,
++ 0x656b2064,
++ 0x78646979,
++ 0x0000000a,
++ 0x6e49849b,
++ 0x696c6176,
++ 0x54532064,
++ 0x64253a41,
++ 0x0000000a,
++ 0x5f666976,
++ 0x20617473,
++ 0x76203d3d,
++ 0x00006669,
++ 0x64253d54,
++ 0x0d64252c,
++ 0x0000000a,
++ 0x3a4e4342,
++ 0x0a0d6425,
++ 0x00000000,
++ 0x206e6362,
++ 0x656e6f64,
++ 0x6d697420,
++ 0x74756f65,
++ 0x00000a0d,
++ 0x20736366,
++ 0x0a0d6b6f,
++ 0x00000000,
++ 0x20736366,
++ 0x20746f6e,
++ 0x0a0d6b6f,
++ 0x00000000,
++ 0x656c6469,
++ 0x72726520,
++ 0x00000a0d,
++ 0x656c6469,
++ 0x746e6920,
++ 0x72726520,
++ 0x00000a0d,
++ 0x2c642564,
++ 0x00000000,
++ 0x0a0d6564,
++ 0x00000000,
++ 0x6c616821,
++ 0x63616d5f,
++ 0x745f7768,
++ 0x5f656d69,
++ 0x74736170,
++ 0x6d697428,
++ 0x3e2d7265,
++ 0x656d6974,
++ 0x67202d20,
++ 0x6669775f,
++ 0x65735f69,
++ 0x6e697474,
++ 0x702e7367,
++ 0x6f5f7277,
++ 0x5f6e6570,
++ 0x64737973,
++ 0x79616c65,
++ 0x00000029,
++ 0x78253d74,
++ 0x00000a0d,
++ 0x20746f6e,
++ 0x74736170,
++ 0x6425203a,
++ 0x0d64252c,
++ 0x0000000a,
++ 0x73736170,
++ 0x6425203a,
++ 0x0d64252c,
++ 0x0000000a,
++ 0x3a706c73,
++ 0x252c7825,
++ 0x000a0d78,
++ 0x655f656b,
++ 0x675f7476,
++ 0x29287465,
++ 0x65202620,
++ 0x625f7476,
++ 0x00007469,
++ 0x65647874,
++ 0x665f6373,
++ 0x74737269,
++ 0x203d2120,
++ 0x4c4c554e,
++ 0x00000000,
++ 0x73212121,
++ 0x20646e65,
++ 0x206d6663,
++ 0x78253a31,
++ 0x0d78252c,
++ 0x0000000a,
++ 0x0d677562,
++ 0x0000000a,
++ 0x6f696473,
++ 0x69617420,
++ 0x7265206c,
++ 0x0d726f72,
++ 0x0000000a,
++ 0x3a727265,
++ 0x206f6e20,
++ 0x2067736d,
++ 0x21746b70,
++ 0x00000a0d,
++ 0x21727265,
++ 0x74202121,
++ 0x63206c78,
++ 0x6e206d66,
++ 0x7562206f,
++ 0x72656666,
++ 0x726f6620,
++ 0x62737520,
++ 0x00000a0d,
++ 0x4244819d,
++ 0x57203a47,
++ 0x69746972,
++ 0x6d20676e,
++ 0x726f6d65,
++ 0x69772079,
++ 0x30206874,
++ 0x38302578,
++ 0x202f2078,
++ 0x203a6425,
++ 0x2578305b,
++ 0x5d783830,
++ 0x30203d20,
++ 0x38302578,
++ 0x202f2078,
++ 0x000a6425,
++ 0x6b73616d,
++ 0x69727720,
++ 0x253a6574,
++ 0x78252c78,
++ 0x2c78252c,
++ 0x0a0d7825,
++ 0x00000000,
++ 0x4244819d,
++ 0x57203a47,
++ 0x69746972,
++ 0x6d20676e,
++ 0x726f6d65,
++ 0x69772079,
++ 0x6d206874,
++ 0x3a6b7361,
++ 0x30257830,
++ 0x202c7838,
++ 0x61746164,
++ 0x2578303a,
++ 0x20783830,
++ 0x6425202f,
++ 0x305b203a,
++ 0x38302578,
++ 0x3d205d78,
++ 0x25783020,
++ 0x20783830,
++ 0x6425202f,
++ 0x0000000a,
++ 0x5f6c6168,
++ 0x6863616d,
++ 0x78725f77,
++ 0x6e63625f,
++ 0x7275645f,
++ 0x6f697461,
++ 0x0000006e,
++ 0x5f6c6168,
++ 0x6863616d,
++ 0x6c735f77,
++ 0x5f706565,
++ 0x63656863,
++ 0x61705f6b,
++ 0x00686374,
++ 0x745f6d6d,
++ 0x5f747462,
++ 0x706d6f63,
++ 0x5f657475,
++ 0x63746170,
++ 0x00000068,
++ 0x735f6d6d,
++ 0x7065656c,
++ 0x6f666e69,
++ 0x5f78725f,
++ 0x5f747665,
++ 0x63746170,
++ 0x00000068,
++ 0x786e7772,
++ 0x656c735f,
++ 0x635f7065,
++ 0x61676b6c,
++ 0x635f6574,
++ 0x69666e6f,
++ 0x61705f67,
++ 0x00686374,
++ 0x786e7772,
++ 0x656c735f,
++ 0x645f7065,
++ 0x73706565,
++ 0x7065656c,
++ 0x6e6f635f,
++ 0x5f676966,
++ 0x63746170,
++ 0x00000068,
++ 0x5f6c7874,
++ 0x5f6d6663,
++ 0x5f747665,
++ 0x63746170,
++ 0x00000068,
++
++};
++
++struct aic_feature_t {
++ int hwinfo;
++ int fwlog_en;
++};
++
++#define CHIP_REV_U02 0x3
++#define CHIP_REV_U03 0x7
++#define CHIP_SUB_REV_U04 0x20
++u8 chip_id = 0;
++u8 chip_sub_id = 0; // rom_id for 8800dc
++u8 chip_mcu_id = 0;
++
++#ifdef CONFIG_PMIC_SETTING
++u32 syscfg_tbl_pmic_u02[][2] = {
++ {0x40040000, 0x00001AC8}, // 1) fix panic
++ {0x40040084, 0x00011580},
++ {0x40040080, 0x00000001},
++ {0x40100058, 0x00000000},
++};
++#endif /* CONFIG_PMIC_SETTING */
++
++u32 syscfg_tbl_u04[][2] = {
++ {0x40040000, 0x0000042C}, // protect usb replenish rxq / flush rxq, skip flush rxq before start_app
++ {0x40040004, 0x0000DD44},
++ {0x40040008, 0x00000448},
++ {0x4004000C, 0x0000044C},
++ {0x0019B800, 0xB9F0F19B},
++ {0x0019B804, 0x0019B81D},
++ {0x0019B808, 0xBF00FA79},
++ {0x0019B80C, 0xF007BF00},
++ {0x0019B810, 0x4605B672}, // code
++ {0x0019B814, 0x21E0F04F},
++ {0x0019B818, 0xBE0BF664},
++ {0x0019B81C, 0xF665B510},
++ {0x0019B820, 0x4804FC9D},
++ {0x0019B824, 0xFA9EF66C},
++ {0x0019B828, 0xFCA8F665},
++ {0x0019B82C, 0x4010E8BD},
++ {0x0019B830, 0xBAC6F66C},
++ {0x0019B834, 0x0019A0C4},
++ {0x40040084, 0x0019B800}, // out base
++ {0x40040080, 0x0000000F},
++ {0x40100058, 0x00000000},
++};
++
++u32 syscfg_tbl[][2] = {
++ {0x40500014, 0x00000101}, // 1)
++ {0x40500018, 0x0000010D}, // 2)
++ {0x40500004, 0x00000010}, // 3) the order should not be changed
++ #ifdef CONFIG_PMIC_SETTING
++ {0x50000000, 0x03220204}, // 2) pmic interface init
++ {0x50019150, 0x00000002}, // 3) for 26m xtal, set div1
++ {0x50017008, 0x00000000}, // 4) stop wdg
++ #endif /* CONFIG_PMIC_SETTING */
++};
++
++u32 syscfg_tbl_masked[][3] = {
++ {0x40506024, 0x000000FF, 0x000000DF}, // for clk gate lp_level
++ #ifdef CONFIG_PMIC_SETTING
++ //{0x50017008, 0x00000002, 0x00000000}, // stop wdg
++ #endif /* CONFIG_PMIC_SETTING */
++};
++
++
++u32 rf_tbl_masked[][3] = {
++ {0x40344058, 0x00800000, 0x00000000},// pll trx
++};
++
++u32 wdt_tbl_masked[][3] = {
++ {0x4010300c, 0x00000001, 0x00000001},
++};
++
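++/*
++ * Read the chip revision (chip_id from address 0x40500000, chip_sub_id from
++ * address 0x00000004) and apply the matching register/RAM write tables for
++ * the AIC8801.
++ */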
++static void system_config_8800(struct rwnx_hw *rwnx_hw){
++ int syscfg_num;
++ int ret, cnt;
++ const u32 mem_addr = 0x40500000;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x rd fail: %d\n", mem_addr, ret);
++ return;
++ }
++ chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16);
++ //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, 0x00000004, &rd_mem_addr_cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "[0x00000004] rd fail: %d\n", ret);
++ return;
++ }
++ chip_sub_id = (u8)(rd_mem_addr_cfm.memdata >> 4);
++ //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ AICWFDBG(LOGINFO, "chip_id=%x, chip_sub_id=%x\n", chip_id, chip_sub_id);
++
++
++#ifdef CONFIG_PMIC_SETTING
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ if (chip_id == CHIP_REV_U02) {
++ syscfg_num = sizeof(syscfg_tbl_pmic_u02) / sizeof(u32) / 2;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl_pmic_u02[cnt][0], syscfg_tbl_pmic_u02[cnt][1]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl_pmic_u02[cnt][0], ret);
++ return;
++ }
++ }
++ }
++ }
++#endif
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ if ((chip_id == CHIP_REV_U03) && (chip_sub_id == CHIP_SUB_REV_U04)) {
++ syscfg_num = sizeof(syscfg_tbl_u04) / sizeof(u32) / 2;
++ AICWFDBG(LOGINFO, "cfg u04\n");
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl_u04[cnt][0], syscfg_tbl_u04[cnt][1]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl_u04[cnt][0], ret);
++ return;
++ }
++ }
++ }
++ }
++
++ syscfg_num = sizeof(syscfg_tbl) / sizeof(u32) / 2;
++
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(rwnx_hw, syscfg_tbl[cnt][0], syscfg_tbl[cnt][1]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x write fail: %d\n", syscfg_tbl[cnt][0], ret);
++ return;
++ }
++ }
++ syscfg_num = sizeof(syscfg_tbl_masked) / sizeof(u32) / 3;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ if (syscfg_tbl_masked[cnt][0] == 0x00000000) {
++ break;
++ }
++
++ ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw,
++ syscfg_tbl_masked[cnt][0], syscfg_tbl_masked[cnt][1], syscfg_tbl_masked[cnt][2]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x mask write fail: %d\n", syscfg_tbl_masked[cnt][0], ret);
++ return;
++ }
++ }
++
++
++}
++
++
++static void system_config(struct rwnx_hw *rwnx_hw)
++{
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ system_config_8800(rwnx_hw);
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){
++ system_config_8800dc(rwnx_hw);
++ }
++}
++
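++/* Configure the chip watchdog with a masked write of wdt_tbl_masked[0]. */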
++static int wdt_config(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++ ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw,
++ wdt_tbl_masked[0][0], wdt_tbl_masked[0][1], wdt_tbl_masked[0][2]);
++ if (ret) {
++ printk("wdt config %x write fail: %d\n", wdt_tbl_masked[0][0], ret);
++ }
++ return ret;
++}
++#if 0
++static void rf_config(struct rwnx_hw *rwnx_hw)
++{
++ int ret;
++ ret = rwnx_send_dbg_mem_mask_write_req(rwnx_hw,
++ rf_tbl_masked[0][0], rf_tbl_masked[0][1], rf_tbl_masked[0][2]);
++ if (ret) {
++ printk("rf config %x write fail: %d\n", rf_tbl_masked[0][0], ret);
++ }
++}
++#endif
++#if 0
++static int start_from_bootrom(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++
++ /* memory access */
++#ifdef CONFIG_ROM_PATCH_EN
++ const u32 rd_addr = ROM_FMAC_FW_ADDR;
++ const u32 fw_addr = ROM_FMAC_FW_ADDR;
++#else
++ const u32 rd_addr = RAM_FMAC_FW_ADDR;
++ const u32 fw_addr = RAM_FMAC_FW_ADDR;
++#endif
++ struct dbg_mem_read_cfm rd_cfm;
++ printk("Read FW mem: %08x\n", rd_addr);
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, rd_addr, &rd_cfm))) {
++ return -1;
++ }
++ printk("cfm: [%08x] = %08x\n", rd_cfm.memaddr, rd_cfm.memdata);
++
++ /* fw start */
++ printk("Start app: %08x\n", fw_addr);
++ if ((ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, HOST_START_APP_AUTO))) {
++ return -1;
++ }
++ return 0;
++}
++#endif
++/**
++ *
++ */
++
++
++#ifdef CONFIG_GPIO_WAKEUP
++static const struct wiphy_wowlan_support aic_wowlan_support = {
++ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_MAGIC_PKT,
++};
++#endif
++
++extern int get_hardware_info(void);
++
++#ifdef AICWF_USB_SUPPORT
++u32 usbcfg_tbl[][2] = {
++ {0x40200028, 0x0021047e},
++ {0x40200024, 0x0000011d},
++};
++
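++/*
++ * USB controller tuning: if the value read back from 0x40200024 has 0x119
++ * in its low 16 bits, write the usbcfg_tbl register pairs.
++ */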
++static void aicwf_usb_config(struct rwnx_hw *rwnx_hw)
++{
++ int usbcfg_num = 0;
++ int ret = 0, cnt = 0;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++ const u32 mem_addr = 0x40200024;
++
++ ret = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x rd fail: %d\n", mem_addr, ret);
++ return;
++ }
++ AICWFDBG(LOGINFO, "usb config read %x\n", rd_mem_addr_cfm.memdata);
++ if ((rd_mem_addr_cfm.memdata & 0xffff) == 0x119) {
++ cnt = 0;
++ usbcfg_num = sizeof(usbcfg_tbl) / sizeof(u32) / 2;
++ for (cnt = 0; cnt < usbcfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(rwnx_hw, usbcfg_tbl[cnt][0], usbcfg_tbl[cnt][1]);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%x write fail: %d\n", usbcfg_tbl[cnt][0], ret);
++ return;
++ }
++ }
++ }
++}
++#endif // (AICWF_USB_SUPPORT)
++
++static int start_from_bootrom(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++
++ /* memory access */
++#ifdef CONFIG_ROM_PATCH_EN
++ const u32 rd_addr = RAM_LMAC_FW_ADDR;
++ const u32 fw_addr = RAM_LMAC_FW_ADDR;
++#else
++ const u32 rd_addr = RAM_FMAC_FW_ADDR;
++ const u32 fw_addr = RAM_FMAC_FW_ADDR;
++#endif
++ u32 boot_type;
++ struct dbg_mem_read_cfm rd_cfm;
++ AICWFDBG(LOGINFO, "Read FW mem: %08x\n", rd_addr);
++ if ((ret = rwnx_send_dbg_mem_read_req(rwnx_hw, rd_addr, &rd_cfm))) {
++ return -1;
++ }
++ AICWFDBG(LOGINFO, "cfm: [%08x] = %08x\n", rd_cfm.memaddr, rd_cfm.memdata);
++
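++ /* Select the boot type from the testmode module parameter: normal mode
++  * (testmode == 0) requests HOST_START_APP_DUMMY, test mode requests
++  * HOST_START_APP_AUTO. */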
++ if (testmode == 0) {
++ boot_type = HOST_START_APP_DUMMY;
++ } else {
++ boot_type = HOST_START_APP_AUTO;
++ }
++ /* fw start */
++ AICWFDBG(LOGINFO, "Start app: %08x, %d\n", fw_addr, boot_type);
++ if ((ret = rwnx_send_dbg_start_app_req(rwnx_hw, fw_addr, boot_type))) {
++ return -1;
++ }
++ return 0;
++}
++
++
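++/* Per-chip system init: register configuration for AIC8801; USB config,
++ * register configuration, platform power-on and (optionally) a bootrom start
++ * for AIC8800DC/DW; userconfig load for AIC8800D81. */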
++int rwnx_ic_system_init(struct rwnx_hw *rwnx_hw){
++
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ system_config(rwnx_hw);
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){
++#ifdef AICWF_USB_SUPPORT
++ aicwf_usb_config(rwnx_hw);
++#endif
++ system_config(rwnx_hw);
++ if (rwnx_platform_on(rwnx_hw, NULL))
++ return -1;
++
++#if defined(CONFIG_START_FROM_BOOTROM)
++ if (chip_sub_id < 2) {
++ if (wdt_config(rwnx_hw)) {
++ return -1;
++ }
++ }
++ if (start_from_bootrom(rwnx_hw))
++ return -1;
++#endif
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){
++ rwnx_plat_userconfig_load_8800d80(rwnx_hw);
++ }
++
++ return 0;
++}
++
++
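++/* Per-chip RF init: TX power index/offset and (in normal mode) RF calibration
++ * for AIC8801; dedicated RF configuration helpers for AIC8800DC/DW and
++ * AIC8800D81. */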
++int rwnx_ic_rf_init(struct rwnx_hw *rwnx_hw){
++ struct mm_set_rf_calib_cfm cfm;
++ int ret = 0;
++#ifdef CONFIG_5M10M
++ uint32_t hwconfig_id = 4;
++ int32_t param[1];
++ param[0] = BWMODE10M;
++#endif
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ if ((ret = rwnx_send_txpwr_idx_req(rwnx_hw))) {
++ return -1;
++ }
++
++ if ((ret = rwnx_send_txpwr_ofst_req(rwnx_hw))) {
++ return -1;
++ }
++
++ if (testmode == 0) {
++ if ((ret = rwnx_send_rf_calib_req(rwnx_hw, &cfm)))
++ return -1;
++ }
++
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){
++ if ((ret = aicwf_set_rf_config_8800dc(rwnx_hw, &cfm)))
++ return -1;
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){
++ if ((ret = aicwf_set_rf_config_8800d80(rwnx_hw, &cfm)))
++ return -1;
++ }
++#ifdef CONFIG_5M10M
++ rwnx_send_vendor_hwconfig_req(rwnx_hw, hwconfig_id, param);
++#endif
++ return 0;
++}
++
++extern void *aicwf_prealloc_txq_alloc(size_t size);
++extern int aicwf_vendor_init(struct wiphy *wiphy);
++int rwnx_cfg80211_init(struct rwnx_plat *rwnx_plat, void **platform_data)
++{
++ struct rwnx_hw *rwnx_hw;
++ struct rwnx_conf_file init_conf;
++ int ret = 0;
++ struct wiphy *wiphy;
++ struct rwnx_vif *vif;
++ int i;
++ u8 dflt_mac[ETH_ALEN] = { 0x88, 0x00, 0x33, 0x77, 0x10, 0x99};
++ struct mm_get_fw_version_cfm fw_version;
++ u8_l mac_addr_efuse[ETH_ALEN];
++#ifndef USE_5G
++ struct aic_feature_t feature;
++#endif
++ struct mm_set_stack_start_cfm set_start_cfm;
++
++ int nx_remote_sta_max = NX_REMOTE_STA_MAX;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++
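++/* Older ICs (AIC8801, or AIC8800DC/DW with chip_id < 3) support fewer remote
++ * stations, so use the reduced limit for them. */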
++if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_remote_sta_max = NX_REMOTE_STA_MAX_FOR_OLD_IC;
++}
++
++
++//#ifndef CONFIG_RFTEST
++#ifdef CONFIG_MAC_RANDOM_IF_NO_MAC_IN_EFUSE
++ get_random_bytes(&dflt_mac[4], 2);
++#endif
++//#endif
++ /* create a new wiphy for use with cfg80211 */
++ AICWFDBG(LOGINFO, "%s sizeof(struct rwnx_hw):%d \r\n", __func__, (int)sizeof(struct rwnx_hw));
++ wiphy = wiphy_new(&rwnx_cfg80211_ops, sizeof(struct rwnx_hw));
++ //dev_set_name(&wiphy->dev,"aicphy%d",0);
++
++ if (!wiphy) {
++ dev_err(rwnx_platform_get_dev(rwnx_plat), "Failed to create new wiphy\n");
++ ret = -ENOMEM;
++ goto err_out;
++ }
++
++ rwnx_hw = wiphy_priv(wiphy);
++ rwnx_hw->wiphy = wiphy;
++ rwnx_hw->plat = rwnx_plat;
++ rwnx_hw->dev = rwnx_platform_get_dev(rwnx_plat);
++#ifdef AICWF_SDIO_SUPPORT
++ rwnx_hw->sdiodev = rwnx_plat->sdiodev;
++ rwnx_plat->sdiodev->rwnx_hw = rwnx_hw;
++ rwnx_hw->cmd_mgr = &rwnx_plat->sdiodev->cmd_mgr;
++#else
++ rwnx_hw->usbdev = rwnx_plat->usbdev;
++ rwnx_plat->usbdev->rwnx_hw = rwnx_hw;
++ rwnx_hw->cmd_mgr = &rwnx_plat->usbdev->cmd_mgr;
++#endif
++ rwnx_hw->mod_params = &rwnx_mod_params;
++ rwnx_hw->tcp_pacing_shift = 7;
++
++#ifdef CONFIG_SCHED_SCAN
++ rwnx_hw->is_sched_scan = false;
++#endif//CONFIG_SCHED_SCAN
++
++ rwnx_init_aic(rwnx_hw);
++ /* set device pointer for wiphy */
++ set_wiphy_dev(wiphy, rwnx_hw->dev);
++
++ /* Create cache to allocate sw_txhdr */
++ rwnx_hw->sw_txhdr_cache = KMEM_CACHE(rwnx_sw_txhdr, 0);
++ if (!rwnx_hw->sw_txhdr_cache) {
++ wiphy_err(wiphy, "Cannot allocate cache for sw TX header\n");
++ ret = -ENOMEM;
++ goto err_cache;
++ }
++
++#if 0
++ if ((ret = rwnx_parse_configfile(rwnx_hw, RWNX_CONFIG_FW_NAME, &init_conf))) {
++ wiphy_err(wiphy, "rwnx_parse_configfile failed\n");
++ goto err_config;
++ }
++#else
++ memcpy(init_conf.mac_addr, dflt_mac, ETH_ALEN);
++#endif
++
++ rwnx_hw->vif_started = 0;
++ rwnx_hw->monitor_vif = RWNX_INVALID_VIF;
++ rwnx_hw->adding_sta = false;
++
++ rwnx_hw->scan_ie.addr = NULL;
++
++ for (i = 0; i < NX_VIRT_DEV_MAX + nx_remote_sta_max; i++){
++ rwnx_hw->avail_idx_map |= BIT(i);
++ }
++
++ rwnx_hwq_init(rwnx_hw);
++
++#ifdef CONFIG_PREALLOC_TXQ
++ rwnx_hw->txq = (struct rwnx_txq*)aicwf_prealloc_txq_alloc(sizeof(struct rwnx_txq)*NX_NB_TXQ);
++#endif
++
++ for (i = 0; i < NX_NB_TXQ; i++) {
++ rwnx_hw->txq[i].idx = TXQ_INACTIVE;
++ }
++
++ rwnx_mu_group_init(rwnx_hw);
++
++ /* Initialize RoC element pointer to NULL, indicate that RoC can be started */
++ rwnx_hw->roc_elem = NULL;
++ /* Cookie can not be 0 */
++ rwnx_hw->roc_cookie_cnt = 1;
++
++ INIT_LIST_HEAD(&rwnx_hw->vifs);
++ mutex_init(&rwnx_hw->mutex);
++ mutex_init(&rwnx_hw->dbgdump_elem.mutex);
++ spin_lock_init(&rwnx_hw->tx_lock);
++ spin_lock_init(&rwnx_hw->cb_lock);
++
++ INIT_WORK(&rwnx_hw->apmStalossWork, apm_staloss_work_process);
++ rwnx_hw->apmStaloss_wq = create_singlethread_workqueue("apmStaloss_wq");
++ if (!rwnx_hw->apmStaloss_wq) {
++ txrx_err("insufficient memory to create apmStaloss workqueue.\n");
++ ret = -ENOMEM;
++ goto err_cache;
++ }
++
++ wiphy->mgmt_stypes = rwnx_default_mgmt_stypes;
++
++#ifdef CONFIG_FWLOG_EN
++ rwnx_hw->fwlog_en = true;
++#else
++ rwnx_hw->fwlog_en = false;
++#endif
++ //init ic system
++ if((ret = rwnx_ic_system_init(rwnx_hw))){
++ goto err_lmac_reqs;
++ }
++
++#ifdef USE_5G
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){
++ ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, 0, 0, rwnx_hw->fwlog_en, &set_start_cfm);
++ } else {
++ ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, 0, CO_BIT(5), rwnx_hw->fwlog_en, &set_start_cfm);
++ }
++#else
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){
++ ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, 0, CO_BIT(5), rwnx_hw->fwlog_en, &set_start_cfm);
++ } else {
++ ret = rwnx_send_set_stack_start_req(rwnx_hw, 1, get_hardware_info(), feature.hwinfo, rwnx_hw->fwlog_en, &set_start_cfm);
++ }
++#endif
++
++ if (ret){
++ goto err_lmac_reqs;
++ }
++
++ AICWFDBG(LOGINFO, "is 5g support = %d, vendor_info = 0x%02X\n", set_start_cfm.is_5g_support, set_start_cfm.vendor_info);
++ rwnx_hw->band_5g_support = set_start_cfm.is_5g_support;
++
++ ret = rwnx_send_get_fw_version_req(rwnx_hw, &fw_version);
++ memcpy(wiphy->fw_version, fw_version.fw_version, fw_version.fw_version_len > 32 ? 32 : fw_version.fw_version_len);
++ AICWFDBG(LOGINFO, "Firmware Version: %s\r\n", fw_version.fw_version);
++
++ wiphy->bands[NL80211_BAND_2GHZ] = &rwnx_band_2GHz;
++//#ifdef USE_5G
++ if(rwnx_hw->band_5g_support){
++ wiphy->bands[NL80211_BAND_5GHZ] = &rwnx_band_5GHz;
++ }
++//#endif
++ wiphy->interface_modes =
++ BIT(NL80211_IFTYPE_STATION) |
++ BIT(NL80211_IFTYPE_AP) |
++ BIT(NL80211_IFTYPE_AP_VLAN) |
++ BIT(NL80211_IFTYPE_P2P_CLIENT) |
++ BIT(NL80211_IFTYPE_P2P_GO) |
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
++ #ifndef CONFIG_USE_P2P0
++ BIT(NL80211_IFTYPE_P2P_DEVICE) |
++ #endif
++ #endif
++ BIT(NL80211_IFTYPE_MONITOR);
++
++#ifdef CONFIG_GPIO_WAKEUP
++ /* Set WoWLAN flags */
++ printk("%s Wowlan support\r\n", __func__);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
++ wiphy->wowlan = &aic_wowlan_support;
++#else
++ wiphy->wowlan.flags = aic_wowlan_support.flags;
++#endif
++#endif
++
++ wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
++ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
++ #endif
++ WIPHY_FLAG_4ADDR_STATION |
++ WIPHY_FLAG_4ADDR_AP;
++
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
++ wiphy->max_num_csa_counters = BCN_MAX_CSA_CPT;
++ #endif
++
++ wiphy->max_remain_on_channel_duration = rwnx_hw->mod_params->roc_dur_max;
++
++ wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN |
++ NL80211_FEATURE_SK_TX_STATUS |
++ NL80211_FEATURE_VIF_TXPOWER |
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)
++ NL80211_FEATURE_ACTIVE_MONITOR |
++ #endif
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
++ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
++ #endif
++ 0;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL)
++ wiphy->features |= NL80211_FEATURE_SAE;
++#endif
++
++ if (rwnx_mod_params.tdls)
++ /* TDLS support */
++ wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
++
++ wiphy->iface_combinations = rwnx_combinations;
++ /* -1 not to include combination with radar detection, will be re-added in
++ rwnx_handle_dynparams if supported */
++ wiphy->n_iface_combinations = ARRAY_SIZE(rwnx_combinations) - 1;
++ wiphy->reg_notifier = rwnx_reg_notifier;
++
++ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
++
++ rwnx_enable_wapi(rwnx_hw);
++
++ wiphy->cipher_suites = cipher_suites;
++ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites) - NB_RESERVED_CIPHER;
++
++ rwnx_hw->ext_capa[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING;
++ rwnx_hw->ext_capa[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ wiphy->extended_capabilities = rwnx_hw->ext_capa;
++ wiphy->extended_capabilities_mask = rwnx_hw->ext_capa;
++ wiphy->extended_capabilities_len = ARRAY_SIZE(rwnx_hw->ext_capa);
++#endif
++#ifdef CONFIG_SCHED_SCAN
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
++ wiphy->max_sched_scan_reqs = 1;
++#endif
++ wiphy->max_sched_scan_ssids = SCAN_SSID_MAX;//16;
++ wiphy->max_match_sets = SCAN_SSID_MAX;//16;
++ wiphy->max_sched_scan_ie_len = 2048;
++#endif//CONFIG_SCHED_SCAN
++
++ tasklet_init(&rwnx_hw->task, rwnx_task, (unsigned long)rwnx_hw);
++
++ //init ic rf
++ if((ret = rwnx_ic_rf_init(rwnx_hw))){
++ goto err_lmac_reqs;
++ }
++
++ if ((ret = rwnx_send_get_macaddr_req(rwnx_hw, (struct mm_get_mac_addr_cfm *)mac_addr_efuse)))
++ goto err_lmac_reqs;
++ if (mac_addr_efuse[0] | mac_addr_efuse[1] | mac_addr_efuse[2] | mac_addr_efuse[3])
++ {
++ memcpy(init_conf.mac_addr, mac_addr_efuse, ETH_ALEN);
++ }else{
++ AICWFDBG(LOGERROR, "no mac address in efuse!");
++ }
++
++ AICWFDBG(LOGINFO, "get macaddr:%x,%x\r\n", mac_addr_efuse[0], mac_addr_efuse[5]);
++
++
++ memcpy(wiphy->perm_addr, init_conf.mac_addr, ETH_ALEN);
++
++ /* Reset FW */
++ if ((ret = rwnx_send_reset(rwnx_hw)))
++ goto err_lmac_reqs;
++
++ if ((ret = rwnx_send_version_req(rwnx_hw, &rwnx_hw->version_cfm)))
++ goto err_lmac_reqs;
++ rwnx_set_vers(rwnx_hw);
++
++ if ((ret = rwnx_handle_dynparams(rwnx_hw, rwnx_hw->wiphy)))
++ goto err_lmac_reqs;
++
++ rwnx_enable_mesh(rwnx_hw);
++ rwnx_radar_detection_init(&rwnx_hw->radar);
++
++ /* Set parameters to firmware */
++
++ if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 ||
++ ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81) && testmode == 0)) {
++ rwnx_send_me_config_req(rwnx_hw);
++ }
++
++ /* Only monitor mode supported when custom channels are enabled */
++ if (rwnx_mod_params.custchan) {
++ rwnx_limits[0].types = BIT(NL80211_IFTYPE_MONITOR);
++ rwnx_limits_dfs[0].types = BIT(NL80211_IFTYPE_MONITOR);
++ }
++
++ aicwf_vendor_init(wiphy);
++
++ if ((ret = wiphy_register(wiphy))) {
++ wiphy_err(wiphy, "Could not register wiphy device\n");
++ goto err_register_wiphy;
++ }
++
++ /* Update regulatory (if needed) and set channel parameters to firmware
++ (must be done after WiPHY registration) */
++ rwnx_custregd(rwnx_hw, wiphy);
++ if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801 ||
++ ((rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81) && testmode == 0)) {
++ rwnx_send_me_chan_config_req(rwnx_hw);
++ #ifdef CONFIG_COEX
++ rwnx_send_coex_req(rwnx_hw, 0, 1);
++ #endif
++ }
++ *platform_data = rwnx_hw;
++
++#ifdef CONFIG_DEBUG_FS
++ if ((ret = rwnx_dbgfs_register(rwnx_hw, "rwnx"))) {
++ wiphy_err(wiphy, "Failed to register debugfs entries");
++ goto err_debugfs;
++ }
++#endif
++ rtnl_lock();
++
++ /* Add an initial station interface */
++ vif = rwnx_interface_add(rwnx_hw, "wlan%d", NET_NAME_UNKNOWN,
++ NL80211_IFTYPE_STATION, NULL);
++
++
++ #ifdef CONFIG_RWNX_MON_DATA
++ /* Add an initial monitor interface */
++ vif = rwnx_interface_add(rwnx_hw, "wlan%d", 1,
++ NL80211_IFTYPE_MONITOR, NULL);
++ #endif
++
++ rtnl_unlock();
++
++ if (!vif) {
++ wiphy_err(wiphy, "Failed to instantiate a network device\n");
++ ret = -ENOMEM;
++ goto err_add_interface;
++ }
++
++#if 0
++ wiphy_info(wiphy, "New interface create %s", vif->ndev->name);
++#endif
++
++ AICWFDBG(LOGINFO, "New interface create %s\r\n", vif->ndev->name);
++
++#ifdef CONFIG_USE_P2P0
++
++ rtnl_lock();
++ /* Add an initial p2p0 interface */
++ vif = rwnx_interface_add(rwnx_hw, "p2p%d", NET_NAME_UNKNOWN,
++ NL80211_IFTYPE_STATION, NULL);
++ vif->is_p2p_vif = 1;
++ rtnl_unlock();
++
++ if (!vif) {
++ wiphy_err(wiphy, "Failed to instantiate a network device\n");
++ ret = -ENOMEM;
++ goto err_add_interface;
++ }
++
++ //wiphy_info(wiphy, "New interface create %s", vif->ndev->name);
++ AICWFDBG(LOGINFO, "New interface create %s \r\n", vif->ndev->name);
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
++ init_timer(&rwnx_hw->p2p_alive_timer);
++ rwnx_hw->p2p_alive_timer.data = (unsigned long)vif;
++ rwnx_hw->p2p_alive_timer.function = aicwf_p2p_alive_timeout;
++#else
++ timer_setup(&rwnx_hw->p2p_alive_timer, aicwf_p2p_alive_timeout, 0);
++#endif
++ rwnx_hw->is_p2p_alive = 0;
++ rwnx_hw->is_p2p_connected = 0;
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++#endif
++
++ return 0;
++
++err_add_interface:
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister(rwnx_hw);
++err_debugfs:
++#endif
++if(rwnx_hw->wiphy){
++ wiphy_unregister(rwnx_hw->wiphy);
++}
++err_register_wiphy:
++err_lmac_reqs:
++ AICWFDBG(LOGERROR, "err_lmac_reqs\n");
++ //rwnx_fw_trace_dump(rwnx_hw);
++ rwnx_platform_off(rwnx_hw, NULL);
++//err_platon:
++//err_config:
++ kmem_cache_destroy(rwnx_hw->sw_txhdr_cache);
++err_cache:
++ wiphy_free(wiphy);
++err_out:
++ return ret;
++}
++
++/**
++ *
++ */
++
++void rwnx_cfg80211_deinit(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_set_stack_start_cfm set_start_cfm;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ rwnx_send_set_stack_start_req(rwnx_hw, 0, 0, 0, 0, &set_start_cfm);
++
++ rwnx_hw->fwlog_en = 0;
++
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister(rwnx_hw);
++#endif
++ flush_workqueue(rwnx_hw->apmStaloss_wq);
++ destroy_workqueue(rwnx_hw->apmStaloss_wq);
++
++ rwnx_wdev_unregister(rwnx_hw);
++ if(rwnx_hw->wiphy){
++ AICWFDBG(LOGINFO, "%s wiphy_unregister \r\n", __func__);
++ wiphy_unregister(rwnx_hw->wiphy);
++ }
++ rwnx_radar_detection_deinit(&rwnx_hw->radar);
++ rwnx_platform_off(rwnx_hw, NULL);
++ kmem_cache_destroy(rwnx_hw->sw_txhdr_cache);
++ if(rwnx_hw->wiphy){
++ wiphy_free(rwnx_hw->wiphy);
++ }
++}
++
++static void aicsmac_driver_register(void)
++{
++#ifdef AICWF_SDIO_SUPPORT
++ aicwf_sdio_register();
++#endif
++#ifdef AICWF_USB_SUPPORT
++ aicwf_usb_register();
++#endif
++#ifdef AICWF_PCIE_SUPPORT
++ aicwf_pcie_register();
++#endif
++}
++
++//static DECLARE_WORK(aicsmac_driver_work, aicsmac_driver_register);
++
++struct completion hostif_register_done;
++
++#define REGISTRATION_TIMEOUT 9000
++
++void aicwf_hostif_ready(void)
++{
++ g_rwnx_plat->enabled = true;
++ complete(&hostif_register_done);
++}
++
++static int __init rwnx_mod_init(void)
++{
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ rwnx_print_version();
++ AICWFDBG(LOGINFO, "RELEASE DATE:%s \r\n", RELEASE_DATE);
++ rwnx_init_cmd_array();
++
++ sema_init(&aicwf_deinit_sem, 1);
++ atomic_set(&aicwf_deinit_atomic, 1);
++
++ init_completion(&hostif_register_done);
++
++ aicsmac_driver_register();
++
++#ifdef AICWF_SDIO_SUPPORT
++ if ((wait_for_completion_timeout(&hostif_register_done, msecs_to_jiffies(REGISTRATION_TIMEOUT)) == 0)) {
++ AICWFDBG(LOGERROR, "register_driver timeout or error\n");
++ aicwf_sdio_exit();
++ return -ENODEV;
++}
++
++#endif /* AICWF_SDIO_SUPPORT */
++#ifdef AICWF_USB_SUPPORT
++ //aicwf_usb_exit();
++#endif /*AICWF_USB_SUPPORT */
++
++
++#ifdef AICWF_PCIE_SUPPORT
++ return rwnx_platform_register_drv();
++#else
++ return 0;
++#endif
++}
++
++/**
++ *
++ */
++static void __exit rwnx_mod_exit(void)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#ifdef AICWF_PCIE_SUPPORT
++ rwnx_platform_unregister_drv();
++#endif
++
++#ifdef AICWF_SDIO_SUPPORT
++ aicwf_sdio_exit();
++#endif
++
++#ifdef AICWF_USB_SUPPORT
++ aicwf_usb_exit();
++#endif
++ rwnx_free_cmd_array();
++ AICWFDBG(LOGINFO, "%s exit\r\n", __func__);
++}
++
++module_init(rwnx_mod_init);
++module_exit(rwnx_mod_exit);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
++MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
++#endif
++MODULE_FIRMWARE(RWNX_CONFIG_FW_NAME);
++
++MODULE_DESCRIPTION(RW_DRV_DESCRIPTION);
++MODULE_VERSION(RWNX_VERS_MOD);
++MODULE_AUTHOR(RW_DRV_COPYRIGHT " " RW_DRV_AUTHOR);
++MODULE_LICENSE("GPL");
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_main.h
+@@ -0,0 +1,38 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_main.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_MAIN_H_
++#define _RWNX_MAIN_H_
++
++#include "rwnx_defs.h"
++
++typedef struct _android_wifi_priv_cmd {
++ char *buf;
++ int used_len;
++ int total_len;
++} android_wifi_priv_cmd;
++
++#ifdef CONFIG_COMPAT
++typedef struct _compat_android_wifi_priv_cmd {
++ compat_caddr_t buf;
++ int used_len;
++ int total_len;
++} compat_android_wifi_priv_cmd;
++#endif /* CONFIG_COMPAT */
++
++int rwnx_cfg80211_init(struct rwnx_plat *rwnx_plat, void **platform_data);
++void rwnx_cfg80211_deinit(struct rwnx_hw *rwnx_hw);
++extern int testmode;
++extern u8 chip_id;
++extern u8 chip_sub_id;
++extern u8 chip_mcu_id;
++
++
++#endif /* _RWNX_MAIN_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mesh.c
+@@ -0,0 +1,42 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_mesh.c
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ****************************************************************************************
++ */
++
++/**
++ * INCLUDE FILES
++ ****************************************************************************************
++ */
++
++#include "rwnx_mesh.h"
++
++/**
++ * FUNCTION DEFINITIONS
++ ****************************************************************************************
++ */
++
++struct rwnx_mesh_proxy *rwnx_get_mesh_proxy_info(struct rwnx_vif *p_rwnx_vif, u8 *p_sta_addr, bool local)
++{
++ struct rwnx_mesh_proxy *p_mesh_proxy = NULL;
++ struct rwnx_mesh_proxy *p_cur_proxy;
++
++ /* Look for proxied devices with provided address */
++ list_for_each_entry(p_cur_proxy, &p_rwnx_vif->ap.proxy_list, list) {
++ if (p_cur_proxy->local != local) {
++ continue;
++ }
++
++ if (!memcmp(&p_cur_proxy->ext_sta_addr, p_sta_addr, ETH_ALEN)) {
++ p_mesh_proxy = p_cur_proxy;
++ break;
++ }
++ }
++
++ /* Return the found information */
++ return p_mesh_proxy;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mesh.h
+@@ -0,0 +1,45 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_mesh.h
++ *
++ * @brief Mesh function declarations
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _RWNX_MESH_H_
++#define _RWNX_MESH_H_
++
++/**
++ * INCLUDE FILES
++ ****************************************************************************************
++ */
++
++#include "rwnx_defs.h"
++
++/**
++ * DEFINES
++ ****************************************************************************************
++ */
++
++/**
++ * TYPE DEFINITIONS
++ ****************************************************************************************
++ */
++
++/**
++ * FUNCTION DECLARATIONS
++ ****************************************************************************************
++ */
++
++/**
++ ****************************************************************************************
++ * @brief TODO [LT]
++ ****************************************************************************************
++ */
++struct rwnx_mesh_proxy *rwnx_get_mesh_proxy_info(struct rwnx_vif *p_rwnx_vif, u8 *p_sta_addr, bool local);
++
++#endif /* _RWNX_MESH_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mod_params.c
+@@ -0,0 +1,1755 @@
++/**
++******************************************************************************
++*
++* @file rwnx_mod_params.c
++*
++* @brief Set configuration according to modules parameters
++*
++* Copyright (C) RivieraWaves 2012-2019
++*
++******************************************************************************
++*/
++#include <linux/module.h>
++#include <linux/rtnetlink.h>
++
++#include "rwnx_defs.h"
++#include "rwnx_tx.h"
++#include "hal_desc.h"
++#include "rwnx_cfgfile.h"
++#include "rwnx_dini.h"
++#include "reg_access.h"
++#include "rwnx_compat.h"
++
++#ifdef CONFIG_RWNX_FULLMAC
++#define COMMON_PARAM(name, default_softmac, default_fullmac) \
++ .name = default_fullmac,
++#define SOFTMAC_PARAM(name, default)
++#define FULLMAC_PARAM(name, default) .name = default,
++#endif /* CONFIG_RWNX_FULLMAC */
++
++struct rwnx_mod_params rwnx_mod_params = {
++ /* common parameters */
++ COMMON_PARAM(ht_on, true, true)
++ COMMON_PARAM(vht_on, true, true)
++ COMMON_PARAM(he_on, true, true)
++ COMMON_PARAM(mcs_map, IEEE80211_VHT_MCS_SUPPORT_0_9, IEEE80211_VHT_MCS_SUPPORT_0_9)
++ COMMON_PARAM(he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_11, IEEE80211_HE_MCS_SUPPORT_0_11)
++ COMMON_PARAM(he_ul_on, false, false)
++ COMMON_PARAM(ldpc_on, true, true)
++ COMMON_PARAM(stbc_on, true, true)
++ COMMON_PARAM(gf_rx_on, false, false)
++ COMMON_PARAM(phy_cfg, 2, 2)
++ COMMON_PARAM(uapsd_timeout, 300, 300)
++ COMMON_PARAM(ap_uapsd_on, true, true)
++ COMMON_PARAM(sgi, true, true)
++ COMMON_PARAM(sgi80, true, true)
++ COMMON_PARAM(use_2040, 1, 1)
++ COMMON_PARAM(nss, 1, 1)
++ COMMON_PARAM(amsdu_rx_max, 2, 2)
++ COMMON_PARAM(bfmee, true, true)
++ COMMON_PARAM(bfmer, false, false)
++ COMMON_PARAM(mesh, true, true)
++ COMMON_PARAM(murx, true, true)
++ COMMON_PARAM(mutx, true, true)
++ COMMON_PARAM(mutx_on, true, true)
++ COMMON_PARAM(use_80, true, true)
++ COMMON_PARAM(custregd, true, true)
++ COMMON_PARAM(custchan, false, false)
++ COMMON_PARAM(roc_dur_max, 500, 500)
++ COMMON_PARAM(listen_itv, 0, 0)
++ COMMON_PARAM(listen_bcmc, true, true)
++ COMMON_PARAM(lp_clk_ppm, 20, 20)
++ COMMON_PARAM(ps_on, true, true)
++ COMMON_PARAM(tx_lft, RWNX_TX_LIFETIME_MS, RWNX_TX_LIFETIME_MS)
++ COMMON_PARAM(amsdu_maxnb, NX_TX_PAYLOAD_MAX, NX_TX_PAYLOAD_MAX)
++ // By default, only enable UAPSD for Voice queue (see IEEE80211_DEFAULT_UAPSD_QUEUE comment)
++ COMMON_PARAM(uapsd_queues, IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
++ COMMON_PARAM(tdls, false, false)
++ COMMON_PARAM(uf, false, false)
++ COMMON_PARAM(auto_reply, false, false)
++ COMMON_PARAM(ftl, "", "")
++ COMMON_PARAM(dpsm, false, false)
++
++ /* SOFTMAC only parameters */
++ SOFTMAC_PARAM(mfp_on, false)
++ SOFTMAC_PARAM(gf_on, false)
++ SOFTMAC_PARAM(bwsig_on, true)
++ SOFTMAC_PARAM(dynbw_on, true)
++ SOFTMAC_PARAM(agg_tx, true)
++ SOFTMAC_PARAM(amsdu_force, 2)
++ SOFTMAC_PARAM(rc_probes_on, false)
++ SOFTMAC_PARAM(cmon, true)
++ SOFTMAC_PARAM(hwscan, true)
++ SOFTMAC_PARAM(autobcn, true)
++ SOFTMAC_PARAM(dpsm, true)
++
++ /* FULLMAC only parameters */
++ FULLMAC_PARAM(ant_div, true)
++};
++
++#ifdef CONFIG_RWNX_FULLMAC
++/* FULLMAC specific parameters*/
++module_param_named(ant_div, rwnx_mod_params.ant_div, bool, S_IRUGO);
++MODULE_PARM_DESC(ant_div, "Enable Antenna Diversity (Default: 1)");
++#endif /* CONFIG_RWNX_FULLMAC */
++
++module_param_named(ht_on, rwnx_mod_params.ht_on, bool, S_IRUGO);
++MODULE_PARM_DESC(ht_on, "Enable HT (Default: 1)");
++
++module_param_named(vht_on, rwnx_mod_params.vht_on, bool, S_IRUGO);
++MODULE_PARM_DESC(vht_on, "Enable VHT (Default: 1)");
++
++module_param_named(he_on, rwnx_mod_params.he_on, bool, S_IRUGO);
++MODULE_PARM_DESC(he_on, "Enable HE (Default: 1)");
++
++module_param_named(mcs_map, rwnx_mod_params.mcs_map, int, S_IRUGO);
++MODULE_PARM_DESC(mcs_map, "VHT MCS map value 0: MCS0_7, 1: MCS0_8, 2: MCS0_9"
++ " (Default: 2)");
++
++module_param_named(he_mcs_map, rwnx_mod_params.he_mcs_map, int, S_IRUGO);
++MODULE_PARM_DESC(he_mcs_map, "HE MCS map value 0: MCS0_7, 1: MCS0_9, 2: MCS0_11"
++ " (Default: 2)");
++
++module_param_named(he_ul_on, rwnx_mod_params.he_ul_on, bool, S_IRUGO);
++MODULE_PARM_DESC(he_ul_on, "Enable HE OFDMA UL (Default: 0)");
++
++module_param_named(amsdu_maxnb, rwnx_mod_params.amsdu_maxnb, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(amsdu_maxnb, "Maximum number of MSDUs inside an A-MSDU in TX: (Default: NX_TX_PAYLOAD_MAX)");
++
++module_param_named(ps_on, rwnx_mod_params.ps_on, bool, S_IRUGO);
++MODULE_PARM_DESC(ps_on, "Enable PowerSaving (Default: 1-Enabled)");
++
++module_param_named(tx_lft, rwnx_mod_params.tx_lft, int, 0644);
++MODULE_PARM_DESC(tx_lft, "Tx lifetime (ms) - setting it to 0 disables retries "
++ "(Default: "__stringify(RWNX_TX_LIFETIME_MS)")");
++
++module_param_named(ldpc_on, rwnx_mod_params.ldpc_on, bool, S_IRUGO);
++MODULE_PARM_DESC(ldpc_on, "Enable LDPC (Default: 1)");
++
++module_param_named(stbc_on, rwnx_mod_params.stbc_on, bool, S_IRUGO);
++MODULE_PARM_DESC(stbc_on, "Enable STBC in RX (Default: 1)");
++
++module_param_named(gf_rx_on, rwnx_mod_params.gf_rx_on, bool, S_IRUGO);
++MODULE_PARM_DESC(gf_rx_on, "Enable HT greenfield in reception (Default: 0)");
++
++module_param_named(phycfg, rwnx_mod_params.phy_cfg, int, S_IRUGO);
++MODULE_PARM_DESC(phycfg,
++ "0 <= phycfg <= 5 : RF Channel Conf (Default: 2(C0-A1-B2))");
++
++module_param_named(uapsd_timeout, rwnx_mod_params.uapsd_timeout, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(uapsd_timeout,
++ "UAPSD Timer timeout, in ms (Default: 300). If 0, UAPSD is disabled");
++
++module_param_named(uapsd_queues, rwnx_mod_params.uapsd_queues, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(uapsd_queues, "UAPSD Queues, integer value, must be seen as a bitfield\n"
++ " Bit 0 = VO\n"
++ " Bit 1 = VI\n"
++ " Bit 2 = BK\n"
++ " Bit 3 = BE\n"
++ " -> uapsd_queues=7 will enable uapsd for VO, VI and BK queues");
++
++module_param_named(ap_uapsd_on, rwnx_mod_params.ap_uapsd_on, bool, S_IRUGO);
++MODULE_PARM_DESC(ap_uapsd_on, "Enable UAPSD in AP mode (Default: 1)");
++
++module_param_named(sgi, rwnx_mod_params.sgi, bool, S_IRUGO);
++MODULE_PARM_DESC(sgi, "Advertise Short Guard Interval support (Default: 1)");
++
++module_param_named(sgi80, rwnx_mod_params.sgi80, bool, S_IRUGO);
++MODULE_PARM_DESC(sgi80, "Advertise Short Guard Interval support for 80MHz (Default: 1)");
++
++module_param_named(use_2040, rwnx_mod_params.use_2040, bool, S_IRUGO);
++MODULE_PARM_DESC(use_2040, "Use tweaked 20-40MHz mode (Default: 1)");
++
++module_param_named(use_80, rwnx_mod_params.use_80, bool, S_IRUGO);
++MODULE_PARM_DESC(use_80, "Enable 80MHz (Default: 1)");
++
++module_param_named(custregd, rwnx_mod_params.custregd, bool, S_IRUGO);
++MODULE_PARM_DESC(custregd,
++ "Use permissive custom regulatory rules (for testing ONLY) (Default: 0)");
++
++module_param_named(custchan, rwnx_mod_params.custchan, bool, S_IRUGO);
++MODULE_PARM_DESC(custchan,
++ "Extend channel set to non-standard channels (for testing ONLY) (Default: 0)");
++
++module_param_named(nss, rwnx_mod_params.nss, int, S_IRUGO);
++MODULE_PARM_DESC(nss, "1 <= nss <= 2 : Supported number of Spatial Streams (Default: 1)");
++
++module_param_named(amsdu_rx_max, rwnx_mod_params.amsdu_rx_max, int, S_IRUGO);
++MODULE_PARM_DESC(amsdu_rx_max, "0 <= amsdu_rx_max <= 2 : Maximum A-MSDU size supported in RX\n"
++ " 0: 3895 bytes\n"
++ " 1: 7991 bytes\n"
++ " 2: 11454 bytes\n"
++ " This value might be reduced according to the FW capabilities.\n"
++ " Default: 2");
++
++module_param_named(bfmee, rwnx_mod_params.bfmee, bool, S_IRUGO);
++MODULE_PARM_DESC(bfmee, "Enable Beamformee Capability (Default: 1-Enabled)");
++
++module_param_named(bfmer, rwnx_mod_params.bfmer, bool, S_IRUGO);
++MODULE_PARM_DESC(bfmer, "Enable Beamformer Capability (Default: 0-Disabled)");
++
++module_param_named(mesh, rwnx_mod_params.mesh, bool, S_IRUGO);
++MODULE_PARM_DESC(mesh, "Enable Meshing Capability (Default: 1-Enabled)");
++
++module_param_named(murx, rwnx_mod_params.murx, bool, S_IRUGO);
++MODULE_PARM_DESC(murx, "Enable MU-MIMO RX Capability (Default: 1-Enabled)");
++
++module_param_named(mutx, rwnx_mod_params.mutx, bool, S_IRUGO);
++MODULE_PARM_DESC(mutx, "Enable MU-MIMO TX Capability (Default: 1-Enabled)");
++
++module_param_named(mutx_on, rwnx_mod_params.mutx_on, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(mutx_on, "Enable MU-MIMO transmissions (Default: 1-Enabled)");
++
++module_param_named(roc_dur_max, rwnx_mod_params.roc_dur_max, int, S_IRUGO);
++MODULE_PARM_DESC(roc_dur_max, "Maximum Remain on Channel duration");
++
++module_param_named(listen_itv, rwnx_mod_params.listen_itv, int, S_IRUGO);
++MODULE_PARM_DESC(listen_itv, "Maximum listen interval");
++
++module_param_named(listen_bcmc, rwnx_mod_params.listen_bcmc, bool, S_IRUGO);
++MODULE_PARM_DESC(listen_bcmc, "Wait for BC/MC traffic following DTIM beacon");
++
++module_param_named(lp_clk_ppm, rwnx_mod_params.lp_clk_ppm, int, S_IRUGO);
++MODULE_PARM_DESC(lp_clk_ppm, "Low Power Clock accuracy of the local device");
++
++module_param_named(tdls, rwnx_mod_params.tdls, bool, S_IRUGO);
++MODULE_PARM_DESC(tdls, "Enable TDLS (Default: 0-Disabled)");
++
++module_param_named(uf, rwnx_mod_params.uf, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(uf, "Enable Unsupported HT Frame Logging (Default: 0-Disabled)");
++
++module_param_named(auto_reply, rwnx_mod_params.auto_reply, bool, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(auto_reply, "Enable Monitor MacAddr Auto-Reply (Default: 0-Disabled)");
++
++module_param_named(ftl, rwnx_mod_params.ftl, charp, S_IRUGO);
++MODULE_PARM_DESC(ftl, "Firmware trace level (Default: \"\")");
++
++module_param_named(dpsm, rwnx_mod_params.dpsm, bool, S_IRUGO);
++MODULE_PARM_DESC(dpsm, "Enable Dynamic PowerSaving (Default: 0-Disabled)");
++
++#ifdef DEFAULT_COUNTRY_CODE
++char default_ccode[4] = DEFAULT_COUNTRY_CODE;
++#else
++char default_ccode[4] = "00";
++#endif
++
++char country_code[4];
++module_param_string(country_code, country_code, 4, 0600);
++
++#define RWNX_REG_RULE(start, end, bw, reg_flags) REG_RULE(start, end, bw, 0, 0, reg_flags)
++struct rwnx_regdomain {
++ char country_code[4];
++ struct ieee80211_regdomain *prRegdRules;
++};
++
++static const int mcs_map_to_rate[4][3] = {
++ [PHY_CHNL_BW_20][IEEE80211_VHT_MCS_SUPPORT_0_7] = 65,
++ [PHY_CHNL_BW_20][IEEE80211_VHT_MCS_SUPPORT_0_8] = 78,
++ [PHY_CHNL_BW_20][IEEE80211_VHT_MCS_SUPPORT_0_9] = 78,
++ [PHY_CHNL_BW_40][IEEE80211_VHT_MCS_SUPPORT_0_7] = 135,
++ [PHY_CHNL_BW_40][IEEE80211_VHT_MCS_SUPPORT_0_8] = 162,
++ [PHY_CHNL_BW_40][IEEE80211_VHT_MCS_SUPPORT_0_9] = 180,
++ [PHY_CHNL_BW_80][IEEE80211_VHT_MCS_SUPPORT_0_7] = 292,
++ [PHY_CHNL_BW_80][IEEE80211_VHT_MCS_SUPPORT_0_8] = 351,
++ [PHY_CHNL_BW_80][IEEE80211_VHT_MCS_SUPPORT_0_9] = 390,
++ [PHY_CHNL_BW_160][IEEE80211_VHT_MCS_SUPPORT_0_7] = 585,
++ [PHY_CHNL_BW_160][IEEE80211_VHT_MCS_SUPPORT_0_8] = 702,
++ [PHY_CHNL_BW_160][IEEE80211_VHT_MCS_SUPPORT_0_9] = 780,
++};
++
++#define MAX_VHT_RATE(map, nss, bw) (mcs_map_to_rate[bw][map] * (nss))
++
++extern struct ieee80211_regdomain *reg_regdb[];
++
++char ccode_channels[200];
++int index_for_channel_list = 0;
++module_param_string(ccode_channels, ccode_channels, 200, 0600);
++
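++/* Build a space-separated list of the channel numbers permitted by the given
++ * regdomain into the ccode_channels module parameter (and, with wireless
++ * extensions enabled, record the matching center frequencies). */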
++void rwnx_get_countrycode_channels(struct wiphy *wiphy,
++ struct ieee80211_regdomain *regdomain){
++ enum nl80211_band band;
++ struct ieee80211_supported_band *sband;
++ int channel_index;
++ int rule_index;
++ int band_num = 0;
++ int rule_num = regdomain->n_reg_rules;
++ int start_freq = 0;
++ int end_freq = 0;
++ int center_freq = 0;
++ char channel[4];
++#ifdef CONFIG_USE_WIRELESS_EXT
++ struct rwnx_hw *rwnx_hw = wiphy_priv(wiphy);
++ int support_freqs_counter = 0;
++#endif
++
++ band_num = NUM_NL80211_BANDS;
++
++ memset(ccode_channels, 0, 200);
++
++ index_for_channel_list = 0;
++
++ for (band = 0; band < band_num; band++) {
++ sband = wiphy->bands[band];// bands: 0:2.4G 1:5G 2:60G
++ if (!sband)
++ continue;
++
++ for (channel_index = 0; channel_index < sband->n_channels; channel_index++) {
++ for(rule_index = 0; rule_index < rule_num; rule_index++){
++ start_freq = regdomain->reg_rules[rule_index].freq_range.start_freq_khz/1000;
++ end_freq = regdomain->reg_rules[rule_index].freq_range.end_freq_khz/1000;
++ center_freq = sband->channels[channel_index].center_freq;
++ if((center_freq - 10) >= start_freq && (center_freq + 10) <= end_freq){
++#ifdef CONFIG_USE_WIRELESS_EXT
++ rwnx_hw->support_freqs[support_freqs_counter++] = center_freq;
++#endif
++ sprintf(channel, "%d",ieee80211_frequency_to_channel(center_freq));
++
++ memcpy(ccode_channels + index_for_channel_list, channel, strlen(channel));
++
++ index_for_channel_list += strlen(channel);
++
++ memcpy(ccode_channels + index_for_channel_list, " ", 1);
++
++ index_for_channel_list += 1;
++ break;
++
++ }
++ }
++ }
++ }
++#ifdef CONFIG_USE_WIRELESS_EXT
++ rwnx_hw->support_freqs_number = support_freqs_counter;
++#endif
++ AICWFDBG(LOGINFO, "%s support channel:%s\r\n", __func__, ccode_channels);
++}
++
++
++struct ieee80211_regdomain *getRegdomainFromRwnxDBIndex(struct wiphy *wiphy,
++ int index)
++{
++ u8 idx;
++
++ idx = index;
++
++ memset(country_code, 0, 4);
++ country_code[0] = reg_regdb[idx]->alpha2[0];
++ country_code[1] = reg_regdb[idx]->alpha2[1];
++
++ AICWFDBG(LOGINFO, "%s set ccode:%s \r\n", __func__, country_code);
++
++ rwnx_get_countrycode_channels(wiphy, reg_regdb[idx]);
++
++ return reg_regdb[idx];
++}
++
++extern int reg_regdb_size;
++
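++/* Look up the regulatory domain matching the given alpha2 country code in the
++ * built-in reg_regdb table, falling back to the first table entry (used as the
++ * default country code) when no match is found. */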
++struct ieee80211_regdomain *getRegdomainFromRwnxDB(struct wiphy *wiphy,
++ char *alpha2)
++{
++ u8 idx;
++
++ memset(country_code, 0, 4);
++
++ AICWFDBG(LOGINFO, "%s set ccode:%s \r\n", __func__, alpha2);
++
++ idx = 0;
++
++ while (reg_regdb[idx]){
++ if((reg_regdb[idx]->alpha2[0] == alpha2[0]) &&
++ (reg_regdb[idx]->alpha2[1] == alpha2[1])){
++ memcpy(country_code, alpha2, 2);
++ rwnx_get_countrycode_channels(wiphy, reg_regdb[idx]);
++ return reg_regdb[idx];
++ }
++ idx++;
++ if(idx == reg_regdb_size){
++ break;
++ }
++ }
++
++ AICWFDBG(LOGERROR, "%s(): Error, wrong country = %s\n",
++ __func__, alpha2);
++ AICWFDBG(LOGINFO, "Set as default 00\n");
++
++ memcpy(country_code, default_ccode, sizeof(default_ccode));
++ rwnx_get_countrycode_channels(wiphy, reg_regdb[0]);
++
++ return reg_regdb[0];
++}
++
++
++/**
++ * Do some sanity check
++ *
++ */
++#if 0
++static int rwnx_check_fw_hw_feature(struct rwnx_hw *rwnx_hw,
++ struct wiphy *wiphy)
++{
++ u32_l sys_feat = rwnx_hw->version_cfm.features;
++ u32_l mac_feat = rwnx_hw->version_cfm.version_machw_1;
++ u32_l phy_feat = rwnx_hw->version_cfm.version_phy_1;
++ u32_l phy_vers = rwnx_hw->version_cfm.version_phy_2;
++ u16_l max_sta_nb = rwnx_hw->version_cfm.max_sta_nb;
++ u8_l max_vif_nb = rwnx_hw->version_cfm.max_vif_nb;
++ int bw, res = 0;
++ int amsdu_rx;
++
++ if (!rwnx_hw->mod_params->custregd)
++ rwnx_hw->mod_params->custchan = false;
++
++ if (rwnx_hw->mod_params->custchan) {
++ rwnx_hw->mod_params->mesh = false;
++ rwnx_hw->mod_params->tdls = false;
++ }
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++ if (!(sys_feat & BIT(MM_FEAT_UMAC_BIT))) {
++ wiphy_err(wiphy,
++ "Loading softmac firmware with fullmac driver\n");
++ res = -1;
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_ANT_DIV_BIT))) {
++ rwnx_hw->mod_params->ant_div = false;
++ }
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ if (!(sys_feat & BIT(MM_FEAT_VHT_BIT))) {
++ rwnx_hw->mod_params->vht_on = false;
++ }
++
++ // Check if HE is supported
++ if (!(sys_feat & BIT(MM_FEAT_HE_BIT))) {
++ rwnx_hw->mod_params->he_on = false;
++ rwnx_hw->mod_params->he_ul_on = false;
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_PS_BIT))) {
++ rwnx_hw->mod_params->ps_on = false;
++ }
++
++ /* AMSDU (non)support implies different shared structure definition
++ so ensure that fw and drv have consistent compilation options */
++ if (sys_feat & BIT(MM_FEAT_AMSDU_BIT)) {
++#ifndef CONFIG_RWNX_SPLIT_TX_BUF
++ wiphy_err(wiphy,
++ "AMSDU enabled in firmware but support not compiled in driver\n");
++ res = -1;
++#else
++ if (rwnx_hw->mod_params->amsdu_maxnb > NX_TX_PAYLOAD_MAX)
++ rwnx_hw->mod_params->amsdu_maxnb = NX_TX_PAYLOAD_MAX;
++#endif /* CONFIG_RWNX_SPLIT_TX_BUF */
++ } else {
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ wiphy_err(wiphy,
++ "AMSDU disabled in firmware but support compiled in driver\n");
++ res = -1;
++#endif /* CONFIG_RWNX_SPLIT_TX_BUF */
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_UAPSD_BIT))) {
++ rwnx_hw->mod_params->uapsd_timeout = 0;
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_BFMEE_BIT))) {
++ rwnx_hw->mod_params->bfmee = false;
++ }
++
++ if ((sys_feat & BIT(MM_FEAT_BFMER_BIT))) {
++#ifndef CONFIG_RWNX_BFMER
++ wiphy_err(wiphy,
++ "BFMER enabled in firmware but support not compiled in driver\n");
++ res = -1;
++#endif /* CONFIG_RWNX_BFMER */
++ // Check PHY and MAC HW BFMER support and update parameter accordingly
++ if (!(phy_feat & MDM_BFMER_BIT) || !(mac_feat & NXMAC_BFMER_BIT)) {
++ rwnx_hw->mod_params->bfmer = false;
++ // Disable the feature in the bitfield so that it won't be displayed
++ sys_feat &= ~BIT(MM_FEAT_BFMER_BIT);
++ }
++ } else {
++#ifdef CONFIG_RWNX_BFMER
++ wiphy_err(wiphy,
++ "BFMER disabled in firmware but support compiled in driver\n");
++ res = -1;
++#else
++ rwnx_hw->mod_params->bfmer = false;
++#endif /* CONFIG_RWNX_BFMER */
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_MESH_BIT))) {
++ rwnx_hw->mod_params->mesh = false;
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_TDLS_BIT))) {
++ rwnx_hw->mod_params->tdls = false;
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_UF_BIT))) {
++ rwnx_hw->mod_params->uf = false;
++ }
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if ((sys_feat & BIT(MM_FEAT_MON_DATA_BIT))) {
++#ifndef CONFIG_RWNX_MON_DATA
++ wiphy_err(wiphy,
++ "Monitor+Data interface support (MON_DATA) is enabled in firmware but support not compiled in driver\n");
++ res = -1;
++#endif /* CONFIG_RWNX_MON_DATA */
++ } else {
++#ifdef CONFIG_RWNX_MON_DATA
++ wiphy_err(wiphy,
++ "Monitor+Data interface support (MON_DATA) disabled in firmware but support compiled in driver\n");
++ res = -1;
++#endif /* CONFIG_RWNX_MON_DATA */
++ }
++#endif
++
++ // Check supported AMSDU RX size
++ amsdu_rx = (sys_feat >> MM_AMSDU_MAX_SIZE_BIT0) & 0x03;
++ if (amsdu_rx < rwnx_hw->mod_params->amsdu_rx_max) {
++ rwnx_hw->mod_params->amsdu_rx_max = amsdu_rx;
++ }
++
++ // Check supported BW
++ bw = (phy_feat & MDM_CHBW_MASK) >> MDM_CHBW_LSB;
++ // Check if 80MHz BW is supported
++ if (bw < 2) {
++ rwnx_hw->mod_params->use_80 = false;
++ }
++ // Check if 40MHz BW is supported
++ if (bw < 1)
++ rwnx_hw->mod_params->use_2040 = false;
++
++ // 80MHz BW shall be disabled if 40MHz is not enabled
++ if (!rwnx_hw->mod_params->use_2040)
++ rwnx_hw->mod_params->use_80 = false;
++
++ // Check if HT is supposed to be supported. If not, disable VHT/HE too
++ if (!rwnx_hw->mod_params->ht_on)
++ {
++ rwnx_hw->mod_params->vht_on = false;
++ rwnx_hw->mod_params->he_on = false;
++ rwnx_hw->mod_params->he_ul_on = false;
++ rwnx_hw->mod_params->use_80 = false;
++ rwnx_hw->mod_params->use_2040 = false;
++ }
++
++ // LDPC is mandatory for HE40 and above, so if LDPC is not supported, then disable
++ // HE to use HT/VHT only
++ if (rwnx_hw->mod_params->use_2040 && !rwnx_hw->mod_params->ldpc_on)
++ {
++ rwnx_hw->mod_params->he_on = false;
++ rwnx_hw->mod_params->he_ul_on = false;
++ }
++
++ // HT greenfield is not supported in modem >= 3.0
++ if (__MDM_MAJOR_VERSION(phy_vers) > 0) {
++ rwnx_hw->mod_params->gf_rx_on = false;
++ }
++
++ if (!(sys_feat & BIT(MM_FEAT_MU_MIMO_RX_BIT)) ||
++ !rwnx_hw->mod_params->bfmee) {
++ rwnx_hw->mod_params->murx = false;
++ }
++
++ if ((sys_feat & BIT(MM_FEAT_MU_MIMO_TX_BIT))) {
++#ifndef CONFIG_RWNX_MUMIMO_TX
++ wiphy_err(wiphy,
++ "MU-MIMO TX enabled in firmware but support not compiled in driver\n");
++ res = -1;
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++ if (!rwnx_hw->mod_params->bfmer)
++ rwnx_hw->mod_params->mutx = false;
++ // Check PHY and MAC HW MU-MIMO TX support and update parameter accordingly
++ else if (!(phy_feat & MDM_MUMIMOTX_BIT) || !(mac_feat & NXMAC_MU_MIMO_TX_BIT)) {
++ rwnx_hw->mod_params->mutx = false;
++ // Disable the feature in the bitfield so that it won't be displayed
++ sys_feat &= ~BIT(MM_FEAT_MU_MIMO_TX_BIT);
++ }
++ } else {
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ wiphy_err(wiphy,
++ "MU-MIMO TX disabled in firmware but support compiled in driver\n");
++ res = -1;
++#else
++ rwnx_hw->mod_params->mutx = false;
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++ }
++
++ if (sys_feat & BIT(MM_FEAT_WAPI_BIT)) {
++ rwnx_enable_wapi(rwnx_hw);
++ }
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (sys_feat & BIT(MM_FEAT_MFP_BIT)) {
++ rwnx_enable_mfp(rwnx_hw);
++ }
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++#define QUEUE_NAME "Broadcast/Multicast queue "
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ if (sys_feat & BIT(MM_FEAT_BCN_BIT)) {
++#if NX_TXQ_CNT == 4
++ wiphy_err(wiphy, QUEUE_NAME
++ "enabled in firmware but support not compiled in driver\n");
++ res = -1;
++#endif /* NX_TXQ_CNT == 4 */
++ } else {
++#if NX_TXQ_CNT == 5
++ wiphy_err(wiphy, QUEUE_NAME
++ "disabled in firmware but support compiled in driver\n");
++ res = -1;
++#endif /* NX_TXQ_CNT == 5 */
++ }
++#undef QUEUE_NAME
++
++#ifdef CONFIG_RWNX_RADAR
++ if (sys_feat & BIT(MM_FEAT_RADAR_BIT)) {
++ /* Enable combination with radar detection */
++ wiphy->n_iface_combinations++;
++ }
++#endif /* CONFIG_RWNX_RADAR */
++
++#ifndef CONFIG_RWNX_SDM
++ switch (__MDM_PHYCFG_FROM_VERS(phy_feat)) {
++ case MDM_PHY_CONFIG_TRIDENT:
++ case MDM_PHY_CONFIG_ELMA:
++ rwnx_hw->mod_params->nss = 1;
++ break;
++ case MDM_PHY_CONFIG_KARST:
++ {
++ int nss_supp = (phy_feat & MDM_NSS_MASK) >> MDM_NSS_LSB;
++ if (rwnx_hw->mod_params->nss > nss_supp)
++ rwnx_hw->mod_params->nss = nss_supp;
++ }
++ break;
++ default:
++ WARN_ON(1);
++ break;
++ }
++#endif /* CONFIG_RWNX_SDM */
++
++ if (rwnx_hw->mod_params->nss < 1 || rwnx_hw->mod_params->nss > 2)
++ rwnx_hw->mod_params->nss = 1;
++
++ if (rwnx_hw->mod_params->phy_cfg < 0 || rwnx_hw->mod_params->phy_cfg > 5)
++ rwnx_hw->mod_params->phy_cfg = 2;
++
++ if (rwnx_hw->mod_params->mcs_map < 0 || rwnx_hw->mod_params->mcs_map > 2)
++ rwnx_hw->mod_params->mcs_map = 0;
++
++ wiphy_info(wiphy, "PHY features: [NSS=%d][CHBW=%d]%s%s\n",
++ rwnx_hw->mod_params->nss,
++ 20 * (1 << ((phy_feat & MDM_CHBW_MASK) >> MDM_CHBW_LSB)),
++ rwnx_hw->mod_params->ldpc_on ? "[LDPC]" : "",
++ rwnx_hw->mod_params->he_on ? "[HE]" : "");
++
++#define PRINT_RWNX_FEAT(feat) \
++ (sys_feat & BIT(MM_FEAT_##feat##_BIT) ? "["#feat"]" : "")
++
++ wiphy_info(wiphy, "FW features: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
++ PRINT_RWNX_FEAT(BCN),
++ PRINT_RWNX_FEAT(AUTOBCN),
++ PRINT_RWNX_FEAT(HWSCAN),
++ PRINT_RWNX_FEAT(CMON),
++ PRINT_RWNX_FEAT(MROLE),
++ PRINT_RWNX_FEAT(RADAR),
++ PRINT_RWNX_FEAT(PS),
++ PRINT_RWNX_FEAT(UAPSD),
++ PRINT_RWNX_FEAT(DPSM),
++ PRINT_RWNX_FEAT(AMPDU),
++ PRINT_RWNX_FEAT(AMSDU),
++ PRINT_RWNX_FEAT(CHNL_CTXT),
++ PRINT_RWNX_FEAT(REORD),
++ PRINT_RWNX_FEAT(P2P),
++ PRINT_RWNX_FEAT(P2P_GO),
++ PRINT_RWNX_FEAT(UMAC),
++ PRINT_RWNX_FEAT(VHT),
++ PRINT_RWNX_FEAT(HE),
++ PRINT_RWNX_FEAT(BFMEE),
++ PRINT_RWNX_FEAT(BFMER),
++ PRINT_RWNX_FEAT(WAPI),
++ PRINT_RWNX_FEAT(MFP),
++ PRINT_RWNX_FEAT(MU_MIMO_RX),
++ PRINT_RWNX_FEAT(MU_MIMO_TX),
++ PRINT_RWNX_FEAT(MESH),
++ PRINT_RWNX_FEAT(TDLS),
++ PRINT_RWNX_FEAT(ANT_DIV));
++#undef PRINT_RWNX_FEAT
++
++ if(max_sta_nb != NX_REMOTE_STA_MAX)
++ {
++ wiphy_err(wiphy, "Different number of supported stations between driver and FW (%d != %d)\n",
++ NX_REMOTE_STA_MAX, max_sta_nb);
++ res = -1;
++ }
++
++ if(max_vif_nb != NX_VIRT_DEV_MAX)
++ {
++ wiphy_err(wiphy, "Different number of supported virtual interfaces between driver and FW (%d != %d)\n",
++ NX_VIRT_DEV_MAX, max_vif_nb);
++ res = -1;
++ }
++
++ return res;
++}
++#endif
++
++
++static void rwnx_set_vht_capa(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++#ifdef CONFIG_VHT_FOR_OLD_KERNEL
++ #ifdef USE_5G
++ struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ];
++ #endif
++ struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ];
++
++ int i;
++ int nss = rwnx_hw->mod_params->nss;
++ int mcs_map;
++ int mcs_map_max;
++ int bw_max;
++
++ if (!rwnx_hw->mod_params->vht_on) {
++ return;
++ }
++
++ rwnx_hw->vht_cap_2G.vht_supported = true;
++ if (rwnx_hw->mod_params->sgi80)
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
++ if (rwnx_hw->mod_params->stbc_on)
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_RXSTBC_1;
++ if (rwnx_hw->mod_params->ldpc_on)
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_RXLDPC;
++ if (rwnx_hw->mod_params->bfmee) {
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ rwnx_hw->vht_cap_2G.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
++ #else
++ rwnx_hw->vht_cap_2G.cap |= 3 << 13;
++ #endif
++ }
++ if (nss > 1)
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_TXSTBC;
++
++ // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap)
++ rwnx_hw->vht_cap_2G.cap |= rwnx_hw->mod_params->amsdu_rx_max;
++
++ if (rwnx_hw->mod_params->bfmer) {
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
++ /* Set number of sounding dimensions */
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ rwnx_hw->vht_cap_2G.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
++ #else
++ rwnx_hw->vht_cap_2G.cap |= (nss - 1) << 16;
++ #endif
++ }
++ if (rwnx_hw->mod_params->murx)
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
++ if (rwnx_hw->mod_params->mutx)
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
++
++ /*
++ * MCS map:
++ * These capabilities are filled according to the mcs_map module parameter.
++ * However currently we have some limitations due to FPGA clock constraints
++ * that prevent always using the range of MCS that is defined by the
++ * parameter:
++ * - in RX, 2SS, we support up to MCS7
++ * - in TX, 2SS, we support up to MCS8
++ */
++ // Get max supported BW
++ if (rwnx_hw->mod_params->use_80)
++ bw_max = PHY_CHNL_BW_80;
++ else if (rwnx_hw->mod_params->use_2040)
++ bw_max = PHY_CHNL_BW_40;
++ else
++ bw_max = PHY_CHNL_BW_20;
++
++ // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20,
++ // MCS9 is not supported in 1 and 2 SS
++ if (rwnx_hw->mod_params->use_2040)
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9;
++ else
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8;
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ rwnx_hw->vht_cap_2G.vht_mcs.rx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ rwnx_hw->vht_cap_2G.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ rwnx_hw->vht_cap_2G.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ rwnx_hw->vht_cap_2G.vht_mcs.rx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ rwnx_hw->vht_cap_2G.vht_mcs.tx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ rwnx_hw->vht_cap_2G.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ rwnx_hw->vht_cap_2G.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_VHT_MCS_SUPPORT_0_8);
++ }
++ for (; i < 8; i++) {
++ rwnx_hw->vht_cap_2G.vht_mcs.tx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ if (!rwnx_hw->mod_params->use_80) {
++#ifdef CONFIG_VENDOR_RWNX_VHT_NO80
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80;
++#endif
++ rwnx_hw->vht_cap_2G.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80;
++ }
++
++ rwnx_hw->vht_cap_2G.cap |= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
++ printk("%s, vht_capa_info=0x%x\n", __func__, rwnx_hw->vht_cap_2G.cap);
++#ifdef USE_5G
++ if (rwnx_hw->band_5g_support) {
++ rwnx_hw->vht_cap_5G.vht_supported = true;
++ if (rwnx_hw->mod_params->sgi80)
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
++ if (rwnx_hw->mod_params->stbc_on)
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_RXSTBC_1;
++ if (rwnx_hw->mod_params->ldpc_on)
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_RXLDPC;
++ if (rwnx_hw->mod_params->bfmee) {
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ rwnx_hw->vht_cap_5G.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
++ #else
++ rwnx_hw->vht_cap_5G.cap |= 3 << 13;
++ #endif
++ }
++ if (nss > 1)
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_TXSTBC;
++
++ // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap)
++ rwnx_hw->vht_cap_5G.cap |= rwnx_hw->mod_params->amsdu_rx_max;
++
++ if (rwnx_hw->mod_params->bfmer) {
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
++ /* Set number of sounding dimensions */
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ rwnx_hw->vht_cap_5G.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
++ #else
++ rwnx_hw->vht_cap_5G.cap |= (nss - 1) << 16;
++ #endif
++ }
++ if (rwnx_hw->mod_params->murx)
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
++ if (rwnx_hw->mod_params->mutx)
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
++
++ /*
++ * MCS map:
++ * These capabilities are filled according to the mcs_map module parameter.
++ * However currently we have some limitations due to FPGA clock constraints
++ * that prevent always using the range of MCS that is defined by the
++ * parameter:
++ * - in RX, 2SS, we support up to MCS7
++ * - in TX, 2SS, we support up to MCS8
++ */
++ // Get max supported BW
++ if (rwnx_hw->mod_params->use_80)
++ bw_max = PHY_CHNL_BW_80;
++ else if (rwnx_hw->mod_params->use_2040)
++ bw_max = PHY_CHNL_BW_40;
++ else
++ bw_max = PHY_CHNL_BW_20;
++
++ // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20,
++ // MCS9 is not supported in 1 and 2 SS
++ if (rwnx_hw->mod_params->use_2040)
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9;
++ else
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8;
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ rwnx_hw->vht_cap_5G.vht_mcs.rx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ rwnx_hw->vht_cap_5G.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ rwnx_hw->vht_cap_5G.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ rwnx_hw->vht_cap_5G.vht_mcs.rx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ rwnx_hw->vht_cap_5G.vht_mcs.tx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ rwnx_hw->vht_cap_5G.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ rwnx_hw->vht_cap_5G.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_VHT_MCS_SUPPORT_0_8);
++ }
++ for (; i < 8; i++) {
++ rwnx_hw->vht_cap_5G.vht_mcs.tx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ if (!rwnx_hw->mod_params->use_80) {
++#ifdef CONFIG_VENDOR_RWNX_VHT_NO80
++ rwnx_hw->vht_cap_5G.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80;
++#endif
++ rwnx_hw->vht_cap_5G.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80;
++ }
++ }
++#endif
++ return;
++#endif
++
++ //#ifdef USE_5G
++ struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ];
++ //#endif
++ struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ];
++
++ int i;
++ int nss = rwnx_hw->mod_params->nss;
++ int mcs_map;
++ int mcs_map_max;
++ int bw_max;
++
++ if (!rwnx_hw->mod_params->vht_on) {
++ return;
++ }
++
++ band_2GHz->vht_cap.vht_supported = true;
++ if (rwnx_hw->mod_params->sgi80)
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
++ if (rwnx_hw->mod_params->stbc_on)
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXSTBC_1;
++ if (rwnx_hw->mod_params->ldpc_on)
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXLDPC;
++ if (rwnx_hw->mod_params->bfmee) {
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ band_2GHz->vht_cap.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
++ #else
++ band_2GHz->vht_cap.cap |= 3 << 13;
++ #endif
++ }
++ if (nss > 1)
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_TXSTBC;
++
++ // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap)
++ band_2GHz->vht_cap.cap |= rwnx_hw->mod_params->amsdu_rx_max;
++
++ if (rwnx_hw->mod_params->bfmer) {
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
++ /* Set number of sounding dimensions */
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ band_2GHz->vht_cap.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
++ #else
++ band_2GHz->vht_cap.cap |= (nss - 1) << 16;
++ #endif
++ }
++ if (rwnx_hw->mod_params->murx)
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
++ if (rwnx_hw->mod_params->mutx)
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
++
++ /*
++ * MCS map:
++ * These capabilities are filled according to the mcs_map module parameter.
++ * However currently we have some limitations due to FPGA clock constraints
++ * that prevent always using the range of MCS that is defined by the
++ * parameter:
++ * - in RX, 2SS, we support up to MCS7
++ * - in TX, 2SS, we support up to MCS8
++ */
++ // Get max supported BW
++ if (rwnx_hw->mod_params->use_80)
++ bw_max = PHY_CHNL_BW_80;
++ else if (rwnx_hw->mod_params->use_2040)
++ bw_max = PHY_CHNL_BW_40;
++ else
++ bw_max = PHY_CHNL_BW_20;
++
++ // Check if MCS map should be limited to MCS0_8 due to the standard. Indeed in BW20,
++ // MCS9 is not supported in 1 and 2 SS
++ if (rwnx_hw->mod_params->use_2040)
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9;
++ else
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8;
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ band_2GHz->vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ band_2GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ band_2GHz->vht_cap.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ band_2GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ band_2GHz->vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ band_2GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ band_2GHz->vht_cap.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_VHT_MCS_SUPPORT_0_8);
++ }
++ for (; i < 8; i++) {
++ band_2GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ if (!rwnx_hw->mod_params->use_80) {
++#ifdef CONFIG_VENDOR_RWNX_VHT_NO80
++ band_2GHz->vht_cap.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80;
++#endif
++ band_2GHz->vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80;
++ }
++
++
++//#ifdef USE_5G
++ if (rwnx_hw->band_5g_support) {
++ band_5GHz->vht_cap.vht_supported = true;
++ if (rwnx_hw->mod_params->sgi80)
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
++ if (rwnx_hw->mod_params->stbc_on)
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXSTBC_1;
++ if (rwnx_hw->mod_params->ldpc_on)
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_RXLDPC;
++ if (rwnx_hw->mod_params->bfmee) {
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ band_5GHz->vht_cap.cap |= 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
++ #else
++ band_5GHz->vht_cap.cap |= 3 << 13;
++ #endif
++ }
++ if (nss > 1)
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_TXSTBC;
++
++ // Update the AMSDU max RX size (not shifted as located at offset 0 of the VHT cap)
++ band_5GHz->vht_cap.cap |= rwnx_hw->mod_params->amsdu_rx_max;
++
++ if (rwnx_hw->mod_params->bfmer) {
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
++ /* Set number of sounding dimensions */
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ band_5GHz->vht_cap.cap |= (nss - 1) << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
++ #else
++ band_5GHz->vht_cap.cap |= (nss - 1) << 16;
++ #endif
++ }
++ if (rwnx_hw->mod_params->murx)
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
++ if (rwnx_hw->mod_params->mutx)
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
++
++ /*
++ * MCS map:
++ * These capabilities are filled according to the mcs_map module parameter.
++ * However, FPGA clock constraints currently prevent always using the full
++ * MCS range defined by the parameter:
++ * - in RX, 2SS, we support up to MCS7
++ * - in TX, 2SS, we support up to MCS8
++ */
++ // Get max supported BW
++ if (rwnx_hw->mod_params->use_80)
++ bw_max = PHY_CHNL_BW_80;
++ else if (rwnx_hw->mod_params->use_2040)
++ bw_max = PHY_CHNL_BW_40;
++ else
++ bw_max = PHY_CHNL_BW_20;
++
++ // Check whether the MCS map must be limited to MCS0_8 per the standard:
++ // in BW20, MCS9 is not supported for 1 and 2 SS
++ if (rwnx_hw->mod_params->use_2040)
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_9;
++ else
++ mcs_map_max = IEEE80211_VHT_MCS_SUPPORT_0_8;
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ band_5GHz->vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ band_5GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ band_5GHz->vht_cap.vht_mcs.rx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ band_5GHz->vht_cap.vht_mcs.rx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map, mcs_map_max);
++ band_5GHz->vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(0);
++ for (i = 0; i < nss; i++) {
++ band_5GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16(mcs_map << (i*2));
++ band_5GHz->vht_cap.vht_mcs.tx_highest = MAX_VHT_RATE(mcs_map, nss, bw_max);
++ mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_VHT_MCS_SUPPORT_0_8);
++ }
++ for (; i < 8; i++) {
++ band_5GHz->vht_cap.vht_mcs.tx_mcs_map |= cpu_to_le16(
++ IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
++ }
++
++ if (!rwnx_hw->mod_params->use_80) {
++#ifdef CONFIG_VENDOR_RWNX_VHT_NO80
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80;
++#endif
++ band_5GHz->vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_80;
++ }
++//#endif
++ }
++}
++
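++/* Set the HT capabilities advertised for the 2.4GHz band (and copied to 5GHz
++ * when supported) from the module parameters: STBC, LDPC, 20/40MHz, SGI,
++ * max A-MSDU, greenfield RX and the number of spatial streams. */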
++static void rwnx_set_ht_capa(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++ //#ifdef USE_5G
++ struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ];
++ //#endif
++
++ struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ];
++ int i;
++ int nss = rwnx_hw->mod_params->nss;
++
++ if (!rwnx_hw->mod_params->ht_on) {
++ band_2GHz->ht_cap.ht_supported = false;
++ //#ifdef USE_5G
++ if (rwnx_hw->band_5g_support){
++ band_5GHz->ht_cap.ht_supported = false;
++ }
++ //#endif
++ return;
++ }
++
++ if (rwnx_hw->mod_params->stbc_on)
++ band_2GHz->ht_cap.cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT;
++ if (rwnx_hw->mod_params->ldpc_on)
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
++ if (rwnx_hw->mod_params->use_2040) {
++ band_2GHz->ht_cap.mcs.rx_mask[4] = 0x1; /* MCS32 */
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
++ band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(135 * nss);
++ } else {
++ band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(65 * nss);
++ }
++ if (nss > 1)
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
++
++ // Update the AMSDU max RX size
++ if (rwnx_hw->mod_params->amsdu_rx_max)
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
++
++ if (rwnx_hw->mod_params->sgi) {
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
++ if (rwnx_hw->mod_params->use_2040) {
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
++ band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(150 * nss);
++ } else
++ band_2GHz->ht_cap.mcs.rx_highest = cpu_to_le16(72 * nss);
++ }
++ if (rwnx_hw->mod_params->gf_rx_on)
++ band_2GHz->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
++
++ for (i = 0; i < nss; i++) {
++ band_2GHz->ht_cap.mcs.rx_mask[i] = 0xFF;
++ }
++
++ //#ifdef USE_5G
++ if (rwnx_hw->band_5g_support){
++ band_5GHz->ht_cap = band_2GHz->ht_cap;
++ }
++ //#endif
++}
++
++#ifdef CONFIG_HE_FOR_OLD_KERNEL
++extern struct ieee80211_sband_iftype_data rwnx_he_capa;
++#endif
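++/* Fill the HE (802.11ax) capabilities. With CONFIG_HE_FOR_OLD_KERNEL they are
++ * written into the driver-provided rwnx_he_capa template; otherwise (kernel
++ * >= 4.19) they are written directly into the per-band iftype_data. */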
++static void rwnx_set_he_capa(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++ #ifdef CONFIG_HE_FOR_OLD_KERNEL
++ struct ieee80211_sta_he_cap *he_cap;
++ int i;
++ int nss = rwnx_hw->mod_params->nss;
++ int mcs_map;
++
++ he_cap = (struct ieee80211_sta_he_cap *) &rwnx_he_capa.he_cap;
++ he_cap->has_he = true;
++ he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK;
++ if (rwnx_hw->mod_params->use_2040) {
++ he_cap->he_cap_elem.phy_cap_info[0] |=
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
++ he_cap->ppe_thres[0] |= 0x10;
++ }
++ //if (rwnx_hw->mod_params->use_80)
++ {
++ he_cap->he_cap_elem.phy_cap_info[0] |=
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
++ }
++ if (rwnx_hw->mod_params->ldpc_on) {
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
++ } else {
++ // If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory
++ // for MCS 10 and 11
++ rwnx_hw->mod_params->he_mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_HE_MCS_SUPPORT_0_9);
++ }
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US
++ | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
++
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS |
++ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
++ IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
++ #else
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
++ IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
++ #endif
++ if (rwnx_hw->mod_params->stbc_on)
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
++ he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
++ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 |
++ IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
++ #else
++ he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
++ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 |
++ IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA;
++ #endif
++ if (rwnx_hw->mod_params->bfmee) {
++ he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
++ he_cap->he_cap_elem.phy_cap_info[4] |=
++ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
++ }
++ he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
++ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
++ he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
++ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
++ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
++ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
++ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT |
++ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
++#else
++ he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
++ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
++ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
++ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
++ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT |
++ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
++#endif
++ he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++ he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G;
++ he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
++ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
++ mcs_map = rwnx_hw->mod_params->he_mcs_map;
++ //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9);
++ memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp));
++ for (i = 0; i < nss; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
++ mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
++ }
++ mcs_map = rwnx_hw->mod_params->he_mcs_map;
++ for (i = 0; i < nss; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
++ mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map,
++ IEEE80211_HE_MCS_SUPPORT_0_7);
++ }
++ for (; i < 8; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
++ }
++
++ return;
++ #endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ //#ifdef USE_5G
++ struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ];
++ //#endif
++ struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ];
++ int i;
++ int nss = rwnx_hw->mod_params->nss;
++ struct ieee80211_sta_he_cap *he_cap;
++ int mcs_map;
++ if (!rwnx_hw->mod_params->he_on) {
++ band_2GHz->iftype_data = NULL;
++ band_2GHz->n_iftype_data = 0;
++ //#ifdef USE_5G
++ if (rwnx_hw->band_5g_support) {
++ band_5GHz->iftype_data = NULL;
++ band_5GHz->n_iftype_data = 0;
++ }
++ //#endif
++ return;
++ }
++ he_cap = (struct ieee80211_sta_he_cap *) &band_2GHz->iftype_data->he_cap;
++ he_cap->has_he = true;
++ he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK;
++ if (rwnx_hw->mod_params->use_2040) {
++ he_cap->he_cap_elem.phy_cap_info[0] |=
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
++ he_cap->ppe_thres[0] |= 0x10;
++ }
++ //if (rwnx_hw->mod_params->use_80)
++ {
++ he_cap->he_cap_elem.phy_cap_info[0] |=
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
++ }
++ if (rwnx_hw->mod_params->ldpc_on) {
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
++ } else {
++ // If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory
++ // for MCS 10 and 11
++ rwnx_hw->mod_params->he_mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_HE_MCS_SUPPORT_0_9);
++ }
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US
++ | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
++
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS |
++ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
++ IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
++ #else
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
++ IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
++ #endif
++ if (rwnx_hw->mod_params->stbc_on)
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
++ he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
++ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 |
++ IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
++#else
++ he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
++ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 |
++ IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA;
++#endif
++ if (rwnx_hw->mod_params->bfmee) {
++ he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
++ he_cap->he_cap_elem.phy_cap_info[4] |=
++ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
++ }
++ he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
++ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
++ he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
++ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
++ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
++ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
++ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT |
++ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
++ #else
++ he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
++ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
++ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
++ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
++ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT |
++ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
++ #endif
++ he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++ he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
++ he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
++ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
++ #endif
++ mcs_map = rwnx_hw->mod_params->he_mcs_map;
++ //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9);
++ memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp));
++ for (i = 0; i < nss; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
++ mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
++ }
++ mcs_map = rwnx_hw->mod_params->he_mcs_map;
++ for (i = 0; i < nss; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
++ mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map,
++ IEEE80211_HE_MCS_SUPPORT_0_7);
++ }
++ for (; i < 8; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
++ }
++
++//#ifdef USE_5G
++ if(rwnx_hw->band_5g_support){
++ he_cap = (struct ieee80211_sta_he_cap *) &band_5GHz->iftype_data->he_cap;
++ he_cap->has_he = true;
++ he_cap->he_cap_elem.mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_ALL_ACK;
++ if (rwnx_hw->mod_params->use_2040) {
++ he_cap->he_cap_elem.phy_cap_info[0] |=
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
++ he_cap->ppe_thres[0] |= 0x10;
++ }
++ //if (rwnx_hw->mod_params->use_80)
++ {
++ he_cap->he_cap_elem.phy_cap_info[0] |=
++ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
++ }
++ if (rwnx_hw->mod_params->ldpc_on) {
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
++ } else {
++ // If no LDPC is supported, we have to limit to MCS0_9, as LDPC is mandatory
++ // for MCS 10 and 11
++ rwnx_hw->mod_params->he_mcs_map = min_t(int, rwnx_hw->mod_params->mcs_map,
++ IEEE80211_HE_MCS_SUPPORT_0_9);
++ }
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US |
++ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS |
++ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
++ IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
++ #else
++ he_cap->he_cap_elem.phy_cap_info[1] |= IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
++ IEEE80211_HE_PHY_CAP2_DOPPLER_RX;
++ #endif
++ if (rwnx_hw->mod_params->stbc_on)
++ he_cap->he_cap_elem.phy_cap_info[2] |= IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
++ he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
++ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 |
++ IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
++#else
++ he_cap->he_cap_elem.phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM |
++ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 |
++ IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA;
++#endif
++ if (rwnx_hw->mod_params->bfmee) {
++ he_cap->he_cap_elem.phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE;
++ he_cap->he_cap_elem.phy_cap_info[4] |=
++ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
++ }
++ he_cap->he_cap_elem.phy_cap_info[5] |= IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
++ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
++ he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
++ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
++ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
++ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
++ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT |
++ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
++#else
++ he_cap->he_cap_elem.phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
++ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
++ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
++ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
++ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT |
++ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
++#endif
++ he_cap->he_cap_elem.phy_cap_info[7] |= IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
++ he_cap->he_cap_elem.phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
++ he_cap->he_cap_elem.phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
++ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
++ #endif
++ mcs_map = rwnx_hw->mod_params->he_mcs_map;
++ //mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map, IEEE80211_HE_MCS_SUPPORT_0_9);
++ memset(&he_cap->he_mcs_nss_supp, 0, sizeof(he_cap->he_mcs_nss_supp));
++ for (i = 0; i < nss; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_80 |= cpu_to_le16(mcs_map << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
++ mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7;
++ }
++ for (; i < 8; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.rx_mcs_80 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.rx_mcs_80p80 |= unsup_for_ss;
++ }
++ mcs_map = rwnx_hw->mod_params->he_mcs_map;
++ for (i = 0; i < nss; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_80 |= cpu_to_le16(mcs_map << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
++ mcs_map = min_t(int, rwnx_hw->mod_params->he_mcs_map,
++ IEEE80211_HE_MCS_SUPPORT_0_7);
++ }
++ for (; i < 8; i++) {
++ __le16 unsup_for_ss = cpu_to_le16(IEEE80211_HE_MCS_NOT_SUPPORTED << (i*2));
++ he_cap->he_mcs_nss_supp.tx_mcs_80 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_160 |= unsup_for_ss;
++ he_cap->he_mcs_nss_supp.tx_mcs_80p80 |= unsup_for_ss;
++ }
++ }
++//#endif
++#endif
++}
++
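++/* Set wiphy-level flags derived from the module parameters: scan limits,
++ * TDLS, AP UAPSD, power save and (on older kernels) the custom regulatory
++ * domain and extra channels. */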
++static void rwnx_set_wiphy_params(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
++ struct ieee80211_regdomain *regdomain;
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++ /* FULLMAC specific parameters */
++ wiphy->flags |= WIPHY_FLAG_REPORTS_OBSS;
++ wiphy->max_scan_ssids = SCAN_SSID_MAX;
++ wiphy->max_scan_ie_len = SCANU_MAX_IE_LEN;
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ if (rwnx_hw->mod_params->tdls) {
++ /* TDLS support */
++ wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
++#ifdef CONFIG_RWNX_FULLMAC
++ /* TDLS external setup support */
++ wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
++#endif
++ }
++
++ if (rwnx_hw->mod_params->ap_uapsd_on)
++ wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (rwnx_hw->mod_params->ps_on)
++ wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
++ else
++ wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
++#endif
++
++ if (rwnx_hw->mod_params->custregd) {
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ // Apply the custom regulatory domain. Note that recent kernel versions use
++ // the REGULATORY_WIPHY_SELF_MANAGED flag instead, along with
++ // regulatory_set_wiphy_regd(), which must be called after wiphy registration
++ memcpy(country_code, default_ccode, sizeof(default_ccode));
++ regdomain = getRegdomainFromRwnxDB(wiphy, default_ccode);
++ printk(KERN_CRIT
++ "\n\n%s: CAUTION: USING PERMISSIVE CUSTOM REGULATORY RULES\n\n",
++ __func__);
++ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
++ wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF;
++ wiphy_apply_custom_regulatory(wiphy, regdomain);
++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
++ memcpy(country_code, default_ccode, sizeof(default_ccode));
++ regdomain = getRegdomainFromRwnxDB(wiphy, default_ccode);
++ printk(KERN_CRIT"%s: Registering custom regulatory\n", __func__);
++ wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
++ wiphy_apply_custom_regulatory(wiphy, regdomain);
++#endif
++ // Check whether the custom channel set shall be enabled. In that case only
++ // monitor mode is supported
++ if (rwnx_hw->mod_params->custchan) {
++ wiphy->interface_modes = BIT(NL80211_IFTYPE_MONITOR);
++
++ // Enable "extra" channels
++ wiphy->bands[NL80211_BAND_2GHZ]->n_channels += 13;
++ //#ifdef USE_5G
++ if(rwnx_hw->band_5g_support){
++ wiphy->bands[NL80211_BAND_5GHZ]->n_channels += 59;
++ }
++ //#endif
++ }
++ }
++}
++
++#if 0
++static void rwnx_set_rf_params(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++#ifndef CONFIG_RWNX_SDM
++ #ifdef USE_5G
++ struct ieee80211_supported_band *band_5GHz = wiphy->bands[NL80211_BAND_5GHZ];
++ #endif
++ struct ieee80211_supported_band *band_2GHz = wiphy->bands[NL80211_BAND_2GHZ];
++ u32 mdm_phy_cfg = __MDM_PHYCFG_FROM_VERS(rwnx_hw->version_cfm.version_phy_1);
++
++ /*
++ * Get configuration file depending on the RF
++ */
++ if (mdm_phy_cfg == MDM_PHY_CONFIG_TRIDENT) {
++ struct rwnx_phy_conf_file phy_conf;
++ // Retrieve the Trident configuration
++ rwnx_parse_phy_configfile(rwnx_hw, RWNX_PHY_CONFIG_TRD_NAME,
++ &phy_conf, rwnx_hw->mod_params->phy_cfg);
++ memcpy(&rwnx_hw->phy.cfg, &phy_conf.trd, sizeof(phy_conf.trd));
++ } else if (mdm_phy_cfg == MDM_PHY_CONFIG_ELMA) {
++ } else if (mdm_phy_cfg == MDM_PHY_CONFIG_KARST) {
++ struct rwnx_phy_conf_file phy_conf;
++ // We use the NSS parameter as is
++ // Retrieve the Karst configuration
++ rwnx_parse_phy_configfile(rwnx_hw, RWNX_PHY_CONFIG_KARST_NAME,
++ &phy_conf, rwnx_hw->mod_params->phy_cfg);
++
++ memcpy(&rwnx_hw->phy.cfg, &phy_conf.karst, sizeof(phy_conf.karst));
++ } else {
++ WARN_ON(1);
++ }
++
++ /*
++ * adjust caps depending on the RF
++ */
++ switch (mdm_phy_cfg) {
++ case MDM_PHY_CONFIG_TRIDENT:
++ {
++ wiphy_dbg(wiphy, "found Trident phy .. limit BW to 40MHz\n");
++ rwnx_hw->phy.limit_bw = true;
++ #ifdef USE_5G
++#ifdef CONFIG_VENDOR_RWNX_VHT_NO80
++ band_5GHz->vht_cap.cap |= IEEE80211_VHT_CAP_NOT_SUP_WIDTH_80;
++#endif
++ band_5GHz->vht_cap.cap &= ~(IEEE80211_VHT_CAP_SHORT_GI_80 |
++ IEEE80211_VHT_CAP_RXSTBC_MASK);
++ #endif
++ break;
++ }
++ case MDM_PHY_CONFIG_ELMA:
++ wiphy_dbg(wiphy, "found ELMA phy .. disabling 2.4GHz and greenfield rx\n");
++ wiphy->bands[NL80211_BAND_2GHZ] = NULL;
++ band_2GHz->ht_cap.cap &= ~IEEE80211_HT_CAP_GRN_FLD;
++ #ifdef USE_5G
++ band_5GHz->ht_cap.cap &= ~IEEE80211_HT_CAP_GRN_FLD;
++ band_5GHz->vht_cap.cap &= ~IEEE80211_VHT_CAP_RXSTBC_MASK;
++ #endif
++ break;
++ case MDM_PHY_CONFIG_KARST:
++ {
++ wiphy_dbg(wiphy, "found KARST phy\n");
++ break;
++ }
++ default:
++ WARN_ON(1);
++ break;
++ }
++#endif /* CONFIG_RWNX_SDM */
++}
++#endif
++
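++/* Sanitize the module parameters against the detected chip and then apply the
++ * resulting wiphy, VHT, HE and HT capabilities. */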
++int rwnx_handle_dynparams(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++#if 0
++ /* Check compatibility between requested parameters and HW/SW features */
++ int ret;
++
++ ret = rwnx_check_fw_hw_feature(rwnx_hw, wiphy);
++ if (ret)
++ return ret;
++
++ /* Allocate the RX buffers according to the maximum AMSDU RX size */
++ ret = rwnx_ipc_rxbuf_init(rwnx_hw,
++ (4 * (rwnx_hw->mod_params->amsdu_rx_max + 1) + 1) * 1024);
++ if (ret) {
++ wiphy_err(wiphy, "Cannot allocate the RX buffers\n");
++ return ret;
++ }
++#endif
++
++ /* Cap he_mcs_map at MCS0_9 on chips other than the AIC8800D81 */
++ if (rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81 &&
++ rwnx_hw->mod_params->he_mcs_map > IEEE80211_HE_MCS_SUPPORT_0_9) {
++ rwnx_hw->mod_params->he_mcs_map = IEEE80211_HE_MCS_SUPPORT_0_9;
++ }
++
++ /* Disable 80MHz operation on chips other than the AIC8800D81 */
++ if (rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81 &&
++ rwnx_hw->mod_params->use_80) {
++ rwnx_hw->mod_params->use_80 = false;
++ }
++
++ /* Disable short GI at 80MHz on chips other than the AIC8800D81 */
++ if (rwnx_hw->usbdev->chipid != PRODUCT_ID_AIC8800D81 &&
++ rwnx_hw->mod_params->sgi80) {
++ rwnx_hw->mod_params->sgi80 = false;
++ }
++#ifdef CONFIG_5M10M
++ /* Narrowband (5/10MHz) operation: restrict both MCS maps to MCS0_7 */
++ rwnx_hw->mod_params->mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_7;
++ rwnx_hw->mod_params->he_mcs_map = IEEE80211_HE_MCS_SUPPORT_0_7;
++ rwnx_hw->mod_params->use_2040 = false;
++ rwnx_hw->mod_params->use_80 = false;
++ rwnx_hw->mod_params->sgi80 = false;
++#endif
++
++ /* Set wiphy parameters */
++ rwnx_set_wiphy_params(rwnx_hw, wiphy);
++ /* Set VHT capabilities */
++ rwnx_set_vht_capa(rwnx_hw, wiphy);
++ /* Set HE capabilities */
++ rwnx_set_he_capa(rwnx_hw, wiphy);
++ /* Set HT capabilities */
++ rwnx_set_ht_capa(rwnx_hw, wiphy);
++ /* Set RF specific parameters (shall be done last as it might change some
++ capabilities previously set) */
++#if 0
++ rwnx_set_rf_params(rwnx_hw, wiphy);
++#endif
++ return 0;
++}
++
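++/* On kernel >= 4.0 the custom regulatory domain is applied here, after wiphy
++ * registration, using the REGULATORY_WIPHY_SELF_MANAGED mechanism. */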
++void rwnx_custregd(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy)
++{
++// For older kernel versions, the custom regulatory domain is applied before
++// wiphy registration (in rwnx_set_wiphy_params()), so nothing needs to be done here
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
++ wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
++
++ if (!rwnx_hw->mod_params->custregd)
++ return;
++
++ rtnl_lock();
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)
++ if (regulatory_set_wiphy_regd_sync(wiphy, getRegdomainFromRwnxDB(wiphy, default_ccode))){
++ wiphy_err(wiphy, "Failed to set custom regdomain\n");
++ }
++#else
++ if (regulatory_set_wiphy_regd_sync_rtnl(wiphy, getRegdomainFromRwnxDB(wiphy, default_ccode))){
++ wiphy_err(wiphy, "Failed to set custom regdomain\n");
++ }
++#endif
++ else{
++ wiphy_err(wiphy,"\n"
++ "*******************************************************\n"
++ "** CAUTION: USING PERMISSIVE CUSTOM REGULATORY RULES **\n"
++ "*******************************************************\n");
++ }
++ rtnl_unlock();
++#endif
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mod_params.h
+@@ -0,0 +1,70 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_mod_params.h
++ *
++ * @brief Declaration of module parameters
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_MOD_PARAM_H_
++#define _RWNX_MOD_PARAM_H_
++
++struct rwnx_mod_params {
++ bool ht_on;
++ bool vht_on;
++ bool he_on;
++ int mcs_map;
++ int he_mcs_map;
++ bool he_ul_on;
++ bool ldpc_on;
++ bool stbc_on;
++ bool gf_rx_on;
++ int phy_cfg;
++ int uapsd_timeout;
++ bool ap_uapsd_on;
++ bool sgi;
++ bool sgi80;
++ bool use_2040;
++ bool use_80;
++ bool custregd;
++ bool custchan;
++ int nss;
++ int amsdu_rx_max;
++ bool bfmee;
++ bool bfmer;
++ bool mesh;
++ bool murx;
++ bool mutx;
++ bool mutx_on;
++ unsigned int roc_dur_max;
++ int listen_itv;
++ bool listen_bcmc;
++ int lp_clk_ppm;
++ bool ps_on;
++ int tx_lft;
++ int amsdu_maxnb;
++ int uapsd_queues;
++ bool tdls;
++ bool uf;
++ bool auto_reply;
++ char *ftl;
++ bool dpsm;
++#ifdef CONFIG_RWNX_FULLMAC
++ bool ant_div;
++#endif /* CONFIG_RWNX_FULLMAC */
++};
++
++extern struct rwnx_mod_params rwnx_mod_params;
++
++struct rwnx_hw;
++struct wiphy;
++int rwnx_handle_dynparams(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy);
++void rwnx_custregd(struct rwnx_hw *rwnx_hw, struct wiphy *wiphy);
++void rwnx_enable_wapi(struct rwnx_hw *rwnx_hw);
++void rwnx_enable_mfp(struct rwnx_hw *rwnx_hw);
++
++#endif /* _RWNX_MOD_PARAM_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_rx.c
+@@ -0,0 +1,1566 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_msg_rx.c
++ *
++ * @brief RX function definitions
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#include <linux/vmalloc.h>
++#include "rwnx_defs.h"
++#include "rwnx_prof.h"
++#include "rwnx_tx.h"
++#ifdef CONFIG_RWNX_BFMER
++#include "rwnx_bfmer.h"
++#endif //(CONFIG_RWNX_BFMER)
++#ifdef CONFIG_RWNX_FULLMAC
++#include "rwnx_debugfs.h"
++#include "rwnx_msg_tx.h"
++#include "rwnx_tdls.h"
++#endif /* CONFIG_RWNX_FULLMAC */
++#include "rwnx_events.h"
++#include "rwnx_compat.h"
++#include "aicwf_txrxif.h"
++#ifdef CONFIG_USE_WIRELESS_EXT
++#include "aicwf_wext_linux.h"
++#endif
++
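++/* Convert a channel center frequency (MHz) into the global channel index used
++ * to address the survey table, walking the registered bands in order. */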
++static int rwnx_freq_to_idx(struct rwnx_hw *rwnx_hw, int freq)
++{
++ struct ieee80211_supported_band *sband;
++ int band, ch, idx = 0;
++
++ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
++#ifdef CONFIG_RWNX_FULLMAC
++ sband = rwnx_hw->wiphy->bands[band];
++#endif /* CONFIG_RWNX_FULLMAC */
++ if (!sband) {
++ continue;
++ }
++
++ for (ch = 0; ch < sband->n_channels; ch++, idx++) {
++ if (sband->channels[ch].center_freq == freq) {
++ goto exit;
++ }
++ }
++ }
++
++ BUG_ON(1);
++
++exit:
++ // Channel has been found, return the index
++ return idx;
++}
++
++/***************************************************************************
++ * Messages from MM task
++ **************************************************************************/
++static inline int rwnx_rx_chan_pre_switch_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct rwnx_vif *rwnx_vif;
++ int chan_idx = ((struct mm_channel_pre_switch_ind *)msg->param)->chan_index;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ REG_SW_SET_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_PSWTCH_BIT);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (rwnx_vif->up && rwnx_vif->ch_index == chan_idx) {
++ AICWFDBG(LOGDEBUG, "rwnx_txq_vif_stop\r\n");
++ rwnx_txq_vif_stop(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ }
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ REG_SW_CLEAR_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_PSWTCH_BIT);
++
++ return 0;
++}
++
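++/* Channel switch indication: restart the TX queues stopped for the switch.
++ * Three cases are handled: TDLS off-channel, a regular channel-context switch
++ * and a remain-on-channel request. */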
++static inline int rwnx_rx_chan_switch_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct rwnx_vif *rwnx_vif;
++ int chan_idx = ((struct mm_channel_switch_ind *)msg->param)->chan_index;
++ bool roc = ((struct mm_channel_switch_ind *)msg->param)->roc;
++ bool roc_tdls = ((struct mm_channel_switch_ind *)msg->param)->roc_tdls;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ REG_SW_SET_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_SWTCH_BIT);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (roc_tdls) {
++ u8 vif_index = ((struct mm_channel_switch_ind *)msg->param)->vif_index;
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (rwnx_vif->vif_index == vif_index) {
++ rwnx_vif->roc_tdls = true;
++ rwnx_txq_tdls_sta_start(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ }
++ }
++ } else if (!roc) {
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (rwnx_vif->up && rwnx_vif->ch_index == chan_idx) {
++ rwnx_txq_vif_start(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ }
++ }
++ } else {
++ /* Retrieve the allocated RoC element */
++ struct rwnx_roc_elem *roc_elem = rwnx_hw->roc_elem;
++
++ /* If mgmt_roc is true, the remain-on-channel was started by the driver itself */
++ if (!roc_elem->mgmt_roc) {
++ /* Inform the host that we have switched to the indicated off-channel */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ cfg80211_ready_on_channel(roc_elem->wdev->netdev, (u64)(rwnx_hw->roc_cookie_cnt),
++ roc_elem->chan, NL80211_CHAN_HT20, roc_elem->duration, GFP_ATOMIC);
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++ cfg80211_ready_on_channel(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt),
++ roc_elem->chan, NL80211_CHAN_HT20, roc_elem->duration, GFP_ATOMIC);
++#else
++ cfg80211_ready_on_channel(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt),
++ roc_elem->chan, roc_elem->duration, GFP_ATOMIC);
++#endif
++ }
++
++ /* Remember that we have switched to the channel */
++ roc_elem->on_chan = true;
++
++ // Enable traffic on OFF channel queue
++ rwnx_txq_offchan_start(rwnx_hw);
++ }
++
++ tasklet_schedule(&rwnx_hw->task);
++
++ rwnx_hw->cur_chanctx = chan_idx;
++ rwnx_radar_detection_enable_on_cur_channel(rwnx_hw);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ REG_SW_CLEAR_PROFILING_CHAN(rwnx_hw, SW_PROF_CHAN_CTXT_SWTCH_BIT);
++
++ return 0;
++}
++
++static inline int rwnx_rx_tdls_chan_switch_cfm(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ return 0;
++}
++
++static inline int rwnx_rx_tdls_chan_switch_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++#ifdef CONFIG_RWNX_FULLMAC
++ // Enable traffic on OFF channel queue
++ rwnx_txq_offchan_start(rwnx_hw);
++#endif
++
++ return 0;
++}
++
++static inline int rwnx_rx_tdls_chan_switch_base_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct rwnx_vif *rwnx_vif;
++ u8 vif_index = ((struct tdls_chan_switch_base_ind *)msg->param)->vif_index;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (rwnx_vif->vif_index == vif_index) {
++ rwnx_vif->roc_tdls = false;
++ rwnx_txq_tdls_sta_stop(rwnx_vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static inline int rwnx_rx_tdls_peer_ps_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct rwnx_vif *rwnx_vif;
++ u8 vif_index = ((struct tdls_peer_ps_ind *)msg->param)->vif_index;
++ bool ps_on = ((struct tdls_peer_ps_ind *)msg->param)->ps_on;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (rwnx_vif->vif_index == vif_index) {
++ rwnx_vif->sta.tdls_sta->tdls.ps_on = ps_on;
++ // Update PS status for the TDLS station
++ rwnx_ps_bh_enable(rwnx_hw, rwnx_vif->sta.tdls_sta, ps_on);
++ }
++ }
++#endif
++
++ return 0;
++}
++
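++/* Remain-on-channel period expired: notify cfg80211 (unless the RoC was
++ * started internally or cancelled before the switch) and free the RoC element. */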
++static inline int rwnx_rx_remain_on_channel_exp_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++#ifdef CONFIG_RWNX_FULLMAC
++ /* Retrieve the allocated RoC element */
++ struct rwnx_roc_elem *roc_elem = rwnx_hw->roc_elem;
++ /* Get VIF on which RoC has been started */
++ struct rwnx_vif *rwnx_vif = NULL;// = container_of(roc_elem->wdev, struct rwnx_vif, wdev);
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ if(roc_elem == NULL){
++ AICWFDBG(LOGERROR,"%s roc_elem == NULL \r\n", __func__);
++ return 0;
++ }
++
++ rwnx_vif = container_of(roc_elem->wdev, struct rwnx_vif, wdev);
++
++
++#ifdef CREATE_TRACE_POINTS
++ /* For debug purpose (use ftrace kernel option) */
++ trace_roc_exp(rwnx_vif->vif_index);
++#endif
++ /* If mgmt_roc is true, the remain-on-channel was started by the driver itself.
++ * If the RoC was cancelled before we switched to the channel, do not call cfg80211 */
++ if (!roc_elem->mgmt_roc && roc_elem->on_chan) {
++ /* Inform the host that off-channel period has expired */
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ cfg80211_remain_on_channel_expired(roc_elem->wdev->netdev, (u64)(rwnx_hw->roc_cookie_cnt),
++ roc_elem->chan, NL80211_CHAN_HT20, GFP_ATOMIC);
++#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++ cfg80211_remain_on_channel_expired(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt),
++ roc_elem->chan, NL80211_CHAN_HT20, GFP_ATOMIC);
++#else
++ cfg80211_remain_on_channel_expired(roc_elem->wdev, (u64)(rwnx_hw->roc_cookie_cnt),
++ roc_elem->chan, GFP_ATOMIC);
++#endif
++ }
++
++ /* De-init offchannel TX queue */
++ rwnx_txq_offchan_deinit(rwnx_vif);
++
++ /* Increment the cookie counter; it must never be zero */
++ rwnx_hw->roc_cookie_cnt++;
++
++ if (rwnx_hw->roc_cookie_cnt == 0) {
++ rwnx_hw->roc_cookie_cnt = 1;
++ }
++
++ /* Free the allocated RoC element */
++ kfree(roc_elem);
++ rwnx_hw->roc_elem = NULL;
++
++ AICWFDBG(LOGTRACE, "%s exit\r\n", __func__);
++#endif /* CONFIG_RWNX_FULLMAC */
++ return 0;
++}
++
++static inline int rwnx_rx_p2p_vif_ps_change_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ int vif_idx = ((struct mm_p2p_vif_ps_change_ind *)msg->param)->vif_index;
++ int ps_state = ((struct mm_p2p_vif_ps_change_ind *)msg->param)->ps_state;
++ struct rwnx_vif *vif_entry;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ vif_entry = rwnx_hw->vif_table[vif_idx];
++
++ if (vif_entry) {
++ goto found_vif;
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ goto exit;
++
++found_vif:
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (ps_state == MM_PS_MODE_OFF) {
++ // Start TX queues for provided VIF
++ rwnx_txq_vif_start(vif_entry, RWNX_TXQ_STOP_VIF_PS, rwnx_hw);
++ }
++ else {
++ // Stop TX queues for provided VIF
++ rwnx_txq_vif_stop(vif_entry, RWNX_TXQ_STOP_VIF_PS, rwnx_hw);
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++exit:
++ return 0;
++}
++
++static inline int rwnx_rx_channel_survey_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_channel_survey_ind *ind = (struct mm_channel_survey_ind *)msg->param;
++ // Get the channel index
++ int idx = rwnx_freq_to_idx(rwnx_hw, ind->freq);
++ // Get the survey
++ struct rwnx_survey_info *rwnx_survey;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (idx >= ARRAY_SIZE(rwnx_hw->survey))
++ return 0;
++
++ rwnx_survey = &rwnx_hw->survey[idx];
++
++ // Store the received parameters
++ rwnx_survey->chan_time_ms = ind->chan_time_ms;
++ rwnx_survey->chan_time_busy_ms = ind->chan_time_busy_ms;
++ rwnx_survey->noise_dbm = ind->noise_dbm;
++ rwnx_survey->filled = (SURVEY_INFO_TIME |
++ SURVEY_INFO_TIME_BUSY);
++
++ if (ind->noise_dbm != 0) {
++ rwnx_survey->filled |= SURVEY_INFO_NOISE_DBM;
++ }
++
++ return 0;
++}
++
++static inline int rwnx_rx_p2p_noa_upd_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ return 0;
++}
++
++static inline int rwnx_rx_rssi_status_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_rssi_status_ind *ind = (struct mm_rssi_status_ind *)msg->param;
++ int vif_idx = ind->vif_index;
++ bool rssi_status = ind->rssi_status;
++
++ struct rwnx_vif *vif_entry;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ vif_entry = rwnx_hw->vif_table[vif_idx];
++ if (vif_entry) {
++ cfg80211_cqm_rssi_notify(vif_entry->ndev,
++ rssi_status ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
++ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
++ ind->rssi, GFP_ATOMIC);
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ return 0;
++}
++
++static inline int rwnx_rx_pktloss_notify_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++#ifdef CONFIG_RWNX_FULLMAC
++ struct mm_pktloss_ind *ind = (struct mm_pktloss_ind *)msg->param;
++ struct rwnx_vif *vif_entry;
++ int vif_idx = ind->vif_index;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ vif_entry = rwnx_hw->vif_table[vif_idx];
++ if (vif_entry) {
++ cfg80211_cqm_pktloss_notify(vif_entry->ndev, (const u8 *)ind->mac_addr.array,
++ ind->num_packets, GFP_ATOMIC);
++ }
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ return 0;
++}
++
++static inline int rwnx_apm_staloss_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_apm_staloss_ind *ind = (struct mm_apm_staloss_ind *)msg->param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ memcpy(rwnx_hw->sta_mac_addr, ind->mac_addr, 6);
++ rwnx_hw->apm_vif_idx = ind->vif_idx;
++
++ queue_work(rwnx_hw->apmStaloss_wq, &rwnx_hw->apmStalossWork);
++
++ return 0;
++}
++
++
++static inline int rwnx_rx_csa_counter_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_csa_counter_ind *ind = (struct mm_csa_counter_ind *)msg->param;
++ struct rwnx_vif *vif;
++ bool found = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ // Look for VIF entry
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++ if (vif->vif_index == ind->vif_index) {
++ found = true;
++ break;
++ }
++ }
++
++ if (found) {
++#ifdef CONFIG_RWNX_FULLMAC
++ if (vif->ap.csa)
++ vif->ap.csa->count = ind->csa_count;
++ else
++ netdev_err(vif->ndev, "CSA counter update but no active CSA");
++
++#endif
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
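++/* CSA finished: for AP/GO interfaces schedule the CSA work item; for client
++ * interfaces re-link the VIF to the new channel context on success. */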
++static inline int rwnx_rx_csa_finish_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_csa_finish_ind *ind = (struct mm_csa_finish_ind *)msg->param;
++ struct rwnx_vif *vif;
++ bool found = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ // Look for VIF entry
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++ if (vif->vif_index == ind->vif_index) {
++ found = true;
++ break;
++ }
++ }
++
++ if (found) {
++ if (RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_AP ||
++ RWNX_VIF_TYPE(vif) == NL80211_IFTYPE_P2P_GO) {
++ if (vif->ap.csa) {
++ vif->ap.csa->status = ind->status;
++ vif->ap.csa->ch_idx = ind->chan_idx;
++ schedule_work(&vif->ap.csa->work);
++ } else
++ netdev_err(vif->ndev, "CSA finish indication but no active CSA");
++ } else {
++ if (ind->status == 0) {
++ rwnx_chanctx_unlink(vif);
++ rwnx_chanctx_link(vif, ind->chan_idx, NULL);
++ if (rwnx_hw->cur_chanctx == ind->chan_idx) {
++ rwnx_radar_detection_enable_on_cur_channel(rwnx_hw);
++ rwnx_txq_vif_start(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ } else
++ rwnx_txq_vif_stop(vif, RWNX_TXQ_STOP_CHAN, rwnx_hw);
++ }
++ }
++ }
++
++ return 0;
++}
++
++static inline int rwnx_rx_csa_traffic_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_csa_traffic_ind *ind = (struct mm_csa_traffic_ind *)msg->param;
++ struct rwnx_vif *vif;
++ bool found = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ // Look for VIF entry
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++ if (vif->vif_index == ind->vif_index) {
++ found = true;
++ break;
++ }
++ }
++
++ if (found) {
++ if (ind->enable)
++ rwnx_txq_vif_start(vif, RWNX_TXQ_STOP_CSA, rwnx_hw);
++ else
++ rwnx_txq_vif_stop(vif, RWNX_TXQ_STOP_CSA, rwnx_hw);
++ }
++
++ return 0;
++}
++
++static inline int rwnx_rx_ps_change_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_ps_change_ind *ind = (struct mm_ps_change_ind *)msg->param;
++ struct rwnx_sta *sta;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Validate the firmware-provided station index before touching the tables */
++ if (ind->sta_idx >= (NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX)) {
++ wiphy_err(rwnx_hw->wiphy, "Invalid sta index reported by fw %d\n",
++ ind->sta_idx);
++ return 1;
++ }
++ sta = &rwnx_hw->sta_table[ind->sta_idx];
++
++ /* 2022-01-15: rwnx_hw->vif_table[sta->vif_idx] can be NULL during rwnx_close */
++ if (!rwnx_hw->vif_table[sta->vif_idx]) {
++ wiphy_err(rwnx_hw->wiphy, "rwnx_hw->vif_table[sta->vif_idx] is null\n");
++ return 0;
++ }
++
++ netdev_dbg(rwnx_hw->vif_table[sta->vif_idx]->ndev,
++ "Sta %d, change PS mode to %s", sta->sta_idx,
++ ind->ps_state ? "ON" : "OFF");
++
++ if (sta->valid) {
++ rwnx_ps_bh_enable(rwnx_hw, sta, ind->ps_state);
++ } else if (rwnx_hw->adding_sta) {
++ sta->ps.active = ind->ps_state ? true : false;
++ } else {
++ netdev_err(rwnx_hw->vif_table[sta->vif_idx]->ndev,
++ "Ignore PS mode change on invalid sta\n");
++ }
++
++
++ return 0;
++}
++
++
++static inline int rwnx_rx_traffic_req_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mm_traffic_req_ind *ind = (struct mm_traffic_req_ind *)msg->param;
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[ind->sta_idx];
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ netdev_dbg(rwnx_hw->vif_table[sta->vif_idx]->ndev,
++ "Sta %d, asked for %d pkt", sta->sta_idx, ind->pkt_cnt);
++
++ rwnx_ps_bh_traffic_req(rwnx_hw, sta, ind->pkt_cnt,
++ ind->uapsd ? UAPSD_ID : LEGACY_PS_ID);
++
++ return 0;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/***************************************************************************
++ * Messages from SCAN task
++ **************************************************************************/
++#if 0
++static inline int rwnx_rx_scan_done_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ struct cfg80211_scan_info info = {
++ .aborted = false,
++ };
++#endif
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ rwnx_ipc_elem_var_deallocs(rwnx_hw, &rwnx_hw->scan_ie);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ ieee80211_scan_completed(rwnx_hw->hw, &info);
++#else
++ ieee80211_scan_completed(rwnx_hw->hw, false);
++#endif
++
++ return 0;
++}
++#endif
++
++/***************************************************************************
++ * Messages from SCANU task
++ **************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++extern uint8_t scanning;
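++/* Scan confirmation: report scan completion through cfg80211, the
++ * wireless-extensions path or the scheduled-scan path, depending on which one
++ * started the scan. */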
++static inline int rwnx_rx_scanu_start_cfm(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ struct cfg80211_scan_info info = {
++ .aborted = false,
++ };
++#endif
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (rwnx_hw->scan_request
++#ifdef CONFIG_SCHED_SCAN
++ && !rwnx_hw->is_sched_scan
++#endif//CONFIG_SCHED_SCAN
++#ifdef CONFIG_USE_WIRELESS_EXT
++ && !rwnx_hw->wext_scan) {
++#else
++ ){
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
++ cfg80211_scan_done(rwnx_hw->scan_request, &info);
++#else
++ cfg80211_scan_done(rwnx_hw->scan_request, false);
++#endif
++ }
++
++
++#ifdef CONFIG_USE_WIRELESS_EXT
++ else if(rwnx_hw->wext_scan){
++ rwnx_hw->wext_scan = 0;
++ AICWFDBG(LOGDEBUG, "%s rwnx_hw->wext_scan done!!\r\n", __func__);
++ if(rwnx_hw->scan_request){
++ vfree(rwnx_hw->scan_request);
++ }
++ complete(&rwnx_hw->wext_scan_com);
++ }
++#endif
++ else {
++ AICWFDBG(LOGERROR, "%s rwnx_hw->scan_request is NULL!!\r\n", __func__);
++ }
++
++#ifdef CONFIG_SCHED_SCAN
++ if(rwnx_hw->is_sched_scan){
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
++ AICWFDBG(LOGINFO, "%s cfg80211_sched_scan_results \r\n", __func__);
++ cfg80211_sched_scan_results(rwnx_hw->scan_request->wiphy,
++ rwnx_hw->sched_scan_req->reqid);
++#else
++ cfg80211_sched_scan_results(rwnx_hw->sched_scan_req->wiphy);
++#endif
++ kfree(rwnx_hw->scan_request);
++ rwnx_hw->is_sched_scan = false;
++ }
++#endif//CONFIG_SCHED_SCAN
++
++ rwnx_hw->scan_request = NULL;
++ scanning = 0;
++
++ return 0;
++}
++
++static inline int rwnx_rx_scanu_result_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct cfg80211_bss *bss = NULL;
++ struct ieee80211_channel *chan;
++ struct scanu_result_ind *ind = (struct scanu_result_ind *)msg->param;
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)ind->payload;
++
++#if 0
++ const u8 *ie = mgmt->u.beacon.variable;
++ char *ssid = NULL;
++ int ssid_len = 0;
++ int freq = 0;
++#endif
++
++#ifdef CONFIG_USE_WIRELESS_EXT
++ struct scanu_result_wext *scan_re_wext;
++#endif
++
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ chan = ieee80211_get_channel(rwnx_hw->wiphy, ind->center_freq);
++
++ if (chan != NULL) {
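++ /* Stamp the frame with the current host time before reporting the BSS to
++ * cfg80211; pre-3.17 kernels use the 32-bit timespec API here. */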
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++ //ktime_t ts;
++ struct timespec ts;
++ get_monotonic_boottime(&ts);
++ //ts = ktime_get_real();
++ mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000) + ts.tv_nsec/1000;
++ #else
++ struct timespec64 ts;
++ ktime_get_real_ts64(&ts);
++ mgmt->u.probe_resp.timestamp = ((u64)ts.tv_sec*1000000) + ts.tv_nsec/1000;
++ #endif
++ bss = cfg80211_inform_bss_frame(rwnx_hw->wiphy, chan,
++ (struct ieee80211_mgmt *)ind->payload,
++ ind->length, ind->rssi * 100, GFP_ATOMIC);
++#if 0
++ //print scan result info start
++ if(ie != NULL){
++ ssid_len = ie[1];
++ ssid = (char *)vmalloc(sizeof(char)* (ssid_len + 1));
++ if(ssid != NULL){
++ memset(ssid, 0, ssid_len + 1);
++ memcpy(ssid, &ie[2], ssid_len);
++ freq = ind->center_freq;
++ AICWFDBG(LOGDEBUG, "%s %02x:%02x:%02x:%02x:%02x:%02x ssid:%s freq:%d timestamp:%ld\r\n", __func__,
++ bss->bssid[0],bss->bssid[1],bss->bssid[2],
++ bss->bssid[3],bss->bssid[4],bss->bssid[5],
++ ssid, freq, (long)mgmt->u.probe_resp.timestamp);
++ vfree(ssid);
++ ssid = NULL;
++ }else{
++ AICWFDBG(LOGERROR, "%s ssid vmalloc fail skip printk ssid info \r\n", __func__);
++ }
++ }
++ //print scan result info end
++#endif
++
++#ifdef CONFIG_USE_WIRELESS_EXT
++ if(rwnx_hw->wext_scan){
++
++ scan_re_wext = (struct scanu_result_wext *)vmalloc(sizeof(struct scanu_result_wext));
++ scan_re_wext->ind = (struct scanu_result_ind *)vmalloc(sizeof(struct scanu_result_ind));
++ scan_re_wext->payload = (u32_l *)vmalloc(sizeof(u32_l) * ind->length);
++
++ memset(scan_re_wext->ind, 0, sizeof(struct scanu_result_ind));
++ memset(scan_re_wext->payload, 0, ind->length);
++
++ memcpy(scan_re_wext->ind, ind, sizeof(struct scanu_result_ind));
++ memcpy(scan_re_wext->payload, ind->payload, ind->length);
++
++ scan_re_wext->bss = bss;
++
++ INIT_LIST_HEAD(&scan_re_wext->scanu_re_list);
++ list_add_tail(&scan_re_wext->scanu_re_list, &rwnx_hw->wext_scanre_list);
++ return 0;
++ }
++#endif
++
++ }
++
++ if (bss != NULL)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
++ cfg80211_put_bss(bss);
++#else
++ cfg80211_put_bss(rwnx_hw->wiphy, bss);
++#endif
++
++ return 0;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/***************************************************************************
++ * Messages from ME task
++ **************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++static inline int rwnx_rx_me_tkip_mic_failure_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct me_tkip_mic_failure_ind *ind = (struct me_tkip_mic_failure_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++ struct net_device *dev = rwnx_vif->ndev;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ cfg80211_michael_mic_failure(dev, (u8 *)&ind->addr, (ind->ga?NL80211_KEYTYPE_GROUP:
++ NL80211_KEYTYPE_PAIRWISE), ind->keyid,
++ (u8 *)&ind->tsc, GFP_ATOMIC);
++
++ return 0;
++}
++
++static inline int rwnx_rx_me_tx_credits_update_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct me_tx_credits_update_ind *ind = (struct me_tx_credits_update_ind *)msg->param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ rwnx_txq_credit_update(rwnx_hw, ind->sta_idx, ind->tid, ind->credits);
++
++ return 0;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/***************************************************************************
++ * Messages from SM task
++ **************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++static inline void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
++ struct ieee80211_channel *chan,
++ enum nl80211_channel_type chan_type)
++{
++ if (WARN_ON(!chan))
++ return;
++ chandef->chan = chan;
++ chandef->center_freq2 = 0;
++ switch (chan_type) {
++ case NL80211_CHAN_NO_HT:
++ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
++ chandef->center_freq1 = chan->center_freq;
++ break;
++ case NL80211_CHAN_HT20:
++ chandef->width = NL80211_CHAN_WIDTH_20;
++ chandef->center_freq1 = chan->center_freq;
++ break;
++ case NL80211_CHAN_HT40PLUS:
++ chandef->width = NL80211_CHAN_WIDTH_40;
++ chandef->center_freq1 = chan->center_freq + 10;
++ break;
++ case NL80211_CHAN_HT40MINUS:
++ chandef->width = NL80211_CHAN_WIDTH_40;
++ chandef->center_freq1 = chan->center_freq - 10;
++ break;
++ default:
++ WARN_ON(1);
++ }
++}
++#endif
++static inline int rwnx_rx_sm_connect_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct sm_connect_ind *ind = (struct sm_connect_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++ struct net_device *dev = NULL;
++ const u8 *req_ie, *rsp_ie;
++ const u8 *extcap_ie;
++ const struct ieee_types_extcap *extcap;
++ struct ieee80211_channel *chan;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if(!rwnx_vif){
++ AICWFDBG(LOGERROR, "%s rwnx_vif is null \r\n", __func__);
++ return 0;
++ }
++ dev = rwnx_vif->ndev;
++
++
++ /* Retrieve IE addresses and lengths */
++ req_ie = (const u8 *)ind->assoc_ie_buf;
++ rsp_ie = req_ie + ind->assoc_req_ie_len;
++
++
++ if (rwnx_vif->sta.external_auth)
++ rwnx_vif->sta.external_auth = false;
++
++
++ // Fill-in the AP information
++ AICWFDBG(LOGINFO, "%s ind->status_code:%d \r\n", __func__, ind->status_code);
++ if (ind->status_code == 0)
++ {
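++ /* Association succeeded: record the AP as a peer station, link the VIF to
++ * the firmware channel context and initialise its TX queues. */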
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[ind->ap_idx];
++ u8 txq_status;
++ struct cfg80211_chan_def chandef;
++
++ sta->valid = true;
++ sta->sta_idx = ind->ap_idx;
++ sta->ch_idx = ind->ch_idx;
++ sta->vif_idx = ind->vif_idx;
++ sta->vlan_idx = sta->vif_idx;
++ sta->qos = ind->qos;
++ sta->acm = ind->acm;
++ sta->ps.active = false;
++ sta->aid = ind->aid;
++ sta->band = ind->band;
++ sta->width = ind->width;
++ sta->center_freq = ind->center_freq;
++ sta->center_freq1 = ind->center_freq1;
++ sta->center_freq2 = ind->center_freq2;
++ rwnx_vif->sta.ap = sta;
++
++ chan = ieee80211_get_channel(rwnx_hw->wiphy, ind->center_freq);
++ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
++ if (!rwnx_hw->mod_params->ht_on)
++ chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
++ else
++ chandef.width = chnl2bw[ind->width];
++ chandef.center_freq1 = ind->center_freq1;
++ chandef.center_freq2 = ind->center_freq2;
++ rwnx_chanctx_link(rwnx_vif, ind->ch_idx, &chandef);
++ memcpy(sta->mac_addr, ind->bssid.array, ETH_ALEN);
++ if (ind->ch_idx == rwnx_hw->cur_chanctx) {
++ txq_status = 0;
++ } else {
++ txq_status = RWNX_TXQ_STOP_CHAN;
++ }
++ memcpy(sta->ac_param, ind->ac_param, sizeof(sta->ac_param));
++ rwnx_txq_sta_init(rwnx_hw, sta, txq_status);
++ rwnx_txq_tdls_vif_init(rwnx_vif);
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_register_rc_stat(rwnx_hw, sta);
++#endif
++ rwnx_mu_group_sta_init(sta, NULL);
++ /* Look for TDLS Channel Switch Prohibited flag in the Extended Capability
++ * Information Element*/
++ extcap_ie = cfg80211_find_ie(WLAN_EID_EXT_CAPABILITY, rsp_ie, ind->assoc_rsp_ie_len);
++ if (extcap_ie && extcap_ie[1] >= 5) {
++ extcap = (void *)(extcap_ie);
++ rwnx_vif->tdls_chsw_prohibited = extcap->ext_capab[4] & WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED;
++ }
++
++ if(atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt) > 0) {
++ AICWFDBG(LOGDEBUG, "sta idx %d fc error %d\n", sta->sta_idx, atomic_read(&rwnx_hw->sta_flowctrl[sta->sta_idx].tx_pending_cnt));
++ }
++
++ if (rwnx_vif->wep_enabled)
++ rwnx_vif->wep_auth_err = false;
++
++#ifdef CONFIG_RWNX_BFMER
++ /* If Beamformer feature is activated, check if features can be used
++ * with the new peer device
++ */
++ if (rwnx_hw->mod_params->bfmer) {
++ const u8 *vht_capa_ie;
++ const struct ieee80211_vht_cap *vht_cap;
++
++ do {
++ /* Look for VHT Capability Information Element */
++ vht_capa_ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, rsp_ie,
++ ind->assoc_rsp_ie_len);
++
++ /* Stop here if peer device does not support VHT */
++ if (!vht_capa_ie) {
++ break;
++ }
++
++ vht_cap = (const struct ieee80211_vht_cap *)(vht_capa_ie + 2);
++
++ /* Send MM_BFMER_ENABLE_REQ message if needed */
++ rwnx_send_bfmer_enable(rwnx_hw, sta, vht_cap);
++ } while (0);
++ }
++#endif //(CONFIG_RWNX_BFMER)
++
++#ifdef CONFIG_RWNX_MON_DATA
++ // If a STA interface and a monitor interface are active at the same time,
++ // the monitor interface channel context is always the same as the STA interface's.
++ // This doesn't work with 2 STA interfaces, but we don't want to support that.
++ if (rwnx_hw->monitor_vif != RWNX_INVALID_VIF) {
++ struct rwnx_vif *rwnx_mon_vif = rwnx_hw->vif_table[rwnx_hw->monitor_vif];
++ rwnx_chanctx_unlink(rwnx_mon_vif);
++ rwnx_chanctx_link(rwnx_mon_vif, ind->ch_idx, NULL);
++ }
++#endif
++ atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_CONNECTED);
++
++ } else if (ind->status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
++ if (rwnx_vif->wep_enabled) {
++ rwnx_vif->wep_auth_err = true;
++ printk("con ind wep_auth_err %d\n", rwnx_vif->wep_auth_err);
++ }
++ atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED);
++ }else{
++ atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED);
++ }
++
++ if (!ind->roamed) {
++ cfg80211_connect_result(dev, (const u8 *)ind->bssid.array, req_ie,
++ ind->assoc_req_ie_len, rsp_ie,
++ ind->assoc_rsp_ie_len, ind->status_code,
++ GFP_ATOMIC);
++ }
++ else {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
++ struct cfg80211_roam_info info;
++ memset(&info, 0, sizeof(info));
++ if (rwnx_vif->ch_index < NX_CHAN_CTXT_CNT)
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ info.channel = rwnx_hw->chanctx_table[rwnx_vif->ch_index].chan_def.chan;
++#else
++ info.links[0].channel = rwnx_hw->chanctx_table[rwnx_vif->ch_index].chan_def.chan;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ info.bssid = (const u8 *)ind->bssid.array;
++#else
++ info.links[0].bssid = (const u8 *)ind->bssid.array;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ info.req_ie = req_ie;
++ info.req_ie_len = ind->assoc_req_ie_len;
++ info.resp_ie = rsp_ie;
++ info.resp_ie_len = ind->assoc_rsp_ie_len;
++ cfg80211_roamed(dev, &info, GFP_ATOMIC);
++#else
++ chan = ieee80211_get_channel(rwnx_hw->wiphy, ind->center_freq);
++ cfg80211_roamed(dev
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39) || defined(COMPAT_KERNEL_RELEASE)
++ , chan
++#endif
++ , (const u8 *)ind->bssid.array
++ , req_ie
++ , ind->assoc_req_ie_len
++ , rsp_ie
++ , ind->assoc_rsp_ie_len
++ , GFP_ATOMIC);
++#endif /*LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)*/
++ }
++ netif_tx_start_all_queues(dev);
++ netif_carrier_on(dev);
++
++ return 0;
++}
++
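++/*
++ * Drop the cached cfg80211 BSS entry for the current association and clear
++ * the SSID/BSSID remembered in the vif station context.
++ */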
++void rwnx_cfg80211_unlink_bss(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif){
++ struct wiphy *wiphy = rwnx_hw->wiphy;
++ struct cfg80211_bss *bss = NULL;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
++ rwnx_vif->sta.bssid, rwnx_vif->sta.ssid,
++ rwnx_vif->sta.ssid_len,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
++ IEEE80211_BSS_TYPE_ESS,
++ IEEE80211_PRIVACY(true));//temp set true
++#else
++ WLAN_CAPABILITY_ESS,
++ WLAN_CAPABILITY_ESS);
++#endif
++
++ if (bss) {
++ cfg80211_unlink_bss(wiphy, bss);
++ AICWFDBG(LOGINFO, "%s(): cfg80211_unlink %s!!\n", __func__, rwnx_vif->sta.ssid);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ cfg80211_put_bss(wiphy, bss);
++#else
++ cfg80211_put_bss(bss);
++#endif
++ }else{
++ AICWFDBG(LOGERROR, "%s(): cfg80211_unlink error %s!!\n", __func__, rwnx_vif->sta.ssid);
++ }
++
++ memset(rwnx_vif->sta.ssid, 0, rwnx_vif->sta.ssid_len);
++ rwnx_vif->sta.ssid_len = 0;
++ memset(rwnx_vif->sta.bssid, 0, ETH_ALEN);
++}
++
++
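++/*
++ * SM_DISCONNECT_IND handler: unlink the cached BSS, tear down the station and
++ * TX queue state, report the disconnection to cfg80211 (unless it is an FT
++ * over-DS or reassociation case) and stop the netdev queues.
++ */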
++extern u8 dhcped;
++static inline int rwnx_rx_sm_disconnect_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct sm_disconnect_ind *ind = (struct sm_disconnect_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++ struct net_device *dev;
++#ifdef AICWF_RX_REORDER
++ struct reord_ctrl_info *reord_info, *tmp;
++ u8 *macaddr;
++ struct aicwf_rx_priv *rx_priv;
++#endif
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ dhcped = 0;
++
++ if(!rwnx_vif){
++ AICWFDBG(LOGERROR, "%s rwnx_vif is null \r\n", __func__);
++ //atomic_set(&rwnx_vif->drv_conn_state, RWNX_DRV_STATUS_DISCONNECTED);
++ return 0;
++ }
++ dev = rwnx_vif->ndev;
++
++ rwnx_cfg80211_unlink_bss(rwnx_hw, rwnx_vif);
++
++
++#ifdef CONFIG_BR_SUPPORT
++ /* clear bridge database */
++ nat25_db_cleanup(rwnx_vif);
++#endif /* CONFIG_BR_SUPPORT */
++
++ if(rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
++ rwnx_hw->is_p2p_connected = 0;
++ /* if vif is not up, rwnx_close has already been called */
++ if (rwnx_vif->up) {
++ if (!ind->ft_over_ds && !ind->reassoc) {
++ cfg80211_disconnected(dev, ind->reason_code, NULL, 0,
++ (ind->reason_code <= 1), GFP_ATOMIC);
++ }
++ netif_tx_stop_all_queues(dev);
++ netif_carrier_off(dev);
++ }
++
++#ifdef CONFIG_RWNX_BFMER
++ /* Disable Beamformer if supported */
++ rwnx_bfmer_report_del(rwnx_hw, rwnx_vif->sta.ap);
++#endif //(CONFIG_RWNX_BFMER)
++
++#ifdef AICWF_RX_REORDER
++#ifdef AICWF_SDIO_SUPPORT
++ rx_priv = rwnx_hw->sdiodev->rx_priv;
++#else
++ rx_priv = rwnx_hw->usbdev->rx_priv;
++#endif
++ if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) {
++ macaddr = rwnx_vif->ndev->dev_addr;
++ AICWFDBG(LOGINFO, "deinit:macaddr:%x,%x,%x,%x,%x,%x\r\n", macaddr[0],macaddr[1],macaddr[2], \
++ macaddr[3],macaddr[4],macaddr[5]);
++ list_for_each_entry_safe(reord_info, tmp, &rx_priv->stas_reord_list, list) {
++ macaddr = rwnx_vif->ndev->dev_addr;
++ AICWFDBG(LOGINFO, "reord_mac:%x,%x,%x,%x,%x,%x\r\n", reord_info->mac_addr[0],reord_info->mac_addr[1],reord_info->mac_addr[2], \
++ reord_info->mac_addr[3],reord_info->mac_addr[4],reord_info->mac_addr[5]);
++ if (!memcmp(reord_info->mac_addr, macaddr, 6)) {
++ reord_deinit_sta(rx_priv, reord_info);
++ break;
++ }
++ }
++ }
++ else if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO))
++ BUG();//should not be here: del_sta function
++#endif
++
++ rwnx_txq_sta_deinit(rwnx_hw, rwnx_vif->sta.ap);
++ rwnx_txq_tdls_vif_deinit(rwnx_vif);
++ #if 0
++ rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_vif->sta.ap);
++ #endif
++ rwnx_vif->sta.ap->valid = false;
++ rwnx_vif->sta.ap = NULL;
++ rwnx_external_auth_disable(rwnx_vif);
++ rwnx_chanctx_unlink(rwnx_vif);
++
++ //msleep(200);
++ atomic_set(&rwnx_vif->drv_conn_state, (int)RWNX_DRV_STATUS_DISCONNECTED);
++ return 0;
++}
++
++static inline int rwnx_rx_sm_external_auth_required_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct sm_external_auth_required_ind *ind =
++ (struct sm_external_auth_required_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) || defined(CONFIG_WPA3_FOR_OLD_KERNEL)
++ struct net_device *dev = rwnx_vif->ndev;
++ struct cfg80211_external_auth_params params;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ params.action = NL80211_EXTERNAL_AUTH_START;
++ memcpy(params.bssid, ind->bssid.array, ETH_ALEN);
++ params.ssid.ssid_len = ind->ssid.length;
++ memcpy(params.ssid.ssid, ind->ssid.array,
++ min_t(size_t, ind->ssid.length, sizeof(params.ssid.ssid)));
++ params.key_mgmt_suite = ind->akm;
++
++ if ((ind->vif_idx > NX_VIRT_DEV_MAX) || !rwnx_vif->up ||
++ (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_STATION) ||
++ cfg80211_external_auth_request(dev, &params, GFP_ATOMIC)) {
++ wiphy_err(rwnx_hw->wiphy, "Failed to start external auth on vif %d",
++ ind->vif_idx);
++ rwnx_send_sm_external_auth_required_rsp(rwnx_hw, rwnx_vif,
++ WLAN_STATUS_UNSPECIFIED_FAILURE);
++ return 0;
++ }
++
++ rwnx_external_auth_enable(rwnx_vif);
++#else
++ rwnx_send_sm_external_auth_required_rsp(rwnx_hw, rwnx_vif,
++ WLAN_STATUS_UNSPECIFIED_FAILURE);
++#endif
++ return 0;
++}
++
++
++static inline int rwnx_rx_mesh_path_create_cfm(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mesh_path_create_cfm *cfm = (struct mesh_path_create_cfm *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[cfm->vif_idx];
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Check we really have a Mesh Point Interface */
++ if (rwnx_vif && (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT)) {
++ rwnx_vif->ap.create_path = false;
++ }
++
++ return 0;
++}
++
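++/*
++ * MESH_PEER_UPDATE_IND handler: when peer management is done by the firmware
++ * (no user-space MPM), create or remove the driver station context as mesh
++ * peer links are established or lost.
++ */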
++static inline int rwnx_rx_mesh_peer_update_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mesh_peer_update_ind *ind = (struct mesh_peer_update_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++ struct rwnx_sta *rwnx_sta = &rwnx_hw->sta_table[ind->sta_idx];
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if ((ind->vif_idx >= (NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX)) ||
++ (rwnx_vif && (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT)) ||
++ (ind->sta_idx >= NX_REMOTE_STA_MAX))
++ return 1;
++
++ /* Peer link management handled by the firmware (no user-space MPM) */
++ if (!rwnx_vif->user_mpm)
++ {
++ /* Check if peer link has been established or lost */
++ if (ind->estab) {
++ if (!rwnx_sta->valid) {
++ u8 txq_status;
++
++ rwnx_sta->valid = true;
++ rwnx_sta->sta_idx = ind->sta_idx;
++ rwnx_sta->ch_idx = rwnx_vif->ch_index;
++ rwnx_sta->vif_idx = ind->vif_idx;
++ rwnx_sta->vlan_idx = rwnx_sta->vif_idx;
++ rwnx_sta->ps.active = false;
++ rwnx_sta->qos = true;
++ rwnx_sta->aid = ind->sta_idx + 1;
++ //rwnx_sta->acm = ind->acm;
++ memcpy(rwnx_sta->mac_addr, ind->peer_addr.array, ETH_ALEN);
++
++ rwnx_chanctx_link(rwnx_vif, rwnx_sta->ch_idx, NULL);
++
++ /* Add the station in the list of VIF's stations */
++ INIT_LIST_HEAD(&rwnx_sta->list);
++ list_add_tail(&rwnx_sta->list, &rwnx_vif->ap.sta_list);
++
++ /* Initialize the TX queues */
++ if (rwnx_sta->ch_idx == rwnx_hw->cur_chanctx) {
++ txq_status = 0;
++ } else {
++ txq_status = RWNX_TXQ_STOP_CHAN;
++ }
++
++ rwnx_txq_sta_init(rwnx_hw, rwnx_sta, txq_status);
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_register_rc_stat(rwnx_hw, rwnx_sta);
++#endif
++#ifdef CONFIG_RWNX_BFMER
++ // TODO: update indication to contain VHT capabilities
++ if (rwnx_hw->mod_params->bfmer)
++ rwnx_send_bfmer_enable(rwnx_hw, rwnx_sta, NULL);
++
++ rwnx_mu_group_sta_init(rwnx_sta, NULL);
++#endif /* CONFIG_RWNX_BFMER */
++
++ } else {
++ WARN_ON(0);
++ }
++ } else {
++ if (rwnx_sta->valid) {
++ rwnx_sta->ps.active = false;
++ rwnx_sta->valid = false;
++
++ /* Remove the station from the list of VIF's station */
++ list_del_init(&rwnx_sta->list);
++
++ rwnx_txq_sta_deinit(rwnx_hw, rwnx_sta);
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_sta);
++#endif
++ } else {
++ WARN_ON(0);
++ }
++ }
++ } else {
++ if (!ind->estab && rwnx_sta->valid) {
++ /* There is no way to inform the upper layer of the loss of a peer;
++ still clean everything up in the driver */
++ rwnx_sta->ps.active = false;
++ rwnx_sta->valid = false;
++
++ /* Remove the station from the list of VIF's station */
++ list_del_init(&rwnx_sta->list);
++
++ rwnx_txq_sta_deinit(rwnx_hw, rwnx_sta);
++#ifdef CONFIG_DEBUG_FS
++ rwnx_dbgfs_unregister_rc_stat(rwnx_hw, rwnx_sta);
++#endif
++ } else {
++ WARN_ON(0);
++ }
++ }
++
++ return 0;
++}
++
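++/*
++ * MESH_PATH_UPDATE_IND handler: create, update or delete the local copy of a
++ * mesh path, keyed by the path index reported by the firmware.
++ */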
++static inline int rwnx_rx_mesh_path_update_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mesh_path_update_ind *ind = (struct mesh_path_update_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++ struct rwnx_mesh_path *mesh_path;
++ bool found = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (ind->vif_idx >= (NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX))
++ return 1;
++
++ if (!rwnx_vif || (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT))
++ return 0;
++
++ /* Look for the path with the provided path index */
++ list_for_each_entry(mesh_path, &rwnx_vif->ap.mpath_list, list) {
++ if (mesh_path->path_idx == ind->path_idx) {
++ found = true;
++ break;
++ }
++ }
++
++ /* Check if element has been deleted */
++ if (ind->delete) {
++ if (found) {
++#ifdef CREATE_TRACE_POINTS
++ trace_mesh_delete_path(mesh_path);
++#endif
++ /* Remove element from list */
++ list_del_init(&mesh_path->list);
++ /* Free the element */
++ kfree(mesh_path);
++ }
++ }
++ else {
++ if (found) {
++ // Update the Next Hop STA
++ mesh_path->p_nhop_sta = &rwnx_hw->sta_table[ind->nhop_sta_idx];
++#ifdef CREATE_TRACE_POINTS
++ trace_mesh_update_path(mesh_path);
++#endif
++ } else {
++ // Allocate a Mesh Path structure
++ mesh_path = (struct rwnx_mesh_path *)kmalloc(sizeof(struct rwnx_mesh_path), GFP_ATOMIC);
++
++ if (mesh_path) {
++ INIT_LIST_HEAD(&mesh_path->list);
++
++ mesh_path->path_idx = ind->path_idx;
++ mesh_path->p_nhop_sta = &rwnx_hw->sta_table[ind->nhop_sta_idx];
++ memcpy(&mesh_path->tgt_mac_addr, &ind->tgt_mac_addr, MAC_ADDR_LEN);
++
++ // Insert the path in the list of paths
++ list_add_tail(&mesh_path->list, &rwnx_vif->ap.mpath_list);
++#ifdef CREATE_TRACE_POINTS
++ trace_mesh_create_path(mesh_path);
++#endif
++ }
++ }
++ }
++
++ return 0;
++}
++
++static inline int rwnx_rx_mesh_proxy_update_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ struct mesh_proxy_update_ind *ind = (struct mesh_proxy_update_ind *)msg->param;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[ind->vif_idx];
++ struct rwnx_mesh_proxy *mesh_proxy;
++ bool found = false;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (ind->vif_idx >= (NX_VIRT_DEV_MAX + NX_REMOTE_STA_MAX))
++ return 1;
++
++ if (!rwnx_vif || (RWNX_VIF_TYPE(rwnx_vif) != NL80211_IFTYPE_MESH_POINT))
++ return 0;
++
++ /* Look for a proxy entry with the provided external STA address */
++ list_for_each_entry(mesh_proxy, &rwnx_vif->ap.proxy_list, list) {
++ if (!memcmp(&ind->ext_sta_addr, &mesh_proxy->ext_sta_addr, ETH_ALEN)) {
++ found = true;
++ break;
++ }
++ }
++
++ if (ind->delete && found) {
++ /* Delete mesh path */
++ list_del_init(&mesh_proxy->list);
++ kfree(mesh_proxy);
++ } else if (!ind->delete && !found) {
++ /* Allocate a Mesh Proxy structure */
++ mesh_proxy = (struct rwnx_mesh_proxy *)kmalloc(sizeof(*mesh_proxy),
++ GFP_ATOMIC);
++
++ if (mesh_proxy) {
++ INIT_LIST_HEAD(&mesh_proxy->list);
++
++ memcpy(&mesh_proxy->ext_sta_addr, &ind->ext_sta_addr, MAC_ADDR_LEN);
++ mesh_proxy->local = ind->local;
++
++ if (!ind->local) {
++ memcpy(&mesh_proxy->proxy_addr, &ind->proxy_mac_addr, MAC_ADDR_LEN);
++ }
++
++ /* Insert the proxy entry in the list of proxies */
++ list_add_tail(&mesh_proxy->list, &rwnx_vif->ap.proxy_list);
++ }
++ }
++
++ return 0;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/***************************************************************************
++ * Messages from APM task
++ **************************************************************************/
++
++
++/***************************************************************************
++ * Messages from DEBUG task
++ **************************************************************************/
++static inline int rwnx_rx_dbg_error_ind(struct rwnx_hw *rwnx_hw,
++ struct rwnx_cmd *cmd,
++ struct ipc_e2a_msg *msg)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ return 0;
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++static msg_cb_fct mm_hdlrs[MSG_I(MM_MAX)] = {
++ [MSG_I(MM_CHANNEL_SWITCH_IND)] = rwnx_rx_chan_switch_ind,
++ [MSG_I(MM_CHANNEL_PRE_SWITCH_IND)] = rwnx_rx_chan_pre_switch_ind,
++ [MSG_I(MM_REMAIN_ON_CHANNEL_EXP_IND)] = rwnx_rx_remain_on_channel_exp_ind,
++ [MSG_I(MM_PS_CHANGE_IND)] = rwnx_rx_ps_change_ind,
++ [MSG_I(MM_TRAFFIC_REQ_IND)] = rwnx_rx_traffic_req_ind,
++ [MSG_I(MM_P2P_VIF_PS_CHANGE_IND)] = rwnx_rx_p2p_vif_ps_change_ind,
++ [MSG_I(MM_CSA_COUNTER_IND)] = rwnx_rx_csa_counter_ind,
++ [MSG_I(MM_CSA_FINISH_IND)] = rwnx_rx_csa_finish_ind,
++ [MSG_I(MM_CSA_TRAFFIC_IND)] = rwnx_rx_csa_traffic_ind,
++ [MSG_I(MM_CHANNEL_SURVEY_IND)] = rwnx_rx_channel_survey_ind,
++ [MSG_I(MM_P2P_NOA_UPD_IND)] = rwnx_rx_p2p_noa_upd_ind,
++ [MSG_I(MM_RSSI_STATUS_IND)] = rwnx_rx_rssi_status_ind,
++ [MSG_I(MM_PKTLOSS_IND)] = rwnx_rx_pktloss_notify_ind,
++ [MSG_I(MM_APM_STALOSS_IND)] = rwnx_apm_staloss_ind,
++};
++
++static msg_cb_fct scan_hdlrs[MSG_I(SCANU_MAX)] = {
++ [MSG_I(SCANU_START_CFM)] = rwnx_rx_scanu_start_cfm,
++ [MSG_I(SCANU_RESULT_IND)] = rwnx_rx_scanu_result_ind,
++};
++
++static msg_cb_fct me_hdlrs[MSG_I(ME_MAX)] = {
++ [MSG_I(ME_TKIP_MIC_FAILURE_IND)] = rwnx_rx_me_tkip_mic_failure_ind,
++ [MSG_I(ME_TX_CREDITS_UPDATE_IND)] = rwnx_rx_me_tx_credits_update_ind,
++};
++
++static msg_cb_fct sm_hdlrs[MSG_I(SM_MAX)] = {
++ [MSG_I(SM_CONNECT_IND)] = rwnx_rx_sm_connect_ind,
++ [MSG_I(SM_DISCONNECT_IND)] = rwnx_rx_sm_disconnect_ind,
++ [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_IND)] = rwnx_rx_sm_external_auth_required_ind,
++};
++
++static msg_cb_fct apm_hdlrs[MSG_I(APM_MAX)] = {
++};
++
++static msg_cb_fct mesh_hdlrs[MSG_I(MESH_MAX)] = {
++ [MSG_I(MESH_PATH_CREATE_CFM)] = rwnx_rx_mesh_path_create_cfm,
++ [MSG_I(MESH_PEER_UPDATE_IND)] = rwnx_rx_mesh_peer_update_ind,
++ [MSG_I(MESH_PATH_UPDATE_IND)] = rwnx_rx_mesh_path_update_ind,
++ [MSG_I(MESH_PROXY_UPDATE_IND)] = rwnx_rx_mesh_proxy_update_ind,
++};
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++static msg_cb_fct dbg_hdlrs[MSG_I(DBG_MAX)] = {
++ [MSG_I(DBG_ERROR_IND)] = rwnx_rx_dbg_error_ind,
++};
++
++static msg_cb_fct tdls_hdlrs[MSG_I(TDLS_MAX)] = {
++ [MSG_I(TDLS_CHAN_SWITCH_CFM)] = rwnx_rx_tdls_chan_switch_cfm,
++ [MSG_I(TDLS_CHAN_SWITCH_IND)] = rwnx_rx_tdls_chan_switch_ind,
++ [MSG_I(TDLS_CHAN_SWITCH_BASE_IND)] = rwnx_rx_tdls_chan_switch_base_ind,
++ [MSG_I(TDLS_PEER_PS_IND)] = rwnx_rx_tdls_peer_ps_ind,
++};
++
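++/*
++ * Top-level dispatch table indexed by firmware task: rwnx_rx_handle_msg()
++ * selects the handler with MSG_T(msg->id) and MSG_I(msg->id).
++ */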
++static msg_cb_fct *msg_hdlrs[] = {
++ [TASK_MM] = mm_hdlrs,
++ [TASK_DBG] = dbg_hdlrs,
++#ifdef CONFIG_RWNX_FULLMAC
++ [TASK_TDLS] = tdls_hdlrs,
++ [TASK_SCANU] = scan_hdlrs,
++ [TASK_ME] = me_hdlrs,
++ [TASK_SM] = sm_hdlrs,
++ [TASK_APM] = apm_hdlrs,
++ [TASK_MESH] = mesh_hdlrs,
++#endif /* CONFIG_RWNX_FULLMAC */
++};
++
++/**
++ * rwnx_rx_handle_msg - Dispatch a firmware message to the handler registered
++ * for its task and message index in msg_hdlrs.
++ */
++void rwnx_rx_handle_msg(struct rwnx_hw *rwnx_hw, struct ipc_e2a_msg *msg)
++{
++ //printk("%s(%d) MSG_T(msg->id):%d MSG_I(msg->id):%d cmd:%s\r\n", __func__,
++ // msg->id,
++ // MSG_T(msg->id),
++ // MSG_I(msg->id),
++ // rwnx_id2str[MSG_T(msg->id)][MSG_I(msg->id)]);
++
++ rwnx_hw->cmd_mgr->msgind(rwnx_hw->cmd_mgr, msg,
++ msg_hdlrs[MSG_T(msg->id)][MSG_I(msg->id)]);
++}
++
++void rwnx_rx_handle_print(struct rwnx_hw *rwnx_hw, u8 *msg, u32 len)
++{
++ u8 *data_end = NULL;
++ (void)data_end;
++
++ if (!rwnx_hw || !rwnx_hw->fwlog_en) {
++ pr_err("FWLOG-OVFL: %s", msg);
++ return;
++ }
++
++ printk("FWLOG: %s", msg);
++
++#ifdef CONFIG_RWNX_DEBUGFS
++ data_end = rwnx_hw->debugfs.fw_log.buf.dataend;
++
++ if (!rwnx_hw->debugfs.fw_log.buf.data)
++ return;
++
++ //printk("end=%lx, len=%d\n", (unsigned long)rwnx_hw->debugfs.fw_log.buf.end, len);
++
++ spin_lock_bh(&rwnx_hw->debugfs.fw_log.lock);
++
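++ /* Copy into the debugfs FW log ring buffer, wrapping at dataend */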
++ if (rwnx_hw->debugfs.fw_log.buf.end + len > data_end) {
++ int rem = data_end - rwnx_hw->debugfs.fw_log.buf.end;
++ memcpy(rwnx_hw->debugfs.fw_log.buf.end, msg, rem);
++ memcpy(rwnx_hw->debugfs.fw_log.buf.data, &msg[rem], len - rem);
++ rwnx_hw->debugfs.fw_log.buf.end = rwnx_hw->debugfs.fw_log.buf.data + (len - rem);
++ } else {
++ memcpy(rwnx_hw->debugfs.fw_log.buf.end, msg, len);
++ rwnx_hw->debugfs.fw_log.buf.end += len;
++ }
++
++ rwnx_hw->debugfs.fw_log.buf.size += len;
++ if (rwnx_hw->debugfs.fw_log.buf.size > FW_LOG_SIZE)
++ rwnx_hw->debugfs.fw_log.buf.size = FW_LOG_SIZE;
++
++ spin_unlock_bh(&rwnx_hw->debugfs.fw_log.lock);
++#endif
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_rx.h
+@@ -0,0 +1,19 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_msg_rx.h
++ *
++ * @brief RX function declarations
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _RWNX_MSG_RX_H_
++#define _RWNX_MSG_RX_H_
++
++void rwnx_rx_handle_msg(struct rwnx_hw *rwnx_hw, struct ipc_e2a_msg *msg);
++void rwnx_rx_handle_print(struct rwnx_hw *rwnx_hw, u8 *msg, u32 len);
++
++#endif /* _RWNX_MSG_RX_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_tx.c
+@@ -0,0 +1,3753 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_msg_tx.c
++ *
++ * @brief TX function definitions
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#include "rwnx_msg_tx.h"
++#include "rwnx_mod_params.h"
++#include "reg_access.h"
++#ifdef CONFIG_RWNX_BFMER
++#include "rwnx_bfmer.h"
++#endif //(CONFIG_RWNX_BFMER)
++#include "rwnx_compat.h"
++#include "rwnx_cmds.h"
++#include "rwnx_main.h"
++#include "aicwf_txrxif.h"
++#include "rwnx_strs.h"
++
++
++const struct mac_addr mac_addr_bcst = {{0xFFFF, 0xFFFF, 0xFFFF}};
++
++/* Default MAC Rx filters that can be changed by mac80211
++ * (via the configure_filter() callback) */
++#define RWNX_MAC80211_CHANGEABLE ( \
++ NXMAC_ACCEPT_BA_BIT | \
++ NXMAC_ACCEPT_BAR_BIT | \
++ NXMAC_ACCEPT_OTHER_DATA_FRAMES_BIT | \
++ NXMAC_ACCEPT_PROBE_REQ_BIT | \
++ NXMAC_ACCEPT_PS_POLL_BIT \
++ )
++
++/* Default MAC Rx filters that cannot be changed by mac80211 */
++#define RWNX_MAC80211_NOT_CHANGEABLE ( \
++ NXMAC_ACCEPT_QO_S_NULL_BIT | \
++ NXMAC_ACCEPT_Q_DATA_BIT | \
++ NXMAC_ACCEPT_DATA_BIT | \
++ NXMAC_ACCEPT_OTHER_MGMT_FRAMES_BIT | \
++ NXMAC_ACCEPT_MY_UNICAST_BIT | \
++ NXMAC_ACCEPT_BROADCAST_BIT | \
++ NXMAC_ACCEPT_BEACON_BIT | \
++ NXMAC_ACCEPT_PROBE_RESP_BIT \
++ )
++
++/* Default MAC Rx filter */
++#define RWNX_DEFAULT_RX_FILTER (RWNX_MAC80211_CHANGEABLE | RWNX_MAC80211_NOT_CHANGEABLE)
++
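++/* Conversion tables between nl80211 channel widths and the PHY bandwidth
++ * encoding used in firmware messages */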
++const int bw2chnl[] = {
++ [NL80211_CHAN_WIDTH_20_NOHT] = PHY_CHNL_BW_20,
++ [NL80211_CHAN_WIDTH_20] = PHY_CHNL_BW_20,
++ [NL80211_CHAN_WIDTH_40] = PHY_CHNL_BW_40,
++ [NL80211_CHAN_WIDTH_80] = PHY_CHNL_BW_80,
++ [NL80211_CHAN_WIDTH_160] = PHY_CHNL_BW_160,
++ [NL80211_CHAN_WIDTH_80P80] = PHY_CHNL_BW_80P80,
++};
++
++const int chnl2bw[] = {
++ [PHY_CHNL_BW_20] = NL80211_CHAN_WIDTH_20,
++ [PHY_CHNL_BW_40] = NL80211_CHAN_WIDTH_40,
++ [PHY_CHNL_BW_80] = NL80211_CHAN_WIDTH_80,
++ [PHY_CHNL_BW_160] = NL80211_CHAN_WIDTH_160,
++ [PHY_CHNL_BW_80P80] = NL80211_CHAN_WIDTH_80P80,
++};
++
++#define RWNX_CMD_ARRAY_SIZE 20
++#define RWNX_CMD_HIGH_WATER_SIZE (RWNX_CMD_ARRAY_SIZE / 2)
++//#define RWNX_MSG_ARRAY_SIZE 20
++
++struct rwnx_cmd cmd_array[RWNX_CMD_ARRAY_SIZE];
++//struct lmac_msg msg_array[RWNX_MSG_ARRAY_SIZE];
++
++static spinlock_t cmd_array_lock;
++//static spinlock_t msg_array_lock;
++
++//int msg_array_index = 0;
++int cmd_array_index = 0;
++
++
++
++/*****************************************************************************/
++/*
++ * Parse the ampdu density to retrieve the value in usec, according to the
++ * values defined in ieee80211.h
++ */
++static inline u8 rwnx_ampdudensity2usec(u8 ampdudensity)
++{
++ switch (ampdudensity) {
++ case IEEE80211_HT_MPDU_DENSITY_NONE:
++ return 0;
++ /* 1 microsecond is our granularity */
++ case IEEE80211_HT_MPDU_DENSITY_0_25:
++ case IEEE80211_HT_MPDU_DENSITY_0_5:
++ case IEEE80211_HT_MPDU_DENSITY_1:
++ return 1;
++ case IEEE80211_HT_MPDU_DENSITY_2:
++ return 2;
++ case IEEE80211_HT_MPDU_DENSITY_4:
++ return 4;
++ case IEEE80211_HT_MPDU_DENSITY_8:
++ return 8;
++ case IEEE80211_HT_MPDU_DENSITY_16:
++ return 16;
++ default:
++ return 0;
++ }
++}
++
++static inline bool use_pairwise_key(struct cfg80211_crypto_settings *crypto)
++{
++ if ((crypto->cipher_group == WLAN_CIPHER_SUITE_WEP40) ||
++ (crypto->cipher_group == WLAN_CIPHER_SUITE_WEP104))
++ return false;
++
++ return true;
++}
++
++static inline bool is_non_blocking_msg(int id)
++{
++ return ((id == MM_TIM_UPDATE_REQ) || (id == ME_RC_SET_RATE_REQ) ||
++ (id == MM_BFMER_ENABLE_REQ) || (id == ME_TRAFFIC_IND_REQ) ||
++ (id == TDLS_PEER_TRAFFIC_IND_REQ) ||
++ (id == MESH_PATH_CREATE_REQ) || (id == MESH_PROXY_ADD_REQ) ||
++ (id == SM_EXTERNAL_AUTH_REQUIRED_RSP));
++}
++
++static inline u8_l get_chan_flags(uint32_t flags)
++{
++ u8_l chan_flags = 0;
++#ifdef RADAR_OR_IR_DETECT
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
++ if (flags & IEEE80211_CHAN_PASSIVE_SCAN)
++ #else
++ if (flags & IEEE80211_CHAN_NO_IR)
++ #endif
++ chan_flags |= CHAN_NO_IR;
++ if (flags & IEEE80211_CHAN_RADAR)
++ chan_flags |= CHAN_RADAR;
++#endif
++ return chan_flags;
++}
++
++static inline s8_l chan_to_fw_pwr(int power)
++{
++ return power>127?127:(s8_l)power;
++}
++
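++/*
++ * Clamp a channel wider than 40 MHz down to 40 MHz, recomputing center1 so
++ * that the primary 20 MHz channel stays inside the reduced bandwidth.
++ */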
++static inline void limit_chan_bw(u8_l *bw, u16_l primary, u16_l *center1)
++{
++ int oft, new_oft = 10;
++
++ if (*bw <= PHY_CHNL_BW_40)
++ return;
++
++ oft = *center1 - primary;
++ *bw = PHY_CHNL_BW_40;
++
++ if (oft < 0)
++ new_oft = new_oft * -1;
++ if (abs(oft) == 10 || abs(oft) == 50)
++ new_oft = new_oft * -1;
++
++ *center1 = primary + new_oft;
++}
++
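++/*
++ * Take a free slot from the static cmd_array pool (protected by
++ * cmd_array_lock); the slot stays marked as used until rwnx_cmd_free().
++ */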
++struct rwnx_cmd *rwnx_cmd_malloc(void){
++ struct rwnx_cmd *cmd = NULL;
++ unsigned long flags = 0;
++
++ spin_lock_irqsave(&cmd_array_lock, flags);
++
++ for(cmd_array_index = 0; cmd_array_index < RWNX_CMD_ARRAY_SIZE; cmd_array_index++){
++ if(cmd_array[cmd_array_index].used == 0){
++ AICWFDBG(LOGTRACE, "%s get cmd_array[%d]:%p \r\n", __func__, cmd_array_index,&cmd_array[cmd_array_index]);
++ cmd = &cmd_array[cmd_array_index];
++ cmd_array[cmd_array_index].used = 1;
++ break;
++ }
++ }
++
++ if(cmd_array_index >= RWNX_CMD_HIGH_WATER_SIZE){
++ AICWFDBG(LOGERROR, "%s cmd(%d) was pending...\r\n", __func__, cmd_array_index);
++ mdelay(100);
++ }
++
++ if(!cmd){
++ AICWFDBG(LOGERROR, "%s array is empty...\r\n", __func__);
++ }
++
++ spin_unlock_irqrestore(&cmd_array_lock, flags);
++
++ return cmd;
++}
++
++void rwnx_cmd_free(struct rwnx_cmd *cmd){
++ unsigned long flags = 0;
++
++ spin_lock_irqsave(&cmd_array_lock, flags);
++ cmd->used = 0;
++ AICWFDBG(LOGTRACE, "%s cmd_array[%d]:%p \r\n", __func__, cmd->array_id, cmd);
++ spin_unlock_irqrestore(&cmd_array_lock, flags);
++}
++
++
++int rwnx_init_cmd_array(void){
++
++ AICWFDBG(LOGTRACE, "%s Enter \r\n", __func__);
++ spin_lock_init(&cmd_array_lock);
++
++ for(cmd_array_index = 0; cmd_array_index < RWNX_CMD_ARRAY_SIZE; cmd_array_index++){
++ AICWFDBG(LOGTRACE, "%s cmd_queue[%d]:%p \r\n", __func__, cmd_array_index, &cmd_array[cmd_array_index]);
++ cmd_array[cmd_array_index].used = 0;
++ cmd_array[cmd_array_index].array_id = cmd_array_index;
++ }
++ AICWFDBG(LOGTRACE, "%s Exit \r\n", __func__);
++
++ return 0;
++}
++
++void rwnx_free_cmd_array(void){
++
++ AICWFDBG(LOGTRACE, "%s Enter \r\n", __func__);
++
++ for(cmd_array_index = 0; cmd_array_index < RWNX_CMD_ARRAY_SIZE; cmd_array_index++){
++ cmd_array[cmd_array_index].used = 0;
++ }
++
++ AICWFDBG(LOGTRACE, "%s Exit \r\n", __func__);
++}
++
++
++#if 0
++int rwnx_init_msg_array(void){
++
++ printk("%s enter\r\n", __func__);
++ spin_lock_init(&msg_array_lock);
++
++ for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){
++ printk("%s msg_queue[%d]:%p \r\n", __func__, msg_array_index, &msg_array[msg_array_index]);
++ }
++ printk("%s exit\r\n", __func__);
++
++ return 0;
++
++}
++
++
++void rwnx_free_msg_array(void){
++
++ printk("%s enter\r\n", __func__);
++#if 1
++ for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){
++ msg_array[msg_array_index].param_len = 0;
++ }
++#endif
++ printk("%s exit\r\n", __func__);
++}
++
++struct lmac_msg *rwnx_msg_malloc_(void){
++ struct lmac_msg *msg = NULL;
++
++
++ spin_lock(&msg_array_lock);
++ printk("%s enter\r\n", __func__);
++ for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){
++ if(msg_array[msg_array_index].param_len== 0){
++ printk("%s get msg_array[%d]:%p \r\n", __func__, msg_array_index, &msg_array[msg_array_index]);
++ msg = &msg_array[msg_array_index];
++ break;
++ }else{
++ printk("%s msg_array[%d] in used param_len= %d \r\n",
++ __func__,
++ msg_array_index,
++ msg_array[msg_array_index].param_len);
++ }
++ }
++
++ if(!msg){
++ printk("%s array is empty...\r\n", __func__);
++ }
++ spin_unlock(&msg_array_lock);
++
++ return msg;
++}
++
++void rwnx_msg_free_(struct lmac_msg *msg){
++
++ spin_lock(&msg_array_lock);
++ printk("%s enter \r\n", __func__);
++
++ for(msg_array_index = 0; msg_array_index < RWNX_MSG_ARRAY_SIZE; msg_array_index++){
++ if(msg == &msg_array[msg_array_index]){
++ break;
++ }
++ }
++
++ memset(msg->param, 0, msg->param_len);
++ msg->id = 0;
++ msg->dest_id = 0;
++ msg->src_id = 0;
++ msg->param_len = 0;
++
++ printk("%s msg_array[%d]:%p \r\n", __func__, msg_array_index, msg);
++ spin_unlock(&msg_array_lock);
++}
++#endif
++
++
++/**
++ ******************************************************************************
++ * @brief Allocate memory for a message
++ *
++ * This primitive allocates memory for a message that has to be sent. The memory
++ * is allocated dynamically on the heap and the length of the variable parameter
++ * structure has to be provided in order to allocate the correct size.
++ *
++ * Several additional parameters are provided which will be preset in the message
++ * and which may be used internally to choose the kind of memory to allocate.
++ *
++ * The memory allocated will be automatically freed by the kernel, after the
++ * pointer has been sent to ke_msg_send(). If the message is not sent, it must
++ * be freed explicitly with ke_msg_free().
++ *
++ * Allocation failure is considered critical and should not happen.
++ *
++ * @param[in] id Message identifier
++ * @param[in] dest_id Destination Task Identifier
++ * @param[in] src_id Source Task Identifier
++ * @param[in] param_len Size of the message parameters to be allocated
++ *
++ * @return Pointer to the parameter member of the ke_msg. If the parameter
++ * structure is empty, the pointer will point to the end of the message
++ * and should not be used (except to retrieve the message pointer or to
++ * send the message)
++ ******************************************************************************
++ */
++static inline void *rwnx_msg_zalloc(lmac_msg_id_t const id,
++ lmac_task_id_t const dest_id,
++ lmac_task_id_t const src_id,
++ uint16_t const param_len)
++{
++ struct lmac_msg *msg;
++ gfp_t flags;
++
++ //if (is_non_blocking_msg(id) && in_softirq())
++ flags = GFP_ATOMIC;
++ //else
++ // flags = GFP_KERNEL;
++
++ msg = (struct lmac_msg *)kzalloc(sizeof(struct lmac_msg) + param_len,
++ flags);
++ if (msg == NULL) {
++ printk(KERN_CRIT "%s: msg allocation failed\n", __func__);
++ return NULL;
++ }
++ msg->id = id;
++ msg->dest_id = dest_id;
++ msg->src_id = src_id;
++ msg->param_len = param_len;
++ //printk("rwnx_msg_zalloc size=%d id=%d\n",msg->param_len,msg->id);
++
++ return msg->param;
++}
++
++static void rwnx_msg_free(struct rwnx_hw *rwnx_hw, const void *msg_params)
++{
++ struct lmac_msg *msg = container_of((void *)msg_params,
++ struct lmac_msg, param);
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Free the message */
++ kfree(msg);
++}
++
++
++
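++/*
++ * Wrap an already-built LMAC message in a rwnx_cmd and hand it to the command
++ * manager; when no confirmation is requested the message is pushed straight
++ * to the bus instead.
++ */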
++static int rwnx_send_msg(struct rwnx_hw *rwnx_hw, const void *msg_params,
++ int reqcfm, lmac_msg_id_t reqid, void *cfm)
++{
++ struct lmac_msg *msg;
++ struct rwnx_cmd *cmd;
++ bool nonblock;
++ int ret = 0;
++ u8_l empty = 0;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++ AICWFDBG(LOGDEBUG, "%s (%d)%s reqcfm:%d in_softirq:%d in_atomic:%d\r\n",
++ __func__, reqid, RWNX_ID2STR(reqid), reqcfm, (int)in_softirq(), (int)in_atomic());
++
++#ifdef AICWF_USB_SUPPORT
++ if (rwnx_hw->usbdev->state == USB_DOWN_ST) {
++ rwnx_msg_free(rwnx_hw, msg_params);
++ AICWFDBG(LOGERROR, "%s bus is down\n", __func__);
++ return 0;
++ }
++#endif
++#ifdef AICWF_SDIO_SUPPORT
++ if(rwnx_hw->sdiodev->bus_if->state == BUS_DOWN_ST) {
++ rwnx_msg_free(rwnx_hw, msg_params);
++ sdio_err("bus is down\n");
++ return 0;
++ }
++#endif
++
++ msg = container_of((void *)msg_params, struct lmac_msg, param);
++
++ #if 0
++ if (!test_bit(RWNX_DEV_STARTED, &rwnx_hw->drv_flags) &&
++ reqid != DBG_MEM_READ_CFM && reqid != DBG_MEM_WRITE_CFM &&
++ reqid != DBG_MEM_BLOCK_WRITE_CFM && reqid != DBG_START_APP_CFM &&
++ reqid != MM_SET_RF_CALIB_CFM && reqid != MM_SET_RF_CONFIG_CFM &&
++ reqid != MM_RESET_CFM && reqid != MM_VERSION_CFM &&
++ reqid != MM_START_CFM && reqid != MM_SET_IDLE_CFM &&
++ reqid != ME_CONFIG_CFM && reqid != MM_SET_PS_MODE_CFM &&
++ reqid != ME_CHAN_CONFIG_CFM) {
++ printk(KERN_CRIT "%s: bypassing (RWNX_DEV_RESTARTING set) 0x%02x\n",
++ __func__, reqid);
++ kfree(msg);
++ return -EBUSY;
++ }
++ #endif
++#if 0
++ else if (!rwnx_hw->ipc_env) {
++ printk(KERN_CRIT "%s: bypassing (restart must have failed)\n", __func__);
++ kfree(msg);
++ return -EBUSY;
++ }
++#endif
++
++ //nonblock = is_non_blocking_msg(msg->id);
++ nonblock = 0;//AIDEN
++ cmd = rwnx_cmd_malloc();//kzalloc(sizeof(struct rwnx_cmd), nonblock ? GFP_ATOMIC : GFP_KERNEL);
++ if (!cmd) {
++ /* cmd_array pool exhausted */
++ rwnx_msg_free(rwnx_hw, msg_params);
++ return -ENOMEM;
++ }
++ cmd->result = -EINTR;
++ cmd->id = msg->id;
++ cmd->reqid = reqid;
++ cmd->a2e_msg = msg;
++ cmd->e2a_msg = cfm;
++ if (nonblock)
++ cmd->flags = RWNX_CMD_FLAG_NONBLOCK;
++ if (reqcfm)
++ cmd->flags |= RWNX_CMD_FLAG_REQ_CFM;
++
++ if(cfm != NULL) {
++ do {
++ if(rwnx_hw->cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED)
++ break;
++ spin_lock_bh(&rwnx_hw->cmd_mgr->lock);
++ empty = list_empty(&rwnx_hw->cmd_mgr->cmds);
++ spin_unlock_bh(&rwnx_hw->cmd_mgr->lock);
++ if(!empty) {
++ if(in_softirq()) {
++ #ifdef CONFIG_RWNX_DBG
++ AICWFDBG(LOGDEBUG, "in_softirq:check cmdqueue empty\n");
++ #endif
++ mdelay(10);
++ }
++ else {
++ #ifdef CONFIG_RWNX_DBG
++ AICWFDBG(LOGDEBUG, "check cmdqueue empty\n");
++ #endif
++ msleep(50);
++ }
++ }
++ } while(!empty);//wait for cmd queue empty
++ }
++
++ if(reqcfm) {
++ cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; // we don't need ack any more
++ ret = rwnx_hw->cmd_mgr->queue(rwnx_hw->cmd_mgr, cmd);
++ } else {
++#ifdef AICWF_SDIO_SUPPORT
++ aicwf_set_cmd_tx((void *)(rwnx_hw->sdiodev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
++#else
++ aicwf_set_cmd_tx((void *)(rwnx_hw->usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
++#endif
++ }
++
++ if(!reqcfm || ret)
++ rwnx_cmd_free(cmd);//kfree(cmd);
++
++ return ret;//0;
++}
++
++
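++/*
++ * Variant of rwnx_send_msg() that can defer queuing to the command manager
++ * (cmd_mgr_queue_force_defer) when 'defer' is set, e.g. for interface removal.
++ */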
++static int rwnx_send_msg1(struct rwnx_hw *rwnx_hw, const void *msg_params,
++ int reqcfm, lmac_msg_id_t reqid, void *cfm, bool defer)
++{
++ struct lmac_msg *msg;
++ struct rwnx_cmd *cmd;
++ bool nonblock;
++ int ret = 0;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ AICWFDBG(LOGDEBUG,"%s (%d)%s reqcfm:%d in_softirq:%d in_atomic:%d\r\n",
++ __func__, reqid, RWNX_ID2STR(reqid), reqcfm, (int)in_softirq(), (int)in_atomic());
++
++ if (rwnx_hw->usbdev->state == USB_DOWN_ST) {
++ rwnx_msg_free(rwnx_hw, msg_params);
++ AICWFDBG(LOGERROR, "%s bus is down\n", __func__);
++ return 0;
++ }
++
++
++ msg = container_of((void *)msg_params, struct lmac_msg, param);
++
++ //nonblock = is_non_blocking_msg(msg->id);
++ nonblock = 0;
++ cmd = rwnx_cmd_malloc();//kzalloc(sizeof(struct rwnx_cmd), nonblock ? GFP_ATOMIC : GFP_KERNEL);
++ if (!cmd) {
++ /* cmd_array pool exhausted */
++ rwnx_msg_free(rwnx_hw, msg_params);
++ return -ENOMEM;
++ }
++ cmd->result = -EINTR;
++ cmd->id = msg->id;
++ cmd->reqid = reqid;
++ cmd->a2e_msg = msg;
++ cmd->e2a_msg = cfm;
++ if (nonblock)
++ cmd->flags = RWNX_CMD_FLAG_NONBLOCK;
++ if (reqcfm)
++ cmd->flags |= RWNX_CMD_FLAG_REQ_CFM;
++
++ if(reqcfm) {
++ cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; // we don't need ack any more
++ if(!defer)
++ ret = rwnx_hw->cmd_mgr->queue(rwnx_hw->cmd_mgr, cmd);
++ else
++ ret = cmd_mgr_queue_force_defer(rwnx_hw->cmd_mgr, cmd);
++ }
++
++ if (!reqcfm || ret) {
++ rwnx_cmd_free(cmd);//kfree(cmd);
++ }
++
++ if (!ret) {
++ ret = cmd->result;
++ }
++
++ //return ret;
++ return 0;
++}
++
++/******************************************************************************
++ * Control messages handling functions (FULLMAC)
++ *****************************************************************************/
++int rwnx_send_reset(struct rwnx_hw *rwnx_hw)
++{
++ void *void_param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* RESET REQ has no parameter */
++ void_param = rwnx_msg_zalloc(MM_RESET_REQ, TASK_MM, DRV_TASK_ID, 0);
++ if (!void_param)
++ return -ENOMEM;
++
++ return rwnx_send_msg(rwnx_hw, void_param, 1, MM_RESET_CFM, NULL);
++}
++
++int rwnx_send_start(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_start_req *start_req_param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the START REQ message */
++ start_req_param = rwnx_msg_zalloc(MM_START_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_start_req));
++ if (!start_req_param)
++ return -ENOMEM;
++
++ /* Set parameters for the START message */
++ memcpy(&start_req_param->phy_cfg, &rwnx_hw->phy.cfg, sizeof(rwnx_hw->phy.cfg));
++ start_req_param->uapsd_timeout = (u32_l)rwnx_hw->mod_params->uapsd_timeout;
++ start_req_param->lp_clk_accuracy = (u16_l)rwnx_hw->mod_params->lp_clk_ppm;
++
++ /* Send the START REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, start_req_param, 1, MM_START_CFM, NULL);
++}
++
++int rwnx_send_version_req(struct rwnx_hw *rwnx_hw, struct mm_version_cfm *cfm)
++{
++ void *void_param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* VERSION REQ has no parameter */
++ void_param = rwnx_msg_zalloc(MM_VERSION_REQ, TASK_MM, DRV_TASK_ID, 0);
++ if (!void_param)
++ return -ENOMEM;
++
++ return rwnx_send_msg(rwnx_hw, void_param, 1, MM_VERSION_CFM, cfm);
++}
++
++int rwnx_send_add_if(struct rwnx_hw *rwnx_hw, const unsigned char *mac,
++ enum nl80211_iftype iftype, bool p2p, struct mm_add_if_cfm *cfm)
++{
++ struct mm_add_if_req *add_if_req_param;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ADD_IF_REQ message */
++ add_if_req_param = rwnx_msg_zalloc(MM_ADD_IF_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_add_if_req));
++ if (!add_if_req_param)
++ return -ENOMEM;
++
++ /* Set parameters for the ADD_IF_REQ message */
++ memcpy(&(add_if_req_param->addr.array[0]), mac, ETH_ALEN);
++ switch (iftype) {
++ #ifdef CONFIG_RWNX_FULLMAC
++ //case NL80211_IFTYPE_P2P_DEVICE:
++ case NL80211_IFTYPE_P2P_CLIENT:
++ add_if_req_param->p2p = true;
++ // no break
++ #endif /* CONFIG_RWNX_FULLMAC */
++ case NL80211_IFTYPE_STATION:
++ add_if_req_param->type = MM_STA;
++ break;
++
++ case NL80211_IFTYPE_ADHOC:
++ add_if_req_param->type = MM_IBSS;
++ break;
++
++ #ifdef CONFIG_RWNX_FULLMAC
++ case NL80211_IFTYPE_P2P_GO:
++ add_if_req_param->p2p = true;
++ // no break
++ #endif /* CONFIG_RWNX_FULLMAC */
++ case NL80211_IFTYPE_AP:
++ add_if_req_param->type = MM_AP;
++ break;
++ case NL80211_IFTYPE_MESH_POINT:
++ add_if_req_param->type = MM_MESH_POINT;
++ break;
++ case NL80211_IFTYPE_AP_VLAN:
++ return -1;
++ case NL80211_IFTYPE_MONITOR:
++ add_if_req_param->type = MM_MONITOR;
++ break;
++ default:
++ add_if_req_param->type = MM_STA;
++ break;
++ }
++
++
++ /* Send the ADD_IF_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, add_if_req_param, 1, MM_ADD_IF_CFM, cfm);
++}
++
++int rwnx_send_remove_if(struct rwnx_hw *rwnx_hw, u8 vif_index, bool defer)
++{
++ struct mm_remove_if_req *remove_if_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_REMOVE_IF_REQ message */
++ remove_if_req = rwnx_msg_zalloc(MM_REMOVE_IF_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_remove_if_req));
++ if (!remove_if_req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_REMOVE_IF_REQ message */
++ remove_if_req->inst_nbr = vif_index;
++
++ /* Send the MM_REMOVE_IF_REQ message to LMAC FW */
++ return rwnx_send_msg1(rwnx_hw, remove_if_req, 1, MM_REMOVE_IF_CFM, NULL, defer);
++}
++
++int rwnx_send_set_channel(struct rwnx_hw *rwnx_hw, int phy_idx,
++ struct mm_set_channel_cfm *cfm)
++{
++ struct mm_set_channel_req *req;
++ enum nl80211_chan_width width;
++ u16 center_freq, center_freq1, center_freq2;
++ s8 tx_power = 0;
++ u8 flags;
++ enum nl80211_band band;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (phy_idx >= rwnx_hw->phy.cnt)
++ return -ENOTSUPP;
++
++ req = rwnx_msg_zalloc(MM_SET_CHANNEL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_channel_req));
++ if (!req)
++ return -ENOMEM;
++
++ if (phy_idx == 0) {
++#ifdef CONFIG_RWNX_FULLMAC
++ /* On FULLMAC only the channel of the secondary chain can be set here */
++ wiphy_err(rwnx_hw->wiphy, "Trying to set channel of primary chain");
++ rwnx_msg_free(rwnx_hw, req);
++ return 0;
++#endif /* CONFIG_RWNX_FULLMAC */
++ } else {
++ struct rwnx_sec_phy_chan *chan = &rwnx_hw->phy.sec_chan;
++
++ width = chnl2bw[chan->type];
++ band = chan->band;
++ center_freq = chan->prim20_freq;
++ center_freq1 = chan->center_freq1;
++ center_freq2 = chan->center_freq2;
++ flags = 0;
++ }
++
++ req->chan.band = band;
++ req->chan.type = bw2chnl[width];
++ req->chan.prim20_freq = center_freq;
++ req->chan.center1_freq = center_freq1;
++ req->chan.center2_freq = center_freq2;
++ req->chan.tx_power = tx_power;
++ req->chan.flags = flags;
++ req->index = phy_idx;
++
++ if (rwnx_hw->phy.limit_bw)
++ limit_chan_bw(&req->chan.type, req->chan.prim20_freq, &req->chan.center1_freq);
++
++ RWNX_DBG("mac80211: freq=%d(c1:%d - c2:%d)/width=%d - band=%d\n"
++ " hw(%d): prim20=%d(c1:%d - c2:%d)/ type=%d - band=%d\n",
++ center_freq, center_freq1, center_freq2, width, band,
++ phy_idx, req->chan.prim20_freq, req->chan.center1_freq,
++ req->chan.center2_freq, req->chan.type, req->chan.band);
++
++ /* Send the MM_SET_CHANNEL_REQ REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MM_SET_CHANNEL_CFM, cfm);
++}
++
++int rwnx_send_key_add(struct rwnx_hw *rwnx_hw, u8 vif_idx, u8 sta_idx, bool pairwise,
++ u8 *key, u8 key_len, u8 key_idx, u8 cipher_suite,
++ struct mm_key_add_cfm *cfm)
++{
++ struct mm_key_add_req *key_add_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_KEY_ADD_REQ message */
++ key_add_req = rwnx_msg_zalloc(MM_KEY_ADD_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_key_add_req));
++ if (!key_add_req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_KEY_ADD_REQ message */
++ if (sta_idx != 0xFF) {
++ /* Pairwise key */
++ key_add_req->sta_idx = sta_idx;
++ } else {
++ /* Default key */
++ key_add_req->sta_idx = sta_idx;
++ key_add_req->key_idx = (u8_l)key_idx; /* only useful for default keys */
++ }
++ key_add_req->pairwise = pairwise;
++ key_add_req->inst_nbr = vif_idx;
++ key_add_req->key.length = key_len;
++ memcpy(&(key_add_req->key.array[0]), key, key_len);
++
++ key_add_req->cipher_suite = cipher_suite;
++
++ RWNX_DBG("%s: sta_idx:%d key_idx:%d inst_nbr:%d cipher:%d key_len:%d\n", __func__,
++ key_add_req->sta_idx, key_add_req->key_idx, key_add_req->inst_nbr,
++ key_add_req->cipher_suite, key_add_req->key.length);
++#if defined(CONFIG_RWNX_DBG) || defined(CONFIG_DYNAMIC_DEBUG)
++ print_hex_dump_bytes("key: ", DUMP_PREFIX_OFFSET, key_add_req->key.array, key_add_req->key.length);
++#endif
++
++ /* Send the MM_KEY_ADD_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, key_add_req, 1, MM_KEY_ADD_CFM, cfm);
++}
++
++int rwnx_send_key_del(struct rwnx_hw *rwnx_hw, uint8_t hw_key_idx)
++{
++ struct mm_key_del_req *key_del_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_KEY_DEL_REQ message */
++ key_del_req = rwnx_msg_zalloc(MM_KEY_DEL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_key_del_req));
++ if (!key_del_req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_KEY_DEL_REQ message */
++ key_del_req->hw_key_idx = hw_key_idx;
++
++ /* Send the MM_KEY_DEL_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, key_del_req, 1, MM_KEY_DEL_CFM, NULL);
++}
++
++int rwnx_send_bcn(struct rwnx_hw *rwnx_hw,u8 *buf, u8 vif_idx, u16 bcn_len)
++{
++ struct apm_set_bcn_ie_req *bcn_ie_req;
++ bcn_ie_req = rwnx_msg_zalloc(APM_SET_BEACON_IE_REQ, TASK_APM, DRV_TASK_ID,
++ sizeof(struct apm_set_bcn_ie_req));
++ if (!bcn_ie_req) {
++ kfree(buf);
++ return -ENOMEM;
++ }
++
++ bcn_ie_req->vif_idx = vif_idx;
++ bcn_ie_req->bcn_ie_len = bcn_len;
++ memcpy(bcn_ie_req->bcn_ie, (u8 *)buf, bcn_len);
++ kfree(buf);
++
++ return rwnx_send_msg(rwnx_hw, bcn_ie_req, 1, APM_SET_BEACON_IE_CFM, NULL);
++}
++
++int rwnx_send_bcn_change(struct rwnx_hw *rwnx_hw, u8 vif_idx, u32 bcn_addr,
++ u16 bcn_len, u16 tim_oft, u16 tim_len, u16 *csa_oft)
++{
++ struct mm_bcn_change_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_BCN_CHANGE_REQ message */
++ req = rwnx_msg_zalloc(MM_BCN_CHANGE_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_bcn_change_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_BCN_CHANGE_REQ message */
++ req->bcn_ptr = bcn_addr;
++ req->bcn_len = bcn_len;
++ req->tim_oft = tim_oft;
++ req->tim_len = tim_len;
++ req->inst_nbr = vif_idx;
++
++ if (csa_oft) {
++ int i;
++ for (i = 0; i < BCN_MAX_CSA_CPT; i++) {
++ req->csa_oft[i] = csa_oft[i];
++ }
++ }
++
++ /* Send the MM_BCN_CHANGE_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MM_BCN_CHANGE_CFM, NULL);
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++static inline void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
++ struct ieee80211_channel *chan,
++ enum nl80211_channel_type chan_type)
++{
++ if (WARN_ON(!chan))
++ return;
++ chandef->chan = chan;
++ chandef->center_freq2 = 0;
++ switch (chan_type) {
++ case NL80211_CHAN_NO_HT:
++ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
++ chandef->center_freq1 = chan->center_freq;
++ break;
++ case NL80211_CHAN_HT20:
++ chandef->width = NL80211_CHAN_WIDTH_20;
++ chandef->center_freq1 = chan->center_freq;
++ break;
++ case NL80211_CHAN_HT40PLUS:
++ chandef->width = NL80211_CHAN_WIDTH_40;
++ chandef->center_freq1 = chan->center_freq + 10;
++ break;
++ case NL80211_CHAN_HT40MINUS:
++ chandef->width = NL80211_CHAN_WIDTH_40;
++ chandef->center_freq1 = chan->center_freq - 10;
++ break;
++ default:
++ WARN_ON(1);
++ }
++}
++#endif
++
++int rwnx_send_roc(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct ieee80211_channel *chan, unsigned int duration,
++ struct mm_remain_on_channel_cfm *roc_cfm)
++{
++ struct mm_remain_on_channel_req *req;
++ struct cfg80211_chan_def chandef;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Create channel definition structure */
++ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
++
++ /* Build the MM_REMAIN_ON_CHANNEL_REQ message */
++ req = rwnx_msg_zalloc(MM_REMAIN_ON_CHANNEL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_remain_on_channel_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_REMAIN_ON_CHANNEL_REQ message */
++ req->op_code = MM_ROC_OP_START;
++ req->vif_index = vif->vif_index;
++ req->duration_ms = duration;
++ req->band = chan->band;
++ req->type = bw2chnl[chandef.width];
++ req->prim20_freq = chan->center_freq;
++ req->center1_freq = chandef.center_freq1;
++ req->center2_freq = chandef.center_freq2;
++ req->tx_power = chan_to_fw_pwr(chan->max_power);
++
++ /* Send the MM_REMAIN_ON_CHANNEL_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MM_REMAIN_ON_CHANNEL_CFM, roc_cfm);
++}
++
++int rwnx_send_cancel_roc(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_remain_on_channel_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_REMAIN_ON_CHANNEL_REQ message */
++ req = rwnx_msg_zalloc(MM_REMAIN_ON_CHANNEL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_remain_on_channel_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_REMAIN_ON_CHANNEL_REQ message */
++ req->op_code = MM_ROC_OP_CANCEL;
++
++ /* Send the MM_REMAIN_ON_CHANNEL_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MM_REMAIN_ON_CHANNEL_CFM, NULL);
++}
++
++int rwnx_send_set_power(struct rwnx_hw *rwnx_hw, u8 vif_idx, s8 pwr,
++ struct mm_set_power_cfm *cfm)
++{
++ struct mm_set_power_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_POWER_REQ message */
++ req = rwnx_msg_zalloc(MM_SET_POWER_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_power_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_SET_POWER_REQ message */
++ req->inst_nbr = vif_idx;
++ req->power = pwr;
++
++ /* Send the MM_SET_POWER_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MM_SET_POWER_CFM, cfm);
++}
++
++int rwnx_send_set_edca(struct rwnx_hw *rwnx_hw, u8 hw_queue, u32 param,
++ bool uapsd, u8 inst_nbr)
++{
++ struct mm_set_edca_req *set_edca_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_EDCA_REQ message */
++ set_edca_req = rwnx_msg_zalloc(MM_SET_EDCA_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_edca_req));
++ if (!set_edca_req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_SET_EDCA_REQ message */
++ set_edca_req->ac_param = param;
++ set_edca_req->uapsd = uapsd;
++ set_edca_req->hw_queue = hw_queue;
++ set_edca_req->inst_nbr = inst_nbr;
++
++ /* Send the MM_SET_EDCA_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, set_edca_req, 1, MM_SET_EDCA_CFM, NULL);
++}
++
++#ifdef CONFIG_RWNX_P2P_DEBUGFS
++int rwnx_send_p2p_oppps_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u8 ctw, struct mm_set_p2p_oppps_cfm *cfm)
++{
++ struct mm_set_p2p_oppps_req *p2p_oppps_req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_P2P_OPPPS_REQ message */
++ p2p_oppps_req = rwnx_msg_zalloc(MM_SET_P2P_OPPPS_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_p2p_oppps_req));
++
++ if (!p2p_oppps_req) {
++ return -ENOMEM;
++ }
++
++ /* Fill the message parameters */
++ p2p_oppps_req->vif_index = rwnx_vif->vif_index;
++ p2p_oppps_req->ctwindow = ctw;
++
++ /* Send the MM_P2P_OPPPS_REQ message to LMAC FW */
++ error = rwnx_send_msg(rwnx_hw, p2p_oppps_req, 1, MM_SET_P2P_OPPPS_CFM, cfm);
++
++ return (error);
++}
++
++int rwnx_send_p2p_noa_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ int count, int interval, int duration, bool dyn_noa,
++ struct mm_set_p2p_noa_cfm *cfm)
++{
++ struct mm_set_p2p_noa_req *p2p_noa_req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Param check */
++ if (count > 255)
++ count = 255;
++
++ if (duration >= interval) {
++ dev_err(rwnx_hw->dev, "Invalid p2p NOA config: interval=%d <= duration=%d\n",
++ interval, duration);
++ return -EINVAL;
++ }
++
++ /* Build the MM_SET_P2P_NOA_REQ message */
++ p2p_noa_req = rwnx_msg_zalloc(MM_SET_P2P_NOA_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_p2p_noa_req));
++
++ if (!p2p_noa_req) {
++ return -ENOMEM;
++ }
++
++ /* Fill the message parameters */
++ p2p_noa_req->vif_index = rwnx_vif->vif_index;
++ p2p_noa_req->noa_inst_nb = 0;
++ p2p_noa_req->count = count;
++
++ if (count) {
++ p2p_noa_req->duration_us = duration * 1024;
++ p2p_noa_req->interval_us = interval * 1024;
++ p2p_noa_req->start_offset = (interval - duration - 10) * 1024;
++ p2p_noa_req->dyn_noa = dyn_noa;
++ }
++
++ /* Send the MM_SET_P2P_NOA_REQ message to LMAC FW */
++ error = rwnx_send_msg(rwnx_hw, p2p_noa_req, 1, MM_SET_P2P_NOA_CFM, cfm);
++
++ return (error);
++}
++#endif /* CONFIG_RWNX_P2P_DEBUGFS */
++
++#ifdef AICWF_ARP_OFFLOAD
++int rwnx_send_arpoffload_en_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u32_l ipaddr, u8_l enable)
++{
++ struct mm_set_arpoffload_en_req *arp_offload_req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_ARPOFFLOAD_REQ message */
++ arp_offload_req = rwnx_msg_zalloc(MM_SET_ARPOFFLOAD_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_arpoffload_en_req));
++
++ if (!arp_offload_req) {
++ return -ENOMEM;
++ }
++
++ /* Fill the message parameters */
++ arp_offload_req->enable = enable;
++ arp_offload_req->vif_idx = rwnx_vif->vif_index;
++ arp_offload_req->ipaddr = ipaddr;
++
++ /* Send the MM_SET_ARPOFFLOAD_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, arp_offload_req, 1, MM_SET_ARPOFFLOAD_CFM, NULL);
++
++ return (error);
++}
++#endif
++
++int rwnx_send_coex_req(struct rwnx_hw *rwnx_hw, u8_l disable_coexnull, u8_l enable_nullcts)
++{
++ struct mm_set_coex_req *coex_req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++
++ /* Build the MM_SET_COEX_REQ message */
++ coex_req = rwnx_msg_zalloc(MM_SET_COEX_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_coex_req));
++
++ if (!coex_req) {
++ return -ENOMEM;
++ }
++
++ coex_req->bt_on = 1;
++ coex_req->disable_coexnull = disable_coexnull;
++ coex_req->enable_nullcts = enable_nullcts;
++ coex_req->enable_periodic_timer = 0;
++ coex_req->coex_timeslot_set = 0;
++
++ /* Send the MM_SET_COEX_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, coex_req, 1, MM_SET_COEX_CFM, NULL);
++
++ return (error);
++};
++
++
++int rwnx_send_rf_config_req(struct rwnx_hw *rwnx_hw, u8_l ofst, u8_l sel, u8_l *tbl, u16_l len)
++{
++ struct mm_set_rf_config_req *rf_config_req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_RF_CONFIG_REQ message */
++ rf_config_req = rwnx_msg_zalloc(MM_SET_RF_CONFIG_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_rf_config_req));
++
++ if (!rf_config_req) {
++ return -ENOMEM;
++ }
++
++ rf_config_req->table_sel = sel;
++ rf_config_req->table_ofst = ofst;
++ rf_config_req->table_num = 16;
++ rf_config_req->deft_page = 0;
++
++ memcpy(rf_config_req->data, tbl, len);
++
++ /* Send the MM_SET_RF_CONFIG_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, rf_config_req, 1, MM_SET_RF_CONFIG_CFM, NULL);
++
++ return (error);
++}
++
++extern void get_userconfig_xtal_cap(xtal_cap_conf_t *xtal_cap);
++
++int rwnx_send_rf_calib_req(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm)
++{
++ struct mm_set_rf_calib_req *rf_calib_req;
++ xtal_cap_conf_t xtal_cap = {0,};
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_RF_CALIB_REQ message */
++ rf_calib_req = rwnx_msg_zalloc(MM_SET_RF_CALIB_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_rf_calib_req));
++
++ if (!rf_calib_req) {
++ return -ENOMEM;
++ }
++
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ rf_calib_req->cal_cfg_24g = 0xbf;
++ rf_calib_req->cal_cfg_5g = 0x3f;
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){
++ rf_calib_req->cal_cfg_24g = 0x0f8f;
++ rf_calib_req->cal_cfg_5g = 0;
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){
++ rf_calib_req->cal_cfg_24g = 0x0f8f;
++ rf_calib_req->cal_cfg_5g = 0x0f0f;
++ }
++
++ rf_calib_req->param_alpha = 0x0c34c008;
++ rf_calib_req->bt_calib_en = 0;
++ rf_calib_req->bt_calib_param = 0x264203;
++
++ get_userconfig_xtal_cap(&xtal_cap);
++
++ if (xtal_cap.enable) {
++ AICWFDBG(LOGINFO, "user xtal cap: %d, cap_fine: %d\n", xtal_cap.xtal_cap, xtal_cap.xtal_cap_fine);
++ rf_calib_req->xtal_cap = xtal_cap.xtal_cap;
++ rf_calib_req->xtal_cap_fine = xtal_cap.xtal_cap_fine;
++ } else {
++ rf_calib_req->xtal_cap = 0;
++ rf_calib_req->xtal_cap_fine = 0;
++ }
++
++ /* Send the MM_SET_RF_CALIB_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, rf_calib_req, 1, MM_SET_RF_CALIB_CFM, cfm);
++
++ return (error);
++};
++
++int rwnx_send_get_macaddr_req(struct rwnx_hw *rwnx_hw, struct mm_get_mac_addr_cfm *cfm)
++{
++ struct mm_get_mac_addr_req *get_macaddr_req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_GET_MAC_ADDR_REQ message */
++ get_macaddr_req = rwnx_msg_zalloc(MM_GET_MAC_ADDR_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_get_mac_addr_req));
++
++ if (!get_macaddr_req) {
++ return -ENOMEM;
++ }
++
++ get_macaddr_req->get = 1;
++
++ /* Send the MM_GET_MAC_ADDR_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, get_macaddr_req, 1, MM_GET_MAC_ADDR_CFM, cfm);
++
++ return (error);
++}
++
++
++int rwnx_send_get_sta_info_req(struct rwnx_hw *rwnx_hw, u8_l sta_idx, struct mm_get_sta_info_cfm *cfm)
++{
++ struct mm_get_sta_info_req *get_info_req;
++ int error;
++
++ /* Build the MM_GET_STA_INFO_REQ message */
++ get_info_req = rwnx_msg_zalloc(MM_GET_STA_INFO_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_get_sta_info_req));
++
++ if (!get_info_req) {
++ return -ENOMEM;
++ }
++
++ get_info_req->sta_idx = sta_idx;
++
++ /* Send the MM_GET_STA_INFO_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, get_info_req, 1, MM_GET_STA_INFO_CFM, cfm);
++
++ return error;
++}
++
++
++#if 0
++int rwnx_send_get_sta_txinfo_req(struct rwnx_hw *rwnx_hw, u8_l sta_idx, struct mm_get_sta_txinfo_cfm *cfm)
++{
++ struct mm_get_sta_txinfo_req *get_txinfo_req;
++ int error;
++
++
++ /* Build the MM_GET_STA_TXINFO_REQ message */
++ get_txinfo_req = rwnx_msg_zalloc(MM_GET_STA_TXINFO_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_get_sta_txinfo_req));
++
++ if (!get_txinfo_req) {
++ return -ENOMEM;
++ }
++
++ get_txinfo_req->sta_idx = 1;
++
++ /* Send the MM_GET_STA_TXINFO_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, get_txinfo_req, 1, MM_GET_STA_TXINFO_CFM, cfm);
++
++ return (error);
++}
++#endif
++
++int rwnx_send_set_stack_start_req(struct rwnx_hw *rwnx_hw, u8_l on, u8_l efuse_valid, u8_l set_vendor_info,
++ u8_l fwtrace_redir_en, struct mm_set_stack_start_cfm *cfm)
++{
++ struct mm_set_stack_start_req *req;
++ int error;
++
++ /* Build the MM_SET_STACK_START_REQ message */
++ req = rwnx_msg_zalloc(MM_SET_STACK_START_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_stack_start_req));
++
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->is_stack_start = on;
++ req->efuse_valid = efuse_valid;
++ req->set_vendor_info = set_vendor_info;
++ req->fwtrace_redir = fwtrace_redir_en;
++ /* Send the MM_SET_STACK_START_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_STACK_START_CFM, cfm);
++
++ return error;
++}
++
++#if 0
++int rwnx_send_txop_req(struct rwnx_hw *rwnx_hw, uint16_t *txop, u8_l long_nav_en, u8_l cfe_en)
++{
++ struct mm_set_txop_req *req;
++ int error;
++
++ /* Build the MM_SET_TXOP_REQ message */
++ req = rwnx_msg_zalloc(MM_SET_TXOP_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_txop_req));
++
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->txop_bk = txop[0];
++ req->txop_be = txop[1];
++ req->txop_vi = txop[2];
++ req->txop_vo = txop[3];
++ req->long_nav_en = long_nav_en;
++ req->cfe_en = cfe_en;
++
++ /* Send the MM_SET_TXOP_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_TXOP_CFM, NULL);
++
++ return error;
++}
++
++int rwnx_send_vendor_trx_param_req(struct rwnx_hw *rwnx_hw, uint32_t *edca, uint8_t vif_idx, uint8_t retry_cnt)
++{
++ struct mm_set_vendor_trx_param_req *req;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_VENDOR_TRX_PARAM_REQ message */
++ req = rwnx_msg_zalloc(MM_SET_VENDOR_TRX_PARAM_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_vendor_trx_param_req));
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->edca[0] = edca[0];
++ req->edca[1] = edca[1];
++ req->edca[2] = edca[2];
++ req->edca[3] = edca[3];
++ req->vif_idx = vif_idx;
++ req->retry_cnt = retry_cnt;
++
++ /* Send the MM_SET_VENDOR_TRX_PARAM_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req, 1, MM_SET_VENDOR_TRX_PARAM_CFM, NULL);
++
++ return error;
++}
++
++#endif
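++/*
++ * Vendor hardware config request: hwconfig_id selects which request layout
++ * is built (ACS TXOP, channel access, MAC timescale, CCA threshold or
++ * bandwidth mode); every variant is carried in an MM_SET_VENDOR_HWCONFIG_REQ
++ * message and acknowledged by MM_SET_VENDOR_HWCONFIG_CFM.
++ */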
++int rwnx_send_vendor_hwconfig_req(struct rwnx_hw *rwnx_hw, uint32_t hwconfig_id, int32_t *param)
++{
++ struct mm_set_acs_txop_req *req0;
++ struct mm_set_channel_access_req *req1;
++ struct mm_set_mac_timescale_req *req2;
++ struct mm_set_cca_threshold_req *req3;
++ struct mm_set_bwmode_req *req4;
++
++ int error;
++
++ switch (hwconfig_id)
++ {
++ case ACS_TXOP_REQ:
++ /* Build the ACS_TXOP_REQ message */
++ req0 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_acs_txop_req));
++ if (!req0)
++ return -ENOMEM;
++ req0->hwconfig_id = hwconfig_id;
++ req0->txop_be = param[0];
++ req0->txop_bk = param[1];
++ req0->txop_vi = param[2];
++ req0->txop_vo = param[3];
++ printk("set_acs_txop_req: be: %x,bk: %x,vi: %x,vo: %x\n",
++ req0->txop_be, req0->txop_bk, req0->txop_vi, req0->txop_vo);
++ /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req0, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL);
++ break;
++
++ case CHANNEL_ACCESS_REQ:
++ /* Build the CHANNEL_ACCESS_REQ message */
++ req1 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_channel_access_req));
++ if (!req1)
++ return -ENOMEM;
++ req1->hwconfig_id = hwconfig_id;
++ req1->edca[0] = param[0];
++ req1->edca[1] = param[1];
++ req1->edca[2] = param[2];
++ req1->edca[3] = param[3];
++ req1->vif_idx = param[4];
++ req1->retry_cnt = param[5];
++ req1->rts_en = param[6];
++ req1->long_nav_en = param[7];
++ req1->cfe_en = param[8];
++ req1->rc_retry_cnt[0] = param[9];
++ req1->rc_retry_cnt[1] = param[10];
++ req1->rc_retry_cnt[2] = param[11];
++ printk("set_channel_access_req:edca[]= %x %x %x %x\nvif_idx: %x, retry_cnt: %x, rts_en: %x, long_nav_en: %x, cfe_en: %x, rc_retry_cnt: %x:%x:%x\n",
++ req1->edca[0], req1->edca[1], req1->edca[2], req1->edca[3], req1->vif_idx, req1->retry_cnt, req1->rts_en, req1->long_nav_en, req1->cfe_en, req1->rc_retry_cnt[0],req1->rc_retry_cnt[1], req1->rc_retry_cnt[2]);
++ /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req1, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL);
++ break;
++
++ case MAC_TIMESCALE_REQ:
++ /* Build the MAC_TIMESCALE_REQ message */
++ req2 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_mac_timescale_req));
++ if (!req2)
++ return -ENOMEM;
++ req2->hwconfig_id = hwconfig_id;
++ req2->sifsA_time = param[0];
++ req2->sifsB_time = param[1];
++ req2->slot_time = param[2];
++ req2->rx_startdelay_ofdm = param[3];
++ req2->rx_startdelay_long = param[4];
++ req2->rx_startdelay_short = param[5];
++ printk("set_mac_timescale_req:sifsA_time: %x, sifsB_time: %x, slot_time: %x, rx_startdelay ofdm:%x long %x short %x\n",
++ req2->sifsA_time, req2->sifsB_time, req2->slot_time, req2->rx_startdelay_ofdm, req2->rx_startdelay_long, req2->rx_startdelay_short);
++ /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req2, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL);
++ break;
++
++ case CCA_THRESHOLD_REQ:
++ /* Build the CCA_THRESHOLD_REQ message */
++ req3 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_cca_threshold_req));
++ if (!req3)
++ return -ENOMEM;
++ req3->hwconfig_id = hwconfig_id;
++ req3->auto_cca_en = param[0];
++ req3->cca20p_rise_th = param[1];
++ req3->cca20s_rise_th = param[2];
++ req3->cca20p_fall_th = param[3];
++ req3->cca20s_fall_th = param[4];
++ printk("cca_threshold_req: auto_cca_en:%d\ncca20p_rise_th = %d\ncca20s_rise_th = %d\ncca20p_fall_th = %d\ncca20s_fall_th = %d\n",
++ req3->auto_cca_en, req3->cca20p_rise_th, req3->cca20s_rise_th, req3->cca20p_fall_th, req3->cca20s_fall_th);
++ /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req3, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL);
++ break;
++ case BWMODE_REQ:
++ /* Build the SET_BWMODE_REQ message */
++ req4 = rwnx_msg_zalloc(MM_SET_VENDOR_HWCONFIG_REQ, TASK_MM, DRV_TASK_ID, sizeof(struct mm_set_bwmode_req));
++ if (!req4)
++ return -ENOMEM;
++ req4->hwconfig_id = hwconfig_id;
++ req4->bwmode = param[0];
++ printk("bwmode :%d\n", req4->bwmode);
++ /* Send the MM_SET_VENDOR_HWCONFIG_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req4, 1, MM_SET_VENDOR_HWCONFIG_CFM, NULL);
++ break;
++ default:
++ return -EINVAL;
++ }
++ return error;
++}
++
++int rwnx_send_get_fw_version_req(struct rwnx_hw *rwnx_hw, struct mm_get_fw_version_cfm *cfm)
++{
++ void *req;
++ int error;
++
++ /* Build the MM_GET_FW_VERSION_REQ message */
++ req = rwnx_msg_zalloc(MM_GET_FW_VERSION_REQ, TASK_MM, DRV_TASK_ID, sizeof(u8));
++
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ /* Send the MM_GET_FW_VERSION_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, req, 1, MM_GET_FW_VERSION_CFM, cfm);
++
++ return error;
++}
++
++
++extern void get_userconfig_txpwr_idx(txpwr_idx_conf_t *txpwr_idx);
++
++int rwnx_send_txpwr_idx_req(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_set_txpwr_idx_req *txpwr_idx_req;
++ txpwr_idx_conf_t *txpwr_idx;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_TXPWR_IDX_REQ message */
++ txpwr_idx_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_txpwr_idx_req));
++
++ if (!txpwr_idx_req) {
++ return -ENOMEM;
++ }
++
++ txpwr_idx = &txpwr_idx_req->txpwr_idx;
++ txpwr_idx->enable = 1;
++ txpwr_idx->dsss = 9;
++ txpwr_idx->ofdmlowrate_2g4 = 8;
++ txpwr_idx->ofdm64qam_2g4 = 8;
++ txpwr_idx->ofdm256qam_2g4 = 8;
++ txpwr_idx->ofdm1024qam_2g4 = 8;
++ txpwr_idx->ofdmlowrate_5g = 11;
++ txpwr_idx->ofdm64qam_5g = 10;
++ txpwr_idx->ofdm256qam_5g = 9;
++ txpwr_idx->ofdm1024qam_5g = 9;
++
++ get_userconfig_txpwr_idx(txpwr_idx);
++
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_idx->enable);
++ AICWFDBG(LOGINFO, "%s:dsss:%d\r\n", __func__, txpwr_idx->dsss);
++ AICWFDBG(LOGINFO, "%s:ofdmlowrate_2g4:%d\r\n", __func__, txpwr_idx->ofdmlowrate_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdm64qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm64qam_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdm256qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm256qam_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdm1024qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm1024qam_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdmlowrate_5g:%d\r\n", __func__, txpwr_idx->ofdmlowrate_5g);
++ AICWFDBG(LOGINFO, "%s:ofdm64qam_5g:%d\r\n", __func__, txpwr_idx->ofdm64qam_5g);
++ AICWFDBG(LOGINFO, "%s:ofdm256qam_5g:%d\r\n", __func__, txpwr_idx->ofdm256qam_5g);
++ AICWFDBG(LOGINFO, "%s:ofdm1024qam_5g:%d\r\n", __func__, txpwr_idx->ofdm1024qam_5g);
++
++ /* Send the MM_SET_TXPWR_IDX_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, txpwr_idx_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL);
++
++ return (error);
++}
++
++int rwnx_send_txpwr_lvl_req(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_set_txpwr_lvl_req *txpwr_lvl_req;
++ txpwr_lvl_conf_v2_t txpwr_lvl_v2_tmp;
++ txpwr_lvl_conf_v2_t *txpwr_lvl_v2;
++ txpwr_loss_conf_t txpwr_loss_tmp;
++ txpwr_loss_conf_t *txpwr_loss;
++ int error;
++ int i;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_TXPWR_LVL_REQ message */
++ txpwr_lvl_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_txpwr_lvl_req));
++
++ if (!txpwr_lvl_req) {
++ return -ENOMEM;
++ }
++
++ txpwr_lvl_v2 = &txpwr_lvl_v2_tmp;
++ txpwr_loss = &txpwr_loss_tmp;
++ txpwr_loss->loss_enable = 0;
++
++ get_userconfig_txpwr_lvl_v2_in_fdrv(txpwr_lvl_v2);
++ get_userconfig_txpwr_loss(txpwr_loss);
++ if (txpwr_lvl_v2->enable == 0) {
++ rwnx_msg_free(rwnx_hw, txpwr_lvl_req);
++ return 0;
++ } else {
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v2->enable);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[11]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[11]);
++
++ if (txpwr_loss->loss_enable == 1) {
++ AICWFDBG(LOGINFO, "%s:loss_value:%d\r\n", __func__, txpwr_loss->loss_value);
++
++ for (i = 0; i <= 11; i++)
++ txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[i] += txpwr_loss->loss_value;
++ for (i = 0; i <= 9; i++)
++ txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[i] += txpwr_loss->loss_value;
++ for (i = 0; i <= 11; i++)
++ txpwr_lvl_v2->pwrlvl_11ax_2g4[i] += txpwr_loss->loss_value;
++ }
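++ /* For chip_sub_id 0 outside test mode, send the legacy txpwr_lvl layout
++ * filled from representative v2 entries (11M, HE MCS4/7/9/11); otherwise
++ * forward the full v2 table unchanged. */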
++ if ((testmode == 0) && (chip_sub_id == 0)) {
++ txpwr_lvl_req->txpwr_lvl.enable = txpwr_lvl_v2->enable;
++ txpwr_lvl_req->txpwr_lvl.dsss = txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[3]; // 11M
++ txpwr_lvl_req->txpwr_lvl.ofdmlowrate_2g4= txpwr_lvl_v2->pwrlvl_11ax_2g4[4]; // MCS4
++ txpwr_lvl_req->txpwr_lvl.ofdm64qam_2g4 = txpwr_lvl_v2->pwrlvl_11ax_2g4[7]; // MCS7
++ txpwr_lvl_req->txpwr_lvl.ofdm256qam_2g4 = txpwr_lvl_v2->pwrlvl_11ax_2g4[9]; // MCS9
++ txpwr_lvl_req->txpwr_lvl.ofdm1024qam_2g4= txpwr_lvl_v2->pwrlvl_11ax_2g4[11]; // MCS11
++ txpwr_lvl_req->txpwr_lvl.ofdmlowrate_5g = 13; // unused
++ txpwr_lvl_req->txpwr_lvl.ofdm64qam_5g = 13; // unused
++ txpwr_lvl_req->txpwr_lvl.ofdm256qam_5g = 13; // unused
++ txpwr_lvl_req->txpwr_lvl.ofdm1024qam_5g = 13; // unused
++ } else {
++ txpwr_lvl_req->txpwr_lvl_v2 = *txpwr_lvl_v2;
++ }
++
++ /* Send the MM_SET_TXPWR_LVL_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, txpwr_lvl_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL);
++
++ return (error);
++ }
++}
++
++int rwnx_send_txpwr_lvl_v3_req(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_set_txpwr_lvl_req *txpwr_lvl_req;
++ txpwr_lvl_conf_v3_t txpwr_lvl_v3_tmp;
++ txpwr_lvl_conf_v3_t *txpwr_lvl_v3;
++ int error;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_TXPWR_LVL_REQ message */
++ txpwr_lvl_req = rwnx_msg_zalloc(MM_SET_TXPWR_IDX_LVL_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_txpwr_lvl_req));
++
++ if (!txpwr_lvl_req) {
++ return -ENOMEM;
++ }
++
++ txpwr_lvl_v3 = &txpwr_lvl_v3_tmp;
++
++ get_userconfig_txpwr_lvl_v3_in_fdrv(txpwr_lvl_v3);
++
++ if (txpwr_lvl_v3->enable == 0) {
++ rwnx_msg_free(rwnx_hw, txpwr_lvl_req);
++ return 0;
++ } else {
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v3->enable);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[11]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[11]);
++
++ AICWFDBG(LOGINFO, "%s:lvl_11a_1m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_2m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_5m5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_11m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_6m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_9m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_12m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_18m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_24m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_36m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_48m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_54m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[11]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[11]);
++
++ txpwr_lvl_req->txpwr_lvl_v3 = *txpwr_lvl_v3;
++
++ /* Send the MM_SET_TXPWR_LVL_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, txpwr_lvl_req, 1, MM_SET_TXPWR_IDX_LVL_CFM, NULL);
++
++ return (error);
++ }
++}
++
++extern void get_userconfig_txpwr_ofst(txpwr_ofst_conf_t *txpwr_ofst);
++
++int rwnx_send_txpwr_ofst_req(struct rwnx_hw *rwnx_hw)
++{
++ struct mm_set_txpwr_ofst_req *txpwr_ofst_req;
++ txpwr_ofst_conf_t *txpwr_ofst;
++ int error = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_TXPWR_OFST_REQ message */
++ txpwr_ofst_req = rwnx_msg_zalloc(MM_SET_TXPWR_OFST_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_txpwr_ofst_req));
++
++ if (!txpwr_ofst_req) {
++ return -ENOMEM;
++ }
++
++ txpwr_ofst = &txpwr_ofst_req->txpwr_ofst;
++ txpwr_ofst->enable = 0;
++ txpwr_ofst->chan_1_4 = 0;
++ txpwr_ofst->chan_5_9 = 0;
++ txpwr_ofst->chan_10_13 = 0;
++ txpwr_ofst->chan_36_64 = 0;
++ txpwr_ofst->chan_100_120 = 0;
++ txpwr_ofst->chan_122_140 = 0;
++ txpwr_ofst->chan_142_165 = 0;
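++ /* Tx power offsets come from the user configuration: AIC8801 uses the
++ * generic helper, the 8800DC/DW/D81 variants the fdrv-specific one. */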
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8801){
++ get_userconfig_txpwr_ofst(txpwr_ofst);
++ }else if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81){
++ get_userconfig_txpwr_ofst_in_fdrv(txpwr_ofst);
++ }
++ if(txpwr_ofst->enable){
++
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_ofst->enable);
++ AICWFDBG(LOGINFO, "%s:chan_1_4:%d\r\n", __func__, txpwr_ofst->chan_1_4);
++ AICWFDBG(LOGINFO, "%s:chan_5_9:%d\r\n", __func__, txpwr_ofst->chan_5_9);
++ AICWFDBG(LOGINFO, "%s:chan_10_13:%d\r\n", __func__, txpwr_ofst->chan_10_13);
++ AICWFDBG(LOGINFO, "%s:chan_36_64:%d\r\n", __func__, txpwr_ofst->chan_36_64);
++ AICWFDBG(LOGINFO, "%s:chan_100_120:%d\r\n", __func__, txpwr_ofst->chan_100_120);
++ AICWFDBG(LOGINFO, "%s:chan_122_140:%d\r\n", __func__, txpwr_ofst->chan_122_140);
++ AICWFDBG(LOGINFO, "%s:chan_142_165:%d\r\n", __func__, txpwr_ofst->chan_142_165);
++
++ /* Send the MM_SET_TXPWR_OFST_REQ message to UMAC FW */
++ error = rwnx_send_msg(rwnx_hw, txpwr_ofst_req, 1, MM_SET_TXPWR_OFST_CFM, NULL);
++ }else{
++ AICWFDBG(LOGINFO, "%s:Do not use txpwr_ofst\r\n", __func__);
++ rwnx_msg_free(rwnx_hw, txpwr_ofst_req);
++ }
++
++ return (error);
++}
++
++int rwnx_send_set_filter(struct rwnx_hw *rwnx_hw, uint32_t filter)
++{
++ struct mm_set_filter_req *set_filter_req_param;
++ uint32_t rx_filter = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_SET_FILTER_REQ message */
++ set_filter_req_param =
++ rwnx_msg_zalloc(MM_SET_FILTER_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_set_filter_req));
++ if (!set_filter_req_param)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_SET_FILTER_REQ message */
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0)
++ if (filter & FIF_PROMISC_IN_BSS)
++ rx_filter |= NXMAC_ACCEPT_UNICAST_BIT;
++#endif
++ if (filter & FIF_ALLMULTI)
++ rx_filter |= NXMAC_ACCEPT_MULTICAST_BIT;
++
++ if (filter & (FIF_FCSFAIL | FIF_PLCPFAIL))
++ rx_filter |= NXMAC_ACCEPT_ERROR_FRAMES_BIT;
++
++ if (filter & FIF_BCN_PRBRESP_PROMISC)
++ rx_filter |= NXMAC_ACCEPT_OTHER_BSSID_BIT;
++
++ if (filter & FIF_CONTROL)
++ rx_filter |= NXMAC_ACCEPT_OTHER_CNTRL_FRAMES_BIT |
++ NXMAC_ACCEPT_CF_END_BIT |
++ NXMAC_ACCEPT_ACK_BIT |
++ NXMAC_ACCEPT_CTS_BIT |
++ NXMAC_ACCEPT_RTS_BIT |
++ NXMAC_ACCEPT_BA_BIT | NXMAC_ACCEPT_BAR_BIT;
++
++ if (filter & FIF_OTHER_BSS)
++ rx_filter |= NXMAC_ACCEPT_OTHER_BSSID_BIT;
++
++ if (filter & FIF_PSPOLL) {
++ /* TODO: check if the MAC filters apply to our BSSID or is general */
++ rx_filter |= NXMAC_ACCEPT_PS_POLL_BIT;
++ }
++
++ if (filter & FIF_PROBE_REQ) {
++ rx_filter |= NXMAC_ACCEPT_PROBE_REQ_BIT;
++ rx_filter |= NXMAC_ACCEPT_ALL_BEACON_BIT;
++ }
++
++ /* Add the filter flags that are set by default and cannot be changed here */
++ rx_filter |= RWNX_MAC80211_NOT_CHANGEABLE;
++
++ /* XXX */
++ //if (ieee80211_hw_check(rwnx_hw->hw, AMPDU_AGGREGATION))
++ rx_filter |= NXMAC_ACCEPT_BA_BIT;
++
++ /* Now copy all the flags into the message parameter */
++ set_filter_req_param->filter = rx_filter;
++
++ RWNX_DBG("new total_flags = 0x%08x\nrx filter set to 0x%08x\n",
++ filter, rx_filter);
++
++ /* Send the MM_SET_FILTER_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, set_filter_req_param, 1, MM_SET_FILTER_CFM, NULL);
++}
++
++/******************************************************************************
++ * Control messages handling functions (FULLMAC only)
++ *****************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++#ifdef CONFIG_HE_FOR_OLD_KERNEL
++extern struct ieee80211_sband_iftype_data rwnx_he_capa;
++#endif
++#ifdef CONFIG_VHT_FOR_OLD_KERNEL
++static struct ieee80211_sta_vht_cap* rwnx_vht_capa;
++#endif
++int rwnx_send_me_config_req(struct rwnx_hw *rwnx_hw)
++{
++ struct me_config_req *req;
++ struct wiphy *wiphy = rwnx_hw->wiphy;
++
++ //#ifdef USE_5G
++ //struct ieee80211_sta_ht_cap *ht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap;
++ //struct ieee80211_sta_vht_cap *vht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
++ //#else
++ //struct ieee80211_sta_ht_cap *ht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap;
++ //struct ieee80211_sta_vht_cap *vht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap;
++ //#endif
++ struct ieee80211_sta_ht_cap *ht_cap;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++ struct ieee80211_sta_vht_cap *vht_cap;
++ #endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ struct ieee80211_sta_he_cap const *he_cap;
++#else
++ #ifdef CONFIG_HE_FOR_OLD_KERNEL
++ struct ieee80211_sta_he_cap const *he_cap;
++ #endif
++#endif
++ //uint8_t *ht_mcs = (uint8_t *)&ht_cap->mcs;
++ uint8_t *ht_mcs;
++ int i;
++
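++ /* Advertise HT/VHT capabilities from the 5 GHz band when the chip
++ * supports it, otherwise from the 2.4 GHz band. */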
++ if (rwnx_hw->band_5g_support) {
++ ht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++ vht_cap = &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
++ #endif
++ } else {
++ ht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++ vht_cap = &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap;
++ #endif
++ }
++ #ifdef CONFIG_VHT_FOR_OLD_KERNEL
++ rwnx_vht_capa = vht_cap;
++ #endif
++
++ ht_mcs = (uint8_t *)&ht_cap->mcs;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_CONFIG_REQ message */
++ req = rwnx_msg_zalloc(ME_CONFIG_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_config_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_CONFIG_REQ message */
++ req->ht_supp = ht_cap->ht_supported;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++ req->vht_supp = vht_cap->vht_supported;
++ #endif
++ req->ht_cap.ht_capa_info = cpu_to_le16(ht_cap->cap | IEEE80211_HT_CAP_LDPC_CODING);
++ req->ht_cap.a_mpdu_param = ht_cap->ampdu_factor |
++ (ht_cap->ampdu_density <<
++ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
++ for (i = 0; i < sizeof(ht_cap->mcs); i++)
++ req->ht_cap.mcs_rate[i] = ht_mcs[i];
++ req->ht_cap.ht_extended_capa = 0;
++ req->ht_cap.tx_beamforming_capa = 0;
++ req->ht_cap.asel_capa = 0;
++
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(CONFIG_VHT_FOR_OLD_KERNEL)
++ if(req->vht_supp) {
++ req->vht_cap.vht_capa_info = cpu_to_le32(vht_cap->cap);
++ req->vht_cap.rx_highest = cpu_to_le16(vht_cap->vht_mcs.rx_highest);
++ req->vht_cap.rx_mcs_map = cpu_to_le16(vht_cap->vht_mcs.rx_mcs_map);
++ req->vht_cap.tx_highest = cpu_to_le16(vht_cap->vht_mcs.tx_highest);
++ req->vht_cap.tx_mcs_map = cpu_to_le16(vht_cap->vht_mcs.tx_mcs_map);
++ }
++ #endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) || defined(CONFIG_HE_FOR_OLD_KERNEL)
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)
++ if (wiphy->bands[NL80211_BAND_2GHZ]->iftype_data != NULL) {
++ he_cap = &wiphy->bands[NL80211_BAND_2GHZ]->iftype_data->he_cap;
++ #endif
++ #if defined(CONFIG_HE_FOR_OLD_KERNEL)
++ if (1) {
++ he_cap = &rwnx_he_capa.he_cap;
++ #endif
++
++ req->he_supp = he_cap->has_he;
++
++ for (i = 0; i < ARRAY_SIZE(he_cap->he_cap_elem.mac_cap_info); i++) {
++ req->he_cap.mac_cap_info[i] = he_cap->he_cap_elem.mac_cap_info[i];
++ }
++ for (i = 0; i < ARRAY_SIZE(he_cap->he_cap_elem.phy_cap_info); i++) {
++ req->he_cap.phy_cap_info[i] = he_cap->he_cap_elem.phy_cap_info[i];
++ }
++ req->he_cap.mcs_supp.rx_mcs_80 = cpu_to_le16(he_cap->he_mcs_nss_supp.rx_mcs_80);
++ req->he_cap.mcs_supp.tx_mcs_80 = cpu_to_le16(he_cap->he_mcs_nss_supp.tx_mcs_80);
++ req->he_cap.mcs_supp.rx_mcs_160 = cpu_to_le16(he_cap->he_mcs_nss_supp.rx_mcs_160);
++ req->he_cap.mcs_supp.tx_mcs_160 = cpu_to_le16(he_cap->he_mcs_nss_supp.tx_mcs_160);
++ req->he_cap.mcs_supp.rx_mcs_80p80 = cpu_to_le16(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
++ req->he_cap.mcs_supp.tx_mcs_80p80 = cpu_to_le16(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
++ for (i = 0; i < MAC_HE_PPE_THRES_MAX_LEN; i++) {
++ req->he_cap.ppe_thres[i] = he_cap->ppe_thres[i];
++ }
++ req->he_ul_on = rwnx_hw->mod_params->he_ul_on;
++ }
++
++#else
++ req->he_supp = false;
++ req->he_ul_on = false;
++#endif
++ req->ps_on = rwnx_hw->mod_params->ps_on;
++ req->dpsm = rwnx_hw->mod_params->dpsm;
++ req->tx_lft = rwnx_hw->mod_params->tx_lft;
++ req->ant_div_on = rwnx_hw->mod_params->ant_div;
++
++ if (rwnx_hw->mod_params->use_80){
++ req->phy_bw_max = PHY_CHNL_BW_80;
++ }else if (rwnx_hw->mod_params->use_2040){
++ req->phy_bw_max = PHY_CHNL_BW_40;
++ }else{
++ req->phy_bw_max = PHY_CHNL_BW_20;
++ }
++
++#if 0
++ if(!rwnx_hw->he_flag){
++ req->he_supp = false;
++ req->he_ul_on = false;
++ }
++
++ wiphy_info(wiphy, "HT supp %d, VHT supp %d, HE supp %d\n", req->ht_supp,
++ req->vht_supp,
++ req->he_supp);
++#endif
++
++ AICWFDBG(LOGINFO, "HT supp %d, VHT supp %d, HE supp %d\n", req->ht_supp,
++ req->vht_supp,
++ req->he_supp);
++
++ /* Send the ME_CONFIG_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_CONFIG_CFM, NULL);
++}
++int rwnx_send_me_chan_config_req(struct rwnx_hw *rwnx_hw)
++{
++ struct me_chan_config_req *req;
++ struct wiphy *wiphy = rwnx_hw->wiphy;
++ int i;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_CHAN_CONFIG_REQ message */
++ req = rwnx_msg_zalloc(ME_CHAN_CONFIG_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_chan_config_req));
++ if (!req)
++ return -ENOMEM;
++
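++ /* Copy every wiphy channel (band, frequency, flags and tx power, with
++ * disabled channels marked) into the firmware channel list, capped at
++ * the per-band maximum. */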
++ req->chan2G4_cnt= 0;
++ if (wiphy->bands[NL80211_BAND_2GHZ] != NULL) {
++ struct ieee80211_supported_band *b = wiphy->bands[NL80211_BAND_2GHZ];
++ for (i = 0; i < b->n_channels; i++) {
++ req->chan2G4[req->chan2G4_cnt].flags = 0;
++ if (b->channels[i].flags & IEEE80211_CHAN_DISABLED)
++ req->chan2G4[req->chan2G4_cnt].flags |= CHAN_DISABLED;
++ req->chan2G4[req->chan2G4_cnt].flags |= get_chan_flags(b->channels[i].flags);
++ req->chan2G4[req->chan2G4_cnt].band = NL80211_BAND_2GHZ;
++ req->chan2G4[req->chan2G4_cnt].freq = b->channels[i].center_freq;
++ req->chan2G4[req->chan2G4_cnt].tx_power = chan_to_fw_pwr(b->channels[i].max_power);
++ req->chan2G4_cnt++;
++ if (req->chan2G4_cnt == MAC_DOMAINCHANNEL_24G_MAX)
++ break;
++ }
++ }
++
++ req->chan5G_cnt = 0;
++ if (wiphy->bands[NL80211_BAND_5GHZ] != NULL) {
++ struct ieee80211_supported_band *b = wiphy->bands[NL80211_BAND_5GHZ];
++ for (i = 0; i < b->n_channels; i++) {
++ req->chan5G[req->chan5G_cnt].flags = 0;
++ if (b->channels[i].flags & IEEE80211_CHAN_DISABLED)
++ req->chan5G[req->chan5G_cnt].flags |= CHAN_DISABLED;
++ req->chan5G[req->chan5G_cnt].flags |= get_chan_flags(b->channels[i].flags);
++ req->chan5G[req->chan5G_cnt].band = NL80211_BAND_5GHZ;
++ req->chan5G[req->chan5G_cnt].freq = b->channels[i].center_freq;
++ req->chan5G[req->chan5G_cnt].tx_power = chan_to_fw_pwr(b->channels[i].max_power);
++ req->chan5G_cnt++;
++ if (req->chan5G_cnt == MAC_DOMAINCHANNEL_5G_MAX)
++ break;
++ }
++ }
++
++ /* Send the ME_CHAN_CONFIG_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_CHAN_CONFIG_CFM, NULL);
++}
++
++int rwnx_send_me_set_control_port_req(struct rwnx_hw *rwnx_hw, bool opened, u8 sta_idx)
++{
++ struct me_set_control_port_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_SET_CONTROL_PORT_REQ message */
++ req = rwnx_msg_zalloc(ME_SET_CONTROL_PORT_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_set_control_port_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_SET_CONTROL_PORT_REQ message */
++ req->sta_idx = sta_idx;
++ req->control_port_open = opened;
++
++ /* Send the ME_SET_CONTROL_PORT_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_SET_CONTROL_PORT_CFM, NULL);
++}
++
++int rwnx_send_me_sta_add(struct rwnx_hw *rwnx_hw, struct station_parameters *params,
++ const u8 *mac, u8 inst_nbr, struct me_sta_add_cfm *cfm)
++{
++ struct me_sta_add_req *req;
++
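++ /* On newer kernels (HIGH_KERNEL_VERSION and above) the per-link station
++ * fields live under params->link_sta_params; older kernels expose them
++ * directly in struct station_parameters. */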
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ u8 *ht_mcs = (u8 *)&params->ht_capa->mcs;
++#else
++ u8 *ht_mcs = (u8 *)&params->link_sta_params.ht_capa->mcs;
++#endif//HIGH_KERNEL_VERSION
++
++ int i;
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[inst_nbr];
++ #if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL)
++ struct aic_sta *sta = &rwnx_hw->aic_table[rwnx_vif->ap.aic_index];
++ printk("assoc_req idx %d, he: %d, vht: %d\n ", rwnx_vif->ap.aic_index, sta->he, sta->vht);
++ if (rwnx_vif->ap.aic_index < NX_REMOTE_STA_MAX + NX_VIRT_DEV_MAX)
++ rwnx_vif->ap.aic_index++;
++ else
++ rwnx_vif->ap.aic_index = 0;
++ #endif
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_STA_ADD_REQ message */
++ req = rwnx_msg_zalloc(ME_STA_ADD_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_sta_add_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_STA_ADD_REQ message */
++ memcpy(&(req->mac_addr.array[0]), mac, ETH_ALEN);
++
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ req->rate_set.length = params->supported_rates_len;
++#else
++ req->rate_set.length = params->link_sta_params.supported_rates_len;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++
++ for (i = 0; i < req->rate_set.length; i++){
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ req->rate_set.array[i] = params->supported_rates[i];
++#else
++ req->rate_set.array[i] = params->link_sta_params.supported_rates[i];
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ }
++
++ req->flags = 0;
++
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ if (params->ht_capa) {
++ const struct ieee80211_ht_cap *ht_capa = params->ht_capa;
++#else
++ if (params->link_sta_params.ht_capa) {
++ const struct ieee80211_ht_cap *ht_capa = params->link_sta_params.ht_capa;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++
++
++ req->flags |= STA_HT_CAPA;
++ req->ht_cap.ht_capa_info = cpu_to_le16(ht_capa->cap_info);
++ req->ht_cap.a_mpdu_param = ht_capa->ampdu_params_info;
++ for (i = 0; i < sizeof(ht_capa->mcs); i++)
++ req->ht_cap.mcs_rate[i] = ht_mcs[i];
++ req->ht_cap.ht_extended_capa = cpu_to_le16(ht_capa->extended_ht_cap_info);
++ req->ht_cap.tx_beamforming_capa = cpu_to_le32(ht_capa->tx_BF_cap_info);
++ req->ht_cap.asel_capa = ht_capa->antenna_selection_info;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ if (params->vht_capa) {
++ const struct ieee80211_vht_cap *vht_capa = params->vht_capa;
++#else
++ if (params->link_sta_params.vht_capa) {
++ const struct ieee80211_vht_cap *vht_capa = params->link_sta_params.vht_capa;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ req->flags |= STA_VHT_CAPA;
++ req->vht_cap.vht_capa_info = cpu_to_le32(vht_capa->vht_cap_info);
++ req->vht_cap.rx_highest = cpu_to_le16(vht_capa->supp_mcs.rx_highest);
++ req->vht_cap.rx_mcs_map = cpu_to_le16(vht_capa->supp_mcs.rx_mcs_map);
++ req->vht_cap.tx_highest = cpu_to_le16(vht_capa->supp_mcs.tx_highest);
++ req->vht_cap.tx_mcs_map = cpu_to_le16(vht_capa->supp_mcs.tx_mcs_map);
++ }
++#elif defined(CONFIG_VHT_FOR_OLD_KERNEL)
++ if (sta->vht) {
++ const struct ieee80211_vht_cap *vht_capa = rwnx_vht_capa;
++
++ req->flags |= STA_VHT_CAPA;
++ req->vht_cap.vht_capa_info = cpu_to_le32(vht_capa->vht_cap_info);
++ req->vht_cap.rx_highest = cpu_to_le16(vht_capa->supp_mcs.rx_highest);
++ req->vht_cap.rx_mcs_map = cpu_to_le16(vht_capa->supp_mcs.rx_mcs_map);
++ req->vht_cap.tx_highest = cpu_to_le16(vht_capa->supp_mcs.tx_highest);
++ req->vht_cap.tx_mcs_map = cpu_to_le16(vht_capa->supp_mcs.tx_mcs_map);
++ }
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ if (params->he_capa) {
++ const struct ieee80211_he_cap_elem *he_capa = params->he_capa;
++#else
++ if (params->link_sta_params.he_capa) {
++ const struct ieee80211_he_cap_elem *he_capa = params->link_sta_params.he_capa;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ struct ieee80211_he_mcs_nss_supp *mcs_nss_supp =
++ (struct ieee80211_he_mcs_nss_supp *)(he_capa + 1);
++
++ req->flags |= STA_HE_CAPA;
++ for (i = 0; i < ARRAY_SIZE(he_capa->mac_cap_info); i++) {
++ req->he_cap.mac_cap_info[i] = he_capa->mac_cap_info[i];
++ }
++ for (i = 0; i < ARRAY_SIZE(he_capa->phy_cap_info); i++) {
++ req->he_cap.phy_cap_info[i] = he_capa->phy_cap_info[i];
++ }
++ req->he_cap.mcs_supp.rx_mcs_80 = mcs_nss_supp->rx_mcs_80;
++ req->he_cap.mcs_supp.tx_mcs_80 = mcs_nss_supp->tx_mcs_80;
++ req->he_cap.mcs_supp.rx_mcs_160 = mcs_nss_supp->rx_mcs_160;
++ req->he_cap.mcs_supp.tx_mcs_160 = mcs_nss_supp->tx_mcs_160;
++ req->he_cap.mcs_supp.rx_mcs_80p80 = mcs_nss_supp->rx_mcs_80p80;
++ req->he_cap.mcs_supp.tx_mcs_80p80 = mcs_nss_supp->tx_mcs_80p80;
++ }
++#else
++ #ifdef CONFIG_HE_FOR_OLD_KERNEL
++ if (sta->he) {
++ const struct ieee80211_he_cap_elem *he_capa = &rwnx_he_capa.he_cap.he_cap_elem;
++ struct ieee80211_he_mcs_nss_supp *mcs_nss_supp =
++ (struct ieee80211_he_mcs_nss_supp *)(he_capa + 1);
++ req->flags |= STA_HE_CAPA;
++ for (i = 0; i < ARRAY_SIZE(he_capa->mac_cap_info); i++) {
++ req->he_cap.mac_cap_info[i] = he_capa->mac_cap_info[i];
++ }
++ for (i = 0; i < ARRAY_SIZE(he_capa->phy_cap_info); i++) {
++ req->he_cap.phy_cap_info[i] = he_capa->phy_cap_info[i];
++ }
++ req->he_cap.mcs_supp.rx_mcs_80 = mcs_nss_supp->rx_mcs_80;
++ req->he_cap.mcs_supp.tx_mcs_80 = mcs_nss_supp->tx_mcs_80;
++ req->he_cap.mcs_supp.rx_mcs_160 = mcs_nss_supp->rx_mcs_160;
++ req->he_cap.mcs_supp.tx_mcs_160 = mcs_nss_supp->tx_mcs_160;
++ req->he_cap.mcs_supp.rx_mcs_80p80 = mcs_nss_supp->rx_mcs_80p80;
++ req->he_cap.mcs_supp.tx_mcs_80p80 = mcs_nss_supp->tx_mcs_80p80;
++ }
++ #endif
++#endif
++
++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_WME))
++ req->flags |= STA_QOS_CAPA;
++
++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_MFP))
++ req->flags |= STA_MFP_CAPA;
++
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++#if LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ if (params->opmode_notif_used) {
++ req->opmode = params->opmode_notif;
++#else
++ if (params->link_sta_params.opmode_notif_used) {
++ req->opmode = params->link_sta_params.opmode_notif;
++#endif//LINUX_VERSION_CODE < HIGH_KERNEL_VERSION
++ req->flags |= STA_OPMOD_NOTIF;
++ }
++ #endif
++
++ req->aid = cpu_to_le16(params->aid);
++ req->uapsd_queues = params->uapsd_queues;
++ req->max_sp_len = params->max_sp * 2;
++ req->vif_idx = inst_nbr;
++
++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
++ //struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[inst_nbr];
++ req->tdls_sta = true;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ if ((params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) &&
++ !rwnx_vif->tdls_chsw_prohibited)
++ req->tdls_chsw_allowed = true;
++#endif
++ if (rwnx_vif->tdls_status == TDLS_SETUP_RSP_TX)
++ req->tdls_sta_initiator = true;
++ }
++
++ /* Send the ME_STA_ADD_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_STA_ADD_CFM, cfm);
++}
++
++int rwnx_send_me_sta_del(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool tdls_sta)
++{
++ struct me_sta_del_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_STA_DEL_REQ message */
++ req = rwnx_msg_zalloc(ME_STA_DEL_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_sta_del_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_STA_DEL_REQ message */
++ req->sta_idx = sta_idx;
++ req->tdls_sta = tdls_sta;
++
++ /* Send the ME_STA_DEL_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_STA_DEL_CFM, NULL);
++}
++
++int rwnx_send_me_traffic_ind(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool uapsd, u8 tx_status)
++{
++ struct me_traffic_ind_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_TRAFFIC_IND_REQ message */
++ req = rwnx_msg_zalloc(ME_TRAFFIC_IND_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_traffic_ind_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_TRAFFIC_IND_REQ message */
++ req->sta_idx = sta_idx;
++ req->tx_avail = tx_status;
++ req->uapsd = uapsd;
++
++ /* Send the ME_TRAFFIC_IND_REQ to UMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_TRAFFIC_IND_CFM, NULL);
++}
++
++int rwnx_send_me_rc_stats(struct rwnx_hw *rwnx_hw,
++ u8 sta_idx,
++ struct me_rc_stats_cfm *cfm)
++{
++ struct me_rc_stats_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_RC_STATS_REQ message */
++ req = rwnx_msg_zalloc(ME_RC_STATS_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_rc_stats_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_RC_STATS_REQ message */
++ req->sta_idx = sta_idx;
++
++ /* Send the ME_RC_STATS_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_RC_STATS_CFM, cfm);
++}
++
++int rwnx_send_me_rc_set_rate(struct rwnx_hw *rwnx_hw,
++ u8 sta_idx,
++ u16 rate_cfg)
++{
++ struct me_rc_set_rate_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_RC_SET_RATE_REQ message */
++ req = rwnx_msg_zalloc(ME_RC_SET_RATE_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_rc_set_rate_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_RC_SET_RATE_REQ message */
++ req->sta_idx = sta_idx;
++ req->fixed_rate_cfg = rate_cfg;
++
++ /* Send the ME_RC_SET_RATE_REQ message to FW */
++ return rwnx_send_msg(rwnx_hw, req, 0, 0, NULL);
++}
++
++int rwnx_send_me_set_ps_mode(struct rwnx_hw *rwnx_hw, u8 ps_mode)
++{
++ struct me_set_ps_mode_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_SET_PS_MODE_REQ message */
++ req = rwnx_msg_zalloc(ME_SET_PS_MODE_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_set_ps_mode_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_SET_PS_MODE_REQ message */
++ req->ps_state = ps_mode;
++
++ /* Send the ME_SET_PS_MODE_REQ message to FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_SET_PS_MODE_CFM, NULL);
++}
++
++int rwnx_send_me_set_lp_level(struct rwnx_hw *rwnx_hw, u8 lp_level)
++{
++ struct me_set_lp_level_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_SET_LP_LEVEL_REQ message */
++ req = rwnx_msg_zalloc(ME_SET_LP_LEVEL_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_set_lp_level_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the ME_SET_LP_LEVEL_REQ message */
++ req->lp_level = lp_level;
++
++ /* Send the ME_SET_LP_LEVEL_REQ message to FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_SET_LP_LEVEL_CFM, NULL);
++}
++
++int rwnx_send_sm_connect_req(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ struct cfg80211_connect_params *sme,
++ struct sm_connect_cfm *cfm)
++{
++ struct sm_connect_req *req;
++ int i;
++ u32_l flags = 0;
++ bool gval = false;
++ bool pval = false;
++
++ rwnx_vif->wep_enabled = false;
++ rwnx_vif->wep_auth_err = false;
++ rwnx_vif->last_auth_type = 0;
++
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the SM_CONNECT_REQ message */
++ req = rwnx_msg_zalloc(SM_CONNECT_REQ, TASK_SM, DRV_TASK_ID,
++ sizeof(struct sm_connect_req));
++ if (!req)
++ return -ENOMEM;
++
++ if ((sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP40) ||
++ (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_WEP104)) {
++ gval = true;
++ }
++
++ if (sme->crypto.n_ciphers_pairwise &&
++ ((sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP40) ||
++ (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP104))) {
++ pval = true;
++ }
++
++ /* Set parameters for the SM_CONNECT_REQ message */
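++ /* 802.11n does not allow HT operation with WEP or TKIP, so disable HT
++ * when one of these pairwise ciphers is selected. */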
++ if (sme->crypto.n_ciphers_pairwise &&
++ ((sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP40) ||
++ (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_TKIP) ||
++ (sme->crypto.ciphers_pairwise[0] == WLAN_CIPHER_SUITE_WEP104)))
++ flags |= DISABLE_HT;
++
++ if (sme->crypto.control_port)
++ flags |= CONTROL_PORT_HOST;
++
++ if (sme->crypto.control_port_no_encrypt)
++ flags |= CONTROL_PORT_NO_ENC;
++
++ if (use_pairwise_key(&sme->crypto))
++ flags |= WPA_WPA2_IN_USE;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) || defined (CONFIG_WPA3_FOR_OLD_KERNEL)
++ if (sme->mfp == NL80211_MFP_REQUIRED)
++ flags |= MFP_IN_USE;
++#endif
++ if (rwnx_vif->sta.ap)
++ flags |= REASSOCIATION;
++
++ req->ctrl_port_ethertype = sme->crypto.control_port_ethertype;
++
++ if (sme->bssid)
++ memcpy(&req->bssid, sme->bssid, ETH_ALEN);
++ else
++ req->bssid = mac_addr_bcst;
++ req->vif_idx = rwnx_vif->vif_index;
++ if (sme->channel) {
++ req->chan.band = sme->channel->band;
++ req->chan.freq = sme->channel->center_freq;
++ req->chan.flags = get_chan_flags(sme->channel->flags);
++ } else {
++ req->chan.freq = (u16_l)-1;
++ }
++ for (i = 0; i < sme->ssid_len; i++)
++ req->ssid.array[i] = sme->ssid[i];
++ req->ssid.length = sme->ssid_len;
++
++ req->flags = flags;
++ if (WARN_ON(sme->ie_len > sizeof(req->ie_buf)))
++ goto invalid_param;
++ if (sme->ie_len)
++ memcpy(req->ie_buf, sme->ie, sme->ie_len);
++ req->ie_len = sme->ie_len;
++ req->listen_interval = rwnx_mod_params.listen_itv;
++ req->dont_wait_bcmc = !rwnx_mod_params.listen_bcmc;
++
++ /* Set auth_type */
++ if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC)
++ req->auth_type = WLAN_AUTH_OPEN;
++ else if (sme->auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
++ req->auth_type = WLAN_AUTH_OPEN;
++ else if (sme->auth_type == NL80211_AUTHTYPE_SHARED_KEY)
++ req->auth_type = WLAN_AUTH_SHARED_KEY;
++ else if (sme->auth_type == NL80211_AUTHTYPE_FT)
++ req->auth_type = WLAN_AUTH_FT;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) || defined (CONFIG_WPA3_FOR_OLD_KERNEL)
++ else if (sme->auth_type == NL80211_AUTHTYPE_SAE)
++ req->auth_type = WLAN_AUTH_SAE;
++#endif
++ else
++ goto invalid_param;
++
++ /* Set UAPSD queues */
++ req->uapsd_queues = rwnx_mod_params.uapsd_queues;
++
++ rwnx_vif->wep_enabled = pval & gval;
++
++ if (rwnx_vif->wep_enabled) {
++ rwnx_vif->last_auth_type = sme->auth_type;
++ }
++#ifdef CONFIG_USE_WIRELESS_EXT
++ memset(rwnx_hw->wext_essid, 0, 32);
++ memcpy(rwnx_hw->wext_essid, sme->ssid, (int)sme->ssid_len);
++#endif
++
++ rwnx_vif->sta.ssid_len = (int)sme->ssid_len;
++ memset(rwnx_vif->sta.ssid, 0, rwnx_vif->sta.ssid_len + 1);
++ memcpy(rwnx_vif->sta.ssid, sme->ssid, rwnx_vif->sta.ssid_len);
++ if (sme->bssid)
++  memcpy(rwnx_vif->sta.bssid, sme->bssid, ETH_ALEN);
++
++ AICWFDBG(LOGINFO, "%s drv_vif_index:%d connect to %s(%d) channel:%d auth_type:%d\r\n",
++ __func__,
++ rwnx_vif->drv_vif_index,
++ rwnx_vif->sta.ssid,
++ rwnx_vif->sta.ssid_len,
++ req->chan.freq,
++ req->auth_type);
++
++ /* Send the SM_CONNECT_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, SM_CONNECT_CFM, cfm);
++
++invalid_param:
++ rwnx_msg_free(rwnx_hw, req);
++ return -EINVAL;
++}
++
++int rwnx_send_sm_disconnect_req(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ u16 reason)
++{
++ struct sm_disconnect_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the SM_DISCONNECT_REQ message */
++ req = rwnx_msg_zalloc(SM_DISCONNECT_REQ, TASK_SM, DRV_TASK_ID,
++ sizeof(struct sm_disconnect_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the SM_DISCONNECT_REQ message */
++ req->reason_code = reason;
++ req->vif_idx = rwnx_vif->vif_index;
++
++ /* Send the SM_DISCONNECT_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, SM_DISCONNECT_CFM, NULL);
++}
++
++int rwnx_send_sm_external_auth_required_rsp(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ u16 status)
++{
++ struct sm_external_auth_required_rsp *rsp;
++
++ /* Build the SM_EXTERNAL_AUTH_REQUIRED_RSP message */
++ rsp = rwnx_msg_zalloc(SM_EXTERNAL_AUTH_REQUIRED_RSP, TASK_SM, DRV_TASK_ID,
++ sizeof(struct sm_external_auth_required_rsp));
++ if (!rsp)
++ return -ENOMEM;
++
++ rsp->status = status;
++ rsp->vif_idx = rwnx_vif->vif_index;
++
++ /* Send the SM_EXTERNAL_AUTH_REQUIRED_RSP message to UMAC FW */
++ return rwnx_send_msg(rwnx_hw, rsp, 0, 0, NULL);
++}
++
++int rwnx_send_apm_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct cfg80211_ap_settings *settings,
++ struct apm_start_cfm *cfm,
++ struct rwnx_ipc_elem_var *elem)
++{
++ struct apm_start_req *req;
++ struct rwnx_bcn *bcn = &vif->ap.bcn;
++ u8 *buf;
++ u32 flags = 0;
++ const u8 *rate_ie;
++ u8 rate_len = 0;
++ int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
++ const u8 *var_pos;
++ int len, i;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the APM_START_REQ message */
++ req = rwnx_msg_zalloc(APM_START_REQ, TASK_APM, DRV_TASK_ID,
++ sizeof(struct apm_start_req));
++ if (!req)
++ return -ENOMEM;
++
++ // Build the beacon
++ bcn->dtim = (u8)settings->dtim_period;
++ buf = rwnx_build_bcn(bcn, &settings->beacon);
++ if (!buf) {
++ rwnx_msg_free(rwnx_hw, req);
++ return -ENOMEM;
++ }
++
++ // Retrieve the basic rate set from the beacon buffer
++ len = bcn->len - var_offset;
++ var_pos = buf + var_offset;
++
++// Assume that rates higher than 54 Mbps are BSS membership selectors
++#define IS_BASIC_RATE(r) (((r) & 0x80) && (((r) & ~0x80) <= (54 * 2)))
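++// (Supported Rates entries are in 0.5 Mbps units with the MSB marking a
++// basic rate, hence the (54 * 2) bound.)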
++
++ rate_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
++ if (rate_ie) {
++ const u8 *rates = rate_ie + 2;
++ for (i = 0; (i < rate_ie[1]) && (rate_len < MAC_RATESET_LEN); i++) {
++ if (IS_BASIC_RATE(rates[i]))
++ req->basic_rates.array[rate_len++] = rates[i];
++ }
++ }
++ rate_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, var_pos, len);
++ if (rate_ie) {
++ const u8 *rates = rate_ie + 2;
++ for (i = 0; (i < rate_ie[1]) && (rate_len < MAC_RATESET_LEN); i++) {
++ if (IS_BASIC_RATE(rates[i]))
++ req->basic_rates.array[rate_len++] = rates[i];
++ }
++ }
++ req->basic_rates.length = rate_len;
++#undef IS_BASIC_RATE
++
++ #if 0
++ // Sync buffer for FW
++ if ((error = rwnx_ipc_elem_var_allocs(rwnx_hw, elem, bcn->len,
++ DMA_TO_DEVICE, buf, NULL, NULL))) {
++ return error;
++ }
++ #else
++ rwnx_send_bcn(rwnx_hw, buf, vif->vif_index, bcn->len);
++ #endif
++
++ /* Set parameters for the APM_START_REQ message */
++ req->vif_idx = vif->vif_index;
++ req->bcn_addr = elem->dma_addr;
++ req->bcn_len = bcn->len;
++ req->tim_oft = bcn->head_len;
++ req->tim_len = bcn->tim_len;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ req->chan.band = settings->chandef.chan->band;
++ req->chan.freq = settings->chandef.chan->center_freq;
++#endif
++ req->chan.flags = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ req->chan.tx_power = chan_to_fw_pwr(settings->chandef.chan->max_power);
++ req->center_freq1 = settings->chandef.center_freq1;
++ req->center_freq2 = settings->chandef.center_freq2;
++ req->ch_width = bw2chnl[settings->chandef.width];
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
++ req->chan.band = rwnx_hw->ap_chan.band;
++ req->chan.freq = rwnx_hw->ap_chan.prim20_freq;
++ req->center_freq1 = rwnx_hw->ap_chan.center1_freq;
++ req->center_freq2 = rwnx_hw->ap_chan.center2_freq;
++ req->chan.tx_power = rwnx_hw->ap_chan.tx_power;
++#endif
++ req->bcn_int = settings->beacon_interval;
++ if (settings->crypto.control_port)
++ flags |= CONTROL_PORT_HOST;
++
++ if (settings->crypto.control_port_no_encrypt)
++ flags |= CONTROL_PORT_NO_ENC;
++
++ if (use_pairwise_key(&settings->crypto))
++ flags |= WPA_WPA2_IN_USE;
++
++ if (settings->crypto.control_port_ethertype)
++ req->ctrl_port_ethertype = settings->crypto.control_port_ethertype;
++ else
++ req->ctrl_port_ethertype = ETH_P_PAE;
++ req->flags = flags;
++
++ /* Send the APM_START_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, APM_START_CFM, cfm);
++}
++
++int rwnx_send_apm_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif)
++{
++ struct apm_stop_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the APM_STOP_REQ message */
++ req = rwnx_msg_zalloc(APM_STOP_REQ, TASK_APM, DRV_TASK_ID,
++ sizeof(struct apm_stop_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the APM_STOP_REQ message */
++ req->vif_idx = vif->vif_index;
++
++ /* Send the APM_STOP_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, APM_STOP_CFM, NULL);
++}
++
++uint8_t scanning = 0;
++uint8_t p2p_working = 0;
++
++#define P2P_WILDCARD_SSID "DIRECT-"
++#define P2P_WILDCARD_SSID_LEN (sizeof(P2P_WILDCARD_SSID) - 1)
++
++
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++u8_l vendor_extension_data[256];
++u8_l vendor_extension_len = 0;
++#if 0
++u8_l vendor_extension_data[]={
++ 0x10,0x49,0x00,0x17,0x00,0x01,0x37,0x10,
++ 0x06,0x00,0x10,0xc5,0xc9,0x91,0xeb,0x1f,
++ 0xce,0x4d,0x00,0xa1,0x2a,0xdf,0xa1,0xe9,
++ 0xc3,0x44,0xe6,0x10,0x49,0x00,0x21,0x00,
++ 0x01,0x37,0x20,0x01,0x00,0x01,0x05,0x20,
++ 0x02,0x00,0x04,0x43,0x56,0x54,0x45,0x20,
++ 0x05,0x00,0x0d,0x31,0x39,0x32,0x2e,0x31,
++ 0x36,0x38,0x2e,0x31,0x35,0x34,0x2e,0x31};
++#endif
++
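++/*
++ * Splice the user-provided vendor extension data into the scan IE buffer
++ * right after the existing vendor extension attribute (ID 0x10,0x49 with
++ * subelement OUI 00:37:2A, i.e. the WPS Vendor Extension attribute), then
++ * adjust the IE lengths to match.
++ */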
++void rwnx_insert_vendor_extension_data(struct scanu_vendor_ie_req *ie_req){
++ u8_l temp_ie[256];
++ u8_l vendor_extension_subelement[3] = {0x00,0x37,0x2A};
++ u8_l vendor_extension_id[2] = {0x10,0x49};
++ int index = 0;
++ int vendor_extension_subelement_len = 0;
++
++ memset(temp_ie, 0, 256);
++
++ //find vendor_extension_subelement
++ for(index = 0; index < ie_req->add_ie_len; index++){
++ if(ie_req->ie[index] == vendor_extension_id[0]){
++ index++;
++ if(index == ie_req->add_ie_len){
++ return;
++ }
++ if(ie_req->ie[index] == vendor_extension_id[1] &&
++ ie_req->ie[index + 3] == vendor_extension_subelement[0]&&
++ ie_req->ie[index + 4] == vendor_extension_subelement[1]&&
++ ie_req->ie[index + 5] == vendor_extension_subelement[2]){
++ index = index + 2;
++ vendor_extension_subelement_len = ie_req->ie[index];
++ AICWFDBG(LOGDEBUG, "%s find vendor_extension_subelement,index:%d len:%d\r\n", __func__, index, ie_req->ie[index]);
++ break;
++ }
++ }
++ }
++ index = index + vendor_extension_subelement_len;
++
++ //insert vendor extension
++ memcpy(&temp_ie[0], ie_req->ie, index + 1);
++ memcpy(&temp_ie[index + 1], vendor_extension_data, vendor_extension_len/*sizeof(vendor_extension_data)*/);//insert vendor extension data
++ memcpy(&temp_ie[index + 1 + vendor_extension_len/*sizeof(vendor_extension_data)*/], &ie_req->ie[index + 1], ie_req->add_ie_len - index);
++
++ memcpy(ie_req->ie, temp_ie, ie_req->add_ie_len + vendor_extension_len/*sizeof(vendor_extension_data)*/);
++ ie_req->add_ie_len = ie_req->add_ie_len + vendor_extension_len/*sizeof(vendor_extension_data)*/;
++ ie_req->ie[1] = ie_req->ie[1] + vendor_extension_len/*sizeof(vendor_extension_data)*/;
++
++ //rwnx_data_dump((char*)__func__, (void*)ie_req->ie, ie_req->add_ie_len);
++}
++#endif//CONFIG_SET_VENDOR_EXTENSION_IE
++
++int rwnx_send_scanu_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct cfg80211_scan_request *param)
++{
++ struct scanu_start_req *req = NULL;
++ struct scanu_vendor_ie_req *ie_req = NULL;
++ struct mm_add_if_cfm add_if_cfm;
++ int i;
++ uint8_t chan_flags = 0;
++ int err;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the SCANU_START_REQ message */
++ req = rwnx_msg_zalloc(SCANU_START_REQ, TASK_SCANU, DRV_TASK_ID,
++ sizeof(struct scanu_start_req));
++ if (!req)
++ return -ENOMEM;
++
++ scanning = 1;
++ /* Set parameters */
++ req->vif_idx = rwnx_vif->vif_index;
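++ /* Clamp channel and SSID counts to what the firmware scan request can hold */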
++ req->chan_cnt = (u8)min_t(int, SCAN_CHANNEL_MAX, param->n_channels);
++ req->ssid_cnt = (u8)min_t(int, SCAN_SSID_MAX, param->n_ssids);
++ req->bssid = mac_addr_bcst;
++ req->no_cck = param->no_cck;
++
++#ifdef RADAR_OR_IR_DETECT
++ if (req->ssid_cnt == 0)
++ chan_flags |= CHAN_NO_IR;
++#endif
++ for (i = 0; i < req->ssid_cnt; i++) {
++ int j;
++ for (j = 0; j < param->ssids[i].ssid_len; j++)
++ req->ssid[i].array[j] = param->ssids[i].ssid[j];
++ req->ssid[i].length = param->ssids[i].ssid_len;
++
++ if (!memcmp(P2P_WILDCARD_SSID, param->ssids[i].ssid,
++ P2P_WILDCARD_SSID_LEN)) {
++ AICWFDBG(LOGINFO, "p2p scanu:%d,%d,%d\n",rwnx_vif->vif_index, rwnx_vif->is_p2p_vif, rwnx_hw->is_p2p_alive);
++#ifdef CONFIG_USE_P2P0
++ if (rwnx_vif->is_p2p_vif && !rwnx_hw->is_p2p_alive) {
++#else
++ if (rwnx_vif == rwnx_hw->p2p_dev_vif && !rwnx_vif->up) {
++#endif
++ err = rwnx_send_add_if(rwnx_hw, rwnx_vif->wdev.address,
++ RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm);
++ if (err)
++ goto error;
++
++ /*
++ if ((err = rwnx_send_add_if(rwnx_hw, rwnx_vif->wdev.address,
++ RWNX_VIF_TYPE(rwnx_vif), false, &add_if_cfm)))
++ goto error;
++ */
++
++ if (add_if_cfm.status != 0) {
++ rwnx_msg_free(rwnx_hw, req);
++ return -EIO;
++ }
++
++ /* Save the index retrieved from LMAC */
++ spin_lock_bh(&rwnx_hw->cb_lock);
++ rwnx_vif->vif_index = add_if_cfm.inst_nbr;
++ rwnx_vif->up = true;
++ rwnx_hw->vif_started++;
++ rwnx_hw->vif_table[add_if_cfm.inst_nbr] = rwnx_vif;
++ spin_unlock_bh(&rwnx_hw->cb_lock);
++ }
++ rwnx_hw->is_p2p_alive = 1;
++#ifndef CONFIG_USE_P2P0
++ mod_timer(&rwnx_hw->p2p_alive_timer, jiffies + msecs_to_jiffies(1000));
++ atomic_set(&rwnx_hw->p2p_alive_timer_count, 0);
++#endif
++ AICWFDBG(LOGINFO ,"p2p scan start\n");
++#ifdef CONFIG_STA_SCAN_WHEN_P2P_WORKING
++ p2p_working = 0;
++#else
++ p2p_working = 1;
++#endif
++ }
++ }
++
++#if 1
++ if (param->ie) {
++ #if 0
++ if (rwnx_ipc_elem_var_allocs(rwnx_hw, &rwnx_hw->scan_ie,
++ param->ie_len, DMA_TO_DEVICE,
++ NULL, param->ie, NULL))
++ goto error;
++
++ req->add_ie_len = param->ie_len;
++ req->add_ies = rwnx_hw->scan_ie.dma_addr;
++ #else
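++ /* Additional scan IEs are sent to the firmware in a dedicated
++ * SCANU_VENDOR_IE_REQ message instead of a DMA-mapped buffer (the DMA
++ * path above is disabled) */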
++ ie_req = rwnx_msg_zalloc(SCANU_VENDOR_IE_REQ, TASK_SCANU, DRV_TASK_ID,
++ sizeof(struct scanu_vendor_ie_req));
++ if (!ie_req)
++ return -ENOMEM;
++
++ ie_req->add_ie_len = param->ie_len;
++ ie_req->vif_idx = rwnx_vif->vif_index;
++ memcpy(ie_req->ie, param->ie, param->ie_len);
++#ifdef CONFIG_SET_VENDOR_EXTENSION_IE
++ rwnx_insert_vendor_extension_data(ie_req);
++#endif //CONFIG_SET_VENDOR_EXTENSION_IE
++ req->add_ie_len = 0;
++ req->add_ies = 0;
++
++ if ((err = rwnx_send_msg(rwnx_hw, ie_req, 1, SCANU_VENDOR_IE_CFM, NULL)))
++ goto error;
++ #endif
++ }
++ else {
++ req->add_ie_len = 0;
++ req->add_ies = 0;
++ }
++#else
++ req->add_ie_len = 0;
++ req->add_ies = 0;
++#endif
++
++ for (i = 0; i < req->chan_cnt; i++) {
++ struct ieee80211_channel *chan = param->channels[i];
++ AICWFDBG(LOGDEBUG, "scan channel:%d(%d) \r\n", ieee80211_frequency_to_channel(chan->center_freq), chan->center_freq);
++ req->chan[i].band = chan->band;
++ req->chan[i].freq = chan->center_freq;
++ req->chan[i].flags = chan_flags | get_chan_flags(chan->flags);
++ req->chan[i].tx_power = chan_to_fw_pwr(chan->max_reg_power);
++ }
++
++ /* Send the SCANU_START_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, SCANU_START_CFM_ADDTIONAL, NULL);
++error:
++ if (req != NULL)
++ rwnx_msg_free(rwnx_hw, req);
++ if (ie_req != NULL)
++ rwnx_msg_free(rwnx_hw, ie_req);
++ return err;
++}
++
++int rwnx_send_scanu_cancel_req(struct rwnx_hw *rwnx_hw, struct scan_cancel_cfm *cfm)
++{
++ struct scan_cancel_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the SCAN_CANCEL_REQ message */
++ req = rwnx_msg_zalloc(SCANU_CANCEL_REQ, TASK_SCANU, DRV_TASK_ID,
++ sizeof(struct scan_cancel_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Send the SCAN_CANCEL_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, SCANU_CANCEL_CFM, cfm);
++}
++
++int rwnx_send_apm_start_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct cfg80211_chan_def *chandef,
++ struct apm_start_cac_cfm *cfm)
++{
++ struct apm_start_cac_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the APM_START_CAC_REQ message */
++ req = rwnx_msg_zalloc(APM_START_CAC_REQ, TASK_APM, DRV_TASK_ID,
++ sizeof(struct apm_start_cac_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the APM_START_CAC_REQ message */
++ req->vif_idx = vif->vif_index;
++ req->chan.band = chandef->chan->band;
++ req->chan.freq = chandef->chan->center_freq;
++ req->chan.flags = 0;
++ req->center_freq1 = chandef->center_freq1;
++ req->center_freq2 = chandef->center_freq2;
++ req->ch_width = bw2chnl[chandef->width];
++
++ /* Send the APM_START_CAC_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, APM_START_CAC_CFM, cfm);
++}
++
++int rwnx_send_apm_stop_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif)
++{
++ struct apm_stop_cac_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the APM_STOP_CAC_REQ message */
++ req = rwnx_msg_zalloc(APM_STOP_CAC_REQ, TASK_APM, DRV_TASK_ID,
++ sizeof(struct apm_stop_cac_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the APM_STOP_CAC_REQ message */
++ req->vif_idx = vif->vif_index;
++
++ /* Send the APM_STOP_CAC_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, APM_STOP_CAC_CFM, NULL);
++}
++
++int rwnx_send_mesh_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ const struct mesh_config *conf, const struct mesh_setup *setup,
++ struct mesh_start_cfm *cfm)
++{
++ // Message to send
++ struct mesh_start_req *req;
++ // Supported basic rates
++ struct ieee80211_supported_band *band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ];
++ /* Counter */
++ int i;
++ /* Return status */
++ int status;
++ /* DMA Address to be unmapped after confirmation reception */
++ u32 dma_addr = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MESH_START_REQ message */
++ req = rwnx_msg_zalloc(MESH_START_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_start_req));
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->vif_index = vif->vif_index;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ req->bcn_int = setup->beacon_interval;
++ req->dtim_period = setup->dtim_period;
++#endif
++ req->mesh_id_len = setup->mesh_id_len;
++
++ for (i = 0; i < setup->mesh_id_len; i++) {
++ req->mesh_id[i] = *(setup->mesh_id + i);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ req->user_mpm = setup->user_mpm;
++#endif
++ req->is_auth = setup->is_authenticated;
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
++ req->auth_id = setup->auth_id;
++ #endif
++ req->ie_len = setup->ie_len;
++
++ if (setup->ie_len) {
++ /*
++ * Need to provide a Virtual Address to the MAC so that it can download the
++ * additional information elements.
++ */
++ req->ie_addr = dma_map_single(rwnx_hw->dev, (void *)setup->ie,
++ setup->ie_len, DMA_TO_DEVICE);
++
++ /* Check DMA mapping result */
++ if (dma_mapping_error(rwnx_hw->dev, req->ie_addr)) {
++ printk(KERN_CRIT "%s - DMA Mapping error on additional IEs\n", __func__);
++
++ /* Consider there is no Additional IEs */
++ req->ie_len = 0;
++ } else {
++ /* Store DMA Address so that we can unmap the memory section once MESH_START_CFM is received */
++ dma_addr = req->ie_addr;
++ }
++ }
++
++ /* Provide rate information */
++ req->basic_rates.length = 0;
++ for (i = 0; i < band_2GHz->n_bitrates; i++) {
++ u16 rate = band_2GHz->bitrates[i].bitrate;
++
++ /* Read value is in units of 100 Kbps; the firmware expects units of
++ * 500 Kbps, so multiply by 2 and divide by 10 (e.g. 5.5 Mbps becomes 11) */
++ rate = (rate << 1) / 10;
++
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) // TODO: check basic rates
++ if (setup->basic_rates & CO_BIT(i)) {
++ rate |= 0x80;
++ }
++ #endif
++
++ req->basic_rates.array[i] = (u8)rate;
++ req->basic_rates.length++;
++ }
++
++ /* Provide channel information */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ req->chan.band = setup->chandef.chan->band;
++ req->chan.freq = setup->chandef.chan->center_freq;
++#endif
++
++ req->chan.flags = 0;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ req->chan.tx_power = chan_to_fw_pwr(setup->chandef.chan->max_power);
++ req->center_freq1 = setup->chandef.center_freq1;
++ req->center_freq2 = setup->chandef.center_freq2;
++ req->ch_width = bw2chnl[setup->chandef.width];
++#endif
++
++ /* Send the MESH_START_REQ message to UMAC FW */
++ status = rwnx_send_msg(rwnx_hw, req, 1, MESH_START_CFM, cfm);
++
++ /* Unmap DMA area */
++ if (setup->ie_len) {
++ dma_unmap_single(rwnx_hw->dev, dma_addr, setup->ie_len, DMA_TO_DEVICE);
++ }
++
++ /* Return the status */
++ return (status);
++}
++
++int rwnx_send_mesh_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct mesh_stop_cfm *cfm)
++{
++ // Message to send
++ struct mesh_stop_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MESH_STOP_REQ message */
++ req = rwnx_msg_zalloc(MESH_STOP_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_stop_req));
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->vif_idx = vif->vif_index;
++
++ /* Send the MESH_STOP_REQ message to UMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MESH_STOP_CFM, cfm);
++}
++
++int rwnx_send_mesh_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ u32 mask, const struct mesh_config *p_mconf, struct mesh_update_cfm *cfm)
++{
++ // Message to send
++ struct mesh_update_req *req;
++ // Keep only bit for fields which can be updated
++ u32 supp_mask = (mask << 1) & (CO_BIT(NL80211_MESHCONF_GATE_ANNOUNCEMENTS)
++ | CO_BIT(NL80211_MESHCONF_HWMP_ROOTMODE)
++ | CO_BIT(NL80211_MESHCONF_FORWARDING)
++ | CO_BIT(NL80211_MESHCONF_POWER_MODE));
++
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (!supp_mask) {
++ return -ENOENT;
++ }
++
++ /* Build the MESH_UPDATE_REQ message */
++ req = rwnx_msg_zalloc(MESH_UPDATE_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_update_req));
++
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->vif_idx = vif->vif_index;
++
++ if (supp_mask & CO_BIT(NL80211_MESHCONF_GATE_ANNOUNCEMENTS))
++ {
++ req->flags |= CO_BIT(MESH_UPDATE_FLAGS_GATE_MODE_BIT);
++ req->gate_announ = p_mconf->dot11MeshGateAnnouncementProtocol;
++ }
++
++ if (supp_mask & CO_BIT(NL80211_MESHCONF_HWMP_ROOTMODE))
++ {
++ req->flags |= CO_BIT(MESH_UPDATE_FLAGS_ROOT_MODE_BIT);
++ req->root_mode = p_mconf->dot11MeshHWMPRootMode;
++ }
++
++ if (supp_mask & CO_BIT(NL80211_MESHCONF_FORWARDING))
++ {
++ req->flags |= CO_BIT(MESH_UPDATE_FLAGS_MESH_FWD_BIT);
++ req->mesh_forward = p_mconf->dot11MeshForwarding;
++ }
++
++ if (supp_mask & CO_BIT(NL80211_MESHCONF_POWER_MODE))
++ {
++ req->flags |= CO_BIT(MESH_UPDATE_FLAGS_LOCAL_PSM_BIT);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ req->local_ps_mode = p_mconf->power_mode;
++#endif
++ }
++
++ /* Send the MESH_UPDATE_REQ message to UMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MESH_UPDATE_CFM, cfm);
++}
++
++int rwnx_send_mesh_peer_info_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ u8 sta_idx, struct mesh_peer_info_cfm *cfm)
++{
++ // Message to send
++ struct mesh_peer_info_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MESH_PEER_INFO_REQ message */
++ req = rwnx_msg_zalloc(MESH_PEER_INFO_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_peer_info_req));
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->sta_idx = sta_idx;
++
++ /* Send the MESH_PEER_INFO_REQ message to UMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MESH_PEER_INFO_CFM, cfm);
++}
++
++void rwnx_send_mesh_peer_update_ntf(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ u8 sta_idx, u8 mlink_state)
++{
++ // Message to send
++ struct mesh_peer_update_ntf *ntf;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MESH_PEER_UPDATE_NTF message */
++ ntf = rwnx_msg_zalloc(MESH_PEER_UPDATE_NTF, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_peer_update_ntf));
++
++ if (ntf) {
++ ntf->vif_idx = vif->vif_index;
++ ntf->sta_idx = sta_idx;
++ ntf->state = mlink_state;
++
++ /* Send the MESH_PEER_INFO_REQ message to UMAC FW */
++ rwnx_send_msg(rwnx_hw, ntf, 0, 0, NULL);
++ }
++}
++
++void rwnx_send_mesh_path_create_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *tgt_addr)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Check if we are already waiting for a confirmation */
++ if (!vif->ap.create_path) {
++ // Message to send
++ struct mesh_path_create_req *req;
++
++ /* Build the MESH_PATH_CREATE_REQ message */
++ req = rwnx_msg_zalloc(MESH_PATH_CREATE_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_path_create_req));
++
++ if (req) {
++ req->vif_idx = vif->vif_index;
++ memcpy(&req->tgt_mac_addr, tgt_addr, ETH_ALEN);
++
++ vif->ap.create_path = true;
++
++ /* Send the MESH_PATH_CREATE_REQ message to UMAC FW */
++ rwnx_send_msg(rwnx_hw, req, 0, 0, NULL);
++ }
++ }
++}
++
++int rwnx_send_mesh_path_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, const u8 *tgt_addr,
++ const u8 *p_nhop_addr, struct mesh_path_update_cfm *cfm)
++{
++ // Message to send
++ struct mesh_path_update_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MESH_PATH_UPDATE_REQ message */
++ req = rwnx_msg_zalloc(MESH_PATH_UPDATE_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_path_update_req));
++ if (!req) {
++ return -ENOMEM;
++ }
++
++ req->delete = (p_nhop_addr == NULL);
++ req->vif_idx = vif->vif_index;
++ memcpy(&req->tgt_mac_addr, tgt_addr, ETH_ALEN);
++
++ if (p_nhop_addr) {
++ memcpy(&req->nhop_mac_addr, p_nhop_addr, ETH_ALEN);
++ }
++
++ /* Send the MESH_PATH_UPDATE_REQ message to UMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MESH_PATH_UPDATE_CFM, cfm);
++}
++
++void rwnx_send_mesh_proxy_add_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *ext_addr)
++{
++ // Message to send
++ struct mesh_proxy_add_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MESH_PROXY_ADD_REQ message */
++ req = rwnx_msg_zalloc(MESH_PROXY_ADD_REQ, TASK_MESH, DRV_TASK_ID,
++ sizeof(struct mesh_proxy_add_req));
++
++ if (req) {
++ req->vif_idx = vif->vif_index;
++ memcpy(&req->ext_sta_addr, ext_addr, ETH_ALEN);
++
++ /* Send the MESH_PROXY_ADD_REQ message to UMAC FW */
++ rwnx_send_msg(rwnx_hw, req, 0, 0, NULL);
++ }
++}
++
++int rwnx_send_tdls_peer_traffic_ind_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif)
++{
++ struct tdls_peer_traffic_ind_req *tdls_peer_traffic_ind_req;
++
++ if (!rwnx_vif->sta.tdls_sta)
++ return -ENOLINK;
++
++ /* Build the TDLS_PEER_TRAFFIC_IND_REQ message */
++ tdls_peer_traffic_ind_req = rwnx_msg_zalloc(TDLS_PEER_TRAFFIC_IND_REQ, TASK_TDLS, DRV_TASK_ID,
++ sizeof(struct tdls_peer_traffic_ind_req));
++
++ if (!tdls_peer_traffic_ind_req)
++ return -ENOMEM;
++
++ /* Set parameters for the TDLS_PEER_TRAFFIC_IND_REQ message */
++ tdls_peer_traffic_ind_req->vif_index = rwnx_vif->vif_index;
++ tdls_peer_traffic_ind_req->sta_idx = rwnx_vif->sta.tdls_sta->sta_idx;
++ memcpy(&(tdls_peer_traffic_ind_req->peer_mac_addr.array[0]),
++ rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN);
++ tdls_peer_traffic_ind_req->dialog_token = 0; // check dialog token value
++ tdls_peer_traffic_ind_req->last_tid = rwnx_vif->sta.tdls_sta->tdls.last_tid;
++ tdls_peer_traffic_ind_req->last_sn = rwnx_vif->sta.tdls_sta->tdls.last_sn;
++
++ /* Send the TDLS_PEER_TRAFFIC_IND_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, tdls_peer_traffic_ind_req, 0, 0, NULL);
++}
++
++int rwnx_send_config_monitor_req(struct rwnx_hw *rwnx_hw,
++ struct cfg80211_chan_def *chandef,
++ struct me_config_monitor_cfm *cfm)
++{
++ struct me_config_monitor_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the ME_CONFIG_MONITOR_REQ message */
++ req = rwnx_msg_zalloc(ME_CONFIG_MONITOR_REQ, TASK_ME, DRV_TASK_ID,
++ sizeof(struct me_config_monitor_req));
++ if (!req)
++ return -ENOMEM;
++
++ if (chandef) {
++ req->chan_set = true;
++
++ req->chan.band = chandef->chan->band;
++ req->chan.type = bw2chnl[chandef->width];
++ req->chan.prim20_freq = chandef->chan->center_freq;
++ req->chan.center1_freq = chandef->center_freq1;
++ req->chan.center2_freq = chandef->center_freq2;
++ req->chan.tx_power = chan_to_fw_pwr(chandef->chan->max_power);
++
++ if (rwnx_hw->phy.limit_bw)
++ limit_chan_bw(&req->chan.type, req->chan.prim20_freq, &req->chan.center1_freq);
++ } else {
++ req->chan_set = false;
++ }
++
++ req->uf = rwnx_hw->mod_params->uf;
++ req->auto_reply = rwnx_hw->mod_params->auto_reply;
++
++ /* Send the ME_CONFIG_MONITOR_REQ message to FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, ME_CONFIG_MONITOR_CFM, cfm);
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++int rwnx_send_tdls_chan_switch_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct rwnx_sta *rwnx_sta, bool sta_initiator,
++ u8 oper_class, struct cfg80211_chan_def *chandef,
++ struct tdls_chan_switch_cfm *cfm)
++{
++ struct tdls_chan_switch_req *tdls_chan_switch_req;
++
++
++ /* Build the TDLS_CHAN_SWITCH_REQ message */
++ tdls_chan_switch_req = rwnx_msg_zalloc(TDLS_CHAN_SWITCH_REQ, TASK_TDLS, DRV_TASK_ID,
++ sizeof(struct tdls_chan_switch_req));
++
++ if (!tdls_chan_switch_req)
++ return -ENOMEM;
++
++ /* Set parameters for the TDLS_CHAN_SWITCH_REQ message */
++ tdls_chan_switch_req->vif_index = rwnx_vif->vif_index;
++ tdls_chan_switch_req->sta_idx = rwnx_sta->sta_idx;
++ memcpy(&(tdls_chan_switch_req->peer_mac_addr.array[0]),
++ rwnx_sta_addr(rwnx_sta), ETH_ALEN);
++ tdls_chan_switch_req->initiator = sta_initiator;
++ tdls_chan_switch_req->band = chandef->chan->band;
++ tdls_chan_switch_req->type = bw2chnl[chandef->width];
++ tdls_chan_switch_req->prim20_freq = chandef->chan->center_freq;
++ tdls_chan_switch_req->center1_freq = chandef->center_freq1;
++ tdls_chan_switch_req->center2_freq = chandef->center_freq2;
++ tdls_chan_switch_req->tx_power = chan_to_fw_pwr(chandef->chan->max_power);
++ tdls_chan_switch_req->op_class = oper_class;
++
++ /* Send the TDLS_CHAN_SWITCH_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, tdls_chan_switch_req, 1, TDLS_CHAN_SWITCH_CFM, cfm);
++}
++
++int rwnx_send_tdls_cancel_chan_switch_req(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ struct rwnx_sta *rwnx_sta,
++ struct tdls_cancel_chan_switch_cfm *cfm)
++{
++ struct tdls_cancel_chan_switch_req *tdls_cancel_chan_switch_req;
++
++ /* Build the TDLS_CHAN_SWITCH_REQ message */
++ tdls_cancel_chan_switch_req = rwnx_msg_zalloc(TDLS_CANCEL_CHAN_SWITCH_REQ, TASK_TDLS, DRV_TASK_ID,
++ sizeof(struct tdls_cancel_chan_switch_req));
++ if (!tdls_cancel_chan_switch_req)
++ return -ENOMEM;
++
++ /* Set parameters for the TDLS_CHAN_SWITCH_REQ message */
++ tdls_cancel_chan_switch_req->vif_index = rwnx_vif->vif_index;
++ tdls_cancel_chan_switch_req->sta_idx = rwnx_sta->sta_idx;
++ memcpy(&(tdls_cancel_chan_switch_req->peer_mac_addr.array[0]),
++ rwnx_sta_addr(rwnx_sta), ETH_ALEN);
++
++ /* Send the TDLS_CHAN_SWITCH_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, tdls_cancel_chan_switch_req, 1, TDLS_CANCEL_CHAN_SWITCH_CFM, cfm);
++}
++
++#ifdef CONFIG_RWNX_BFMER
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_send_bfmer_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta,
++ const struct ieee80211_vht_cap *vht_cap)
++#endif /* CONFIG_RWNX_FULLMAC*/
++{
++ struct mm_bfmer_enable_req *bfmer_en_req;
++#ifdef CONFIG_RWNX_FULLMAC
++ __le32 vht_capability;
++ u8 rx_nss = 0;
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (!vht_cap) {
++#endif /* CONFIG_RWNX_FULLMAC */
++ goto end;
++ }
++
++#ifdef CONFIG_RWNX_FULLMAC
++ vht_capability = vht_cap->vht_cap_info;
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ if (!(vht_capability & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
++ goto end;
++ }
++
++#ifdef CONFIG_RWNX_FULLMAC
++ rx_nss = rwnx_bfmer_get_rx_nss(vht_cap);
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ /* Allocate a structure that will contain the beamforming report */
++ if (rwnx_bfmer_report_add(rwnx_hw, rwnx_sta, RWNX_BFMER_REPORT_SPACE_SIZE))
++ {
++ goto end;
++ }
++
++ /* Build the MM_BFMER_ENABLE_REQ message */
++ bfmer_en_req = rwnx_msg_zalloc(MM_BFMER_ENABLE_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_bfmer_enable_req));
++
++ /* Check message allocation */
++ if (!bfmer_en_req) {
++ /* Free memory allocated for the report */
++ rwnx_bfmer_report_del(rwnx_hw, rwnx_sta);
++
++ /* Do not use beamforming */
++ goto end;
++ }
++
++ /* Provide DMA address to the MAC */
++ bfmer_en_req->host_bfr_addr = rwnx_sta->bfm_report->dma_addr;
++ bfmer_en_req->host_bfr_size = RWNX_BFMER_REPORT_SPACE_SIZE;
++ bfmer_en_req->sta_idx = rwnx_sta->sta_idx;
++#ifdef CONFIG_RWNX_FULLMAC
++ bfmer_en_req->aid = rwnx_sta->aid;
++ bfmer_en_req->rx_nss = rx_nss;
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ if (vht_capability & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
++ bfmer_en_req->vht_mu_bfmee = true;
++ } else {
++ bfmer_en_req->vht_mu_bfmee = false;
++ }
++
++ /* Send the MM_BFMER_EN_REQ message to LMAC FW */
++ rwnx_send_msg(rwnx_hw, bfmer_en_req, 0, 0, NULL);
++
++end:
++ return;
++}
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++int rwnx_send_mu_group_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta)
++{
++ struct mm_mu_group_update_req *req;
++ int group_id, i = 0;
++ u64 map;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_MU_GROUP_UPDATE_REQ message */
++ req = rwnx_msg_zalloc(MM_MU_GROUP_UPDATE_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_mu_group_update_req) +
++ rwnx_sta->group_info.cnt * sizeof(req->groups[0]));
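++ /* The message is sized to carry one entry per group the STA belongs to */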
++
++ /* Check message allocation */
++ if (!req)
++ return -ENOMEM;
++
++ /* Go through the groups the STA belongs to */
++ group_sta_for_each(rwnx_sta, group_id, map) {
++ int user_pos = rwnx_mu_group_sta_get_pos(rwnx_hw, rwnx_sta, group_id);
++
++ if (WARN((i >= rwnx_sta->group_info.cnt),
++ "STA%d: Too much group (%d)\n",
++ rwnx_sta->sta_idx, i + 1))
++ break;
++
++ req->groups[i].group_id = group_id;
++ req->groups[i].user_pos = user_pos;
++
++ i++;
++ }
++
++ req->group_cnt = rwnx_sta->group_info.cnt;
++ req->sta_idx = rwnx_sta->sta_idx;
++
++ /* Send the MM_MU_GROUP_UPDATE_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, MM_MU_GROUP_UPDATE_CFM, NULL);
++}
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++#endif /* CONFIG_RWNX_BFMER */
++
++/**********************************************************************
++ * Debug Messages
++ *********************************************************************/
++int rwnx_send_dbg_trigger_req(struct rwnx_hw *rwnx_hw, char *msg)
++{
++ struct mm_dbg_trigger_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_DBG_TRIGGER_REQ message */
++ req = rwnx_msg_zalloc(MM_DBG_TRIGGER_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_dbg_trigger_req));
++ if (!req)
++ return -ENOMEM;
++
++ /* Set parameters for the MM_DBG_TRIGGER_REQ message */
++ strncpy(req->error, msg, sizeof(req->error));
++
++ /* Send the MM_DBG_TRIGGER_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 0, -1, NULL);
++}
++
++int rwnx_send_dbg_mem_read_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ struct dbg_mem_read_cfm *cfm)
++{
++ struct dbg_mem_read_req *mem_read_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_MEM_READ_REQ message */
++ mem_read_req = rwnx_msg_zalloc(DBG_MEM_READ_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_read_req));
++ if (!mem_read_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_READ_REQ message */
++ mem_read_req->memaddr = mem_addr;
++
++ /* Send the DBG_MEM_READ_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, mem_read_req, 1, DBG_MEM_READ_CFM, cfm);
++}
++
++int rwnx_send_dbg_mem_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ u32 mem_data)
++{
++ struct dbg_mem_write_req *mem_write_req;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_MEM_WRITE_REQ message */
++ mem_write_req = rwnx_msg_zalloc(DBG_MEM_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_write_req));
++ if (!mem_write_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_WRITE_REQ message */
++ mem_write_req->memaddr = mem_addr;
++ mem_write_req->memdata = mem_data;
++
++ /* Send the DBG_MEM_WRITE_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, mem_write_req, 1, DBG_MEM_WRITE_CFM, NULL);
++}
++
++int rwnx_send_dbg_mem_mask_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ u32 mem_mask, u32 mem_data)
++{
++ struct dbg_mem_mask_write_req *mem_mask_write_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_MEM_MASK_WRITE_REQ message */
++ mem_mask_write_req = rwnx_msg_zalloc(DBG_MEM_MASK_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_mask_write_req));
++ if (!mem_mask_write_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_MASK_WRITE_REQ message */
++ mem_mask_write_req->memaddr = mem_addr;
++ mem_mask_write_req->memmask = mem_mask;
++ mem_mask_write_req->memdata = mem_data;
++
++ /* Send the DBG_MEM_MASK_WRITE_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, mem_mask_write_req, 1, DBG_MEM_MASK_WRITE_CFM, NULL);
++}
++
++#ifdef CONFIG_RFTEST
++int rwnx_send_rftest_req(struct rwnx_hw *rwnx_hw, u32_l cmd, u32_l argc, u8_l *argv, struct dbg_rftest_cmd_cfm *cfm)
++{
++ struct dbg_rftest_cmd_req *mem_rftest_cmd_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_RFTEST_CMD_REQ message */
++ mem_rftest_cmd_req = rwnx_msg_zalloc(DBG_RFTEST_CMD_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_rftest_cmd_req));
++ if (!mem_rftest_cmd_req)
++ return -ENOMEM;
++
++ if (argc > 30) {
++ rwnx_msg_free(rwnx_hw, mem_rftest_cmd_req);
++ return -EINVAL;
++ }
++
++ /* Set parameters for the DBG_MEM_MASK_WRITE_REQ message */
++ mem_rftest_cmd_req->cmd = cmd;
++ mem_rftest_cmd_req->argc = argc;
++ if(argc != 0)
++ memcpy(mem_rftest_cmd_req->argv, argv, argc);
++
++ /* Send the DBG_RFTEST_CMD_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, mem_rftest_cmd_req, 1, DBG_RFTEST_CMD_CFM, cfm);
++}
++#endif
++
++int rwnx_send_dbg_set_mod_filter_req(struct rwnx_hw *rwnx_hw, u32 filter)
++{
++ struct dbg_set_mod_filter_req *set_mod_filter_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_SET_MOD_FILTER_REQ message */
++ set_mod_filter_req =
++ rwnx_msg_zalloc(DBG_SET_MOD_FILTER_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_set_mod_filter_req));
++ if (!set_mod_filter_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_SET_MOD_FILTER_REQ message */
++ set_mod_filter_req->mod_filter = filter;
++
++ /* Send the DBG_SET_MOD_FILTER_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, set_mod_filter_req, 1, DBG_SET_MOD_FILTER_CFM, NULL);
++}
++
++int rwnx_send_dbg_set_sev_filter_req(struct rwnx_hw *rwnx_hw, u32 filter)
++{
++ struct dbg_set_sev_filter_req *set_sev_filter_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_SET_SEV_FILTER_REQ message */
++ set_sev_filter_req =
++ rwnx_msg_zalloc(DBG_SET_SEV_FILTER_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_set_sev_filter_req));
++ if (!set_sev_filter_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_SET_SEV_FILTER_REQ message */
++ set_sev_filter_req->sev_filter = filter;
++
++ /* Send the DBG_SET_SEV_FILTER_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, set_sev_filter_req, 1, DBG_SET_SEV_FILTER_CFM, NULL);
++}
++
++int rwnx_send_dbg_get_sys_stat_req(struct rwnx_hw *rwnx_hw,
++ struct dbg_get_sys_stat_cfm *cfm)
++{
++ void *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Allocate the message */
++ req = rwnx_msg_zalloc(DBG_GET_SYS_STAT_REQ, TASK_DBG, DRV_TASK_ID, 0);
++ if (!req)
++ return -ENOMEM;
++
++ /* Send the DBG_MEM_READ_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 1, DBG_GET_SYS_STAT_CFM, cfm);
++}
++
++int rwnx_send_dbg_mem_block_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ u32 mem_size, u32 *mem_data)
++{
++ struct dbg_mem_block_write_req *mem_blk_write_req;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_MEM_BLOCK_WRITE_REQ message */
++ mem_blk_write_req = rwnx_msg_zalloc(DBG_MEM_BLOCK_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_block_write_req));
++ if (!mem_blk_write_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_BLOCK_WRITE_REQ message */
++ mem_blk_write_req->memaddr = mem_addr;
++ mem_blk_write_req->memsize = mem_size;
++ memcpy(mem_blk_write_req->memdata, mem_data, mem_size);
++
++ /* Send the DBG_MEM_BLOCK_WRITE_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, mem_blk_write_req, 1, DBG_MEM_BLOCK_WRITE_CFM, NULL);
++}
++
++int rwnx_send_dbg_start_app_req(struct rwnx_hw *rwnx_hw, u32 boot_addr,
++ u32 boot_type)
++{
++ struct dbg_start_app_req *start_app_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the DBG_START_APP_REQ message */
++ start_app_req = rwnx_msg_zalloc(DBG_START_APP_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_start_app_req));
++ if (!start_app_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_START_APP_REQ message */
++ start_app_req->bootaddr = boot_addr;
++ start_app_req->boottype = boot_type;
++
++ /* Send the DBG_START_APP_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, start_app_req, 1, DBG_START_APP_CFM, NULL);
++}
++
++int rwnx_send_dbg_gpio_write_req(struct rwnx_hw *rwnx_hw, u8 gpio_idx, u8 gpio_val)
++{
++ struct dbg_gpio_write_req *gpio_write_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ gpio_write_req = rwnx_msg_zalloc(DBG_GPIO_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_gpio_write_req));
++ if (!gpio_write_req)
++ return -ENOMEM;
++
++ gpio_write_req->gpio_idx = gpio_idx;
++ gpio_write_req->gpio_val = gpio_val;
++
++ return rwnx_send_msg(rwnx_hw, gpio_write_req, 1, DBG_GPIO_WRITE_CFM, NULL);
++}
++
++int rwnx_send_dbg_gpio_read_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, struct dbg_gpio_read_cfm *cfm)
++{
++ struct dbg_gpio_read_req *gpio_read_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ gpio_read_req = rwnx_msg_zalloc(DBG_GPIO_READ_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_gpio_read_req));
++ if (!gpio_read_req)
++ return -ENOMEM;
++
++ gpio_read_req->gpio_idx = gpio_idx;
++
++ return rwnx_send_msg(rwnx_hw, gpio_read_req, 1, DBG_GPIO_READ_CFM, cfm);
++}
++
++int rwnx_send_dbg_gpio_init_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, u8_l gpio_dir, u8_l gpio_val)
++{
++ struct dbg_gpio_init_req *gpio_init_req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ gpio_init_req = rwnx_msg_zalloc(DBG_GPIO_INIT_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_gpio_init_req));
++ if (!gpio_init_req)
++ return -ENOMEM;
++
++ gpio_init_req->gpio_idx = gpio_idx;
++ gpio_init_req->gpio_dir = gpio_dir;
++ gpio_init_req->gpio_val = gpio_val;
++
++ return rwnx_send_msg(rwnx_hw, gpio_init_req, 1, DBG_GPIO_INIT_CFM, NULL);
++}
++
++int rwnx_send_cfg_rssi_req(struct rwnx_hw *rwnx_hw, u8 vif_index, int rssi_thold, u32 rssi_hyst)
++{
++ struct mm_cfg_rssi_req *req;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* Build the MM_CFG_RSSI_REQ message */
++ req = rwnx_msg_zalloc(MM_CFG_RSSI_REQ, TASK_MM, DRV_TASK_ID,
++ sizeof(struct mm_cfg_rssi_req));
++ if (!req)
++ return -ENOMEM;
++
++ if (rwnx_hw->vif_table[vif_index] == NULL) {
++ rwnx_msg_free(rwnx_hw, req);
++ return 0;
++ }
++
++ /* Set parameters for the MM_CFG_RSSI_REQ message */
++ req->vif_index = vif_index;
++ req->rssi_thold = (s8)rssi_thold;
++ req->rssi_hyst = (u8)rssi_hyst;
++
++ /* Send the MM_CFG_RSSI_REQ message to LMAC FW */
++ return rwnx_send_msg(rwnx_hw, req, 0, 0, NULL);
++}
++
++#ifdef CONFIG_USB_BT
++int rwnx_send_reboot(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++ u32 delay = 2 * 1000; // 2s
++
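++ /* The reboot is requested through DBG_START_APP_REQ, with the delay carried
++ * in the boot address field and HOST_START_APP_REBOOT as the boot type */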
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ ret = rwnx_send_dbg_start_app_req(rwnx_hw, delay, HOST_START_APP_REBOOT);
++ return ret;
++}
++#endif // CONFIG_USB_BT
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_msg_tx.h
+@@ -0,0 +1,181 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_msg_tx.h
++ *
++ * @brief TX function declarations
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _RWNX_MSG_TX_H_
++#define _RWNX_MSG_TX_H_
++
++#include "rwnx_defs.h"
++
++int rwnx_send_reset(struct rwnx_hw *rwnx_hw);
++int rwnx_send_start(struct rwnx_hw *rwnx_hw);
++int rwnx_send_version_req(struct rwnx_hw *rwnx_hw, struct mm_version_cfm *cfm);
++int rwnx_send_add_if(struct rwnx_hw *rwnx_hw, const unsigned char *mac,
++ enum nl80211_iftype iftype, bool p2p, struct mm_add_if_cfm *cfm);
++int rwnx_send_remove_if(struct rwnx_hw *rwnx_hw, u8 vif_index, bool defer);
++int rwnx_send_set_channel(struct rwnx_hw *rwnx_hw, int phy_idx,
++ struct mm_set_channel_cfm *cfm);
++int rwnx_send_key_add(struct rwnx_hw *rwnx_hw, u8 vif_idx, u8 sta_idx, bool pairwise,
++ u8 *key, u8 key_len, u8 key_idx, u8 cipher_suite,
++ struct mm_key_add_cfm *cfm);
++int rwnx_send_key_del(struct rwnx_hw *rwnx_hw, uint8_t hw_key_idx);
++ int rwnx_send_bcn(struct rwnx_hw *rwnx_hw, u8 *buf, u8 vif_idx, u16 bcn_len);
++
++int rwnx_send_bcn_change(struct rwnx_hw *rwnx_hw, u8 vif_idx, u32 bcn_addr,
++ u16 bcn_len, u16 tim_oft, u16 tim_len, u16 *csa_oft);
++int rwnx_send_tim_update(struct rwnx_hw *rwnx_hw, u8 vif_idx, u16 aid,
++ u8 tx_status);
++int rwnx_send_roc(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct ieee80211_channel *chan, unsigned int duration, struct mm_remain_on_channel_cfm *roc_cfm);
++int rwnx_send_cancel_roc(struct rwnx_hw *rwnx_hw);
++int rwnx_send_set_power(struct rwnx_hw *rwnx_hw, u8 vif_idx, s8 pwr,
++ struct mm_set_power_cfm *cfm);
++int rwnx_send_set_edca(struct rwnx_hw *rwnx_hw, u8 hw_queue, u32 param,
++ bool uapsd, u8 inst_nbr);
++int rwnx_send_tdls_chan_switch_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct rwnx_sta *rwnx_sta, bool sta_initiator,
++ u8 oper_class, struct cfg80211_chan_def *chandef,
++ struct tdls_chan_switch_cfm *cfm);
++int rwnx_send_tdls_cancel_chan_switch_req(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ struct rwnx_sta *rwnx_sta,
++ struct tdls_cancel_chan_switch_cfm *cfm);
++
++#ifdef CONFIG_RWNX_P2P_DEBUGFS
++int rwnx_send_p2p_oppps_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u8 ctw, struct mm_set_p2p_oppps_cfm *cfm);
++int rwnx_send_p2p_noa_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ int count, int interval, int duration,
++ bool dyn_noa, struct mm_set_p2p_noa_cfm *cfm);
++#endif /* CONFIG_RWNX_P2P_DEBUGFS */
++
++#ifdef AICWF_ARP_OFFLOAD
++int rwnx_send_arpoffload_en_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u32_l ipaddr, u8_l enable);
++#endif
++int rwnx_send_rf_config_req(struct rwnx_hw *rwnx_hw, u8_l ofst, u8_l sel, u8_l *tbl, u16_l len);
++int rwnx_send_rf_calib_req(struct rwnx_hw *rwnx_hw, struct mm_set_rf_calib_cfm *cfm);
++int rwnx_send_get_macaddr_req(struct rwnx_hw *rwnx_hw, struct mm_get_mac_addr_cfm *cfm);
++
++#ifdef CONFIG_RWNX_FULLMAC
++int rwnx_send_me_config_req(struct rwnx_hw *rwnx_hw);
++int rwnx_send_me_chan_config_req(struct rwnx_hw *rwnx_hw);
++int rwnx_send_me_set_control_port_req(struct rwnx_hw *rwnx_hw, bool opened,
++ u8 sta_idx);
++int rwnx_send_me_sta_add(struct rwnx_hw *rwnx_hw, struct station_parameters *params,
++ const u8 *mac, u8 inst_nbr, struct me_sta_add_cfm *cfm);
++int rwnx_send_me_sta_del(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool tdls_sta);
++int rwnx_send_me_traffic_ind(struct rwnx_hw *rwnx_hw, u8 sta_idx, bool uapsd, u8 tx_status);
++int rwnx_send_me_rc_stats(struct rwnx_hw *rwnx_hw, u8 sta_idx,
++ struct me_rc_stats_cfm *cfm);
++int rwnx_send_me_rc_set_rate(struct rwnx_hw *rwnx_hw,
++ u8 sta_idx,
++ u16 rate_idx);
++int rwnx_send_me_set_ps_mode(struct rwnx_hw *rwnx_hw, u8 ps_mode);
++int rwnx_send_me_set_lp_level(struct rwnx_hw *rwnx_hw, u8 lp_level);
++int rwnx_send_sm_connect_req(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ struct cfg80211_connect_params *sme,
++ struct sm_connect_cfm *cfm);
++int rwnx_send_sm_disconnect_req(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ u16 reason);
++int rwnx_send_sm_external_auth_required_rsp(struct rwnx_hw *rwnx_hw,
++ struct rwnx_vif *rwnx_vif,
++ u16 status);
++int rwnx_send_apm_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct cfg80211_ap_settings *settings,
++ struct apm_start_cfm *cfm,
++ struct rwnx_ipc_elem_var *elem);
++int rwnx_send_apm_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif);
++int rwnx_send_scanu_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct cfg80211_scan_request *param);
++int rwnx_send_scanu_cancel_req(struct rwnx_hw *rwnx_hw,
++ struct scan_cancel_cfm *cfm);
++
++int rwnx_send_apm_start_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct cfg80211_chan_def *chandef,
++ struct apm_start_cac_cfm *cfm);
++int rwnx_send_apm_stop_cac_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif);
++int rwnx_send_tdls_peer_traffic_ind_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif);
++int rwnx_send_config_monitor_req(struct rwnx_hw *rwnx_hw,
++ struct cfg80211_chan_def *chandef,
++ struct me_config_monitor_cfm *cfm);
++int rwnx_send_mesh_start_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ const struct mesh_config *conf, const struct mesh_setup *setup,
++ struct mesh_start_cfm *cfm);
++int rwnx_send_mesh_stop_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ struct mesh_stop_cfm *cfm);
++int rwnx_send_mesh_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ u32 mask, const struct mesh_config *p_mconf, struct mesh_update_cfm *cfm);
++int rwnx_send_mesh_peer_info_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ u8 sta_idx, struct mesh_peer_info_cfm *cfm);
++void rwnx_send_mesh_peer_update_ntf(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif,
++ u8 sta_idx, u8 mlink_state);
++void rwnx_send_mesh_path_create_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *tgt_addr);
++int rwnx_send_mesh_path_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, const u8 *tgt_addr,
++ const u8 *p_nhop_addr, struct mesh_path_update_cfm *cfm);
++void rwnx_send_mesh_proxy_add_req(struct rwnx_hw *rwnx_hw, struct rwnx_vif *vif, u8 *ext_addr);
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#ifdef CONFIG_RWNX_BFMER
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_send_bfmer_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta,
++ const struct ieee80211_vht_cap *vht_cap);
++#endif /* CONFIG_RWNX_FULLMAC */
++#ifdef CONFIG_RWNX_MUMIMO_TX
++int rwnx_send_mu_group_update_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta);
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++#endif /* CONFIG_RWNX_BFMER */
++
++/* Debug messages */
++int rwnx_send_dbg_trigger_req(struct rwnx_hw *rwnx_hw, char *msg);
++int rwnx_send_dbg_mem_read_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ struct dbg_mem_read_cfm *cfm);
++int rwnx_send_dbg_mem_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ u32 mem_data);
++int rwnx_send_dbg_mem_mask_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ u32 mem_mask, u32 mem_data);
++int rwnx_send_dbg_set_mod_filter_req(struct rwnx_hw *rwnx_hw, u32 filter);
++#ifdef CONFIG_RFTEST
++int rwnx_send_rftest_req(struct rwnx_hw *rwnx_hw, u32_l cmd, u32_l argc, u8_l *argv, struct dbg_rftest_cmd_cfm *cfm);
++#endif
++int rwnx_send_dbg_set_sev_filter_req(struct rwnx_hw *rwnx_hw, u32 filter);
++int rwnx_send_dbg_get_sys_stat_req(struct rwnx_hw *rwnx_hw,
++ struct dbg_get_sys_stat_cfm *cfm);
++int rwnx_send_dbg_mem_block_write_req(struct rwnx_hw *rwnx_hw, u32 mem_addr,
++ u32 mem_size, u32 *mem_data);
++int rwnx_send_dbg_start_app_req(struct rwnx_hw *rwnx_hw, u32 boot_addr,
++ u32 boot_type);
++int rwnx_send_dbg_gpio_write_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, u8_l gpio_val);
++int rwnx_send_dbg_gpio_read_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, struct dbg_gpio_read_cfm *cfm);
++int rwnx_send_dbg_gpio_init_req(struct rwnx_hw *rwnx_hw, u8_l gpio_idx, u8_l gpio_dir, u8_l gpio_val);
++int rwnx_send_cfg_rssi_req(struct rwnx_hw *rwnx_hw, u8 vif_index, int rssi_thold, u32 rssi_hyst);
++int rwnx_send_coex_req(struct rwnx_hw *rwnx_hw, u8_l disable_coexnull, u8_l enable_nullcts);
++int rwnx_send_get_sta_info_req(struct rwnx_hw *rwnx_hw, u8_l sta_idx, struct mm_get_sta_info_cfm *cfm);
++int rwnx_send_set_stack_start_req(struct rwnx_hw *rwnx_hw, u8_l on, u8_l efuse_valid, u8_l set_vendor_info,
++ u8_l fwtrace_redir_en, struct mm_set_stack_start_cfm *cfm);
++int rwnx_send_txop_req(struct rwnx_hw *rwnx_hw, uint16_t *txop, u8_l long_nav_en, u8_l cfe_en);
++int rwnx_send_vendor_hwconfig_req(struct rwnx_hw *rwnx_hw, uint32_t hwconfig_id, int32_t *param);
++
++int rwnx_send_get_fw_version_req(struct rwnx_hw *rwnx_hw, struct mm_get_fw_version_cfm *cfm);
++int rwnx_send_txpwr_idx_req(struct rwnx_hw *rwnx_hw);
++int rwnx_send_txpwr_ofst_req(struct rwnx_hw *rwnx_hw);
++int rwnx_send_set_filter(struct rwnx_hw *rwnx_hw, uint32_t filter);
++int rwnx_send_txpwr_lvl_req(struct rwnx_hw *rwnx_hw);
++int rwnx_send_txpwr_lvl_v3_req(struct rwnx_hw *rwnx_hw);
++
++#ifdef CONFIG_USB_BT
++int rwnx_send_reboot(struct rwnx_hw *rwnx_hw);
++#endif // CONFIG_USB_BT
++
++
++#endif /* _RWNX_MSG_TX_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mu_group.c
+@@ -0,0 +1,659 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_mu_group.c
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ******************************************************************************
++ */
++
++#include "rwnx_defs.h"
++#include "rwnx_msg_tx.h"
++#include "rwnx_events.h"
++
++
++/**
++ * rwnx_mu_group_sta_init - Initialize group information for a STA
++ *
++ * @sta: Sta to initialize
++ * @vht_cap: VHT capabilities of the STA (may be NULL)
++ */
++void rwnx_mu_group_sta_init(struct rwnx_sta *sta,
++ const struct ieee80211_vht_cap *vht_cap)
++{
++ sta->group_info.map = 0;
++ sta->group_info.cnt = 0;
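++ /* LIST_POISON1 marks the list entries as unlinked; the delete paths check
++ * for it before calling list_del() */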
++ sta->group_info.active.next = LIST_POISON1;
++ sta->group_info.update.next = LIST_POISON1;
++ sta->group_info.last_update = 0;
++ sta->group_info.traffic = 0;
++ sta->group_info.group = 0;
++
++ if (!vht_cap ||
++ !(vht_cap->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
++ sta->group_info.map = RWNX_SU_GROUP;
++ }
++}
++
++/**
++ * rwnx_mu_group_sta_del - Remove a sta from all MU groups
++ *
++ * @rwnx_hw: main driver data
++ * @sta: STA to remove
++ *
++ * Remove one sta from all the MU groups it belongs to.
++ */
++void rwnx_mu_group_sta_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta)
++{
++ struct rwnx_mu_info *mu = &rwnx_hw->mu;
++ int i, j, group_id;
++ bool lock_taken;
++ u64 map;
++
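++ /* Best effort: if the semaphore cannot be taken (interrupted), continue
++ * anyway and only release it on exit if it was actually acquired */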
++ lock_taken = (down_interruptible(&mu->lock) == 0);
++
++ group_sta_for_each(sta, group_id, map) {
++ struct rwnx_mu_group *group = rwnx_mu_group_from_id(mu, group_id);
++
++ for (i = 0; i < CONFIG_USER_MAX; i++) {
++ if (group->users[i] == sta) {
++ group->users[i] = NULL;
++ group->user_cnt --;
++ /* Don't keep group with only one user */
++ if (group->user_cnt == 1) {
++ for (j = 0; j < CONFIG_USER_MAX; j++) {
++ if (group->users[j]) {
++ group->users[j]->group_info.cnt--;
++ group->users[j]->group_info.map &= ~BIT_ULL(group->group_id);
++ if (group->users[j]->group_info.group == group_id)
++ group->users[j]->group_info.group = 0;
++ group->user_cnt --;
++ break;
++ }
++ }
++ mu->group_cnt--;
++ trace_mu_group_delete(group->group_id);
++ } else {
++ trace_mu_group_update(group);
++ }
++ break;
++ }
++ }
++
++ WARN((i == CONFIG_USER_MAX), "sta %d doesn't belong to group %d",
++ sta->sta_idx, group_id);
++ }
++
++ sta->group_info.map = 0;
++ sta->group_info.cnt = 0;
++ sta->group_info.traffic = 0;
++
++ if (sta->group_info.active.next != LIST_POISON1)
++ list_del(&sta->group_info.active);
++
++ if (sta->group_info.update.next != LIST_POISON1)
++ list_del(&sta->group_info.update);
++
++ if (lock_taken)
++ up(&mu->lock);
++}
++
++/**
++ * rwnx_mu_group_sta_get_map - Get the list of groups a STA belongs to
++ *
++ * @sta: pointer to the sta
++ *
++ * @return the list of groups a STA belongs to as a bitfield
++ */
++u64 rwnx_mu_group_sta_get_map(struct rwnx_sta *sta)
++{
++ if (sta)
++ return sta->group_info.map;
++ return 0;
++}
++
++/**
++ * rwnx_mu_group_sta_get_pos - Get sta position in a group
++ *
++ * @rwnx_hw: main driver data
++ * @sta: pointer to the sta
++ * @group_id: Group id
++ *
++ * @return the position of @sta in group @group_id or -1 if the sta
++ * doesn't belong to the group (or group id is invalid)
++ */
++int rwnx_mu_group_sta_get_pos(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ int group_id)
++{
++ struct rwnx_mu_group *group;
++ int i;
++
++ group = rwnx_mu_group_from_id(&rwnx_hw->mu, group_id);
++ if (!group)
++ return -1;
++
++ for (i = 0; i < CONFIG_USER_MAX; i++) {
++ if (group->users[i] == sta)
++ return i;
++ }
++
++ WARN(1, "sta %d doesn't belongs to group %d",
++ sta->sta_idx, group_id);
++ return -1;
++}
++
++/**
++ * rwnx_mu_group_move_head - Move (or add) one element at the top of a list
++ *
++ * @list: list pointer
++ * @elem: element to move (or add) at the top of @list
++ *
++ */
++static inline
++void rwnx_mu_group_move_head(struct list_head *list, struct list_head *elem)
++{
++ if (elem->next != LIST_POISON1) {
++ __list_del_entry(elem);
++ }
++ list_add(elem, list);
++}
++
++/**
++ * rwnx_mu_group_remove_users - Remove all the users of a group
++ *
++ * @mu: pointer on MU info
++ * @group: pointer on group to remove users from
++ *
++ * Loop over all users of one group and remove this group from their
++ * map (and count).
++ * Each user is also added to the update_sta list, so that group info
++ * will be resent to fw for this user.
++ */
++static inline
++void rwnx_mu_group_remove_users(struct rwnx_mu_info *mu,
++ struct rwnx_mu_group *group)
++{
++ struct rwnx_sta *sta;
++ int i, group_id = group->group_id;
++
++ for (i = 0; i < CONFIG_USER_MAX; i++) {
++ if (group->users[i]) {
++ sta = group->users[i];
++ group->users[i] = NULL;
++ sta->group_info.cnt--;
++ sta->group_info.map &= ~BIT_ULL(group_id);
++ rwnx_mu_group_move_head(&mu->update_sta,
++ &sta->group_info.update);
++ }
++ }
++
++ if (group->user_cnt)
++ mu->group_cnt--;
++ group->user_cnt = 0;
++ trace_mu_group_delete(group_id);
++}
++
++/**
++ * rwnx_mu_group_add_users - Add users to a group
++ *
++ * @mu: pointer on MU info
++ * @group: pointer on group to add users in
++ * @nb_user: number of users to add
++ * @users: table of users to add
++ *
++ * Add @nb_user users to @group (which may already have users).
++ * Each new user is added to the first free position.
++ * It is assumed that @group has at least @nb_user free positions. If that is
++ * not the case, only the number of users needed to complete the group is added.
++ * Each user (effectively added to @group) is also added to the update_sta
++ * list, so that group info will be resent to fw for this user.
++ */
++static inline
++void rwnx_mu_group_add_users(struct rwnx_mu_info *mu,
++ struct rwnx_mu_group *group,
++ int nb_user, struct rwnx_sta **users)
++{
++ int i, j, group_id = group->group_id;
++
++ if (!group->user_cnt)
++ mu->group_cnt++;
++
++ j = 0;
++ for (i = 0; i < nb_user ; i++) {
++ for (; j < CONFIG_USER_MAX ; j++) {
++ if (group->users[j] == NULL) {
++ group->users[j] = users[i];
++ users[i]->group_info.cnt ++;
++ users[i]->group_info.map |= BIT_ULL(group_id);
++
++ rwnx_mu_group_move_head(&(mu->update_sta),
++ &(users[i]->group_info.update));
++ group->user_cnt ++;
++ j ++;
++ break;
++ }
++
++ WARN(j == (CONFIG_USER_MAX - 1),
++ "Too many user for group %d (nb_user=%d)",
++ group_id, group->user_cnt + nb_user - i);
++ }
++ }
++
++ trace_mu_group_update(group);
++}
++
++
++/**
++ * rwnx_mu_group_create_one - create one group with a specific set of users
++ *
++ * @mu: pointer on MU info
++ * @nb_user: number of users to include in the group (<= CONFIG_USER_MAX)
++ * @users: table of users
++ * @nb_group_left: number of new groups that may still be created (updated on exit)
++ *
++ * Try to create a new group with a specific set of users.
++ * 1- First it checks if a group containing all these users already exists.
++ *
++ * 2- Then it checks if it is possible to complete a group which already
++ * contains at least one user.
++ *
++ * 3- Finally it creates a new group. To do so, it takes the last group of
++ * the active_groups list, removes all its current users and adds the new ones.
++ *
++ * In all cases, the selected group is moved to the top of the active_groups
++ * list.
++ *
++ * @return 1 if a new group has been created and 0 otherwise
++ */
++static
++int rwnx_mu_group_create_one(struct rwnx_mu_info *mu, int nb_user,
++ struct rwnx_sta **users, int *nb_group_left)
++{
++ int i, group_id;
++ struct rwnx_mu_group *group;
++ u64 group_match;
++ u64 group_avail;
++
++ group_match = users[0]->group_info.map;
++ group_avail = users[0]->group_info.map;
++ for (i = 1; i < nb_user ; i++) {
++ group_match &= users[i]->group_info.map;
++ group_avail |= users[i]->group_info.map;
++
++ }
++
++ if (group_match) {
++ /* a group (or more) with all the users already exist */
++ group_id = RWNX_GET_FIRST_GROUP_ID(group_match);
++ group = rwnx_mu_group_from_id(mu, group_id);
++ rwnx_mu_group_move_head(&mu->active_groups, &group->list);
++ return 0;
++ }
++
++#if CONFIG_USER_MAX > 2
++ if (group_avail) {
++ /* check if we can complete a group */
++ struct rwnx_sta *users2[CONFIG_USER_MAX];
++ int nb_user2;
++
++ group_for_each(group_id, group_avail) {
++ group = rwnx_mu_group_from_id(mu, group_id);
++ if (group->user_cnt == CONFIG_USER_MAX)
++ continue;
++
++ nb_user2 = 0;
++ for (i = 0; i < nb_user ; i++) {
++ if (!(users[i]->group_info.map & BIT_ULL(group_id))) {
++ users2[nb_user2] = users[i];
++ nb_user2++;
++ }
++ }
++
++ if ((group->user_cnt + nb_user2) <= CONFIG_USER_MAX) {
++ rwnx_mu_group_add_users(mu, group, nb_user2, users2);
++ rwnx_mu_group_move_head(&mu->active_groups, &group->list);
++ return 0;
++ }
++ }
++ }
++#endif /* CONFIG_USER_MAX > 2*/
++
++ /* create a new group */
++ group = list_last_entry(&mu->active_groups, struct rwnx_mu_group, list);
++ rwnx_mu_group_remove_users(mu, group);
++ rwnx_mu_group_add_users(mu, group, nb_user, users);
++ rwnx_mu_group_move_head(&mu->active_groups, &group->list);
++ (*nb_group_left)--;
++
++ return 1;
++}
++
++/**
++ * rwnx_mu_group_create - Create new groups containing one specific sta
++ *
++ * @mu: pointer on MU info
++ * @sta: sta to add in each group
++ * @nb_group_left: maximum number of new groups allowed (updated on exit)
++ *
++ * This will try to create "all the possible" groups with a specific sta being
++ * a member of all these groups.
++ * The function simply loops over the @active_sta list (starting from @sta).
++ * When it has (CONFIG_USER_MAX - 1) users it tries to create a new group with
++ * these users (plus @sta).
++ * The loop ends when there are no more users, or no more new groups are allowed.
++ *
++ */
++static
++void rwnx_mu_group_create(struct rwnx_mu_info *mu, struct rwnx_sta *sta,
++ int *nb_group_left)
++{
++ struct rwnx_sta *user_sta = sta;
++ struct rwnx_sta *users[CONFIG_USER_MAX];
++ int nb_user = 1;
++
++ users[0] = sta;
++ while (*nb_group_left) {
++
++ list_for_each_entry_continue(user_sta, &mu->active_sta, group_info.active) {
++ users[nb_user] = user_sta;
++ if (++nb_user == CONFIG_USER_MAX) {
++ break;
++ }
++ }
++
++ if (nb_user > 1) {
++ if (rwnx_mu_group_create_one(mu, nb_user, users, nb_group_left))
++ (*nb_group_left)--;
++
++ if (nb_user < CONFIG_USER_MAX)
++ break;
++ else
++ nb_user = 1;
++ } else
++ break;
++ }
++}
++
++/**
++ * rwnx_mu_group_work - process function of the "group_work"
++ *
++ * The work is scheduled when several sta (MU beamformee capable) are active.
++ * When called, the @active_sta contains the list of the active sta (starting
++ * from the most recent one), and @active_groups is the list of all possible
++ * groups ordered so that the first one is the most recently used.
++ *
++ * This function will create new groups, starting from groups containing the
++ * most "active" sta.
++ * For example if the list of sta is :
++ * sta8 -> sta3 -> sta4 -> sta7 -> sta1
++ * and the number of users per group is 3, it will create groups:
++ * - sta8 / sta3 / sta4
++ * - sta8 / sta7 / sta1
++ * - sta3 / sta4 / sta7
++ * - sta3 / sta1
++ * - sta4 / sta7 / sta1
++ * - sta7 / sta1
++ *
++ * To create new groups, the least used groups are selected first.
++ * It is only allowed to create NX_MU_GROUP_MAX per iteration.
++ *
++ * Once groups have been updated, mu group information is updated to the fw.
++ * To do so it uses the @update_sta list to know which sta have been affected.
++ * As it is necessary to wait for fw confirmation before using these new groups,
++ * MU is temporarily disabled during the group update.
++ *
++ * Work is then rescheduled.
++ *
++ * At the end of the function, both @active_sta and @update_sta list are empty.
++ *
++ * Note:
++ * - This is still a WIP, and will require more tuning
++ * - not all combinations are created, to avoid too much processing.
++ * - reschedule delay should be adaptive
++ */
++void rwnx_mu_group_work(struct work_struct *ws)
++{
++ struct delayed_work *dw = container_of(ws, struct delayed_work, work);
++ struct rwnx_mu_info *mu = container_of(dw, struct rwnx_mu_info, group_work);
++ struct rwnx_hw *rwnx_hw = container_of(mu, struct rwnx_hw, mu);
++ struct rwnx_sta *sta, *next;
++ int nb_group_left = NX_MU_GROUP_MAX;
++
++ if (WARN(!rwnx_hw->mod_params->mutx,
++ "In group formation work, but mutx disabled"))
++ return;
++
++ if (down_interruptible(&mu->lock) != 0)
++ return;
++
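++ /* update_count acts as a generation counter; 0 is reserved, so skip it on
++ * wrap-around */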
++ mu->update_count++;
++ if (!mu->update_count)
++ mu->update_count++;
++
++ list_for_each_entry_safe(sta, next, &mu->active_sta, group_info.active) {
++ if (nb_group_left)
++ rwnx_mu_group_create(mu, sta, &nb_group_left);
++
++ sta->group_info.last_update = mu->update_count;
++ list_del(&sta->group_info.active);
++ }
++
++ if (! list_empty(&mu->update_sta)) {
++ list_for_each_entry_safe(sta, next, &mu->update_sta, group_info.update) {
++ rwnx_send_mu_group_update_req(rwnx_hw, sta);
++ list_del(&sta->group_info.update);
++ }
++ }
++
++ mu->next_group_select = jiffies;
++ rwnx_mu_group_sta_select(rwnx_hw);
++ up(&mu->lock);
++
++ return;
++}
++
++/**
++ * rwnx_mu_group_init - Initialize MU groups
++ *
++ * @rwnx_hw: main driver data
++ *
++ * Initialize all MU groups
++ */
++void rwnx_mu_group_init(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_mu_info *mu = &rwnx_hw->mu;
++ int i;
++
++ INIT_LIST_HEAD(&mu->active_groups);
++ INIT_LIST_HEAD(&mu->active_sta);
++ INIT_LIST_HEAD(&mu->update_sta);
++
++ for (i = 0; i < NX_MU_GROUP_MAX; i++) {
++ int j;
++ mu->groups[i].user_cnt = 0;
++ mu->groups[i].group_id = i + 1;
++ for (j = 0; j < CONFIG_USER_MAX; j++) {
++ mu->groups[i].users[j] = NULL;
++ }
++ list_add(&mu->groups[i].list, &mu->active_groups);
++ }
++
++ mu->update_count = 1;
++ mu->group_cnt = 0;
++ mu->next_group_select = jiffies;
++ INIT_DELAYED_WORK(&mu->group_work, rwnx_mu_group_work);
++ sema_init(&mu->lock, 1);
++}
++
++/**
++ * rwnx_mu_set_active_sta - mark a STA as active
++ *
++ * @rwnx_hw: main driver data
++ * @sta: pointer to the sta
++ * @traffic: Number of buffers to add in the sta's traffic counter
++ *
++ * If @sta is MU beamformee capable (and MU-MIMO tx is enabled) move the
++ * sta at the top of the @active_sta list.
++ * It also schedules the group_work if it is not already scheduled and the
++ * list contains more than one sta.
++ *
++ * If a STA was already in the list during the last group update
++ * (i.e. sta->group_info.last_update == mu->update_count) it is not added
++ * back to the list until a sta that wasn't active during the last update is
++ * added. This is to avoid scheduling a group update with a list of sta that
++ * were all already present during the previous update.
++ *
++ * It is called with mu->lock taken.
++ */
++void rwnx_mu_set_active_sta(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ int traffic)
++{
++ struct rwnx_mu_info *mu = &rwnx_hw->mu;
++
++ if (!sta || (sta->group_info.map & RWNX_SU_GROUP))
++ return;
++
++ sta->group_info.traffic += traffic;
++
++ if ((sta->group_info.last_update != mu->update_count) ||
++ !list_empty(&mu->active_sta)) {
++
++ rwnx_mu_group_move_head(&mu->active_sta, &sta->group_info.active);
++
++ if (!delayed_work_pending(&mu->group_work) &&
++ !list_is_singular(&mu->active_sta)) {
++ schedule_delayed_work(&mu->group_work,
++ msecs_to_jiffies(RWNX_MU_GROUP_INTERVAL));
++ }
++ }
++}
++
++/**
++ * rwnx_mu_set_active_group - mark a MU group as active
++ *
++ * @rwnx_hw: main driver data
++ * @group_id: Group id
++ *
++ * Move a group to the top of the @active_groups list
++ */
++void rwnx_mu_set_active_group(struct rwnx_hw *rwnx_hw, int group_id)
++{
++ struct rwnx_mu_info *mu = &rwnx_hw->mu;
++ struct rwnx_mu_group *group = rwnx_mu_group_from_id(mu, group_id);
++
++ rwnx_mu_group_move_head(&mu->active_groups, &group->list);
++}
++
++
++/**
++ * rwnx_mu_group_sta_select - Select the best group for MU stas
++ *
++ * @rwnx_hw: main driver data
++ *
++ * For each MU capable client of AP interfaces this function tries to select
++ * the best group to use.
++ *
++ * In a first pass, gather information from all stations to build per-group
++ * statistics for the previous @RWNX_MU_GROUP_SELECT_INTERVAL interval:
++ * - number of buffers transmitted
++ * - number of users
++ *
++ * Then groups with at least 2 active users are assigned after being ordered
++ * by traffic :
++ * - the group with the highest traffic is selected: set this group for all its users
++ * - update nb_users for all other groups (as one sta may belong to several groups)
++ * - select the next group that still has at least 2 users and assign it
++ * - continue until all groups are processed
++ *
++ */
++void rwnx_mu_group_sta_select(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_mu_info *mu = &rwnx_hw->mu;
++ int nb_users[NX_MU_GROUP_MAX + 1];
++ int traffic[NX_MU_GROUP_MAX + 1];
++ int order[NX_MU_GROUP_MAX + 1];
++ struct rwnx_sta *sta;
++ struct rwnx_vif *vif;
++ struct list_head *head;
++ u64 map;
++ int i, j, update, group_id, tmp, cnt = 0;
++
++ if (!mu->group_cnt || time_before(jiffies, mu->next_group_select))
++ return;
++
++ list_for_each_entry(vif, &rwnx_hw->vifs, list) {
++
++ if (RWNX_VIF_TYPE(vif) != NL80211_IFTYPE_AP)
++ continue;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ head = &vif->ap.sta_list;
++#else
++ head = &vif->stations;
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ memset(nb_users, 0, sizeof(nb_users));
++ memset(traffic, 0, sizeof(traffic));
++ list_for_each_entry(sta, head, list) {
++ int sta_traffic = sta->group_info.traffic;
++
++ /* reset statistics for next selection */
++ sta->group_info.traffic = 0;
++ if (sta->group_info.group)
++ trace_mu_group_selection(sta, 0);
++ sta->group_info.group = 0;
++
++ if (sta->group_info.cnt == 0 ||
++ sta_traffic < RWNX_MU_GROUP_MIN_TRAFFIC)
++ continue;
++
++ group_sta_for_each(sta, group_id, map) {
++ nb_users[group_id]++;
++ traffic[group_id] += sta_traffic;
++
++ /* list group with 2 users or more */
++ if (nb_users[group_id] == 2)
++ order[cnt++] = group_id;
++ }
++ }
++
++ /* sort (by decreasing traffic) the list of groups with at least 2 users */
++ update = 1;
++ while(update) {
++ update = 0;
++ for (i = 0; i < cnt - 1; i++) {
++ if (traffic[order[i]] < traffic[order[i + 1]]) {
++ tmp = order[i];
++ order[i] = order[i + 1];
++ order[i + 1] = tmp;
++ update = 1;
++ }
++ }
++ }
++
++ /* now assign group in traffic order */
++ for (i = 0; i < cnt ; i ++) {
++ struct rwnx_mu_group *group;
++ group_id = order[i];
++
++ if (nb_users[group_id] < 2)
++ continue;
++
++ group = rwnx_mu_group_from_id(mu, group_id);
++ for (j = 0; j < CONFIG_USER_MAX ; j++) {
++ if (group->users[j]) {
++ trace_mu_group_selection(group->users[j], group_id);
++ group->users[j]->group_info.group = group_id;
++
++ group_sta_for_each(group->users[j], tmp, map) {
++ if (group_id != tmp)
++ nb_users[tmp]--;
++ }
++ }
++ }
++ }
++ }
++
++ mu->next_group_select = jiffies +
++ msecs_to_jiffies(RWNX_MU_GROUP_SELECT_INTERVAL);
++ mu->next_group_select |= 1;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_mu_group.h
+@@ -0,0 +1,179 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_mu_group.h
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ******************************************************************************
++ */
++#ifndef _RWNX_MU_GROUP_H_
++#define _RWNX_MU_GROUP_H_
++
++#include <linux/workqueue.h>
++#include <linux/semaphore.h>
++
++struct rwnx_hw;
++struct rwnx_sta;
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++
++/**
++ * struct rwnx_sta_group_info - Group Information for a STA
++ *
++ * @active: node for @mu->active_sta list
++ * @update: node for @mu->update_sta list
++ * @last_update: Value of @mu->update_count the last time this sta was
++ * processed by the group update work
++ * @cnt: Number of groups the STA belongs to
++ * @map: Bitfield of groups the sta belongs to
++ * @traffic: Number of buffers sent since previous group selection
++ * @group: Id of the group selected by previous group selection
++ * (cf @rwnx_mu_group_sta_select)
++ */
++struct rwnx_sta_group_info {
++ struct list_head active;
++ struct list_head update;
++ u16 last_update;
++ int cnt;
++ u64 map;
++ int traffic;
++ u8 group;
++};
++
++/**
++ * struct rwnx_mu_group - Information about the users of a group
++ *
++ * @list: node for mu->active_groups
++ * @group_id: Group identifier
++ * @user_cnt: Number of users in the group
++ * @users: Pointers to the sta, ordered by user position
++ */
++struct rwnx_mu_group {
++ struct list_head list;
++ int group_id;
++ int user_cnt;
++ struct rwnx_sta *users[CONFIG_USER_MAX];
++};
++
++/**
++ * struct rwnx_mu_info - Information about all MU group
++ *
++ * @active_groups: List of all possible groups. Ordered from the most recently
++ * used one to the least recently used (possibly never used)
++ * @active_sta: List of MU beamformee sta that have been active (since previous
++ * group update). Ordered from the most recently active.
++ * @update_sta: List of sta whose group information has changed and need to be
++ * updated at fw level
++ * @groups: Table of all groups
++ * @group_work: Work item used to schedule group update
++ * @update_count: Counter used to identify the last group formation update.
++ * (cf rwnx_sta_group_info.last_update)
++ * @lock: Lock taken during group update. If tx happens while the lock is
++ * taken, tx will not use MU.
++ * @next_group_select: Next time the group selection should be run
++ * (cf @rwnx_mu_group_sta_select)
++ * @group_cnt: Number of groups created
++ */
++struct rwnx_mu_info {
++ struct list_head active_groups;
++ struct list_head active_sta;
++ struct list_head update_sta;
++ struct rwnx_mu_group groups[NX_MU_GROUP_MAX];
++ struct delayed_work group_work;
++ u16 update_count;
++ struct semaphore lock;
++ unsigned long next_group_select;
++ u8 group_cnt;
++};
++
++#define RWNX_SU_GROUP BIT_ULL(0)
++#define RWNX_MU_GROUP_MASK 0x7ffffffffffffffeULL
++#define RWNX_MU_GROUP_INTERVAL 200 /* in ms */
++#define RWNX_MU_GROUP_SELECT_INTERVAL 100 /* in ms */
++// minimum traffic in a RWNX_MU_GROUP_SELECT_INTERVAL to consider the sta
++#define RWNX_MU_GROUP_MIN_TRAFFIC 50 /* in number of packet */
++
++
++#define RWNX_GET_FIRST_GROUP_ID(map) (fls64(map) - 1)
++
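++/* Iterate over the group ids set in a group bitmap, from the highest id down
++ to 1 (bit 0, reserved for the SU "group", is never visited) */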
++#define group_sta_for_each(sta, id, map) \
++ map = sta->group_info.map & RWNX_MU_GROUP_MASK; \
++ for (id = (fls64(map) - 1) ; id > 0 ; \
++ map &= ~(u64)BIT_ULL(id), id = (fls64(map) - 1))
++
++#define group_for_each(id, map) \
++ for (id = (fls64(map) - 1) ; id > 0 ; \
++ map &= ~(u64)BIT_ULL(id), id = (fls64(map) - 1))
++
++#define RWNX_MUMIMO_INFO_POS_ID(info) (((info) >> 6) & 0x3)
++#define RWNX_MUMIMO_INFO_GROUP_ID(info) ((info) & 0x3f)
++
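++/* Map a (1-based) group id to its group descriptor; ids above NX_MU_GROUP_MAX
++ return NULL */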
++static inline
++struct rwnx_mu_group *rwnx_mu_group_from_id(struct rwnx_mu_info *mu, int id)
++{
++ if (id > NX_MU_GROUP_MAX)
++ return NULL;
++
++ return &mu->groups[id - 1];
++}
++
++
++void rwnx_mu_group_sta_init(struct rwnx_sta *sta,
++ const struct ieee80211_vht_cap *vht_cap);
++void rwnx_mu_group_sta_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta);
++u64 rwnx_mu_group_sta_get_map(struct rwnx_sta *sta);
++int rwnx_mu_group_sta_get_pos(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ int group_id);
++
++void rwnx_mu_group_init(struct rwnx_hw *rwnx_hw);
++
++void rwnx_mu_set_active_sta(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ int traffic);
++void rwnx_mu_set_active_group(struct rwnx_hw *rwnx_hw, int group_id);
++void rwnx_mu_group_sta_select(struct rwnx_hw *rwnx_hw);
++
++
++#else /* ! CONFIG_RWNX_MUMIMO_TX */
++
++static inline
++void rwnx_mu_group_sta_init(struct rwnx_sta *sta,
++ const struct ieee80211_vht_cap *vht_cap)
++{}
++
++static inline
++void rwnx_mu_group_sta_del(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta)
++{}
++
++static inline
++u64 rwnx_mu_group_sta_get_map(struct rwnx_sta *sta)
++{
++ return 0;
++}
++
++static inline
++int rwnx_mu_group_sta_get_pos(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ int group_id)
++{
++ return 0;
++}
++
++static inline
++void rwnx_mu_group_init(struct rwnx_hw *rwnx_hw)
++{}
++
++static inline
++void rwnx_mu_set_active_sta(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ int traffic)
++{}
++
++static inline
++void rwnx_mu_set_active_group(struct rwnx_hw *rwnx_hw, int group_id)
++{}
++
++static inline
++void rwnx_mu_group_sta_select(struct rwnx_hw *rwnx_hw)
++{}
++
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++
++#endif /* _RWNX_MU_GROUP_H_ */
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_pci.c
+@@ -0,0 +1,94 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_pci.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#include <linux/pci.h>
++#include <linux/module.h>
++
++#include "rwnx_defs.h"
++#include "rwnx_dini.h"
++#include "rwnx_v7.h"
++
++#define PCI_VENDOR_ID_DINIGROUP 0x17DF
++#define PCI_DEVICE_ID_DINIGROUP_DNV6_F2PCIE 0x1907
++
++#define PCI_DEVICE_ID_XILINX_CEVA_VIRTEX7 0x7011
++
++static const struct pci_device_id rwnx_pci_ids[] = {
++ {PCI_DEVICE(PCI_VENDOR_ID_DINIGROUP, PCI_DEVICE_ID_DINIGROUP_DNV6_F2PCIE)},
++ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILINX_CEVA_VIRTEX7)},
++ {0,}
++};
++
++
++/* Uncomment this for depmod to create module alias */
++/* We don't want this on development platform */
++//MODULE_DEVICE_TABLE(pci, rwnx_pci_ids);
++
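++/**
++ * rwnx_pci_probe() - PCI probe callback
++ *
++ * Select the platform initialization routine according to the PCI vendor
++ * (Dini group or Xilinx based board), then run the common platform init.
++ */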
++static int rwnx_pci_probe(struct pci_dev *pci_dev,
++ const struct pci_device_id *pci_id)
++{
++ struct rwnx_plat *rwnx_plat = NULL;
++ void *drvdata;
++ int ret = -ENODEV;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (pci_id->vendor == PCI_VENDOR_ID_DINIGROUP) {
++ ret = rwnx_dini_platform_init(pci_dev, &rwnx_plat);
++ } else if (pci_id->vendor == PCI_VENDOR_ID_XILINX) {
++ ret = rwnx_v7_platform_init(pci_dev, &rwnx_plat);
++ }
++
++ if (ret)
++ return ret;
++
++ rwnx_plat->pci_dev = pci_dev;
++
++ ret = rwnx_platform_init(rwnx_plat, &drvdata);
++ pci_set_drvdata(pci_dev, drvdata);
++
++ if (ret)
++ rwnx_plat->deinit(rwnx_plat);
++
++ return ret;
++}
++
++static void rwnx_pci_remove(struct pci_dev *pci_dev)
++{
++ struct rwnx_hw *rwnx_hw;
++ struct rwnx_plat *rwnx_plat;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ rwnx_hw = pci_get_drvdata(pci_dev);
++ rwnx_plat = rwnx_hw->plat;
++
++ rwnx_platform_deinit(rwnx_hw);
++ rwnx_plat->deinit(rwnx_plat);
++
++ pci_set_drvdata(pci_dev, NULL);
++}
++
++static struct pci_driver rwnx_pci_drv = {
++ .name = KBUILD_MODNAME,
++ .id_table = rwnx_pci_ids,
++ .probe = rwnx_pci_probe,
++ .remove = rwnx_pci_remove
++};
++
++int rwnx_pci_register_drv(void)
++{
++ return pci_register_driver(&rwnx_pci_drv);
++}
++
++void rwnx_pci_unregister_drv(void)
++{
++ pci_unregister_driver(&rwnx_pci_drv);
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_pci.h
+@@ -0,0 +1,17 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_pci.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_PCI_H_
++#define _RWNX_PCI_H_
++
++int rwnx_pci_register_drv(void);
++void rwnx_pci_unregister_drv(void);
++
++#endif /* _RWNX_PCI_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_platform.c
+@@ -0,0 +1,2306 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_platform.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#include <linux/module.h>
++#include <linux/firmware.h>
++#include <linux/delay.h>
++#include <linux/vmalloc.h>
++
++#include "rwnx_platform.h"
++#include "reg_access.h"
++#include "hal_desc.h"
++#include "rwnx_main.h"
++#include "rwnx_pci.h"
++#ifndef CONFIG_RWNX_FHOST
++#include "ipc_host.h"
++#endif /* !CONFIG_RWNX_FHOST */
++#include "rwnx_msg_tx.h"
++
++#ifdef AICWF_SDIO_SUPPORT
++#include "aicwf_sdio.h"
++#endif
++
++#ifdef AICWF_USB_SUPPORT
++#include "aicwf_usb.h"
++#endif
++#include "md5.h"
++#include "aicwf_compat_8800dc.h"
++#include "aicwf_compat_8800d80.h"
++#ifdef CONFIG_USE_FW_REQUEST
++#include <linux/firmware.h>
++#endif
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0))
++static inline struct inode *file_inode(const struct file *f)
++{
++ return f->f_dentry->d_inode;
++}
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)) */
++struct rwnx_plat *g_rwnx_plat = NULL;
++
++#define FW_PATH_MAX_LEN 200
++extern char aic_fw_path[FW_PATH_MAX_LEN];
++
++//Parser state
++#define INIT 0
++#define CMD 1
++#define PRINT 2
++#define GET_VALUE 3
++
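++/* Default RF userconfig values: tx power levels, tx power loss/offsets and
++ xtal cap settings */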
++typedef struct
++{
++ txpwr_lvl_conf_t txpwr_lvl;
++ txpwr_lvl_conf_v2_t txpwr_lvl_v2;
++ txpwr_lvl_conf_v3_t txpwr_lvl_v3;
++ txpwr_loss_conf_t txpwr_loss;
++ txpwr_ofst_conf_t txpwr_ofst;
++ xtal_cap_conf_t xtal_cap;
++} userconfig_info_t;
++
++userconfig_info_t userconfig_info = {
++ .txpwr_lvl = {
++ .enable = 1,
++ .dsss = 9,
++ .ofdmlowrate_2g4 = 8,
++ .ofdm64qam_2g4 = 8,
++ .ofdm256qam_2g4 = 8,
++ .ofdm1024qam_2g4 = 8,
++ .ofdmlowrate_5g = 11,
++ .ofdm64qam_5g = 10,
++ .ofdm256qam_5g = 9,
++ .ofdm1024qam_5g = 9
++ },
++ .txpwr_lvl_v2 = {
++ .enable = 1,
++ .pwrlvl_11b_11ag_2g4 =
++ //1M, 2M, 5M5, 11M, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
++ { 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 16, 16},
++ .pwrlvl_11n_11ac_2g4 =
++ //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9
++ { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16},
++ .pwrlvl_11ax_2g4 =
++ //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11
++ { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16, 15, 15},
++ },
++ .txpwr_lvl_v3 = {
++ .enable = 1,
++ .pwrlvl_11b_11ag_2g4 =
++ //1M, 2M, 5M5, 11M, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
++ { 20, 20, 20, 20, 20, 20, 20, 20, 18, 18, 16, 16},
++ .pwrlvl_11n_11ac_2g4 =
++ //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9
++ { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16},
++ .pwrlvl_11ax_2g4 =
++ //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11
++ { 20, 20, 20, 20, 18, 18, 16, 16, 16, 16, 15, 15},
++ .pwrlvl_11a_5g =
++ //NA, NA, NA, NA, 6M, 9M, 12M, 18M, 24M, 36M, 48M, 54M
++ { 0x80, 0x80, 0x80, 0x80, 20, 20, 20, 20, 18, 18, 16, 16},
++ .pwrlvl_11n_11ac_5g =
++ //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9
++ { 20, 20, 20, 20, 18, 18, 16, 16, 16, 15},
++ .pwrlvl_11ax_5g =
++ //MCS0, MCS1, MCS2, MCS3, MCS4, MCS5, MCS6, MCS7, MCS8, MCS9, MCS10,MCS11
++ { 20, 20, 20, 20, 18, 18, 16, 16, 16, 15, 14, 14},
++ },
++ .txpwr_loss = {
++ .loss_enable = 1,
++ .loss_value = 0,
++ },
++ .txpwr_ofst = {
++ .enable = 1,
++ .chan_1_4 = 0,
++ .chan_5_9 = 0,
++ .chan_10_13 = 0,
++ .chan_36_64 = 0,
++ .chan_100_120 = 0,
++ .chan_122_140 = 0,
++ .chan_142_165 = 0,
++ },
++ .xtal_cap = {
++ .enable = 0,
++ .xtal_cap = 24,
++ .xtal_cap_fine = 31,
++ },
++};
++
++
++
++#ifdef CONFIG_RWNX_TL4
++/**
++ * rwnx_plat_tl4_fw_upload() - Load the requested FW into embedded side.
++ *
++ * @rwnx_plat: pointer to platform structure
++ * @fw_addr: Virtual address where the fw must be loaded
++ * @filename: Name of the fw.
++ *
++ * Load a fw, stored as a hex file, into the specified address
++ */
++static int rwnx_plat_tl4_fw_upload(struct rwnx_plat *rwnx_plat, u8* fw_addr,
++ char *filename)
++{
++ struct device *dev = rwnx_platform_get_dev(rwnx_plat);
++ const struct firmware *fw;
++ int err = 0;
++ u32 *dst;
++ u8 const *file_data;
++ char typ0, typ1;
++ u32 addr0, addr1;
++ u32 dat0, dat1;
++ int remain;
++
++ err = request_firmware(&fw, filename, dev);
++ if (err) {
++ return err;
++ }
++ file_data = fw->data;
++ remain = fw->size;
++
++ /* Copy the file on the Embedded side */
++ dev_dbg(dev, "\n### Now copy %s firmware, @ = %p\n", filename, fw_addr);
++
++ /* Walk through all the lines of the configuration file */
++ while (remain >= 16) {
++ u32 data, offset;
++
++ if (sscanf(file_data, "%c:%08X %04X", &typ0, &addr0, &dat0) != 3)
++ break;
++ if ((addr0 & 0x01) != 0) {
++ addr0 = addr0 - 1;
++ dat0 = 0;
++ } else {
++ file_data += 16;
++ remain -= 16;
++ }
++ if ((remain < 16) ||
++ (sscanf(file_data, "%c:%08X %04X", &typ1, &addr1, &dat1) != 3) ||
++ (typ1 != typ0) || (addr1 != (addr0 + 1))) {
++ typ1 = typ0;
++ addr1 = addr0 + 1;
++ dat1 = 0;
++ } else {
++ file_data += 16;
++ remain -= 16;
++ }
++
++ if (typ0 == 'C') {
++ offset = 0x00200000;
++ if ((addr1 % 4) == 3)
++ offset += 2*(addr1 - 3);
++ else
++ offset += 2*(addr1 + 1);
++
++ data = dat1 | (dat0 << 16);
++ } else {
++ offset = 2*(addr1 - 1);
++ data = dat0 | (dat1 << 16);
++ }
++ dst = (u32 *)(fw_addr + offset);
++ *dst = data;
++ }
++
++ release_firmware(fw);
++
++ return err;
++}
++#endif
++
++#if 0
++/**
++ * rwnx_plat_bin_fw_upload() - Load the requested binary FW into embedded side.
++ *
++ * @rwnx_plat: pointer to platform structure
++ * @fw_addr: Virtual address where the fw must be loaded
++ * @filename: Name of the fw.
++ *
++ * Load a fw, stored as a binary file, into the specified address
++ */
++static int rwnx_plat_bin_fw_upload(struct rwnx_plat *rwnx_plat, u8* fw_addr,
++ char *filename)
++{
++ const struct firmware *fw;
++ struct device *dev = rwnx_platform_get_dev(rwnx_plat);
++ int err = 0;
++ unsigned int i, size;
++ u32 *src, *dst;
++
++ err = request_firmware(&fw, filename, dev);
++ if (err) {
++ return err;
++ }
++
++ /* Copy the file on the Embedded side */
++ dev_dbg(dev, "\n### Now copy %s firmware, @ = %p\n", filename, fw_addr);
++
++ src = (u32 *)fw->data;
++ dst = (u32 *)fw_addr;
++ size = (unsigned int)fw->size;
++
++ /* check potential platform bug on multiple stores vs memcpy */
++ for (i = 0; i < size; i += 4) {
++ *dst++ = *src++;
++ }
++
++ release_firmware(fw);
++
++ return err;
++}
++#endif
++
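++/* Helpers to log the MD5 digest of a loaded firmware image */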
++#define MD5(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15]
++#define MD5PINRT "file md5:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\r\n"
++
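++/**
++ * rwnx_load_firmware() - Read a firmware image into a vmalloc'ed buffer
++ *
++ * Either goes through the request_firmware() framework (CONFIG_USE_FW_REQUEST)
++ * or reads the file directly from aic_fw_path. On success *fw_buf points to
++ * the buffer (to be released with rwnx_release_firmware_common()) and the
++ * image size is returned, -1 on error.
++ */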
++static int rwnx_load_firmware(u32 **fw_buf, const char *name, struct device *device)
++{
++
++#ifdef CONFIG_USE_FW_REQUEST
++ const struct firmware *fw = NULL;
++ u32 *dst = NULL;
++ void *buffer=NULL;
++ MD5_CTX md5;
++ unsigned char decrypt[16];
++ int size = 0;
++ int ret = 0;
++
++ AICWFDBG(LOGINFO, "%s: request firmware = %s \n", __func__ ,name);
++
++ ret = request_firmware(&fw, name, NULL);
++
++ if (ret < 0) {
++ AICWFDBG(LOGERROR, "Load %s fail\n", name);
++ release_firmware(fw);
++ return -1;
++ }
++
++ size = fw->size;
++ dst = (u32 *)fw->data;
++
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ release_firmware(fw);
++ return -1;
++ }
++
++ buffer = vmalloc(size);
++ if (!buffer) {
++ release_firmware(fw);
++ return -1;
++ }
++ memcpy(buffer, dst, size);
++
++ *fw_buf = buffer;
++
++ MD5Init(&md5);
++ MD5Update(&md5, (unsigned char *)buffer, size);
++ MD5Final(&md5, decrypt);
++ AICWFDBG(LOGINFO, MD5PINRT, MD5(decrypt));
++
++ release_firmware(fw);
++
++ return size;
++#else
++ void *buffer = NULL;
++ char *path = NULL;
++ struct file *fp = NULL;
++ int size = 0, len = 0, i = 0;
++ ssize_t rdlen = 0;
++ u32 *src = NULL, *dst = NULL;
++ MD5_CTX md5;
++ unsigned char decrypt[16];
++
++ /* get the firmware path */
++ path = __getname();
++ if (!path) {
++ *fw_buf = NULL;
++ return -1;
++ }
++
++ len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s", aic_fw_path, name);
++
++ //len = snprintf(path, FW_PATH_MAX_LEN, "%s", name);
++ if (len >= FW_PATH_MAX_LEN) {
++ AICWFDBG(LOGERROR, "%s: %s file's path too long\n", __func__, name);
++ *fw_buf = NULL;
++ __putname(path);
++ return -1;
++ }
++
++ AICWFDBG(LOGINFO, "%s :firmware path = %s \n", __func__, path);
++
++ /* open the firmware file */
++ fp = filp_open(path, O_RDONLY, 0);
++ if (IS_ERR_OR_NULL(fp)) {
++ AICWFDBG(LOGERROR, "%s: %s file failed to open\n", __func__, name);
++ *fw_buf = NULL;
++ __putname(path);
++ fp = NULL;
++ return -1;
++ }
++
++ size = i_size_read(file_inode(fp));
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "%s: %s file size invalid %d\n", __func__, name, size);
++ *fw_buf = NULL;
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ return -1;
++ }
++
++ /* start to read from firmware file */
++ buffer = vmalloc(size);
++ if (!buffer) {
++ *fw_buf = NULL;
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ return -1;
++ }
++
++ #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16)
++ rdlen = kernel_read(fp, buffer, size, &fp->f_pos);
++ #else
++ rdlen = kernel_read(fp, fp->f_pos, buffer, size);
++ #endif
++
++ if (size != rdlen) {
++ AICWFDBG(LOGERROR, "%s: %s file rdlen invalid %d\n", __func__, name, (int)rdlen);
++ *fw_buf = NULL;
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ vfree(buffer);
++ buffer = NULL;
++ return -1;
++ }
++ if (rdlen > 0) {
++ fp->f_pos += rdlen;
++ }
++
++ /*start to transform the data format*/
++ src = (u32 *)buffer;
++ dst = (u32 *)vmalloc(size);
++
++ if (!dst) {
++ *fw_buf = NULL;
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ vfree(buffer);
++ buffer = NULL;
++ return -1;
++ }
++
++ for (i = 0; i < (size/4); i++) {
++ dst[i] = src[i];
++ }
++
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ vfree(buffer);
++ buffer = NULL;
++ *fw_buf = dst;
++
++ MD5Init(&md5);
++ MD5Update(&md5, (unsigned char *)dst, size);
++ MD5Final(&md5, decrypt);
++
++ AICWFDBG(LOGINFO, MD5PINRT, MD5(decrypt));
++
++ return size;
++#endif
++}
++
++static void rwnx_restore_firmware(u32 **fw_buf)
++{
++ vfree(*fw_buf);
++ *fw_buf = NULL;
++}
++
++
++/* buffer is allocated by vmalloc(); release it with rwnx_release_firmware_common() */
++int rwnx_request_firmware_common(struct rwnx_hw *rwnx_hw, u32** buffer, const char *filename)
++{
++ int size;
++
++ AICWFDBG(LOGINFO, "### Load file %s\n", filename);
++
++ size = rwnx_load_firmware(buffer, filename, NULL);
++
++ return size;
++}
++
++void rwnx_release_firmware_common(u32** buffer)
++{
++ rwnx_restore_firmware(buffer);
++}
++
++
++
++/**
++ * rwnx_plat_bin_fw_upload_2() - Load the requested binary FW into embedded side.
++ *
++ * @rwnx_hw: Main driver data
++ * @fw_addr: Address where the fw must be loaded
++ * @filename: Name of the fw.
++ *
++ * Load a fw, stored as a binary file, into the specified address
++ */
++
++int rwnx_plat_bin_fw_upload_2(struct rwnx_hw *rwnx_hw, u32 fw_addr,
++ char *filename)
++{
++ int err = 0;
++ unsigned int i = 0, size;
++// u32 *src;
++ u32 *dst = NULL;
++
++ /* Copy the file on the Embedded side */
++ AICWFDBG(LOGINFO, "### Upload %s firmware, @ = %x\n", filename, fw_addr);
++
++ size = rwnx_request_firmware_common(rwnx_hw, &dst, filename);
++ if (!dst) {
++ AICWFDBG(LOGERROR, "No such file or directory\n");
++ return -1;
++ }
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ rwnx_release_firmware_common(&dst);
++ return -1;
++ }
++
++ AICWFDBG(LOGINFO, "size=%d, dst[0]=%x\n", size, dst[0]);
++ if (size > 512) {
++ for (; i < (size - 512); i += 512) {
++ //printk("wr blk 0: %p -> %x\r\n", dst + i / 4, fw_addr + i);
++ err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, 512, dst + i / 4);
++ if (err) {
++ AICWFDBG(LOGERROR, "bin upload fail: %x, err:%d\r\n", fw_addr + i, err);
++ break;
++ }
++ }
++ }
++ if (!err && (i < size)) {
++ //printk("wr blk 1: %p -> %x\r\n", dst + i / 4, fw_addr + i);
++ err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, size - i, dst + i / 4);
++ if (err) {
++ AICWFDBG(LOGERROR, "bin upload fail: %x, err:%d\r\n", fw_addr + i, err);
++ }
++ }
++
++ if (dst) {
++ rwnx_release_firmware_common(&dst);
++ }
++
++ return err;
++}
++
++
++
++#ifndef CONFIG_ROM_PATCH_EN
++#if defined(CONFIG_PLATFORM_ALLWINNER) || defined(CONFIG_NANOPI_M4)
++#if 0
++static int aic_load_firmware(u32 ** fw_buf, const char *name,
++ struct device *device)
++{
++ void *buffer=NULL;
++ char *path=NULL;
++ struct file *fp=NULL;
++ int size = 0, len=0, i=0;
++ ssize_t rdlen=0;
++ u32 *src=NULL, *dst = NULL;
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ /* get the firmware path */
++ path = __getname();
++ if (!path){
++ *fw_buf=NULL;
++ return -1;
++ }
++
++ len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s",aic_fw_path, name);
++ if (len >= FW_PATH_MAX_LEN) {
++ printk("%s: %s file's path too long\n", __func__, name);
++ *fw_buf=NULL;
++ __putname(path);
++ return -1;
++ }
++
++ printk("%s :firmware path = %s \n", __func__ ,path);
++
++
++ /* open the firmware file */
++ fp=filp_open(path, O_RDONLY, 0);
++ if(IS_ERR(fp) || (!fp)){
++ printk("%s: %s file failed to open\n", __func__, name);
++ if(IS_ERR(fp))
++ printk("is_Err\n");
++ *fw_buf=NULL;
++ __putname(path);
++ fp=NULL;
++ return -1;
++ }
++
++ size = i_size_read(file_inode(fp));
++ if(size<=0){
++ printk("%s: %s file size invalid %d\n", __func__, name, size);
++ *fw_buf=NULL;
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ return -1;
++ }
++
++ /* start to read from firmware file */
++ buffer = kzalloc(size, GFP_KERNEL);
++ if(!buffer){
++ *fw_buf=NULL;
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ return -1;
++ }
++
++
++ #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16)
++ rdlen = kernel_read(fp, buffer, size, &fp->f_pos);
++ #else
++ rdlen = kernel_read(fp, fp->f_pos, buffer, size);
++ #endif
++
++ if(size != rdlen){
++ printk("%s: %s file rdlen invalid %ld\n", __func__, name, (long int)rdlen);
++ *fw_buf=NULL;
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ kfree(buffer);
++ buffer=NULL;
++ return -1;
++ }
++ if(rdlen > 0){
++ fp->f_pos += rdlen;
++ }
++
++
++ /*start to transform the data format*/
++ src = (u32*)buffer;
++ printk("malloc dst\n");
++ dst = (u32*)kzalloc(size,GFP_KERNEL);
++
++ if(!dst){
++ *fw_buf=NULL;
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ kfree(buffer);
++ buffer=NULL;
++ return -1;
++ }
++
++ for(i=0;i<(size/4);i++){
++ dst[i] = src[i];
++ }
++
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ kfree(buffer);
++ buffer=NULL;
++ *fw_buf = dst;
++
++ return size;
++
++}
++#endif
++#endif
++#endif
++
++
++#ifndef CONFIG_ROM_PATCH_EN
++#if defined(CONFIG_PLATFORM_ALLWINNER) || defined(CONFIG_NANOPI_M4)
++#if 0
++static int rwnx_plat_bin_fw_upload_android(struct rwnx_hw *rwnx_hw, u32 fw_addr,
++ char *filename)
++{
++ struct device *dev = rwnx_platform_get_dev(rwnx_hw->plat);
++ unsigned int i=0;
++ int size;
++ u32 *dst=NULL;
++ int err=0;
++
++
++ /* load aic firmware */
++ size = aic_load_firmware(&dst, filename, dev);
++ if(size<=0){
++ printk("wrong size of firmware file\n");
++ kfree(dst);
++ dst = NULL;
++ return -1;
++ }
++
++
++ /* Copy the file on the Embedded side */
++ printk("\n### Upload %s firmware, @ = %x size=%d\n", filename, fw_addr, size);
++
++ if (size > 1024) {// > 1KB data
++ for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB
++ err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, 1024, dst + i / 4);
++ if (err) {
++ printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err);
++ break;
++ }
++ }
++ }
++
++ if (!err && (i < size)) {// <1KB data
++ err = rwnx_send_dbg_mem_block_write_req(rwnx_hw, fw_addr + i, size - i, dst + i / 4);
++ if (err) {
++ printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err);
++ }
++ }
++
++ if (dst) {
++ kfree(dst);
++ dst = NULL;
++ }
++
++ return err;
++}
++#endif
++#endif
++#endif
++
++
++
++#if 0
++#ifndef CONFIG_RWNX_TL4
++#define IHEX_REC_DATA 0
++#define IHEX_REC_EOF 1
++#define IHEX_REC_EXT_SEG_ADD 2
++#define IHEX_REC_START_SEG_ADD 3
++#define IHEX_REC_EXT_LIN_ADD 4
++#define IHEX_REC_START_LIN_ADD 5
++
++/**
++ * rwnx_plat_ihex_fw_upload() - Load the requested intel hex 8 FW into embedded side.
++ *
++ * @rwnx_plat: pointer to platform structure
++ * @fw_addr: Virtual address where the fw must be loaded
++ * @filename: Name of the fw.
++ *
++ * Load a fw, stored as a ihex file, into the specified address.
++ */
++static int rwnx_plat_ihex_fw_upload(struct rwnx_plat *rwnx_plat, u8* fw_addr,
++ char *filename)
++{
++ const struct firmware *fw;
++ struct device *dev = rwnx_platform_get_dev(rwnx_plat);
++ u8 const *src, *end;
++ u32 *dst;
++ u16 haddr, segaddr, addr;
++ u32 hwaddr;
++ u8 load_fw, byte_count, checksum, csum, rec_type;
++ int err, rec_idx;
++ char hex_buff[9];
++
++ err = request_firmware(&fw, filename, dev);
++ if (err) {
++ return err;
++ }
++
++ /* Copy the file on the Embedded side */
++ dev_dbg(dev, "\n### Now copy %s firmware, @ = %p\n", filename, fw_addr);
++
++ src = fw->data;
++ end = src + (unsigned int)fw->size;
++ haddr = 0;
++ segaddr = 0;
++ load_fw = 1;
++ err = -EINVAL;
++ rec_idx = 0;
++ hwaddr = 0;
++
++#define IHEX_READ8(_val, _cs) { \
++ hex_buff[2] = 0; \
++ strncpy(hex_buff, src, 2); \
++ if (kstrtou8(hex_buff, 16, &_val)) \
++ goto end; \
++ src += 2; \
++ if (_cs) \
++ csum += _val; \
++ }
++
++#define IHEX_READ16(_val) { \
++ hex_buff[4] = 0; \
++ strncpy(hex_buff, src, 4); \
++ if (kstrtou16(hex_buff, 16, &_val)) \
++ goto end; \
++ src += 4; \
++ csum += (_val & 0xff) + (_val >> 8); \
++ }
++
++#define IHEX_READ32(_val) { \
++ hex_buff[8] = 0; \
++ strncpy(hex_buff, src, 8); \
++ if (kstrtouint(hex_buff, 16, &_val)) \
++ goto end; \
++ src += 8; \
++ csum += (_val & 0xff) + ((_val >> 8) & 0xff) + \
++ ((_val >> 16) & 0xff) + (_val >> 24); \
++ }
++
++#define IHEX_READ32_PAD(_val, _nb) { \
++ memset(hex_buff, '0', 8); \
++ hex_buff[8] = 0; \
++ strncpy(hex_buff, src, (2 * _nb)); \
++ if (kstrtouint(hex_buff, 16, &_val)) \
++ goto end; \
++ src += (2 * _nb); \
++ csum += (_val & 0xff) + ((_val >> 8) & 0xff) + \
++ ((_val >> 16) & 0xff) + (_val >> 24); \
++}
++
++ /* loop until end of file is read*/
++ while (load_fw) {
++ rec_idx++;
++ csum = 0;
++
++ /* Find next colon start code */
++ while (*src != ':') {
++ src++;
++ if ((src + 3) >= end) /* 3 = : + rec_len */
++ goto end;
++ }
++ src++;
++
++ /* Read record len */
++ IHEX_READ8(byte_count, 1);
++ if ((src + (byte_count * 2) + 8) >= end) /* 8 = rec_addr + rec_type + chksum */
++ goto end;
++
++ /* Read record addr */
++ IHEX_READ16(addr);
++
++ /* Read record type */
++ IHEX_READ8(rec_type, 1);
++
++ switch(rec_type) {
++ case IHEX_REC_DATA:
++ {
++ /* Update destination address */
++ dst = (u32 *) (fw_addr + hwaddr + addr);
++
++ while (byte_count) {
++ u32 val;
++ if (byte_count >= 4) {
++ IHEX_READ32(val);
++ byte_count -= 4;
++ } else {
++ IHEX_READ32_PAD(val, byte_count);
++ byte_count = 0;
++ }
++ *dst++ = __swab32(val);
++ }
++ break;
++ }
++ case IHEX_REC_EOF:
++ {
++ load_fw = 0;
++ err = 0;
++ break;
++ }
++ case IHEX_REC_EXT_SEG_ADD: /* Extended Segment Address */
++ {
++ IHEX_READ16(segaddr);
++ hwaddr = (haddr << 16) + (segaddr << 4);
++ break;
++ }
++ case IHEX_REC_EXT_LIN_ADD: /* Extended Linear Address */
++ {
++ IHEX_READ16(haddr);
++ hwaddr = (haddr << 16) + (segaddr << 4);
++ break;
++ }
++ case IHEX_REC_START_LIN_ADD: /* Start Linear Address */
++ {
++ u32 val;
++ IHEX_READ32(val); /* need to read for checksum */
++ break;
++ }
++ case IHEX_REC_START_SEG_ADD:
++ default:
++ {
++ dev_err(dev, "ihex: record type %d not supported\n", rec_type);
++ load_fw = 0;
++ }
++ }
++
++ /* Read and compare checksum */
++ IHEX_READ8(checksum, 0);
++ if (checksum != (u8)(~csum + 1))
++ goto end;
++ }
++
++#undef IHEX_READ8
++#undef IHEX_READ16
++#undef IHEX_READ32
++#undef IHEX_READ32_PAD
++
++ end:
++ release_firmware(fw);
++
++ if (err)
++ dev_err(dev, "%s: Invalid ihex record around line %d\n", filename, rec_idx);
++
++ return err;
++}
++#endif /* CONFIG_RWNX_TL4 */
++
++#ifndef CONFIG_RWNX_SDM
++/**
++ * rwnx_plat_get_rf() - Return the RF used in the platform
++ *
++ * @rwnx_plat: pointer to platform structure
++ */
++static u32 rwnx_plat_get_rf(struct rwnx_plat *rwnx_plat)
++{
++ u32 ver;
++ ver = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, MDM_HDMCONFIG_ADDR);
++
++ ver = __MDM_PHYCFG_FROM_VERS(ver);
++ WARN(((ver != MDM_PHY_CONFIG_TRIDENT) &&
++ (ver != MDM_PHY_CONFIG_ELMA) &&
++ (ver != MDM_PHY_CONFIG_KARST)),
++ "bad phy version 0x%08x\n", ver);
++
++ return ver;
++}
++
++/**
++ * rwnx_plat_stop_agcfsm() - Stop an AGC state machine
++ *
++ * @rwnx_plat: pointer to platform structure
++ * @agc_reg: Address of the agccntl register (within RWNX_ADDR_SYSTEM)
++ * @agcctl: Updated with value of the agccntl register before stop
++ * @memclk: Updated with value of the clock register before stop
++ * @agc_ver: Version of the AGC load procedure
++ * @clkctrladdr: Indicates which AGC clock register should be accessed
++ */
++static void rwnx_plat_stop_agcfsm(struct rwnx_plat *rwnx_plat, int agc_reg,
++ u32 *agcctl, u32 *memclk, u8 agc_ver,
++ u32 clkctrladdr)
++{
++ /* First read agcctnl and clock registers */
++ *memclk = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, clkctrladdr);
++
++ /* Stop state machine : xxAGCCNTL0[AGCFSMRESET]=1 */
++ *agcctl = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, agc_reg);
++ RWNX_REG_WRITE((*agcctl) | BIT(12), rwnx_plat, RWNX_ADDR_SYSTEM, agc_reg);
++
++ /* Force clock */
++ if (agc_ver > 0) {
++ /* CLKGATEFCTRL0[AGCCLKFORCE]=1 */
++ RWNX_REG_WRITE((*memclk) | BIT(29), rwnx_plat, RWNX_ADDR_SYSTEM,
++ clkctrladdr);
++ } else {
++ /* MEMCLKCTRL0[AGCMEMCLKCTRL]=0 */
++ RWNX_REG_WRITE((*memclk) & ~BIT(3), rwnx_plat, RWNX_ADDR_SYSTEM,
++ clkctrladdr);
++ }
++}
++
++
++/**
++ * rwnx_plat_start_agcfsm() - Restart an AGC state machine
++ *
++ * @rwnx_plat: pointer to platform structure
++ * @agc_reg: Address of the agccntl register (within RWNX_ADDR_SYSTEM)
++ * @agcctl: value of the agccntl register to restore
++ * @memclk: value of the clock register to restore
++ * @agc_ver: Version of the AGC load procedure
++ * @clkctrladdr: Indicates which AGC clock register should be accessed
++ */
++static void rwnx_plat_start_agcfsm(struct rwnx_plat *rwnx_plat, int agc_reg,
++ u32 agcctl, u32 memclk, u8 agc_ver,
++ u32 clkctrladdr)
++{
++
++ /* Release clock */
++ if (agc_ver > 0)
++ /* CLKGATEFCTRL0[AGCCLKFORCE]=0 */
++ RWNX_REG_WRITE(memclk & ~BIT(29), rwnx_plat, RWNX_ADDR_SYSTEM,
++ clkctrladdr);
++ else
++ /* MEMCLKCTRL0[AGCMEMCLKCTRL]=1 */
++ RWNX_REG_WRITE(memclk | BIT(3), rwnx_plat, RWNX_ADDR_SYSTEM,
++ clkctrladdr);
++
++ /* Restart state machine: xxAGCCNTL0[AGCFSMRESET]=0 */
++ RWNX_REG_WRITE(agcctl & ~BIT(12), rwnx_plat, RWNX_ADDR_SYSTEM, agc_reg);
++}
++#endif
++
++/**
++ * rwnx_plat_fcu_load() - Load FCU (Fifth Chain Unit) ucode
++ *
++ * @rwnx_hw: main driver data
++ *
++ * c.f Modem UM (AGC/CCA initialization)
++ */
++static int rwnx_plat_fcu_load(struct rwnx_hw *rwnx_hw)
++{
++ int ret=0;
++#ifndef CONFIG_RWNX_SDM
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ u32 agcctl, memclk;
++
++#ifndef CONFIG_RWNX_FHOST
++ /* By default, we consider that there is only one RF in the system */
++ rwnx_hw->phy.cnt = 1;
++#endif // CONFIG_RWNX_FHOST
++
++ if (rwnx_plat_get_rf(rwnx_plat) != MDM_PHY_CONFIG_ELMA)
++ /* No FCU for PHYs other than Elma */
++ return 0;
++
++ agcctl = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, RIU_RWNXAGCCNTL_ADDR);
++ if (!__RIU_FCU_PRESENT(agcctl))
++ /* No FCU present in this version */
++ return 0;
++
++#ifndef CONFIG_RWNX_FHOST
++ /* FCU is present */
++ #ifdef USE_5G
++ rwnx_hw->phy.cnt = 2;
++ rwnx_hw->phy.sec_chan.band = NL80211_BAND_5GHZ;
++ rwnx_hw->phy.sec_chan.type = PHY_CHNL_BW_20;
++ rwnx_hw->phy.sec_chan.prim20_freq = 5500;
++ rwnx_hw->phy.sec_chan.center_freq1 = 5500;
++ rwnx_hw->phy.sec_chan.center_freq2 = 0;
++ #endif
++#endif // CONFIG_RWNX_FHOST
++
++ rwnx_plat_stop_agcfsm(rwnx_plat, FCU_RWNXFCAGCCNTL_ADDR, &agcctl, &memclk, 0,
++ MDM_MEMCLKCTRL0_ADDR);
++
++ ret = rwnx_plat_bin_fw_upload(rwnx_plat,
++ RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, PHY_FCU_UCODE_ADDR),
++ RWNX_FCU_FW_NAME);
++
++ rwnx_plat_start_agcfsm(rwnx_plat, FCU_RWNXFCAGCCNTL_ADDR, agcctl, memclk, 0,
++ MDM_MEMCLKCTRL0_ADDR);
++#endif
++
++ return ret;
++}
++
++/**
++ * rwnx_get_agc_load_version() - Return the AGC load procedure version and
++ * the AGC clock register to use
++ *
++ * @rwnx_plat: platform data
++ * @rf: RF in use
++ * @clkctrladdr: Updated with the address of the AGC clock control register
++ *
++ * c.f Modem UM (AGC/CCA initialization)
++ */
++#ifndef CONFIG_RWNX_SDM
++static u8 rwnx_get_agc_load_version(struct rwnx_plat *rwnx_plat, u32 rf, u32 *clkctrladdr)
++{
++ u8 agc_load_ver = 0;
++ u32 agc_ver;
++ u32 regval;
++
++ /* Trident and Elma PHY use old method */
++ if (rf != MDM_PHY_CONFIG_KARST) {
++ *clkctrladdr = MDM_MEMCLKCTRL0_ADDR;
++ return 0;
++ }
++
++ /* Get the FPGA signature */
++ regval = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_SIGNATURE_ADDR);
++
++ if (__FPGA_TYPE(regval) == 0xC0CA)
++ *clkctrladdr = CRM_CLKGATEFCTRL0_ADDR;
++ else
++ *clkctrladdr = MDM_CLKGATEFCTRL0_ADDR;
++
++ /* Read RIU version register */
++ agc_ver = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, RIU_RWNXVERSION_ADDR);
++ agc_load_ver = __RIU_AGCLOAD_FROM_VERS(agc_ver);
++
++ return agc_load_ver;
++}
++#endif /* CONFIG_RWNX_SDM */
++
++/**
++ * rwnx_plat_agc_load() - Load AGC ucode
++ *
++ * @rwnx_plat: platform data
++ * c.f Modem UM (AGC/CCA initialization)
++ */
++static int rwnx_plat_agc_load(struct rwnx_plat *rwnx_plat)
++{
++ int ret = 0;
++#ifndef CONFIG_RWNX_SDM
++ u32 agc = 0, agcctl, memclk;
++ u32 clkctrladdr;
++ u32 rf = rwnx_plat_get_rf(rwnx_plat);
++ u8 agc_ver;
++
++ switch (rf) {
++ case MDM_PHY_CONFIG_TRIDENT:
++ agc = AGC_RWNXAGCCNTL_ADDR;
++ break;
++ case MDM_PHY_CONFIG_ELMA:
++ case MDM_PHY_CONFIG_KARST:
++ agc = RIU_RWNXAGCCNTL_ADDR;
++ break;
++ default:
++ return -1;
++ }
++
++ agc_ver = rwnx_get_agc_load_version(rwnx_plat, rf, &clkctrladdr);
++
++ rwnx_plat_stop_agcfsm(rwnx_plat, agc, &agcctl, &memclk, agc_ver, clkctrladdr);
++
++ ret = rwnx_plat_bin_fw_upload(rwnx_plat,
++ RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, PHY_AGC_UCODE_ADDR),
++ RWNX_AGC_FW_NAME);
++
++ if (!ret && (agc_ver == 1)) {
++ /* Run BIST to ensure that the AGC RAM was correctly loaded */
++ RWNX_REG_WRITE(BIT(28), rwnx_plat, RWNX_ADDR_SYSTEM,
++ RIU_RWNXDYNAMICCONFIG_ADDR);
++ while (RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM,
++ RIU_RWNXDYNAMICCONFIG_ADDR) & BIT(28));
++
++ if (!(RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM,
++ RIU_AGCMEMBISTSTAT_ADDR) & BIT(0))) {
++ dev_err(rwnx_platform_get_dev(rwnx_plat),
++ "AGC RAM not loaded correctly 0x%08x\n",
++ RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM,
++ RIU_AGCMEMSIGNATURESTAT_ADDR));
++ ret = -EIO;
++ }
++ }
++
++ rwnx_plat_start_agcfsm(rwnx_plat, agc, agcctl, memclk, agc_ver, clkctrladdr);
++
++#endif
++ return ret;
++}
++
++/**
++ * rwnx_ldpc_load() - Load LDPC RAM
++ *
++ * @rwnx_hw: Main driver data
++ * c.f Modem UM (LDPC initialization)
++ */
++static int rwnx_ldpc_load(struct rwnx_hw *rwnx_hw)
++{
++#ifndef CONFIG_RWNX_SDM
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ u32 rf = rwnx_plat_get_rf(rwnx_plat);
++ u32 phy_feat = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, MDM_HDMCONFIG_ADDR);
++
++ if ((rf != MDM_PHY_CONFIG_KARST) ||
++ (phy_feat & (MDM_LDPCDEC_BIT | MDM_LDPCENC_BIT)) !=
++ (MDM_LDPCDEC_BIT | MDM_LDPCENC_BIT)) {
++ goto disable_ldpc;
++ }
++
++ if (rwnx_plat_bin_fw_upload(rwnx_plat,
++ RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, PHY_LDPC_RAM_ADDR),
++ RWNX_LDPC_RAM_NAME)) {
++ goto disable_ldpc;
++ }
++
++ return 0;
++
++ disable_ldpc:
++ rwnx_hw->mod_params->ldpc_on = false;
++
++#endif /* CONFIG_RWNX_SDM */
++ return 0;
++}
++
++/**
++ * rwnx_plat_lmac_load() - Load FW code
++ *
++ * @rwnx_plat: platform data
++ */
++static int rwnx_plat_lmac_load(struct rwnx_plat *rwnx_plat)
++{
++ int ret;
++
++ #ifdef CONFIG_RWNX_TL4
++ ret = rwnx_plat_tl4_fw_upload(rwnx_plat,
++ RWNX_ADDR(rwnx_plat, RWNX_ADDR_CPU, RAM_LMAC_FW_ADDR),
++ RWNX_MAC_FW_NAME);
++ #else
++ ret = rwnx_plat_ihex_fw_upload(rwnx_plat,
++ RWNX_ADDR(rwnx_plat, RWNX_ADDR_CPU, RAM_LMAC_FW_ADDR),
++ RWNX_MAC_FW_NAME);
++ if (ret == -ENOENT)
++ {
++ ret = rwnx_plat_bin_fw_upload(rwnx_plat,
++ RWNX_ADDR(rwnx_plat, RWNX_ADDR_CPU, RAM_LMAC_FW_ADDR),
++ RWNX_MAC_FW_NAME2);
++ }
++ #endif
++
++ return ret;
++}
++#endif
++
++#ifndef CONFIG_ROM_PATCH_EN
++/**
++ * rwnx_plat_fmac_load() - Load FW code
++ *
++ * @rwnx_hw: Main driver data
++ */
++#if 0
++static int rwnx_plat_fmac_load(struct rwnx_hw *rwnx_hw)
++{
++ int ret;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++ #if defined(CONFIG_NANOPI_M4) || defined(CONFIG_PLATFORM_ALLWINNER)
++ ret = rwnx_plat_bin_fw_upload_android(rwnx_hw, RAM_FMAC_FW_ADDR, RWNX_MAC_FW_NAME2);
++ #else
++ ret = rwnx_plat_bin_fw_upload_2(rwnx_hw,
++ RAM_FMAC_FW_ADDR,
++ RWNX_MAC_FW_NAME2);
++ #endif
++ return ret;
++}
++#endif
++#endif /* !CONFIG_ROM_PATCH_EN */
++
++#if 0
++/**
++ * rwnx_plat_mpif_sel() - Select the MPIF according to the FPGA signature
++ *
++ * @rwnx_plat: platform data
++ */
++static void rwnx_plat_mpif_sel(struct rwnx_plat *rwnx_plat)
++{
++#ifndef CONFIG_RWNX_SDM
++ u32 regval;
++ u32 type;
++
++ /* Get the FPGA signature */
++ regval = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_SIGNATURE_ADDR);
++ type = __FPGA_TYPE(regval);
++
++ /* Check if we need to switch to the old MPIF or not */
++ if ((type != 0xCAFE) && (type != 0XC0CA) && (regval & 0xF) < 0x3)
++ {
++ /* An old FPGA A is used, so configure the FPGA B to use the old MPIF */
++ RWNX_REG_WRITE(0x3, rwnx_plat, RWNX_ADDR_SYSTEM, FPGAB_MPIF_SEL_ADDR);
++ }
++#endif
++}
++#endif
++#ifdef CONFIG_DPD
++int is_file_exist(char* name)
++{
++ char *path = NULL;
++ struct file *fp = NULL;
++ int len;
++
++ path = __getname();
++ if (!path) {
++ AICWFDBG(LOGINFO, "%s getname fail\n", __func__);
++ return -1;
++ }
++
++ len = snprintf(path, FW_PATH_MAX_LEN, "%s/%s", aic_fw_path, name);
++
++ fp = filp_open(path, O_RDONLY, 0);
++ if (IS_ERR(fp)) {
++ __putname(path);
++ fp = NULL;
++ return 0;
++ } else {
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ return 1;
++ }
++}
++#endif//CONFIG_DPD
++/**
++ * rwnx_plat_patch_load() - Load patch code
++ *
++ * @rwnx_hw: Main driver data
++ */
++#ifdef CONFIG_ROM_PATCH_EN
++
++
++static int rwnx_plat_patch_load(struct rwnx_hw *rwnx_hw)
++{
++ int ret = 0;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if(rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW){
++#ifndef ANDROID_PLATFORM
++ /* sprintf() with overlapping source and destination is undefined,
++ append the chip sub-directory with strlcat() instead */
++ strlcat(aic_fw_path, "/aic8800DC", FW_PATH_MAX_LEN);
++#endif
++#ifdef CONFIG_DPD
++ if (chip_sub_id == 1) {
++ AICWFDBG(LOGINFO, "testmode=%d\n", testmode);
++ if (testmode == FW_NORMAL_MODE) {
++ if (is_file_exist(FW_DPDRESULT_NAME_8800DC) == 1) {
++ AICWFDBG(LOGINFO, "dpd bin load\n");
++ ret = aicwf_dpd_result_load_8800dc(rwnx_hw);
++ if (ret) {
++ AICWFDBG(LOGINFO, "load dpd bin fail: %d\n", ret);
++ return ret;
++ }
++ } else {
++ aicwf_misc_ram_init_8800dc(rwnx_hw);
++ }
++ } else if (testmode == FW_DPDCALIB_MODE) {
++ if (is_file_exist(FW_DPDRESULT_NAME_8800DC) == 0) {
++ uint32_t dpd_res[DPD_RESULT_SIZE_8800DC / 4] = {0,};
++ AICWFDBG(LOGINFO, "dpd calib & write\n");
++ ret = aicwf_dpd_calib_8800dc(rwnx_hw, &dpd_res[0]);
++ if (ret) {
++ AICWFDBG(LOGINFO, "dpd calib fail: %d\n", ret);
++ return ret;
++ }
++ ret = aicwf_dpd_result_write_8800dc((void *)dpd_res, DPD_RESULT_SIZE_8800DC);
++ if (ret) {
++ AICWFDBG(LOGINFO, "file write fail: %d\n", ret);
++ return ret;
++ }
++ }
++ return 1; // exit calib mode
++ }
++ }
++#else
++ if (chip_sub_id == 1) {
++ aicwf_misc_ram_init_8800dc(rwnx_hw);
++ }
++#endif
++
++ ret = aicwf_plat_patch_load_8800dc(rwnx_hw);
++ if (!ret) {
++ aicwf_patch_config_8800dc(rwnx_hw);
++ }
++ }
++
++ return ret;
++}
++#endif
++
++
++/**
++ * rwnx_platform_reset() - Reset the platform
++ *
++ * @rwnx_plat: platform data
++ */
++static int rwnx_platform_reset(struct rwnx_plat *rwnx_plat)
++{
++ u32 regval;
++
++#if defined(AICWF_USB_SUPPORT) || defined(AICWF_SDIO_SUPPORT)
++ return 0;
++#endif
++
++ /* the doc states that SOFT implies FPGA_B_RESET
++ * adding FPGA_B_RESET is clearer */
++ RWNX_REG_WRITE(SOFT_RESET | FPGA_B_RESET, rwnx_plat,
++ RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR);
++ msleep(100);
++
++ regval = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR);
++
++ if (regval & SOFT_RESET) {
++ dev_err(rwnx_platform_get_dev(rwnx_plat), "reset: failed\n");
++ return -EIO;
++ }
++
++ RWNX_REG_WRITE(regval & ~FPGA_B_RESET, rwnx_plat,
++ RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR);
++ msleep(100);
++ return 0;
++}
++
++/**
++ * rwnx_term_save_config() - Save hardware config before reload
++ *
++ * @rwnx_plat: Pointer to platform data
++ *
++ * Return configuration registers values.
++ */
++static void* rwnx_term_save_config(struct rwnx_plat *rwnx_plat)
++{
++ const u32 *reg_list;
++ u32 *reg_value, *res;
++ int i, size = 0;
++
++ if (rwnx_plat->get_config_reg) {
++ size = rwnx_plat->get_config_reg(rwnx_plat, &reg_list);
++ }
++
++ if (size <= 0)
++ return NULL;
++
++ res = kmalloc(sizeof(u32) * size, GFP_KERNEL);
++ if (!res)
++ return NULL;
++
++ reg_value = res;
++ for (i = 0; i < size; i++) {
++ *reg_value++ = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM,
++ *reg_list++);
++ }
++
++ return res;
++}
++
++#if 0
++/**
++ * rwnx_term_restore_config() - Restore hardware config after reload
++ *
++ * @rwnx_plat: Pointer to platform data
++ * @reg_value: Pointer to the values to restore
++ * (obtained with rwnx_term_save_config())
++ *
++ * Restore configuration registers value.
++ */
++static void rwnx_term_restore_config(struct rwnx_plat *rwnx_plat,
++ u32 *reg_value)
++{
++ const u32 *reg_list;
++ int i, size = 0;
++
++ if (!reg_value || !rwnx_plat->get_config_reg)
++ return;
++
++ size = rwnx_plat->get_config_reg(rwnx_plat, &reg_list);
++
++ for (i = 0; i < size; i++) {
++ RWNX_REG_WRITE(*reg_value++, rwnx_plat, RWNX_ADDR_SYSTEM,
++ *reg_list++);
++ }
++}
++#endif
++
++#ifndef CONFIG_RWNX_FHOST
++#if 0
++static int rwnx_check_fw_compatibility(struct rwnx_hw *rwnx_hw)
++{
++ struct ipc_shared_env_tag *shared = rwnx_hw->ipc_env->shared;
++ #ifdef CONFIG_RWNX_FULLMAC
++ struct wiphy *wiphy = rwnx_hw->wiphy;
++ #endif //CONFIG_RWNX_FULLMAC
++ #ifdef CONFIG_RWNX_OLD_IPC
++ int ipc_shared_version = 10;
++ #else //CONFIG_RWNX_OLD_IPC
++ int ipc_shared_version = 11;
++ #endif //CONFIG_RWNX_OLD_IPC
++ int res = 0;
++
++ if(shared->comp_info.ipc_shared_version != ipc_shared_version)
++ {
++ wiphy_err(wiphy, "Different versions of IPC shared version between driver and FW (%d != %d)\n ",
++ ipc_shared_version, shared->comp_info.ipc_shared_version);
++ res = -1;
++ }
++
++ if(shared->comp_info.radarbuf_cnt != IPC_RADARBUF_CNT)
++ {
++ wiphy_err(wiphy, "Different number of host buffers available for Radar events handling "\
++ "between driver and FW (%d != %d)\n", IPC_RADARBUF_CNT,
++ shared->comp_info.radarbuf_cnt);
++ res = -1;
++ }
++
++ if(shared->comp_info.unsuprxvecbuf_cnt != IPC_UNSUPRXVECBUF_CNT)
++ {
++ wiphy_err(wiphy, "Different number of host buffers available for unsupported Rx vectors "\
++ "handling between driver and FW (%d != %d)\n", IPC_UNSUPRXVECBUF_CNT,
++ shared->comp_info.unsuprxvecbuf_cnt);
++ res = -1;
++ }
++
++ #ifdef CONFIG_RWNX_FULLMAC
++ if(shared->comp_info.rxdesc_cnt != IPC_RXDESC_CNT)
++ {
++ wiphy_err(wiphy, "Different number of shared descriptors available for Data RX handling "\
++ "between driver and FW (%d != %d)\n", IPC_RXDESC_CNT,
++ shared->comp_info.rxdesc_cnt);
++ res = -1;
++ }
++ #endif /* CONFIG_RWNX_FULLMAC */
++
++ if(shared->comp_info.rxbuf_cnt != IPC_RXBUF_CNT)
++ {
++ wiphy_err(wiphy, "Different number of host buffers available for Data Rx handling "\
++ "between driver and FW (%d != %d)\n", IPC_RXBUF_CNT,
++ shared->comp_info.rxbuf_cnt);
++ res = -1;
++ }
++
++ if(shared->comp_info.msge2a_buf_cnt != IPC_MSGE2A_BUF_CNT)
++ {
++ wiphy_err(wiphy, "Different number of host buffers available for Emb->App MSGs "\
++ "sending between driver and FW (%d != %d)\n", IPC_MSGE2A_BUF_CNT,
++ shared->comp_info.msge2a_buf_cnt);
++ res = -1;
++ }
++
++ if(shared->comp_info.dbgbuf_cnt != IPC_DBGBUF_CNT)
++ {
++ wiphy_err(wiphy, "Different number of host buffers available for debug messages "\
++ "sending between driver and FW (%d != %d)\n", IPC_DBGBUF_CNT,
++ shared->comp_info.dbgbuf_cnt);
++ res = -1;
++ }
++
++ if(shared->comp_info.bk_txq != NX_TXDESC_CNT0)
++ {
++ wiphy_err(wiphy, "Driver and FW have different sizes of BK TX queue (%d != %d)\n",
++ NX_TXDESC_CNT0, shared->comp_info.bk_txq);
++ res = -1;
++ }
++
++ if(shared->comp_info.be_txq != NX_TXDESC_CNT1)
++ {
++ wiphy_err(wiphy, "Driver and FW have different sizes of BE TX queue (%d != %d)\n",
++ NX_TXDESC_CNT1, shared->comp_info.be_txq);
++ res = -1;
++ }
++
++ if(shared->comp_info.vi_txq != NX_TXDESC_CNT2)
++ {
++ wiphy_err(wiphy, "Driver and FW have different sizes of VI TX queue (%d != %d)\n",
++ NX_TXDESC_CNT2, shared->comp_info.vi_txq);
++ res = -1;
++ }
++
++ if(shared->comp_info.vo_txq != NX_TXDESC_CNT3)
++ {
++ wiphy_err(wiphy, "Driver and FW have different sizes of VO TX queue (%d != %d)\n",
++ NX_TXDESC_CNT3, shared->comp_info.vo_txq);
++ res = -1;
++ }
++
++ #if NX_TXQ_CNT == 5
++ if(shared->comp_info.bcn_txq != NX_TXDESC_CNT4)
++ {
++ wiphy_err(wiphy, "Driver and FW have different sizes of BCN TX queue (%d != %d)\n",
++ NX_TXDESC_CNT4, shared->comp_info.bcn_txq);
++ res = -1;
++ }
++ #else
++ if (shared->comp_info.bcn_txq > 0)
++ {
++ wiphy_err(wiphy, "BCMC enabled in firmware but disabled in driver\n");
++ res = -1;
++ }
++ #endif /* NX_TXQ_CNT == 5 */
++
++ if(shared->comp_info.ipc_shared_size != sizeof(ipc_shared_env))
++ {
++ wiphy_err(wiphy, "Different sizes of IPC shared between driver and FW (%zd != %d)\n",
++ sizeof(ipc_shared_env), shared->comp_info.ipc_shared_size);
++ res = -1;
++ }
++
++ if(shared->comp_info.msg_api != MSG_API_VER)
++ {
++ wiphy_warn(wiphy, "WARNING: Different supported message API versions between "\
++ "driver and FW (%d != %d)\n", MSG_API_VER, shared->comp_info.msg_api);
++ }
++
++ return res;
++}
++#endif
++#endif /* !CONFIG_RWNX_FHOST */
++
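++/* Minimal atoi(): handles an optional leading '-' and decimal digits only,
++ returns 0 if any other character is found */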
++int rwnx_atoi(char *value)
++{
++ int len = 0;
++ int i = 0;
++ int result = 0;
++ int flag = 1;
++
++ if (value[0] == '-') {
++ flag = -1;
++ value++;
++ }
++ len = strlen(value);
++
++ for (i = 0;i < len ;i++) {
++ result = result * 10;
++ if (value[i] >= 48 && value[i] <= 57) {
++ result += value[i] - 48;
++ } else {
++ result = 0;
++ break;
++ }
++ }
++
++ return result * flag;
++}
++
++void get_userconfig_txpwr_lvl_in_fdrv(txpwr_lvl_conf_t *txpwr_lvl)
++{
++ txpwr_lvl->enable = userconfig_info.txpwr_lvl.enable;
++ txpwr_lvl->dsss = userconfig_info.txpwr_lvl.dsss;
++ txpwr_lvl->ofdmlowrate_2g4 = userconfig_info.txpwr_lvl.ofdmlowrate_2g4;
++ txpwr_lvl->ofdm64qam_2g4 = userconfig_info.txpwr_lvl.ofdm64qam_2g4;
++ txpwr_lvl->ofdm256qam_2g4 = userconfig_info.txpwr_lvl.ofdm256qam_2g4;
++ txpwr_lvl->ofdm1024qam_2g4 = userconfig_info.txpwr_lvl.ofdm1024qam_2g4;
++ txpwr_lvl->ofdmlowrate_5g = userconfig_info.txpwr_lvl.ofdmlowrate_5g;
++ txpwr_lvl->ofdm64qam_5g = userconfig_info.txpwr_lvl.ofdm64qam_5g;
++ txpwr_lvl->ofdm256qam_5g = userconfig_info.txpwr_lvl.ofdm256qam_5g;
++ txpwr_lvl->ofdm1024qam_5g = userconfig_info.txpwr_lvl.ofdm1024qam_5g;
++
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl->enable);
++ AICWFDBG(LOGINFO, "%s:dsss:%d\r\n", __func__, txpwr_lvl->dsss);
++ AICWFDBG(LOGINFO, "%s:ofdmlowrate_2g4:%d\r\n", __func__, txpwr_lvl->ofdmlowrate_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdm64qam_2g4:%d\r\n", __func__, txpwr_lvl->ofdm64qam_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdm256qam_2g4:%d\r\n", __func__, txpwr_lvl->ofdm256qam_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdm1024qam_2g4:%d\r\n", __func__, txpwr_lvl->ofdm1024qam_2g4);
++ AICWFDBG(LOGINFO, "%s:ofdmlowrate_5g:%d\r\n", __func__, txpwr_lvl->ofdmlowrate_5g);
++ AICWFDBG(LOGINFO, "%s:ofdm64qam_5g:%d\r\n", __func__, txpwr_lvl->ofdm64qam_5g);
++ AICWFDBG(LOGINFO, "%s:ofdm256qam_5g:%d\r\n", __func__, txpwr_lvl->ofdm256qam_5g);
++ AICWFDBG(LOGINFO, "%s:ofdm1024qam_5g:%d\r\n", __func__, txpwr_lvl->ofdm1024qam_5g);
++}
++
++void get_userconfig_txpwr_lvl_v2_in_fdrv(txpwr_lvl_conf_v2_t *txpwr_lvl_v2)
++{
++ *txpwr_lvl_v2 = userconfig_info.txpwr_lvl_v2;
++
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v2->enable);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11b_11ag_2g4[11]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v2->pwrlvl_11n_11ac_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v2->pwrlvl_11ax_2g4[11]);
++}
++
++void get_userconfig_txpwr_lvl_v3_in_fdrv(txpwr_lvl_conf_v3_t *txpwr_lvl_v3)
++{
++ *txpwr_lvl_v3 = userconfig_info.txpwr_lvl_v3;
++
++ AICWFDBG(LOGINFO, "%s:enable:%d\r\n", __func__, txpwr_lvl_v3->enable);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_1m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_2m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_5m5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_11m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_6m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_9m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_12m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_18m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_24m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_36m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_48m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11b_11ag_54m_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11b_11ag_2g4[11]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_2g4:%d\r\n",__func__, txpwr_lvl_v3->pwrlvl_11n_11ac_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_2g4:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_2g4[11]);
++
++ AICWFDBG(LOGINFO, "%s:lvl_11a_1m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_2m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_5m5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_11m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_6m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_9m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_12m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_18m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_24m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_36m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_48m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11a_54m_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11a_5g[11]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11n_11ac_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11n_11ac_5g[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs0_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[0]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs1_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[1]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs2_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[2]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs3_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[3]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs4_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[4]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs5_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[5]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs6_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[6]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs7_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[7]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs8_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[8]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs9_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[9]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs10_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[10]);
++ AICWFDBG(LOGINFO, "%s:lvl_11ax_mcs11_5g:%d\r\n", __func__, txpwr_lvl_v3->pwrlvl_11ax_5g[11]);
++}
++
++
++void get_userconfig_txpwr_ofst_in_fdrv(txpwr_ofst_conf_t *txpwr_ofst)
++{
++ txpwr_ofst->enable = userconfig_info.txpwr_ofst.enable;
++ txpwr_ofst->chan_1_4 = userconfig_info.txpwr_ofst.chan_1_4;
++ txpwr_ofst->chan_5_9 = userconfig_info.txpwr_ofst.chan_5_9;
++ txpwr_ofst->chan_10_13 = userconfig_info.txpwr_ofst.chan_10_13;
++ txpwr_ofst->chan_36_64 = userconfig_info.txpwr_ofst.chan_36_64;
++ txpwr_ofst->chan_100_120 = userconfig_info.txpwr_ofst.chan_100_120;
++ txpwr_ofst->chan_122_140 = userconfig_info.txpwr_ofst.chan_122_140;
++ txpwr_ofst->chan_142_165 = userconfig_info.txpwr_ofst.chan_142_165;
++
++ AICWFDBG(LOGINFO, "%s:enable :%d\r\n", __func__, txpwr_ofst->enable);
++ AICWFDBG(LOGINFO, "%s:chan_1_4 :%d\r\n", __func__, txpwr_ofst->chan_1_4);
++ AICWFDBG(LOGINFO, "%s:chan_5_9 :%d\r\n", __func__, txpwr_ofst->chan_5_9);
++ AICWFDBG(LOGINFO, "%s:chan_10_13 :%d\r\n", __func__, txpwr_ofst->chan_10_13);
++ AICWFDBG(LOGINFO, "%s:chan_36_64 :%d\r\n", __func__, txpwr_ofst->chan_36_64);
++ AICWFDBG(LOGINFO, "%s:chan_100_120:%d\r\n", __func__, txpwr_ofst->chan_100_120);
++ AICWFDBG(LOGINFO, "%s:chan_122_140:%d\r\n", __func__, txpwr_ofst->chan_122_140);
++ AICWFDBG(LOGINFO, "%s:chan_142_165:%d\r\n", __func__, txpwr_ofst->chan_142_165);
++}
++
++void get_userconfig_txpwr_loss(txpwr_loss_conf_t *txpwr_loss)
++{
++ txpwr_loss->loss_enable = userconfig_info.txpwr_loss.loss_enable;
++ txpwr_loss->loss_value = userconfig_info.txpwr_loss.loss_value;
++
++ AICWFDBG(LOGINFO, "%s:loss_enable:%d\r\n", __func__, txpwr_loss->loss_enable);
++ AICWFDBG(LOGINFO, "%s:loss_value:%d\r\n", __func__, txpwr_loss->loss_value);
++}
++
++void get_userconfig_xtal_cap(xtal_cap_conf_t *xtal_cap)
++{
++ *xtal_cap = userconfig_info.xtal_cap;
++
++ AICWFDBG(LOGINFO, "%s:enable :%d\r\n", __func__, xtal_cap->enable);
++ AICWFDBG(LOGINFO, "%s:xtal_cap :%d\r\n", __func__, xtal_cap->xtal_cap);
++ AICWFDBG(LOGINFO, "%s:xtal_cap_fine:%d\r\n", __func__, xtal_cap->xtal_cap_fine);
++}
++
++void rwnx_plat_nvram_set_value(char *command, char *value)
++{
++ //TODO send command
++ AICWFDBG(LOGINFO, "%s:command=%s value=%s\n", __func__, command, value);
++ if (!strcmp(command, "enable")) {
++ userconfig_info.txpwr_lvl.enable = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v2.enable = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.enable = rwnx_atoi(value);
++ } else if (!strcmp(command, "dsss")) {
++ userconfig_info.txpwr_lvl.dsss = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdmlowrate_2g4")) {
++ userconfig_info.txpwr_lvl.ofdmlowrate_2g4 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdm64qam_2g4")) {
++ userconfig_info.txpwr_lvl.ofdm64qam_2g4 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdm256qam_2g4")) {
++ userconfig_info.txpwr_lvl.ofdm256qam_2g4 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdm1024qam_2g4")) {
++ userconfig_info.txpwr_lvl.ofdm1024qam_2g4 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdmlowrate_5g")) {
++ userconfig_info.txpwr_lvl.ofdmlowrate_5g = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdm64qam_5g")) {
++ userconfig_info.txpwr_lvl.ofdm64qam_5g = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdm256qam_5g")) {
++ userconfig_info.txpwr_lvl.ofdm256qam_5g = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofdm1024qam_5g")) {
++ userconfig_info.txpwr_lvl.ofdm1024qam_5g = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_1m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[0] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[0] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_2m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[1] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[1] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_5m5_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[2] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[2] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_11m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[3] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[3] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_6m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[4] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[4] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_9m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[5] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[5] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_12m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[6] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[6] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_18m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[7] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[7] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_24m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[8] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[8] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_36m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[9] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[9] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_48m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[10] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[10] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11b_11ag_54m_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11b_11ag_2g4[11] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11b_11ag_2g4[11] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs0_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[0] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[0] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs1_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[1] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[1] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs2_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[2] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[2] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs3_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[3] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[3] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs4_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[4] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[4] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs5_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[5] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[5] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs6_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[6] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[6] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs7_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[7] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[7] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs8_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[8] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[8] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs9_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11n_11ac_2g4[9] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_2g4[9] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs0_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[0] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[0] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs1_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[1] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[1] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs2_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[2] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[2] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs3_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[3] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[3] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs4_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[4] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[4] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs5_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[5] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[5] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs6_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[6] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[6] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs7_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[7] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[7] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs8_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[8] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[8] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs9_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[9] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[9] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs10_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[10] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[10] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs11_2g4")) {
++ userconfig_info.txpwr_lvl_v2.pwrlvl_11ax_2g4[11] = rwnx_atoi(value);
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_2g4[11] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_1m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[0] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_2m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[1] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_5m5_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[2] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_11m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[3] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_6m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[4] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_9m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[5] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_12m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[6] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_18m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[7] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_24m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[8] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_36m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[9] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_48m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[10] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11a_54m_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11a_5g[11] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs0_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[0] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs1_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[1] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs2_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[2] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs3_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[3] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs4_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[4] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs5_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[5] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs6_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[6] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs7_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[7] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs8_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[8] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11n_11ac_mcs9_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11n_11ac_5g[9] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs0_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[0] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs1_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[1] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs2_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[2] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs3_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[3] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs4_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[4] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs5_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[5] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs6_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[6] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs7_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[7] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs8_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[8] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs9_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[9] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs10_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[10] = rwnx_atoi(value);
++ } else if (!strcmp(command, "lvl_11ax_mcs11_5g")) {
++ userconfig_info.txpwr_lvl_v3.pwrlvl_11ax_5g[11] = rwnx_atoi(value);
++ } else if (!strcmp(command, "loss_enable")) {
++ userconfig_info.txpwr_loss.loss_enable = rwnx_atoi(value);
++ } else if (!strcmp(command, "loss_value")) {
++ userconfig_info.txpwr_loss.loss_value = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_enable")) {
++ userconfig_info.txpwr_ofst.enable = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_1_4")) {
++ userconfig_info.txpwr_ofst.chan_1_4 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_5_9")) {
++ userconfig_info.txpwr_ofst.chan_5_9 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_10_13")) {
++ userconfig_info.txpwr_ofst.chan_10_13 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_36_64")) {
++ userconfig_info.txpwr_ofst.chan_36_64 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_100_120")) {
++ userconfig_info.txpwr_ofst.chan_100_120 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_122_140")) {
++ userconfig_info.txpwr_ofst.chan_122_140 = rwnx_atoi(value);
++ } else if (!strcmp(command, "ofst_chan_142_165")) {
++ userconfig_info.txpwr_ofst.chan_142_165 = rwnx_atoi(value);
++ } else if (!strcmp(command, "xtal_enable")) {
++ userconfig_info.xtal_cap.enable = rwnx_atoi(value);
++ } else if (!strcmp(command, "xtal_cap")) {
++ userconfig_info.xtal_cap.xtal_cap = rwnx_atoi(value);
++ } else if (!strcmp(command, "xtal_cap_fine")) {
++ userconfig_info.xtal_cap.xtal_cap_fine = rwnx_atoi(value);
++ } else {
++ AICWFDBG(LOGERROR, "invalid cmd: %s\n", command);
++ }
++
++}
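++
++/*
++ * Illustrative use (key and value are examples, not taken from a real
++ * config file): rwnx_plat_nvram_set_value("xtal_cap", "24") stores 24 in
++ * userconfig_info.xtal_cap.xtal_cap, which get_userconfig_xtal_cap() later
++ * copies back to the caller.
++ */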
++
++void rwnx_plat_userconfig_parsing(char *buffer, int size)
++{
++ int i = 0;
++ int parse_state = 0;
++ char command[30];
++ char value[100];
++ int char_counter = 0;
++
++ memset(command, 0, 30);
++ memset(value, 0, 100);
++
++ for (i = 0; i < size; i++) {
++ //Send command or print nvram log when char is \r or \n
++ if (buffer[i] == 0x0a || buffer[i] == 0x0d) {
++ if (command[0] != 0 && value[0] != 0) {
++ if (parse_state == PRINT) {
++ AICWFDBG(LOGINFO, "%s:%s\r\n", __func__, value);
++ } else if (parse_state == GET_VALUE) {
++ rwnx_plat_nvram_set_value(command, value);
++ }
++ }
++ //Reset command value and char_counter
++ memset(command, 0, 30);
++ memset(value, 0, 100);
++ char_counter = 0;
++ parse_state = INIT;
++ continue;
++ }
++
++ //Switch parser state
++ if (parse_state == INIT) {
++ if (buffer[i] == '#') {
++ parse_state = PRINT;
++ continue;
++ } else if (buffer[i] == 0x0a || buffer[i] == 0x0d) {
++ parse_state = INIT;
++ continue;
++ } else {
++ parse_state = CMD;
++ }
++ }
++
++ //Fill data to command and value
++ if (parse_state == PRINT) {
++ command[0] = 0x01;
++ value[char_counter] = buffer[i];
++ char_counter++;
++ } else if (parse_state == CMD) {
++ if (command[0] != 0 && buffer[i] == '=') {
++ parse_state = GET_VALUE;
++ char_counter = 0;
++ continue;
++ }
++ command[char_counter] = buffer[i];
++ char_counter++;
++ } else if (parse_state == GET_VALUE) {
++ value[char_counter] = buffer[i];
++ char_counter++;
++ }
++ }
++}
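++
++/*
++ * For reference, the parser above accepts a simple "key=value" text file,
++ * one entry per line, where a line starting with '#' is only echoed to the
++ * log. An illustrative fragment (keys taken from rwnx_plat_nvram_set_value,
++ * values are examples only):
++ *
++ *   # aic8800 userconfig
++ *   enable=1
++ *   lvl_11ax_mcs0_2g4=18
++ *   ofst_chan_36_64=2
++ *   xtal_cap=24
++ */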
++
++/**
++ * rwnx_plat_userconfig_load() - Load the userconfig file (aic_userconfig.txt)
++ * @rwnx_hw: Main driver data, used to pick the chip-specific loader
++ */
++static int rwnx_plat_userconfig_load(struct rwnx_hw *rwnx_hw)
++{
++ if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DC) {
++ rwnx_plat_userconfig_load_8800dc(rwnx_hw);
++ } else if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800DW) {
++ rwnx_plat_userconfig_load_8800dw(rwnx_hw);
++ } else if (rwnx_hw->usbdev->chipid == PRODUCT_ID_AIC8800D81) {
++ rwnx_plat_userconfig_load_8800d80(rwnx_hw);
++ }
++
++ return 0;
++}
++
++
++/**
++ * rwnx_platform_on() - Start the platform
++ *
++ * @rwnx_hw: Main driver data
++ * @config: Config to restore (NULL if nothing to restore)
++ *
++ * It starts the platform :
++ * - load fw and ucodes
++ * - initialize IPC
++ * - boot the fw
++ * - enable link communication/IRQ
++ *
++ * Called by 802.11 part
++ */
++int rwnx_platform_on(struct rwnx_hw *rwnx_hw, void *config)
++{
++ #if 0
++ u8 *shared_ram;
++ #endif
++#ifdef CONFIG_ROM_PATCH_EN
++ int ret = 0;
++#endif
++
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ if (rwnx_plat->enabled)
++ return 0;
++
++ #if 0
++ if (rwnx_platform_reset(rwnx_plat))
++ return -1;
++
++ rwnx_plat_mpif_sel(rwnx_plat);
++
++ if ((ret = rwnx_plat_fcu_load(rwnx_hw)))
++ return ret;
++ if ((ret = rwnx_plat_agc_load(rwnx_plat)))
++ return ret;
++ if ((ret = rwnx_ldpc_load(rwnx_hw)))
++ return ret;
++ if ((ret = rwnx_plat_lmac_load(rwnx_plat)))
++ return ret;
++
++ shared_ram = RWNX_ADDR(rwnx_plat, RWNX_ADDR_SYSTEM, SHARED_RAM_START_ADDR);
++ if ((ret = rwnx_ipc_init(rwnx_hw, shared_ram)))
++ return ret;
++
++ if ((ret = rwnx_plat->enable(rwnx_hw)))
++ return ret;
++ RWNX_REG_WRITE(BOOTROM_ENABLE, rwnx_plat,
++ RWNX_ADDR_SYSTEM, SYSCTRL_MISC_CNTL_ADDR);
++
++ #if 0
++ if ((ret = rwnx_fw_trace_config_filters(rwnx_get_shared_trace_buf(rwnx_hw),
++ rwnx_ipc_fw_trace_desc_get(rwnx_hw),
++ rwnx_hw->mod_params->ftl)))
++ #endif
++
++ #ifndef CONFIG_RWNX_FHOST
++ if ((ret = rwnx_check_fw_compatibility(rwnx_hw)))
++ {
++ rwnx_hw->plat->disable(rwnx_hw);
++ tasklet_kill(&rwnx_hw->task);
++ rwnx_ipc_deinit(rwnx_hw);
++ return ret;
++ }
++ #endif /* !CONFIG_RWNX_FHOST */
++
++ if (config)
++ rwnx_term_restore_config(rwnx_plat, config);
++
++ rwnx_ipc_start(rwnx_hw);
++ #else
++ #ifndef CONFIG_ROM_PATCH_EN
++ #ifdef CONFIG_DOWNLOAD_FW
++ if ((ret = rwnx_plat_fmac_load(rwnx_hw)))
++ return ret;
++ #endif /* CONFIG_DOWNLOAD_FW */
++ #endif /* !CONFIG_ROM_PATCH_EN */
++ #endif
++
++#ifdef CONFIG_ROM_PATCH_EN
++ ret = rwnx_plat_patch_load(rwnx_hw);
++ if (ret) {
++ return ret;
++ }
++#endif
++
++ rwnx_plat_userconfig_load(rwnx_hw);
++
++
++ //rwnx_plat->enabled = true;
++
++ return 0;
++}
++
++/**
++ * rwnx_platform_off() - Stop the platform
++ *
++ * @rwnx_hw: Main driver data
++ * @config: Updated with pointer to config, to be able to restore it with
++ * rwnx_platform_on(). It's up to the caller to free the config. Set to NULL
++ * if configuration is not needed.
++ *
++ * Called by 802.11 part
++ */
++void rwnx_platform_off(struct rwnx_hw *rwnx_hw, void **config)
++{
++#if defined(AICWF_USB_SUPPORT) || defined(AICWF_SDIO_SUPPORT)
++ tasklet_kill(&rwnx_hw->task);
++ rwnx_hw->plat->enabled = false;
++ return ;
++#endif
++
++ if (!rwnx_hw->plat->enabled) {
++ if (config)
++ *config = NULL;
++ return;
++ }
++
++#ifdef AICWF_PCIE_SUPPORT
++ rwnx_ipc_stop(rwnx_hw);
++#endif
++
++ if (config)
++ *config = rwnx_term_save_config(rwnx_hw->plat);
++
++ rwnx_hw->plat->disable(rwnx_hw);
++
++ tasklet_kill(&rwnx_hw->task);
++
++#ifdef AICWF_PCIE_SUPPORT
++ rwnx_ipc_deinit(rwnx_hw);
++#endif
++
++
++ rwnx_platform_reset(rwnx_hw->plat);
++
++ rwnx_hw->plat->enabled = false;
++}
++
++/**
++ * rwnx_platform_init() - Initialize the platform
++ *
++ * @rwnx_plat: platform data (already updated by platform driver)
++ * @platform_data: Pointer to store the main driver data pointer (aka rwnx_hw)
++ * That will be set as driver data for the platform driver
++ * Return: 0 on success, < 0 otherwise
++ *
++ * Called by the platform driver after it has been probed
++ */
++int rwnx_platform_init(struct rwnx_plat *rwnx_plat, void **platform_data)
++{
++ int ret = 0;
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++ rwnx_plat->enabled = false;
++ rwnx_plat->wait_disconnect_cb = false;
++ g_rwnx_plat = rwnx_plat;
++
++#if defined CONFIG_RWNX_FULLMAC
++ AICWFDBG(LOGINFO, "%s rwnx_cfg80211_init enter \r\n", __func__);
++ ret = rwnx_cfg80211_init(rwnx_plat, platform_data);
++ AICWFDBG(LOGINFO, "%s rwnx_cfg80211_init exit \r\n", __func__);
++#if defined(AICWF_USB_SUPPORT) && defined(CONFIG_VENDOR_GPIO)
++ // initialize gpiob2, gpiob3, gpiob5 to output mode and set output to 0
++ rwnx_send_dbg_gpio_init_req(rwnx_plat->usbdev->rwnx_hw, 2, 1, 0);//gpiob 2 = 0
++ rwnx_send_dbg_gpio_init_req(rwnx_plat->usbdev->rwnx_hw, 3, 1, 0);//gpiob 3 = 0
++ rwnx_send_dbg_gpio_init_req(rwnx_plat->usbdev->rwnx_hw, 5, 1, 0);//gpiob 5 = 0
++
++ // read gpiob2
++ //struct dbg_gpio_read_cfm gpio_rd_cfm;
++ //rwnx_send_dbg_gpio_read_req(rwnx_plat->usbdev->rwnx_hw, 2, &gpio_rd_cfm);
++ //AICWFDBG(LOGINFO, "gpio_rd_cfm idx:%d val:%d\n", gpio_rd_cfm.gpio_idx, gpio_rd_cfm.gpio_val);
++
++ // set gpiob2 output to 1
++ //rwnx_send_dbg_gpio_write_req(rwnx_plat->usbdev->rwnx_hw, 2, 1);
++#endif
++
++ return ret;
++#elif defined CONFIG_RWNX_FHOST
++ return rwnx_fhost_init(rwnx_plat, platform_data);
++#endif
++}
++
++/**
++ * rwnx_platform_deinit() - Deinitialize the platform
++ *
++ * @rwnx_hw: main driver data
++ *
++ * Called by the platform driver after it is removed
++ */
++void rwnx_platform_deinit(struct rwnx_hw *rwnx_hw)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++
++#if defined CONFIG_RWNX_FULLMAC
++ rwnx_cfg80211_deinit(rwnx_hw);
++#elif defined CONFIG_RWNX_FHOST
++ rwnx_fhost_deinit(rwnx_hw);
++#endif
++}
++
++/**
++ * rwnx_platform_register_drv() - Register all possible platform drivers
++ */
++int rwnx_platform_register_drv(void)
++{
++ return rwnx_pci_register_drv();
++}
++
++
++/**
++ * rwnx_platform_unregister_drv() - Unregister all platform drivers
++ */
++void rwnx_platform_unregister_drv(void)
++{
++ return rwnx_pci_unregister_drv();
++}
++
++struct device *rwnx_platform_get_dev(struct rwnx_plat *rwnx_plat)
++{
++#ifdef AICWF_SDIO_SUPPORT
++ return rwnx_plat->sdiodev->dev;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ return rwnx_plat->usbdev->dev;
++#endif
++ return &(rwnx_plat->pci_dev->dev);
++}
++
++
++#ifndef CONFIG_RWNX_SDM
++MODULE_FIRMWARE(RWNX_AGC_FW_NAME);
++MODULE_FIRMWARE(RWNX_FCU_FW_NAME);
++MODULE_FIRMWARE(RWNX_LDPC_RAM_NAME);
++#endif
++MODULE_FIRMWARE(RWNX_MAC_FW_NAME);
++#ifndef CONFIG_RWNX_TL4
++MODULE_FIRMWARE(RWNX_MAC_FW_NAME2);
++#endif
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_platform.h
+@@ -0,0 +1,142 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_platform.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_PLATFORM_H_
++#define _RWNX_PLATFORM_H_
++
++#include <linux/pci.h>
++#include "lmac_msg.h"
++
++
++#define RWNX_CONFIG_FW_NAME "rwnx_settings.ini"
++#define RWNX_PHY_CONFIG_TRD_NAME "rwnx_trident.ini"
++#define RWNX_PHY_CONFIG_KARST_NAME "rwnx_karst.ini"
++#define RWNX_AGC_FW_NAME "agcram.bin"
++#define RWNX_LDPC_RAM_NAME "ldpcram.bin"
++#ifdef CONFIG_RWNX_FULLMAC
++#define RWNX_MAC_FW_BASE_NAME "fmacfw"
++#elif defined CONFIG_RWNX_FHOST
++#define RWNX_MAC_FW_BASE_NAME "fhostfw"
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#ifdef CONFIG_RWNX_TL4
++#define RWNX_MAC_FW_NAME RWNX_MAC_FW_BASE_NAME".hex"
++#else
++#define RWNX_MAC_FW_NAME RWNX_MAC_FW_BASE_NAME".ihex"
++#define RWNX_MAC_FW_NAME2 RWNX_MAC_FW_BASE_NAME".bin"
++#endif
++
++#define RWNX_FCU_FW_NAME "fcuram.bin"
++#ifdef CONFIG_DPD
++#define FW_DPDRESULT_NAME_8800DC "aic_dpdresult_8800dc.bin"
++#endif
++
++enum {
++ FW_NORMAL_MODE = 0,
++ FW_RFTEST_MODE = 1,
++ FW_BLE_SCAN_WAKEUP_MODE = 2,
++ FW_M2D_OTA_MODE = 3,
++ FW_DPDCALIB_MODE = 4,
++ FW_BLE_SCAN_AD_FILTER_MODE = 5,
++};
++
++
++/**
++ * Type of memory to access (cf rwnx_plat.get_address)
++ *
++ * @RWNX_ADDR_CPU To access memory of the embedded CPU
++ * @RWNX_ADDR_SYSTEM To access memory/registers of one subsystem of the
++ * embedded system
++ *
++ */
++enum rwnx_platform_addr {
++ RWNX_ADDR_CPU,
++ RWNX_ADDR_SYSTEM,
++ RWNX_ADDR_MAX,
++};
++
++struct rwnx_hw;
++
++/**
++ * struct rwnx_plat - Operation pointers for RWNX PCI platform
++ *
++ * @pci_dev: pointer to pci dev
++ * @enabled: Set if embedded platform has been enabled (i.e. fw loaded and
++ * ipc started)
++ * @enable: Configure communication with the fw (i.e. configure the transfers
++ * enable and register interrupt)
++ * @disable: Stop communication with the fw
++ * @deinit: Free all resources allocated for the embedded platform
++ * @get_address: Return the virtual address to access the requested address on
++ * the platform.
++ * @ack_irq: Acknowledge the irq at link level.
++ * @get_config_reg: Return the list (size + pointer) of registers to restore in
++ * order to reload the platform while keeping the current configuration.
++ *
++ * @priv: Private data for the link driver
++ */
++struct rwnx_plat {
++ struct pci_dev *pci_dev;
++
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev;
++#endif
++
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev;
++#endif
++ bool enabled;
++ bool wait_disconnect_cb;
++
++ int (*enable)(struct rwnx_hw *rwnx_hw);
++ int (*disable)(struct rwnx_hw *rwnx_hw);
++ void (*deinit)(struct rwnx_plat *rwnx_plat);
++ u8* (*get_address)(struct rwnx_plat *rwnx_plat, int addr_name,
++ unsigned int offset);
++ void (*ack_irq)(struct rwnx_plat *rwnx_plat);
++ int (*get_config_reg)(struct rwnx_plat *rwnx_plat, const u32 **list);
++
++ u8 priv[0] __aligned(sizeof(void *));
++};
++
++#define RWNX_ADDR(plat, base, offset) \
++ plat->get_address(plat, base, offset)
++
++#define RWNX_REG_READ(plat, base, offset) \
++ readl(plat->get_address(plat, base, offset))
++
++#define RWNX_REG_WRITE(val, plat, base, offset) \
++ writel(val, plat->get_address(plat, base, offset))
++
++extern struct rwnx_plat *g_rwnx_plat;
++
++int rwnx_platform_init(struct rwnx_plat *rwnx_plat, void **platform_data);
++void rwnx_platform_deinit(struct rwnx_hw *rwnx_hw);
++
++int rwnx_platform_on(struct rwnx_hw *rwnx_hw, void *config);
++void rwnx_platform_off(struct rwnx_hw *rwnx_hw, void **config);
++
++int is_file_exist(char* name);
++void get_userconfig_txpwr_lvl_in_fdrv(txpwr_lvl_conf_t *txpwr_lvl);
++void get_userconfig_txpwr_lvl_v2_in_fdrv(txpwr_lvl_conf_v2_t *txpwr_lvl_v2);
++void get_userconfig_txpwr_lvl_v3_in_fdrv(txpwr_lvl_conf_v3_t *txpwr_lvl_v3);
++void get_userconfig_txpwr_ofst_in_fdrv(txpwr_ofst_conf_t *txpwr_ofst);
++void get_userconfig_txpwr_loss(txpwr_loss_conf_t *txpwr_loss);
++int rwnx_platform_register_drv(void);
++void rwnx_platform_unregister_drv(void);
++
++extern struct device *rwnx_platform_get_dev(struct rwnx_plat *rwnx_plat);
++
++static inline unsigned int rwnx_platform_get_irq(struct rwnx_plat *rwnx_plat)
++{
++ return rwnx_plat->pci_dev->irq;
++}
++
++#endif /* _RWNX_PLATFORM_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_prof.h
+@@ -0,0 +1,133 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_prof.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _RWNX_PROF_H_
++#define _RWNX_PROF_H_
++
++#include "reg_access.h"
++#include "rwnx_platform.h"
++
++static inline void rwnx_prof_set(struct rwnx_hw *rwnx_hw, int val)
++{
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ RWNX_REG_WRITE(val, rwnx_plat, RWNX_ADDR_SYSTEM, NXMAC_SW_SET_PROFILING_ADDR);
++}
++
++static inline void rwnx_prof_clear(struct rwnx_hw *rwnx_hw, int val)
++{
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ RWNX_REG_WRITE(val, rwnx_plat, RWNX_ADDR_SYSTEM, NXMAC_SW_CLEAR_PROFILING_ADDR);
++}
++
++#if 0
++/* Defines for SW Profiling registers values */
++enum {
++ TX_IPC_IRQ,
++ TX_IPC_EVT,
++ TX_PREP_EVT,
++ TX_DMA_IRQ,
++ TX_MAC_IRQ,
++ TX_PAYL_HDL,
++ TX_CFM_EVT,
++ TX_IPC_CFM,
++ RX_MAC_IRQ, // 8
++ RX_TRIGGER_EVT,
++ RX_DMA_IRQ,
++ RX_DMA_EVT,
++ RX_IPC_IND,
++ RX_MPDU_XFER,
++ DBG_PROF_MAX
++};
++#endif
++
++enum {
++ SW_PROF_HOSTBUF_IDX = 12,
++ /****** IPC IRQs related signals ******/
++ /* E2A direction */
++ SW_PROF_IRQ_E2A_RXDESC = 16, // leave the lower 16 bits available for the LMAC FW
++ SW_PROF_IRQ_E2A_TXCFM,
++ SW_PROF_IRQ_E2A_DBG,
++ SW_PROF_IRQ_E2A_MSG,
++ SW_PROF_IPC_MSGPUSH,
++ SW_PROF_MSGALLOC,
++ SW_PROF_MSGIND,
++ SW_PROF_DBGIND,
++
++ /* A2E direction */
++ SW_PROF_IRQ_A2E_TXCFM_BACK,
++
++ /****** Driver functions related signals ******/
++ SW_PROF_WAIT_QUEUE_STOP,
++ SW_PROF_WAIT_QUEUE_WAKEUP,
++ SW_PROF_RWNXDATAIND,
++ SW_PROF_RWNX_IPC_IRQ_HDLR,
++ SW_PROF_RWNX_IPC_THR_IRQ_HDLR,
++ SW_PROF_IEEE80211RX,
++ SW_PROF_RWNX_PATTERN,
++ SW_PROF_MAX
++};
++
++// [LT]For debug purpose only
++#if (0)
++#define SW_PROF_CHAN_CTXT_CFM_HDL_BIT (21)
++#define SW_PROF_CHAN_CTXT_CFM_BIT (22)
++#define SW_PROF_CHAN_CTXT_CFM_SWDONE_BIT (23)
++#define SW_PROF_CHAN_CTXT_PUSH_BIT (24)
++#define SW_PROF_CHAN_CTXT_QUEUE_BIT (25)
++#define SW_PROF_CHAN_CTXT_TX_BIT (26)
++#define SW_PROF_CHAN_CTXT_TX_PAUSE_BIT (27)
++#define SW_PROF_CHAN_CTXT_PSWTCH_BIT (28)
++#define SW_PROF_CHAN_CTXT_SWTCH_BIT (29)
++
++// TO DO: update this
++
++#define REG_SW_SET_PROFILING_CHAN(env, bit) \
++ rwnx_prof_set((struct rwnx_hw*)env, BIT(bit))
++
++#define REG_SW_CLEAR_PROFILING_CHAN(env, bit) \
++ rwnx_prof_clear((struct rwnx_hw*)env, BIT(bit))
++
++#else
++#define SW_PROF_CHAN_CTXT_CFM_HDL_BIT (0)
++#define SW_PROF_CHAN_CTXT_CFM_BIT (0)
++#define SW_PROF_CHAN_CTXT_CFM_SWDONE_BIT (0)
++#define SW_PROF_CHAN_CTXT_PUSH_BIT (0)
++#define SW_PROF_CHAN_CTXT_QUEUE_BIT (0)
++#define SW_PROF_CHAN_CTXT_TX_BIT (0)
++#define SW_PROF_CHAN_CTXT_TX_PAUSE_BIT (0)
++#define SW_PROF_CHAN_CTXT_PSWTCH_BIT (0)
++#define SW_PROF_CHAN_CTXT_SWTCH_BIT (0)
++
++#define REG_SW_SET_PROFILING_CHAN(env, bit) do {} while (0)
++#define REG_SW_CLEAR_PROFILING_CHAN(env, bit) do {} while (0)
++#endif
++
++#ifdef CONFIG_RWNX_SW_PROFILING
++/* Macros for SW Profiling register access */
++#define REG_SW_SET_PROFILING(env, bit) \
++ rwnx_prof_set((struct rwnx_hw*)env, BIT(bit))
++
++#define REG_SW_SET_HOSTBUF_IDX_PROFILING(env, val) \
++ rwnx_prof_set((struct rwnx_hw*)env, val<<(SW_PROF_HOSTBUF_IDX))
++
++#define REG_SW_CLEAR_PROFILING(env, bit) \
++ rwnx_prof_clear((struct rwnx_hw*)env, BIT(bit))
++
++#define REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env) \
++ rwnx_prof_clear((struct rwnx_hw*)env,0x0F<<(SW_PROF_HOSTBUF_IDX))
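++
++/*
++ * Illustration only: REG_SW_SET_HOSTBUF_IDX_PROFILING(rwnx_hw, 5) writes
++ * 0x5 << 12 to the "set" profiling register, i.e. it raises bits 12 and 14,
++ * and REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(rwnx_hw) clears the whole 4-bit
++ * field again (assuming the usual set/clear register semantics).
++ */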
++
++#else
++#define REG_SW_SET_PROFILING(env, value) do {} while (0)
++#define REG_SW_CLEAR_PROFILING(env, value) do {} while (0)
++#define REG_SW_SET_HOSTBUF_IDX_PROFILING(env, val) do {} while (0)
++#define REG_SW_CLEAR_HOSTBUF_IDX_PROFILING(env) do {} while (0)
++#endif
++
++#endif /* _RWNX_PROF_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_radar.c
+@@ -0,0 +1,1644 @@
++/**
++******************************************************************************
++ *
++ * @file rwnx_radar.c
++ *
++ * @brief Functions to handle radar detection
++ * Radar detection is copied (and adapted) from ath driver source code.
++ *
++ * Copyright (c) 2012 Neratec Solutions AG
++ * Copyright (C) RivieraWaves 2015-2019
++ *
++ ******************************************************************************
++ */
++#include <linux/list.h>
++#include <linux/kernel.h>
++#include <linux/ktime.h>
++#include <net/mac80211.h>
++
++#include "rwnx_radar.h"
++#include "rwnx_defs.h"
++#include "rwnx_msg_tx.h"
++#include "rwnx_events.h"
++#include "rwnx_compat.h"
++
++/*
++ * tolerated deviation of radar time stamp in usecs on both sides
++ * TODO: this might need to be HW-dependent
++ */
++#define PRI_TOLERANCE 16
++
++/**
++ * struct radar_types - contains array of patterns defined for one DFS domain
++ * @region: DFS regulatory domain
++ * @num_radar_types: number of radar types to follow
++ * @spec_riu: radar detector specs array (RIU)
++ * @spec_fcu: radar detector specs array (FCU)
++ */
++struct radar_types {
++ enum nl80211_dfs_regions region;
++ u32 num_radar_types;
++ const struct radar_detector_specs *spec_riu;
++ const struct radar_detector_specs *spec_fcu;
++};
++
++/**
++ * Type of radar waveform:
++ * RADAR_WAVEFORM_SHORT : waveform defined by
++ * - pulse width
++ * - pulse interval in a burst (pri)
++ * - number of pulses in a burst (ppb)
++ *
++ * RADAR_WAVEFORM_WEATHER :
++ * same as SHORT except that ppb depends on the pri
++ *
++ * RADAR_WAVEFORM_INTERLEAVED :
++ * same as SHORT except that there are several values of pri (interleaved)
++ *
++ * RADAR_WAVEFORM_LONG :
++ *
++ */
++enum radar_waveform_type {
++ RADAR_WAVEFORM_SHORT,
++ RADAR_WAVEFORM_WEATHER,
++ RADAR_WAVEFORM_INTERLEAVED,
++ RADAR_WAVEFORM_LONG
++};
++
++/**
++ * struct radar_detector_specs - detector specs for a radar pattern type
++ * @type_id: pattern type, as defined by regulatory
++ * @width_min: minimum radar pulse width in [us]
++ * @width_max: maximum radar pulse width in [us]
++ * @pri_min: minimum pulse repetition interval in [us] (including tolerance)
++ * @pri_max: maximum pri in [us] (including tolerance)
++ * @num_pri: maximum number of different pri for this type
++ * @ppb: pulses per bursts for this type
++ * @ppb_thresh: number of pulses required to trigger detection
++ * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
++ * @type: Type of radar waveform
++ */
++struct radar_detector_specs {
++ u8 type_id;
++ u8 width_min;
++ u8 width_max;
++ u16 pri_min;
++ u16 pri_max;
++ u8 num_pri;
++ u8 ppb;
++ u8 ppb_thresh;
++ u8 max_pri_tolerance;
++ enum radar_waveform_type type;
++};
++
++
++/* percentage on ppb threshold to trigger detection */
++#define MIN_PPB_THRESH 50
++#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
++#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
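++
++/*
++ * Worked example of the two helpers above (values derived from the macros,
++ * shown here only for reference):
++ *   PRF2PRI(700)   = (1000000 + 350) / 700 = 1429 us between pulses
++ *   PPB_THRESH(18) = (18 * 50 + 50) / 100  = 9 pulses (50% of a burst, rounded)
++ */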
++
++/* width tolerance */
++#define WIDTH_TOLERANCE 2
++#define WIDTH_LOWER(X) (X)
++#define WIDTH_UPPER(X) (X)
++
++#define ETSI_PATTERN_SHORT(ID, WMIN, WMAX, PMIN, PMAX, PPB) \
++ { \
++ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
++ (PRF2PRI(PMAX) - PRI_TOLERANCE), \
++ (PRF2PRI(PMIN) + PRI_TOLERANCE), 1, PPB, \
++ PPB_THRESH(PPB), PRI_TOLERANCE, RADAR_WAVEFORM_SHORT \
++ }
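++
++/*
++ * For reference, the first ETSI entry below, ETSI_PATTERN_SHORT(0, 0, 8, 700,
++ * 700, 18), expands to a spec with pri_min = PRF2PRI(700) - 16 = 1413 us,
++ * pri_max = PRF2PRI(700) + 16 = 1445 us, ppb = 18 and ppb_thresh = 9
++ * (values computed from the macros above, listed here as an illustration).
++ */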
++
++#define ETSI_PATTERN_INTERLEAVED(ID, WMIN, WMAX, PMIN, PMAX, PRFMIN, PRFMAX, PPB) \
++ { \
++ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
++ (PRF2PRI(PMAX) * PRFMIN - PRI_TOLERANCE), \
++ (PRF2PRI(PMIN) * PRFMAX + PRI_TOLERANCE), \
++ PRFMAX, PPB * PRFMAX, \
++ PPB_THRESH(PPB), PRI_TOLERANCE, RADAR_WAVEFORM_INTERLEAVED \
++ }
++
++/* radar types as defined by ETSI EN-301-893 v1.7.1 */
++static const struct radar_detector_specs etsi_radar_ref_types_v17_riu[] = {
++ ETSI_PATTERN_SHORT(0, 0, 8, 700, 700, 18),
++ ETSI_PATTERN_SHORT(1, 0, 10, 200, 1000, 10),
++ ETSI_PATTERN_SHORT(2, 0, 22, 200, 1600, 15),
++ ETSI_PATTERN_SHORT(3, 0, 22, 2300, 4000, 25),
++ ETSI_PATTERN_SHORT(4, 20, 38, 2000, 4000, 20),
++ ETSI_PATTERN_INTERLEAVED(5, 0, 8, 300, 400, 2, 3, 10),
++ ETSI_PATTERN_INTERLEAVED(6, 0, 8, 400, 1200, 2, 3, 15),
++};
++
++static const struct radar_detector_specs etsi_radar_ref_types_v17_fcu[] = {
++ ETSI_PATTERN_SHORT(0, 0, 8, 700, 700, 18),
++ ETSI_PATTERN_SHORT(1, 0, 8, 200, 1000, 10),
++ ETSI_PATTERN_SHORT(2, 0, 16, 200, 1600, 15),
++ ETSI_PATTERN_SHORT(3, 0, 16, 2300, 4000, 25),
++ ETSI_PATTERN_SHORT(4, 20, 34, 2000, 4000, 20),
++ ETSI_PATTERN_INTERLEAVED(5, 0, 8, 300, 400, 2, 3, 10),
++ ETSI_PATTERN_INTERLEAVED(6, 0, 8, 400, 1200, 2, 3, 15),
++};
++
++static const struct radar_types etsi_radar_types_v17 = {
++ .region = NL80211_DFS_ETSI,
++ .num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v17_riu),
++ .spec_riu = etsi_radar_ref_types_v17_riu,
++ .spec_fcu = etsi_radar_ref_types_v17_fcu,
++};
++
++#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, TYPE) \
++ { \
++ ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
++ PMIN - PRI_TOLERANCE, \
++ PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF, \
++ PPB_THRESH(PPB), PRI_TOLERANCE, TYPE \
++ }
++
++static const struct radar_detector_specs fcc_radar_ref_types_riu[] = {
++ FCC_PATTERN(0, 0, 8, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(1, 0, 8, 518, 3066, 1, 102, RADAR_WAVEFORM_WEATHER),
++ FCC_PATTERN(2, 0, 8, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(3, 6, 20, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(4, 10, 28, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(5, 50, 110, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG),
++ FCC_PATTERN(6, 0, 8, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT),
++};
++
++static const struct radar_detector_specs fcc_radar_ref_types_fcu[] = {
++ FCC_PATTERN(0, 0, 8, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(1, 0, 8, 518, 3066, 1, 102, RADAR_WAVEFORM_WEATHER),
++ FCC_PATTERN(2, 0, 8, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(3, 6, 12, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(4, 10, 22, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT),
++ FCC_PATTERN(5, 50, 104, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG),
++ FCC_PATTERN(6, 0, 8, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT),
++};
++
++static const struct radar_types fcc_radar_types = {
++ .region = NL80211_DFS_FCC,
++ .num_radar_types = ARRAY_SIZE(fcc_radar_ref_types_riu),
++ .spec_riu = fcc_radar_ref_types_riu,
++ .spec_fcu = fcc_radar_ref_types_fcu,
++};
++
++#define JP_PATTERN FCC_PATTERN
++static const struct radar_detector_specs jp_radar_ref_types_riu[] = {
++ JP_PATTERN(0, 0, 8, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(1, 2, 8, 3846, 3846, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(2, 0, 8, 1388, 1388, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(3, 0, 8, 4000, 4000, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(4, 0, 8, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(5, 6, 20, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(6, 10, 28, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(7, 50, 110, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG),
++ JP_PATTERN(8, 0, 8, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT),
++};
++
++static const struct radar_detector_specs jp_radar_ref_types_fcu[] = {
++ JP_PATTERN(0, 0, 8, 1428, 1428, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(1, 2, 6, 3846, 3846, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(2, 0, 8, 1388, 1388, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(3, 2, 2, 4000, 4000, 1, 18, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(4, 0, 8, 150, 230, 1, 23, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(5, 6, 12, 200, 500, 1, 16, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(6, 10, 22, 200, 500, 1, 12, RADAR_WAVEFORM_SHORT),
++ JP_PATTERN(7, 50, 104, 1000, 2000, 1, 8, RADAR_WAVEFORM_LONG),
++ JP_PATTERN(8, 0, 8, 333, 333, 1, 9, RADAR_WAVEFORM_SHORT),
++};
++
++static const struct radar_types jp_radar_types = {
++ .region = NL80211_DFS_JP,
++ .num_radar_types = ARRAY_SIZE(jp_radar_ref_types_riu),
++ .spec_riu = jp_radar_ref_types_riu,
++ .spec_fcu = jp_radar_ref_types_fcu,
++};
++
++static const struct radar_types *dfs_domains[] = {
++ &etsi_radar_types_v17,
++ &fcc_radar_types,
++ &jp_radar_types,
++};
++
++
++/**
++ * struct pri_sequence - sequence of pulses matching one PRI
++ * @head: list_head
++ * @pri: pulse repetition interval (PRI) in usecs
++ * @dur: duration of sequence in usecs
++ * @count: number of pulses in this sequence
++ * @count_falses: number of not matching pulses in this sequence
++ * @first_ts: time stamp of first pulse in usecs
++ * @last_ts: time stamp of last pulse in usecs
++ * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
++ * @ppb_thresh: Number of pulses to validate detection
++ * (needed for weather radars, whose value depends on the pri)
++ */
++struct pri_sequence {
++ struct list_head head;
++ u32 pri;
++ u32 dur;
++ u32 count;
++ u32 count_falses;
++ u64 first_ts;
++ u64 last_ts;
++ u64 deadline_ts;
++ u8 ppb_thresh;
++};
++
++
++/**
++ * struct pulse_elem - elements in pulse queue
++ * @ts: time stamp in usecs
++ */
++struct pulse_elem {
++ struct list_head head;
++ u64 ts;
++};
++
++/**
++ * struct pri_detector - PRI detector element for a dedicated radar type
++ * @head: list head used to chain the detectors
++ * @rs: detector specs for this detector element
++ * @last_ts: last pulse time stamp considered for this element in usecs
++ * @sequences: list_head holding potential pulse sequences
++ * @pulses: list connecting pulse_elem objects
++ * @count: number of pulses in queue
++ * @max_count: maximum number of pulses to be queued
++ * @window_size: window size back from newest pulse time stamp in usecs
++ * @ops: detector operations matching the radar waveform type
++ * @freq: frequency associated with this detector
++ */
++struct pri_detector {
++ struct list_head head;
++ const struct radar_detector_specs *rs;
++ u64 last_ts;
++ struct list_head sequences;
++ struct list_head pulses;
++ u32 count;
++ u32 max_count;
++ u32 window_size;
++ struct pri_detector_ops *ops;
++ u16 freq;
++};
++
++/**
++ * struct pri_detector_ops - PRI detector ops (dependent on the waveform type)
++ * @init: Initialize the pri_detector structure
++ * @add_pulse: Add a pulse to the pri_detector
++ * @reset_on_pri_overflow: Whether the pri_detector should be reset when the pri overflows
++ */
++struct pri_detector_ops {
++ void (*init)(struct pri_detector *pde);
++ struct pri_sequence * (*add_pulse)(struct pri_detector *pde, u16 len, u64 ts, u16 pri);
++ int reset_on_pri_overflow;
++};
++
++
++/******************************************************************************
++ * PRI (pulse repetition interval) sequence detection
++ *****************************************************************************/
++/**
++ * Singleton Pulse and Sequence Pools
++ *
++ * Instances of pri_sequence and pulse_elem are kept in singleton pools to
++ * reduce the number of dynamic allocations. They are shared between all
++ * instances and grow up to the peak number of simultaneously used objects.
++ *
++ * Memory is freed after all references to the pools are released.
++ */
++static u32 singleton_pool_references;
++static LIST_HEAD(pulse_pool);
++static LIST_HEAD(pseq_pool);
++static DEFINE_SPINLOCK(pool_lock);
++
++static void pool_register_ref(void)
++{
++ spin_lock_bh(&pool_lock);
++ singleton_pool_references++;
++ spin_unlock_bh(&pool_lock);
++}
++
++static void pool_deregister_ref(void)
++{
++ spin_lock_bh(&pool_lock);
++ singleton_pool_references--;
++ if (singleton_pool_references == 0) {
++ /* free singleton pools with no references left */
++ struct pri_sequence *ps, *ps0;
++ struct pulse_elem *p, *p0;
++
++ list_for_each_entry_safe(p, p0, &pulse_pool, head) {
++ list_del(&p->head);
++ kfree(p);
++ }
++ list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
++ list_del(&ps->head);
++ kfree(ps);
++ }
++ }
++ spin_unlock_bh(&pool_lock);
++}
++
++static void pool_put_pulse_elem(struct pulse_elem *pe)
++{
++ spin_lock_bh(&pool_lock);
++ list_add(&pe->head, &pulse_pool);
++ spin_unlock_bh(&pool_lock);
++}
++
++static void pool_put_pseq_elem(struct pri_sequence *pse)
++{
++ spin_lock_bh(&pool_lock);
++ list_add(&pse->head, &pseq_pool);
++ spin_unlock_bh(&pool_lock);
++}
++
++static struct pri_sequence *pool_get_pseq_elem(void)
++{
++ struct pri_sequence *pse = NULL;
++ spin_lock_bh(&pool_lock);
++ if (!list_empty(&pseq_pool)) {
++ pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
++ list_del(&pse->head);
++ }
++ spin_unlock_bh(&pool_lock);
++
++ if (pse == NULL) {
++ pse = kmalloc(sizeof(*pse), GFP_ATOMIC);
++ }
++
++ return pse;
++}
++
++static struct pulse_elem *pool_get_pulse_elem(void)
++{
++ struct pulse_elem *pe = NULL;
++ spin_lock_bh(&pool_lock);
++ if (!list_empty(&pulse_pool)) {
++ pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
++ list_del(&pe->head);
++ }
++ spin_unlock_bh(&pool_lock);
++ return pe;
++}
++
++static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
++{
++ struct list_head *l = &pde->pulses;
++ if (list_empty(l))
++ return NULL;
++ return list_entry(l->prev, struct pulse_elem, head);
++}
++
++static bool pulse_queue_dequeue(struct pri_detector *pde)
++{
++ struct pulse_elem *p = pulse_queue_get_tail(pde);
++ if (p != NULL) {
++ list_del_init(&p->head);
++ pde->count--;
++ /* give it back to pool */
++ pool_put_pulse_elem(p);
++ }
++ return (pde->count > 0);
++}
++
++/**
++ * pulse_queue_check_window - remove pulses older than window
++ * @pde: pointer on pri_detector
++ *
++ * Dequeue pulses that are too old.
++ */
++static
++void pulse_queue_check_window(struct pri_detector *pde)
++{
++ u64 min_valid_ts;
++ struct pulse_elem *p;
++
++ /* there is no delta time with less than 2 pulses */
++ if (pde->count < 2)
++ return;
++
++ if (pde->last_ts <= pde->window_size)
++ return;
++
++ min_valid_ts = pde->last_ts - pde->window_size;
++ while ((p = pulse_queue_get_tail(pde)) != NULL) {
++ if (p->ts >= min_valid_ts)
++ return;
++ pulse_queue_dequeue(pde);
++ }
++}
++
++/**
++ * pulse_queue_enqueue - Queue one pulse
++ * @pde: pointer on pri_detector
++ * @ts: time stamp of the pulse to queue, in usecs
++ *
++ * Add one pulse to the list. If the maximum number of pulses
++ * is reached, remove the oldest one.
++ */
++static
++bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
++{
++ struct pulse_elem *p = pool_get_pulse_elem();
++ if (p == NULL) {
++ p = kmalloc(sizeof(*p), GFP_ATOMIC);
++ if (p == NULL) {
++ return false;
++ }
++ }
++ INIT_LIST_HEAD(&p->head);
++ p->ts = ts;
++ list_add(&p->head, &pde->pulses);
++ pde->count++;
++ pde->last_ts = ts;
++ pulse_queue_check_window(pde);
++ if (pde->count >= pde->max_count)
++ pulse_queue_dequeue(pde);
++
++ return true;
++}
++
++
++/***************************************************************************
++ * Short waveform
++ **************************************************************************/
++/**
++ * pde_get_multiple() - get number of multiples considering a given tolerance
++ * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
++ */
++static
++u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
++{
++ u32 remainder;
++ u32 factor;
++ u32 delta;
++
++ if (fraction == 0)
++ return 0;
++
++ delta = (val < fraction) ? (fraction - val) : (val - fraction);
++
++ if (delta <= tolerance)
++ /* val and fraction are within tolerance */
++ return 1;
++
++ factor = val / fraction;
++ remainder = val % fraction;
++ if (remainder > tolerance) {
++ /* no exact match */
++ if ((fraction - remainder) <= tolerance)
++ /* remainder is within tolerance */
++ factor++;
++ else
++ factor = 0;
++ }
++ return factor;
++}
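++
++/*
++ * Example of the tolerance logic above (numbers picked for illustration):
++ * pde_get_multiple(2860, 1428, 16) returns 2 because 2860 = 2 * 1428 + 4 and
++ * the remainder 4 is within the 16 us tolerance, while
++ * pde_get_multiple(2900, 1428, 16) returns 0 since neither the remainder 44
++ * nor 1428 - 44 fits in the tolerance.
++ */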
++
++/**
++ * pde_short_create_sequences - create_sequences function for
++ * SHORT/WEATHER/INTERLEAVED radar waveform
++ * @pde: pointer on pri_detector
++ * @ts: timestamp of the pulse
++ * @min_count: Minimum number of pulses that must be present in the sequence.
++ * (With this pulse there is already a sequence containing @min_count
++ * pulses, so do not create a new sequence unless it contains more
++ * pulses than that.)
++ * @return: false if an error occurred (memory allocation), true otherwise
++ *
++ * For each queued pulse, check whether we can create a sequence with
++ * pri = (ts - pulse_queued.ts) that contains more than @min_count pulses.
++ *
++ */
++static
++bool pde_short_create_sequences(struct pri_detector *pde,
++ u64 ts, u32 min_count)
++{
++ struct pulse_elem *p;
++ u16 pulse_idx = 0;
++
++ list_for_each_entry(p, &pde->pulses, head) {
++ struct pri_sequence ps, *new_ps;
++ struct pulse_elem *p2;
++ u32 tmp_false_count;
++ u64 min_valid_ts;
++ u32 delta_ts = ts - p->ts;
++ pulse_idx++;
++
++ if (delta_ts < pde->rs->pri_min)
++ /* ignore too small pri */
++ continue;
++
++ if (delta_ts > pde->rs->pri_max)
++ /* stop on too large pri (sorted list) */
++ break;
++
++ /* build a new sequence with new potential pri */
++ ps.count = 2;
++ ps.count_falses = pulse_idx - 1;
++ ps.first_ts = p->ts;
++ ps.last_ts = ts;
++ ps.pri = ts - p->ts;
++ ps.dur = ps.pri * (pde->rs->ppb - 1)
++ + 2 * pde->rs->max_pri_tolerance;
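++ /* ps.dur is the expected duration of a full burst (ppb pulses)
++ * at this pri, plus tolerance on both ends */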
++
++ p2 = p;
++ tmp_false_count = 0;
++ if (ps.dur > ts)
++ min_valid_ts = 0;
++ else
++ min_valid_ts = ts - ps.dur;
++ /* check which past pulses are candidates for new sequence */
++ list_for_each_entry_continue(p2, &pde->pulses, head) {
++ u32 factor;
++ if (p2->ts < min_valid_ts)
++ /* stop on crossing window border */
++ break;
++ /* check if pulse match (multi)PRI */
++ factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
++ pde->rs->max_pri_tolerance);
++ if (factor > 0) {
++ ps.count++;
++ ps.first_ts = p2->ts;
++ /*
++ * on match, add the intermediate falses
++ * and reset counter
++ */
++ ps.count_falses += tmp_false_count;
++ tmp_false_count = 0;
++ } else {
++ /* this is a potential false one */
++ tmp_false_count++;
++ }
++ }
++ if (ps.count <= min_count) {
++ /* did not reach minimum count, drop sequence */
++ continue;
++ }
++ /* this is a valid one, add it */
++ ps.deadline_ts = ps.first_ts + ps.dur;
++ if (pde->rs->type == RADAR_WAVEFORM_WEATHER) {
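++ /* for the weather radar pattern the expected number of pulses
++ * scales inversely with the pri, so the threshold is computed
++ * here instead of being taken from the radar spec */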
++ ps.ppb_thresh = 19000000 / (360 * ps.pri);
++ ps.ppb_thresh = PPB_THRESH(ps.ppb_thresh);
++ } else {
++ ps.ppb_thresh = pde->rs->ppb_thresh;
++ }
++
++ new_ps = pool_get_pseq_elem();
++ if (new_ps == NULL) {
++ return false;
++ }
++ memcpy(new_ps, &ps, sizeof(ps));
++ INIT_LIST_HEAD(&new_ps->head);
++ list_add(&new_ps->head, &pde->sequences);
++ }
++ return true;
++}
++
++/**
++ * pde_short_add_to_existing_seqs - add_to_existing_seqs function for
++ * SHORT/WEATHER/INTERLEAVED radar waveform
++ * @pde: pointer on pri_detector
++ * @ts: timestamp of the pulse
++ *
++ * Check all sequences created for this pde.
++ * - If the sequence is too old, delete it.
++ * - Else if the delta with the previous pulse matches the pri of the
++ * sequence, add the pulse to this sequence. If the pulse cannot be
++ * added, it is counted as a false pulse for this sequence.
++ *
++ * @return the length of the longest sequence in which the pulse has been added
++ */
++static
++u32 pde_short_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
++{
++ u32 max_count = 0;
++ struct pri_sequence *ps, *ps2;
++ list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
++ u32 delta_ts;
++ u32 factor;
++
++ /* first ensure that sequence is within window */
++ if (ts > ps->deadline_ts) {
++ list_del_init(&ps->head);
++ pool_put_pseq_elem(ps);
++ continue;
++ }
++
++ delta_ts = ts - ps->last_ts;
++ factor = pde_get_multiple(delta_ts, ps->pri,
++ pde->rs->max_pri_tolerance);
++
++ if (factor > 0) {
++ ps->last_ts = ts;
++ ps->count++;
++
++ if (max_count < ps->count)
++ max_count = ps->count;
++ } else {
++ ps->count_falses++;
++ }
++ }
++ return max_count;
++}
++
++
++/**
++ * pde_short_check_detection - check_detection function for
++ * SHORT/WEATHER/INTERLEAVED radar waveform
++ * @pde: pointer on pri_detector
++ *
++ * Check all sequences created for this pde and consider a sequence complete
++ * if it contains at least the threshold number of pulses and more matching
++ * than false pulses.
++ *
++ * @return The first complete sequence, and NULL if no sequence is complete.
++ */
++static
++struct pri_sequence * pde_short_check_detection(struct pri_detector *pde)
++{
++ struct pri_sequence *ps;
++
++ if (list_empty(&pde->sequences))
++ return NULL;
++
++ list_for_each_entry(ps, &pde->sequences, head) {
++ /*
++ * we assume to have enough matching confidence if we
++ * 1) have enough pulses
++ * 2) have more matching than false pulses
++ */
++ if ((ps->count >= ps->ppb_thresh) &&
++ (ps->count * pde->rs->num_pri > ps->count_falses)) {
++ return ps;
++ }
++ }
++ return NULL;
++}
++
++/**
++ * pde_short_init - init function for
++ * SHORT/WEATHER/INTERLEAVED radar waveform
++ * @pde: pointer on pri_detector
++ *
++ * Initialize pri_detector window size to the maximum size of one burst
++ * for the radar specification associated.
++ */
++static
++void pde_short_init(struct pri_detector *pde)
++{
++ pde->window_size = pde->rs->pri_max * pde->rs->ppb * pde->rs->num_pri;
++ pde->max_count = pde->rs->ppb * 2;
++}
++
++static void pri_detector_reset(struct pri_detector *pde, u64 ts);
++/**
++ * pde_short_add_pulse - Add pulse to a pri_detector for
++ * SHORT/WEATHER/INTERLEAVED radar waveform
++ *
++ * @pde : pointer on pri_detector
++ * @len : width of the pulse
++ * @ts : timestamp of the pulse received
++ * @pri : Delta in us with the previous pulse.
++ * (0 means that the delta is bigger than 65535 us)
++ *
++ * Process one pulse within this pri_detector:
++ * - First try to add it to the existing sequences
++ * - Then try to create new (longer) sequences
++ * - Check if this pulse completes a sequence
++ * - If not, save this pulse in the list
++ */
++static
++struct pri_sequence *pde_short_add_pulse(struct pri_detector *pde,
++ u16 len, u64 ts, u16 pri)
++{
++ u32 max_updated_seq;
++ struct pri_sequence *ps;
++ const struct radar_detector_specs *rs = pde->rs;
++
++ if (pde->count == 0) {
++ /* This is the first pulse after reset, no need to check sequences */
++ pulse_queue_enqueue(pde, ts);
++ return NULL;
++ }
++
++ if ((ts - pde->last_ts) < rs->max_pri_tolerance) {
++ /* if delta to last pulse is too short, don't use this pulse */
++ return NULL;
++ }
++
++ max_updated_seq = pde_short_add_to_existing_seqs(pde, ts);
++
++ if (!pde_short_create_sequences(pde, ts, max_updated_seq)) {
++ pri_detector_reset(pde, ts);
++ return NULL;
++ }
++
++ ps = pde_short_check_detection(pde);
++
++ if (ps == NULL)
++ pulse_queue_enqueue(pde, ts);
++
++ return ps;
++}
++
++
++
++/**
++ * pri detector ops to detect short radar waveform
++ * A short waveform is defined by:
++ * - The width of the pulses
++ * - The interval between two pulses inside a burst (called pri)
++ *   (some waveforms may have 2 or 3 interleaved pri)
++ * - The number of pulses per burst (ppb)
++ */
++static struct pri_detector_ops pri_detector_short = {
++ .init = pde_short_init,
++ .add_pulse = pde_short_add_pulse,
++ .reset_on_pri_overflow = 1,
++};
++
++
++/***************************************************************************
++ * Long waveform
++ **************************************************************************/
++#define LONG_RADAR_DURATION 12000000
++#define LONG_RADAR_BURST_MIN_DURATION (12000000 / 20)
++#define LONG_RADAR_MAX_BURST 20
++
++/**
++ * pde_long_init - init function for LONG radar waveform
++ * @pde: pointer on pri_detector
++ *
++ * Initialize pri_detector window size to the duration of the long radar
++ * waveform (i.e. 12s) and set max_count
++ */
++static
++void pde_long_init(struct pri_detector *pde)
++{
++ pde->window_size = LONG_RADAR_DURATION;
++ pde->max_count = LONG_RADAR_MAX_BURST; /* only count burst not pulses */
++}
++
++
++/**
++ * pde_long_add_pulse - Add pulse to a pri_detector for
++ * LONG radar waveform
++ *
++ * @pde : pointer on pri_detector
++ * @len : width of the pulse
++ * @ts : timestamp of the pulse received
++ * @pri : Delta in us with the previous pulse.
++ *
++ * For the long waveform we only handle one sequence. Since each burst
++ * has a different set of parameters (number of pulses, pri) than the
++ * previous one, we only use the pulse width to add a pulse to the
++ * sequence.
++ * We only queue one pulse per burst and validate the radar when enough
++ * bursts have been detected.
++ */
++static
++struct pri_sequence *pde_long_add_pulse(struct pri_detector *pde,
++ u16 len, u64 ts, u16 pri)
++{
++ struct pri_sequence *ps;
++ const struct radar_detector_specs *rs = pde->rs;
++
++ if (list_empty(&pde->sequences)) {
++ /* First pulse, create a new sequence */
++ ps = pool_get_pseq_elem();
++ if (ps == NULL) {
++ return NULL;
++ }
++
++ /*For long waveform, "count" represents the number of burst detected */
++ ps->count = 1;
++ /*"count_false" represents the number of pulse in the current burst */
++ ps->count_falses = 1;
++ ps->first_ts = ts;
++ ps->last_ts = ts;
++ ps->deadline_ts = ts + pde->window_size;
++ ps->pri = 0;
++ INIT_LIST_HEAD(&ps->head);
++ list_add(&ps->head, &pde->sequences);
++ pulse_queue_enqueue(pde, ts);
++ } else {
++ u32 delta_ts;
++
++ ps = (struct pri_sequence *)pde->sequences.next;
++
++ delta_ts = ts - ps->last_ts;
++ ps->last_ts = ts;
++
++ if (delta_ts < rs->pri_min) {
++ /* ignore pulse too close to the previous one */
++ } else if ((delta_ts >= rs->pri_min) &&
++ (delta_ts <= rs->pri_max)) {
++ /* this is a new pulse in the current burst, ignore it
++ (i.e. don't queue it) */
++ ps->count_falses++;
++ } else if ((ps->count > 2) &&
++ (ps->dur + delta_ts) < LONG_RADAR_BURST_MIN_DURATION) {
++ /* not enough time between bursts, ignore pulse */
++ } else {
++ /* a new burst */
++ ps->count++;
++ ps->count_falses = 1;
++
++ /* reset the start of the sequence if deadline reached */
++ if (ts > ps->deadline_ts) {
++ struct pulse_elem *p;
++ u64 min_valid_ts;
++
++ min_valid_ts = ts - pde->window_size;
++ while ((p = pulse_queue_get_tail(pde)) != NULL) {
++ if (p->ts >= min_valid_ts) {
++ ps->first_ts = p->ts;
++ ps->deadline_ts = p->ts + pde->window_size;
++ break;
++ }
++ pulse_queue_dequeue(pde);
++ ps->count--;
++ }
++ }
++
++ /* validate radar if enough bursts detected and delta with first burst
++ is at least duration/2 */
++ if (ps->count > pde->rs->ppb_thresh &&
++ (ts - ps->first_ts) > (pde->window_size / 2)) {
++ return ps;
++ } else {
++ pulse_queue_enqueue(pde, ts);
++ ps->dur = delta_ts;
++ }
++ }
++ }
++
++ return NULL;
++}
++
++/**
++ * pri detector ops to detect long radar waveform
++ */
++static struct pri_detector_ops pri_detector_long = {
++ .init = pde_long_init,
++ .add_pulse = pde_long_add_pulse,
++ .reset_on_pri_overflow = 0,
++};
++
++
++/***************************************************************************
++ * PRI detector init/reset/exit/get
++ **************************************************************************/
++/**
++ * pri_detector_init- Create a new pri_detector
++ *
++ * @dpd: dfs_pattern_detector instance pointer
++ * @radar_type: index of radar pattern
++ * @freq: Frequency of the pri detector
++ */
++struct pri_detector *pri_detector_init(struct dfs_pattern_detector *dpd,
++ u16 radar_type, u16 freq)
++{
++ struct pri_detector *pde;
++
++ pde = kzalloc(sizeof(*pde), GFP_ATOMIC);
++ if (pde == NULL)
++ return NULL;
++
++ INIT_LIST_HEAD(&pde->sequences);
++ INIT_LIST_HEAD(&pde->pulses);
++ INIT_LIST_HEAD(&pde->head);
++ list_add(&pde->head, &dpd->detectors[radar_type]);
++
++ pde->rs = &dpd->radar_spec[radar_type];
++ pde->freq = freq;
++
++ if (pde->rs->type == RADAR_WAVEFORM_LONG) {
++ /* for LONG WAVEFORM */
++ pde->ops = &pri_detector_long;
++ } else {
++ /* for SHORT, WEATHER and INTERLEAVED */
++ pde->ops = &pri_detector_short;
++ }
++
++ /* Init dependent of specs */
++ pde->ops->init(pde);
++
++ pool_register_ref();
++ return pde;
++}
++
++/**
++ * pri_detector_reset - Reset pri_detector
++ *
++ * @pde: pointer on pri_detector
++ * @ts: New ts reference for the pri_detector
++ *
++ * Free the pulse queue and sequence list and give objects back to the pools
++ */
++static
++void pri_detector_reset(struct pri_detector *pde, u64 ts)
++{
++ struct pri_sequence *ps, *ps0;
++ struct pulse_elem *p, *p0;
++ list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
++ list_del_init(&ps->head);
++ pool_put_pseq_elem(ps);
++ }
++ list_for_each_entry_safe(p, p0, &pde->pulses, head) {
++ list_del_init(&p->head);
++ pool_put_pulse_elem(p);
++ }
++ pde->count = 0;
++ pde->last_ts = ts;
++}
++
++/**
++ * pri_detector_exit - Delete pri_detector
++ *
++ * @pde: pointer on pri_detector
++ */
++static
++void pri_detector_exit(struct pri_detector *pde)
++{
++ pri_detector_reset(pde, 0);
++ pool_deregister_ref();
++ list_del(&pde->head);
++ kfree(pde);
++}
++
++/**
++ * pri_detector_get() - get pri detector for a given frequency and type
++ * @dpd: dfs_pattern_detector instance pointer
++ * @freq: frequency in MHz
++ * @radar_type: index of radar pattern
++ * @return pointer to pri detector on success, NULL otherwise
++ *
++ * Return the existing pri detector for the given frequency or a newly
++ * created one.
++ * Pri detectors are "merged" by frequency: if a pri detector already exists
++ * for a frequency within +/- 2MHz, no new one is created.
++ *
++ * The frequency merge may need to be adapted for patterns with chirp.
++ */
++static struct pri_detector *
++pri_detector_get(struct dfs_pattern_detector *dpd, u16 freq, u16 radar_type)
++{
++ struct pri_detector *pde, *cur = NULL;
++ list_for_each_entry(pde, &dpd->detectors[radar_type], head) {
++ if (pde->freq == freq) {
++ if (pde->count)
++ return pde;
++ else
++ cur = pde;
++ } else if (pde->freq - 2 == freq && pde->count) {
++ return pde;
++ } else if (pde->freq + 2 == freq && pde->count) {
++ return pde;
++ }
++ }
++
++ if (cur)
++ return cur;
++ else
++ return pri_detector_init(dpd, radar_type, freq);
++}
++
++
++/******************************************************************************
++ * DFS Pattern Detector
++ *****************************************************************************/
++/**
++ * dfs_pattern_detector_reset() - reset all channel detectors
++ *
++ * @dpd: dfs_pattern_detector
++ */
++static void dfs_pattern_detector_reset(struct dfs_pattern_detector *dpd)
++{
++ struct pri_detector *pde;
++ int i;
++
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ if (!list_empty(&dpd->detectors[i]))
++ list_for_each_entry(pde, &dpd->detectors[i], head)
++ pri_detector_reset(pde, dpd->last_pulse_ts);
++ }
++
++ dpd->last_pulse_ts = 0;
++ dpd->prev_jiffies = jiffies;
++}
++
++/**
++ * dfs_pattern_detector_exit() - delete all channel detectors
++ *
++ * @dpd: dfs_pattern_detector
++ */
++static void dfs_pattern_detector_exit(struct dfs_pattern_detector *dpd)
++{
++ struct pri_detector *pde, *pde0;
++ int i;
++
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ if (!list_empty(&dpd->detectors[i]))
++ list_for_each_entry_safe(pde, pde0, &dpd->detectors[i], head)
++ pri_detector_exit(pde);
++ }
++
++ kfree(dpd);
++}
++
++/**
++ * dfs_pattern_detector_pri_overflow - reset all channel detectors on pri
++ * overflow
++ * @dpd: dfs_pattern_detector
++ */
++static void dfs_pattern_detector_pri_overflow(struct dfs_pattern_detector *dpd)
++{
++ struct pri_detector *pde;
++ int i;
++
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ if (!list_empty(&dpd->detectors[i]))
++ list_for_each_entry(pde, &dpd->detectors[i], head)
++ if (pde->ops->reset_on_pri_overflow)
++ pri_detector_reset(pde, dpd->last_pulse_ts);
++ }
++}
++
++/**
++ * dfs_pattern_detector_add_pulse - Process one pulse
++ *
++ * @dpd: dfs_pattern_detector
++ * @chain: Chain that corresponds to this pattern_detector (only for debug)
++ * @freq: frequency of the pulse
++ * @pri: Delta with previous pulse. (0 if delta is too big for u16)
++ * @len: width of the pulse
++ * @now: jiffies value when pulse was received
++ *
++ * For each radar type whose pulse width range matches, get (or create) the
++ * pri_detector for this frequency and add the pulse to it.
++ *
++ * @return True if the pulse completes a radar pattern, false otherwise
++ */
++static bool dfs_pattern_detector_add_pulse(struct dfs_pattern_detector *dpd,
++ enum rwnx_radar_chain chain,
++ u16 freq, u16 pri, u16 len, u32 now)
++{
++ u32 i;
++
++ /*
++ * pulses received for a non-supported or un-initialized
++ * domain are treated as detected radars for fail-safety
++ */
++ if (dpd->region == NL80211_DFS_UNSET)
++ return true;
++
++ /* Compute pulse time stamp */
++ if (pri == 0) {
++ u32 delta_jiffie;
++ if (unlikely(now < dpd->prev_jiffies)) {
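++ /* the 32-bit jiffies counter wrapped since the previous pulse */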
++ delta_jiffie = 0xffffffff - dpd->prev_jiffies + now;
++ } else {
++ delta_jiffie = now - dpd->prev_jiffies;
++ }
++ dpd->last_pulse_ts += jiffies_to_usecs(delta_jiffie);
++ dpd->prev_jiffies = now;
++ dfs_pattern_detector_pri_overflow(dpd);
++ } else {
++ dpd->last_pulse_ts += pri;
++ }
++
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ struct pri_sequence *ps;
++ struct pri_detector *pde;
++ const struct radar_detector_specs *rs = &dpd->radar_spec[i];
++
++ /* no need to look up for pde if len is not within range */
++ if ((rs->width_min > len) ||
++ (rs->width_max < len)) {
++ continue;
++ }
++
++ pde = pri_detector_get(dpd, freq, i);
++ ps = pde->ops->add_pulse(pde, len, dpd->last_pulse_ts, pri);
++
++ if (ps != NULL) {
++#ifdef CREATE_TRACE_POINTS
++ trace_radar_detected(chain, dpd->region, pde->freq, i, ps->pri);
++#endif
++ // reset everything instead of just the channel detector
++ dfs_pattern_detector_reset(dpd);
++ return true;
++ }
++ }
++
++ return false;
++}
++
++/**
++ * get_dfs_domain_radar_types() - get radar types for a given DFS domain
++ * @param region DFS region
++ * @return radar_types ptr on success, NULL if DFS domain is not supported
++ */
++static const struct radar_types *
++get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
++{
++ u32 i;
++ for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
++ if (dfs_domains[i]->region == region)
++ return dfs_domains[i];
++ }
++ return NULL;
++}
++
++/**
++ * get_dfs_max_radar_types() - get maximum radar types over all supported domains
++ * @return the maximum number of radar patterns supported by one region
++ */
++static u16 get_dfs_max_radar_types(void)
++{
++ u32 i;
++ u16 max = 0;
++ for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
++ if (dfs_domains[i]->num_radar_types > max)
++ max = dfs_domains[i]->num_radar_types;
++ }
++ return max;
++}
++
++/**
++ * dfs_pattern_detector_set_domain - set DFS domain
++ *
++ * @dpd: dfs_pattern_detector
++ * @region: DFS region
++ *
++ * set DFS domain, resets detector lines upon domain changes
++ */
++static
++bool dfs_pattern_detector_set_domain(struct dfs_pattern_detector *dpd,
++ enum nl80211_dfs_regions region, u8 chain)
++{
++ const struct radar_types *rt;
++ struct pri_detector *pde, *pde0;
++ int i;
++
++ if (dpd->region == region)
++ return true;
++
++ dpd->region = NL80211_DFS_UNSET;
++
++ rt = get_dfs_domain_radar_types(region);
++ if (rt == NULL)
++ return false;
++
++ /* delete all pri detectors for previous DFS domain */
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ if (!list_empty(&dpd->detectors[i]))
++ list_for_each_entry_safe(pde, pde0, &dpd->detectors[i], head)
++ pri_detector_exit(pde);
++ }
++
++ if (chain == RWNX_RADAR_RIU)
++ dpd->radar_spec = rt->spec_riu;
++ else
++ dpd->radar_spec = rt->spec_fcu;
++ dpd->num_radar_types = rt->num_radar_types;
++
++ dpd->region = region;
++ return true;
++}
++
++/**
++ * dfs_pattern_detector_init - Initialize dfs_pattern_detector
++ *
++ * @region: DFS region
++ * @return: pointer on dfs_pattern_detector
++ *
++ */
++static struct dfs_pattern_detector *
++dfs_pattern_detector_init(enum nl80211_dfs_regions region, u8 chain)
++{
++ struct dfs_pattern_detector *dpd;
++ u16 i, max_radar_type = get_dfs_max_radar_types();
++
++ dpd = kmalloc(sizeof(*dpd) + max_radar_type * sizeof(dpd->detectors[0]),
++ GFP_KERNEL);
++ if (dpd == NULL)
++ return NULL;
++
++ dpd->region = NL80211_DFS_UNSET;
++ dpd->enabled = RWNX_RADAR_DETECT_DISABLE;
++ dpd->last_pulse_ts = 0;
++ dpd->prev_jiffies = jiffies;
++ dpd->num_radar_types = 0;
++ for (i = 0; i < max_radar_type; i++)
++ INIT_LIST_HEAD(&dpd->detectors[i]);
++
++ if (dfs_pattern_detector_set_domain(dpd, region, chain))
++ return dpd;
++
++ kfree(dpd);
++ return NULL;
++}
++
++
++/******************************************************************************
++ * driver interface
++ *****************************************************************************/
++static u16 rwnx_radar_get_center_freq(struct rwnx_hw *rwnx_hw, u8 chain)
++{
++ if (chain == RWNX_RADAR_FCU)
++ return rwnx_hw->phy.sec_chan.center_freq1;
++
++ if (chain == RWNX_RADAR_RIU) {
++#ifdef CONFIG_RWNX_FULLMAC
++ if (!rwnx_chanctx_valid(rwnx_hw, rwnx_hw->cur_chanctx)) {
++ WARN(1, "Radar pulse without channel information");
++ } else
++ return rwnx_hw->chanctx_table[rwnx_hw->cur_chanctx].chan_def.center_freq1;
++#endif /* CONFIG_RWNX_FULLMAC */
++ }
++
++ return 0;
++}
++
++static void rwnx_radar_detected(struct rwnx_hw *rwnx_hw)
++{
++#ifdef CONFIG_RWNX_FULLMAC
++ struct cfg80211_chan_def chan_def;
++
++ if (!rwnx_chanctx_valid(rwnx_hw, rwnx_hw->cur_chanctx)) {
++ WARN(1, "Radar detected without channel information");
++ return;
++ }
++
++ /*
++ copy chan_def to a local variable because rwnx_radar_cancel_cac may
++ clear it (if in CAC and it's the only vif using this context) and
++ CAC must be aborted before reporting the radar.
++ */
++ chan_def = rwnx_hw->chanctx_table[rwnx_hw->cur_chanctx].chan_def;
++
++ rwnx_radar_cancel_cac(&rwnx_hw->radar);
++ cfg80211_radar_event(rwnx_hw->wiphy, &chan_def, GFP_KERNEL);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++}
++
++static void rwnx_radar_process_pulse(struct work_struct *ws)
++{
++ struct rwnx_radar *radar = container_of(ws, struct rwnx_radar,
++ detection_work);
++ struct rwnx_hw *rwnx_hw = container_of(radar, struct rwnx_hw, radar);
++ int chain;
++ u32 pulses[RWNX_RADAR_LAST][RWNX_RADAR_PULSE_MAX];
++ u16 pulses_count[RWNX_RADAR_LAST];
++ u32 now = jiffies; /* would be better to store the jiffies value in the IRQ handler */
++
++ /* copy pulses locally to avoid holding the spin_lock for too long */
++ spin_lock_bh(&radar->lock);
++ for (chain = RWNX_RADAR_RIU; chain < RWNX_RADAR_LAST; chain++) {
++ int start, count;
++
++ count = radar->pulses[chain].count;
++ start = radar->pulses[chain].index - count;
++ if (start < 0)
++ start += RWNX_RADAR_PULSE_MAX;
++
++ pulses_count[chain] = count;
++ if (count == 0)
++ continue;
++
++ if ((start + count) > RWNX_RADAR_PULSE_MAX) {
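++ /* pulses wrap around the end of the circular buffer:
++ copy them in two chunks */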
++ u16 count1 = (RWNX_RADAR_PULSE_MAX - start);
++ memcpy(&(pulses[chain][0]),
++ &(radar->pulses[chain].buffer[start]),
++ count1 * sizeof(struct radar_pulse));
++ memcpy(&(pulses[chain][count1]),
++ &(radar->pulses[chain].buffer[0]),
++ (count - count1) * sizeof(struct radar_pulse));
++ } else {
++ memcpy(&(pulses[chain][0]),
++ &(radar->pulses[chain].buffer[start]),
++ count * sizeof(struct radar_pulse));
++ }
++ radar->pulses[chain].count = 0;
++ }
++ spin_unlock_bh(&radar->lock);
++
++
++ /* now process pulses */
++ for (chain = RWNX_RADAR_RIU; chain < RWNX_RADAR_LAST; chain++) {
++ int i;
++ u16 freq;
++
++ if (pulses_count[chain] == 0)
++ continue;
++
++ freq = rwnx_radar_get_center_freq(rwnx_hw, chain);
++
++ for (i = 0; i < pulses_count[chain] ; i++) {
++ struct radar_pulse *p = (struct radar_pulse *)&pulses[chain][i];
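++ /* p->freq is an offset from the channel center frequency; both the
++ offset and the pulse width are scaled by 2 before being fed to
++ the pattern detector */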
++#ifdef CREATE_TRACE_POINTS
++ trace_radar_pulse(chain, p);
++#endif
++ if (dfs_pattern_detector_add_pulse(radar->dpd[chain], chain,
++ (s16)freq + (2 * p->freq),
++ p->rep, (p->len * 2), now)) {
++ u16 idx = radar->detected[chain].index;
++
++ if (chain == RWNX_RADAR_RIU) {
++ /* operating chain, inform upper layer to change channel */
++ if (radar->dpd[chain]->enabled == RWNX_RADAR_DETECT_REPORT) {
++ rwnx_radar_detected(rwnx_hw);
++ /* no need to report a new radar until the upper layer sets a
++ new channel. This prevents warnings if a new radar is
++ detected while mac80211 is changing channel */
++ rwnx_radar_detection_enable(radar,
++ RWNX_RADAR_DETECT_DISABLE,
++ chain);
++ /* purge any events received since the beginning of the
++ function (we are sure not to interfere with the tasklet
++ as we disabled detection just before) */
++ radar->pulses[chain].count = 0;
++ }
++ } else {
++ /* secondary radar detection chain, simply report info in
++ debugfs for now */
++ }
++
++ radar->detected[chain].freq[idx] = (s16)freq + (2 * p->freq);
++ radar->detected[chain].time[idx] = ktime_get_real_seconds();
++ radar->detected[chain].index = ((idx + 1 ) %
++ NX_NB_RADAR_DETECTED);
++ radar->detected[chain].count++;
++ /* no need to process next pulses for this chain */
++ break;
++ }
++ }
++ }
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
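++/*
++ * rwnx_radar_cac_work - CAC (Channel Availability Check) work handler
++ *
++ * Called when the CAC delayed work expires: report CAC completion to
++ * cfg80211, send the stop CAC request and unlink the channel context.
++ */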
++static void rwnx_radar_cac_work(struct work_struct *ws)
++{
++ struct delayed_work *dw = container_of(ws, struct delayed_work, work);
++ struct rwnx_radar *radar = container_of(dw, struct rwnx_radar, cac_work);
++ struct rwnx_hw *rwnx_hw = container_of(radar, struct rwnx_hw, radar);
++ struct rwnx_chanctx *ctxt;
++
++ if (radar->cac_vif == NULL) {
++ WARN(1, "CAC finished but no vif set");
++ return;
++ }
++
++ ctxt = &rwnx_hw->chanctx_table[radar->cac_vif->ch_index];
++ cfg80211_cac_event(radar->cac_vif->ndev,
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ &ctxt->chan_def,
++ #endif
++ NL80211_RADAR_CAC_FINISHED, GFP_KERNEL, 0);
++ rwnx_send_apm_stop_cac_req(rwnx_hw, radar->cac_vif);
++ rwnx_chanctx_unlink(radar->cac_vif);
++
++ radar->cac_vif = NULL;
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++bool rwnx_radar_detection_init(struct rwnx_radar *radar)
++{
++ spin_lock_init(&radar->lock);
++
++ radar->dpd[RWNX_RADAR_RIU] = dfs_pattern_detector_init(NL80211_DFS_UNSET,
++ RWNX_RADAR_RIU);
++ if (radar->dpd[RWNX_RADAR_RIU] == NULL)
++ return false;
++
++ radar->dpd[RWNX_RADAR_FCU] = dfs_pattern_detector_init(NL80211_DFS_UNSET,
++ RWNX_RADAR_FCU);
++ if (radar->dpd[RWNX_RADAR_FCU] == NULL) {
++ rwnx_radar_detection_deinit(radar);
++ return false;
++ }
++
++ INIT_WORK(&radar->detection_work, rwnx_radar_process_pulse);
++#ifdef CONFIG_RWNX_FULLMAC
++ INIT_DELAYED_WORK(&radar->cac_work, rwnx_radar_cac_work);
++ radar->cac_vif = NULL;
++#endif /* CONFIG_RWNX_FULLMAC */
++ return true;
++}
++
++void rwnx_radar_detection_deinit(struct rwnx_radar *radar)
++{
++ if (radar->dpd[RWNX_RADAR_RIU]) {
++ dfs_pattern_detector_exit(radar->dpd[RWNX_RADAR_RIU]);
++ radar->dpd[RWNX_RADAR_RIU] = NULL;
++ }
++ if (radar->dpd[RWNX_RADAR_FCU]) {
++ dfs_pattern_detector_exit(radar->dpd[RWNX_RADAR_FCU]);
++ radar->dpd[RWNX_RADAR_FCU] = NULL;
++ }
++}
++
++bool rwnx_radar_set_domain(struct rwnx_radar *radar,
++ enum nl80211_dfs_regions region)
++{
++ if (radar->dpd[0] == NULL)
++ return false;
++#ifdef CREATE_TRACE_POINTS
++ trace_radar_set_region(region);
++#endif
++ return (dfs_pattern_detector_set_domain(radar->dpd[RWNX_RADAR_RIU],
++ region, RWNX_RADAR_RIU) &&
++ dfs_pattern_detector_set_domain(radar->dpd[RWNX_RADAR_FCU],
++ region, RWNX_RADAR_FCU));
++}
++
++void rwnx_radar_detection_enable(struct rwnx_radar *radar, u8 enable, u8 chain)
++{
++ if (chain < RWNX_RADAR_LAST ) {
++#ifdef CREATE_TRACE_POINTS
++ trace_radar_enable_detection(radar->dpd[chain]->region, enable, chain);
++#endif
++ spin_lock_bh(&radar->lock);
++ radar->dpd[chain]->enabled = enable;
++ spin_unlock_bh(&radar->lock);
++ }
++}
++
++bool rwnx_radar_detection_is_enable(struct rwnx_radar *radar, u8 chain)
++{
++ return radar->dpd[chain]->enabled != RWNX_RADAR_DETECT_DISABLE;
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_radar_start_cac(struct rwnx_radar *radar, u32 cac_time_ms,
++ struct rwnx_vif *vif)
++{
++ WARN(radar->cac_vif != NULL, "CAC already in progress");
++ radar->cac_vif = vif;
++ schedule_delayed_work(&radar->cac_work, msecs_to_jiffies(cac_time_ms));
++}
++
++void rwnx_radar_cancel_cac(struct rwnx_radar *radar)
++{
++ struct rwnx_hw *rwnx_hw = container_of(radar, struct rwnx_hw, radar);
++
++ if (radar->cac_vif == NULL) {
++ return;
++ }
++
++ if (cancel_delayed_work(&radar->cac_work)) {
++ struct rwnx_chanctx *ctxt;
++ ctxt = &rwnx_hw->chanctx_table[radar->cac_vif->ch_index];
++ rwnx_send_apm_stop_cac_req(rwnx_hw, radar->cac_vif);
++ cfg80211_cac_event(radar->cac_vif->ndev,
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
++ &ctxt->chan_def,
++ #endif
++ NL80211_RADAR_CAC_ABORTED, GFP_KERNEL, 0);
++ rwnx_chanctx_unlink(radar->cac_vif);
++ }
++
++ radar->cac_vif = NULL;
++}
++
++void rwnx_radar_detection_enable_on_cur_channel(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_chanctx *ctxt;
++
++ /* If no information on current channel do nothing */
++ if (!rwnx_chanctx_valid(rwnx_hw, rwnx_hw->cur_chanctx))
++ return;
++
++ ctxt = &rwnx_hw->chanctx_table[rwnx_hw->cur_chanctx];
++ if (ctxt->chan_def.chan->flags & IEEE80211_CHAN_RADAR) {
++ rwnx_radar_detection_enable(&rwnx_hw->radar,
++ RWNX_RADAR_DETECT_REPORT,
++ RWNX_RADAR_RIU);
++ } else {
++ rwnx_radar_detection_enable(&rwnx_hw->radar,
++ RWNX_RADAR_DETECT_DISABLE,
++ RWNX_RADAR_RIU);
++ }
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/*****************************************************************************
++ * Debug functions
++ *****************************************************************************/
++static
++int rwnx_radar_dump_pri_detector(char *buf, size_t len,
++ struct pri_detector *pde)
++{
++ char freq_info[] = "Freq = %3.dMHz\n";
++ char seq_info[] = " pri | count | false \n";
++ struct pri_sequence *seq;
++ int res, write = 0;
++
++ if (list_empty(&pde->sequences)) {
++ return 0;
++ }
++
++ if (buf == NULL) {
++ int nb_seq = 1;
++ list_for_each_entry(seq, &pde->sequences, head) {
++ nb_seq++;
++ }
++
++ return (sizeof(freq_info) + nb_seq * sizeof(seq_info));
++ }
++
++ res = scnprintf(buf, len, freq_info, pde->freq);
++ write += res;
++ len -= res;
++
++ res = scnprintf(&buf[write], len, "%s", seq_info);
++ write += res;
++ len -= res;
++
++ list_for_each_entry(seq, &pde->sequences, head) {
++ res = scnprintf(&buf[write], len, " %6.d | %2.d | %.2d \n",
++ seq->pri, seq->count, seq->count_falses);
++ write += res;
++ len -= res;
++ }
++
++ return write;
++}
++
++int rwnx_radar_dump_pattern_detector(char *buf, size_t len,
++ struct rwnx_radar *radar, u8 chain)
++{
++ struct dfs_pattern_detector *dpd = radar->dpd[chain];
++ char info[] = "Type = %3.d\n";
++ struct pri_detector *pde;
++ int i, res, write = 0;
++
++ /* if buf is NULL return size needed for dump */
++ if (buf == NULL) {
++ int size_needed = 0;
++
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ list_for_each_entry(pde, &dpd->detectors[i], head) {
++ size_needed += rwnx_radar_dump_pri_detector(NULL, 0, pde);
++ }
++ size_needed += sizeof(info);
++ }
++
++ return size_needed;
++ }
++
++ /* */
++ for (i = 0; i < dpd->num_radar_types; i++) {
++ res = scnprintf(&buf[write], len, info, i);
++
++ write += res;
++ len -= res;
++ list_for_each_entry(pde, &dpd->detectors[i], head) {
++ res = rwnx_radar_dump_pri_detector(&buf[write], len, pde);
++ write += res;
++ len -= res;
++ }
++ }
++
++ return write;
++}
++
++
++int rwnx_radar_dump_radar_detected(char *buf, size_t len,
++ struct rwnx_radar *radar, u8 chain)
++{
++ struct rwnx_radar_detected *detect = &(radar->detected[chain]);
++ char info[] = "2001/02/02 - 02:20 5126MHz\n";
++ int idx, i, res, write = 0;
++ int count = detect->count;
++
++ if (count > NX_NB_RADAR_DETECTED)
++ count = NX_NB_RADAR_DETECTED;
++
++ if (buf == NULL) {
++ return (count * sizeof(info)) + 1;
++ }
++
++ idx = (detect->index - detect->count) % NX_NB_RADAR_DETECTED;
++
++ for (i = 0; i < count; i++) {
++ struct tm tm;
++ time64_to_tm(detect->time[idx], 0, &tm);
++
++ res = scnprintf(&buf[write], len,
++ "%.4d/%.2d/%.2d - %.2d:%.2d %4.4dMHz\n",
++ (int)tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
++ tm.tm_hour, tm.tm_min, detect->freq[idx]);
++ write += res;
++ len -= res;
++
++ idx = (idx + 1 ) % NX_NB_RADAR_DETECTED;
++ }
++
++ return write;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_radar.h
+@@ -0,0 +1,160 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_radar.h
++ *
++ * @brief Functions to handle radar detection
++ *
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#ifndef _RWNX_RADAR_H_
++#define _RWNX_RADAR_H_
++
++#include <linux/nl80211.h>
++
++struct rwnx_vif;
++struct rwnx_hw;
++
++enum rwnx_radar_chain {
++ RWNX_RADAR_RIU = 0,
++ RWNX_RADAR_FCU,
++ RWNX_RADAR_LAST
++};
++
++enum rwnx_radar_detector {
++ RWNX_RADAR_DETECT_DISABLE = 0, /* Ignore radar pulses */
++ RWNX_RADAR_DETECT_ENABLE = 1, /* Process pattern detection but do not
++ report radar to upper layer (for test) */
++ RWNX_RADAR_DETECT_REPORT = 2 /* Process pattern detection and report
++ radar to upper layer. */
++};
++
++#ifdef CONFIG_RWNX_RADAR
++#include <linux/workqueue.h>
++#include <linux/spinlock.h>
++
++#define RWNX_RADAR_PULSE_MAX 32
++
++/**
++ * struct rwnx_radar_pulses - List of pulses reported by HW
++ * @index: write index
++ * @count: number of valid pulses
++ * @buffer: buffer of pulses
++ */
++struct rwnx_radar_pulses {
++ /* Last radar pulses received */
++ int index;
++ int count;
++ u32 buffer[RWNX_RADAR_PULSE_MAX];
++};
++
++/**
++ * struct dfs_pattern_detector - DFS pattern detector
++ * @enabled: detection state for this chain (see enum rwnx_radar_detector)
++ * @region: active DFS region, NL80211_DFS_UNSET until set
++ * @num_radar_types: number of different radar types
++ * @last_pulse_ts: time stamp of last valid pulse in usecs
++ * @prev_jiffies: jiffies value when the last pulse was processed
++ * @radar_spec: array of radar detection specs for the active region
++ * @detectors: array of lists of pri_detector, one list per radar type
++ */
++struct dfs_pattern_detector {
++ u8 enabled;
++ enum nl80211_dfs_regions region;
++ u8 num_radar_types;
++ u64 last_pulse_ts;
++ u32 prev_jiffies;
++ const struct radar_detector_specs *radar_spec;
++ struct list_head detectors[];
++};
++
++#define NX_NB_RADAR_DETECTED 4
++
++/**
++ * struct rwnx_radar_detected - List of radar detected
++ */
++struct rwnx_radar_detected {
++ u16 index;
++ u16 count;
++ s64 time[NX_NB_RADAR_DETECTED];
++ s16 freq[NX_NB_RADAR_DETECTED];
++};
++
++
++struct rwnx_radar {
++ struct rwnx_radar_pulses pulses[RWNX_RADAR_LAST];
++ struct dfs_pattern_detector *dpd[RWNX_RADAR_LAST];
++ struct rwnx_radar_detected detected[RWNX_RADAR_LAST];
++ struct work_struct detection_work; /* Work used to process radar pulses */
++ spinlock_t lock; /* lock for pulses processing */
++
++ /* In softmac cac is handled by mac80211 */
++#ifdef CONFIG_RWNX_FULLMAC
++ struct delayed_work cac_work; /* Work used to handle CAC */
++ struct rwnx_vif *cac_vif; /* vif on which we started CAC */
++#endif
++};
++
++bool rwnx_radar_detection_init(struct rwnx_radar *radar);
++void rwnx_radar_detection_deinit(struct rwnx_radar *radar);
++bool rwnx_radar_set_domain(struct rwnx_radar *radar,
++ enum nl80211_dfs_regions region);
++void rwnx_radar_detection_enable(struct rwnx_radar *radar, u8 enable, u8 chain);
++bool rwnx_radar_detection_is_enable(struct rwnx_radar *radar, u8 chain);
++void rwnx_radar_start_cac(struct rwnx_radar *radar, u32 cac_time_ms,
++ struct rwnx_vif *vif);
++void rwnx_radar_cancel_cac(struct rwnx_radar *radar);
++void rwnx_radar_detection_enable_on_cur_channel(struct rwnx_hw *rwnx_hw);
++int rwnx_radar_dump_pattern_detector(char *buf, size_t len,
++ struct rwnx_radar *radar, u8 chain);
++int rwnx_radar_dump_radar_detected(char *buf, size_t len,
++ struct rwnx_radar *radar, u8 chain);
++
++#else
++
++struct rwnx_radar {
++};
++
++static inline bool rwnx_radar_detection_init(struct rwnx_radar *radar)
++{return true;}
++
++static inline void rwnx_radar_detection_deinit(struct rwnx_radar *radar)
++{}
++
++static inline bool rwnx_radar_set_domain(struct rwnx_radar *radar,
++ enum nl80211_dfs_regions region)
++{return true;}
++
++static inline void rwnx_radar_detection_enable(struct rwnx_radar *radar,
++ u8 enable, u8 chain)
++{}
++
++static inline bool rwnx_radar_detection_is_enable(struct rwnx_radar *radar,
++ u8 chain)
++{return false;}
++
++static inline void rwnx_radar_start_cac(struct rwnx_radar *radar,
++ u32 cac_time_ms, struct rwnx_vif *vif)
++{}
++
++static inline void rwnx_radar_cancel_cac(struct rwnx_radar *radar)
++{}
++
++static inline void rwnx_radar_detection_enable_on_cur_channel(struct rwnx_hw *rwnx_hw)
++{}
++
++static inline int rwnx_radar_dump_pattern_detector(char *buf, size_t len,
++ struct rwnx_radar *radar,
++ u8 chain)
++{return 0;}
++
++static inline int rwnx_radar_dump_radar_detected(char *buf, size_t len,
++ struct rwnx_radar *radar,
++ u8 chain)
++{return 0;}
++
++#endif /* CONFIG_RWNX_RADAR */
++
++#endif // _RWNX_RADAR_H_
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_rx.c
+@@ -0,0 +1,2219 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_rx.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#include <linux/dma-mapping.h>
++#include <linux/ieee80211.h>
++#include <linux/etherdevice.h>
++#include <net/ieee80211_radiotap.h>
++
++#include "rwnx_defs.h"
++#include "rwnx_rx.h"
++#include "rwnx_tx.h"
++#include "rwnx_prof.h"
++#include "ipc_host.h"
++#include "rwnx_events.h"
++#include "rwnx_compat.h"
++#include "aicwf_txrxif.h"
++#ifdef AICWF_ARP_OFFLOAD
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include "rwnx_msg_tx.h"
++#endif
++
++#ifndef IEEE80211_MAX_CHAINS
++#define IEEE80211_MAX_CHAINS 4
++#endif
++
++u8 dhcped = 0;
++
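++/* legacy (non-HT) TX rates in units of 100 kbps: 1, 2, 5.5, 11 Mbps (CCK)
++ followed by 6 to 54 Mbps (OFDM) */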
++u16 tx_legrates_lut_rate[] = {
++ 10 ,
++ 20 ,
++ 55 ,
++ 110 ,
++ 60 ,
++ 90 ,
++ 120,
++ 180,
++ 240,
++ 360,
++ 480,
++ 540
++};
++
++
++u16 legrates_lut_rate[] = {
++ 10 ,
++ 20 ,
++ 55 ,
++ 110 ,
++ 0 ,
++ 0 ,
++ 0 ,
++ 0 ,
++ 480 ,
++ 240 ,
++ 120 ,
++ 60 ,
++ 540 ,
++ 360 ,
++ 180 ,
++ 90
++};
++
++
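++/* map the HW legacy rate index to a rate index sorted by increasing rate
++ (0-3 CCK, 4-11 OFDM); -1 (0xff) marks reserved indices */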
++const u8 legrates_lut[] = {
++ 0, /* 0 */
++ 1, /* 1 */
++ 2, /* 2 */
++ 3, /* 3 */
++ -1, /* 4 */
++ -1, /* 5 */
++ -1, /* 6 */
++ -1, /* 7 */
++ 10, /* 8 */
++ 8, /* 9 */
++ 6, /* 10 */
++ 4, /* 11 */
++ 11, /* 12 */
++ 9, /* 13 */
++ 7, /* 14 */
++ 5 /* 15 */
++};
++
++struct vendor_radiotap_hdr {
++ u8 oui[3];
++ u8 subns;
++ u16 len;
++ u8 data[];
++};
++
++void rwnx_skb_align_8bytes(struct sk_buff *skb);
++
++
++/**
++ * rwnx_rx_get_vif - Return pointer to the destination vif
++ *
++ * @rwnx_hw: main driver data
++ * @vif_idx: vif index present in rx descriptor
++ *
++ * Select the vif that should receive this frame. Returns NULL if the destination
++ * vif is not active or vif is not specified in the descriptor.
++ */
++static inline
++struct rwnx_vif *rwnx_rx_get_vif(struct rwnx_hw *rwnx_hw, int vif_idx)
++{
++ struct rwnx_vif *rwnx_vif = NULL;
++
++ if (vif_idx < NX_VIRT_DEV_MAX) {
++ rwnx_vif = rwnx_hw->vif_table[vif_idx];
++ if (!rwnx_vif || !rwnx_vif->up)
++ return NULL;
++ }
++
++ return rwnx_vif;
++}
++
++/**
++ * rwnx_rx_vector_convert - Convert a legacy RX vector into a new RX vector format
++ *
++ * @rwnx_hw: main driver data.
++ * @rx_vect1: Rx vector 1 descriptor of the received frame.
++ * @rx_vect2: Rx vector 2 descriptor of the received frame.
++ */
++static void rwnx_rx_vector_convert(struct rwnx_hw *rwnx_hw,
++ struct rx_vector_1 *rx_vect1,
++ struct rx_vector_2 *rx_vect2)
++{
++ struct rx_vector_1_old rx_vect1_leg;
++ struct rx_vector_2_old rx_vect2_leg;
++ u32_l phy_vers = rwnx_hw->version_cfm.version_phy_2;
++
++ // Check if we need to do the conversion. Only if old modem is used
++ if (__MDM_MAJOR_VERSION(phy_vers) > 0) {
++ rx_vect1->rssi1 = rx_vect1->rssi_leg;
++ return;
++ }
++
++ // Copy the received vector locally
++ memcpy(&rx_vect1_leg, rx_vect1, sizeof(struct rx_vector_1_old));
++
++ // Reset it
++ memset(rx_vect1, 0, sizeof(struct rx_vector_1));
++
++ // Perform the conversion
++ rx_vect1->format_mod = rx_vect1_leg.format_mod;
++ rx_vect1->ch_bw = rx_vect1_leg.ch_bw;
++ rx_vect1->antenna_set = rx_vect1_leg.antenna_set;
++ rx_vect1->leg_length = rx_vect1_leg.leg_length;
++ rx_vect1->leg_rate = rx_vect1_leg.leg_rate;
++ rx_vect1->rssi1 = rx_vect1_leg.rssi1;
++
++ switch (rx_vect1->format_mod) {
++ case FORMATMOD_NON_HT:
++ case FORMATMOD_NON_HT_DUP_OFDM:
++ rx_vect1->leg.lsig_valid = rx_vect1_leg.lsig_valid;
++ rx_vect1->leg.chn_bw_in_non_ht = rx_vect1_leg.num_extn_ss;
++ rx_vect1->leg.dyn_bw_in_non_ht = rx_vect1_leg.dyn_bw;
++ break;
++ case FORMATMOD_HT_MF:
++ case FORMATMOD_HT_GF:
++ rx_vect1->ht.aggregation = rx_vect1_leg.aggregation;
++ rx_vect1->ht.fec = rx_vect1_leg.fec_coding;
++ rx_vect1->ht.lsig_valid = rx_vect1_leg.lsig_valid;
++ rx_vect1->ht.length = rx_vect1_leg.ht_length;
++ rx_vect1->ht.mcs = rx_vect1_leg.mcs;
++ rx_vect1->ht.num_extn_ss = rx_vect1_leg.num_extn_ss;
++ rx_vect1->ht.short_gi = rx_vect1_leg.short_gi;
++ rx_vect1->ht.smoothing = rx_vect1_leg.smoothing;
++ rx_vect1->ht.sounding = rx_vect1_leg.sounding;
++ rx_vect1->ht.stbc = rx_vect1_leg.stbc;
++ break;
++ case FORMATMOD_VHT:
++ rx_vect1->vht.beamformed = !rx_vect1_leg.smoothing;
++ rx_vect1->vht.fec = rx_vect1_leg.fec_coding;
++ rx_vect1->vht.length = rx_vect1_leg.ht_length | rx_vect1_leg._ht_length << 8;
++ rx_vect1->vht.mcs = rx_vect1_leg.mcs & 0x0F;
++ rx_vect1->vht.nss = rx_vect1_leg.stbc ? rx_vect1_leg.n_sts/2 : rx_vect1_leg.n_sts;
++ rx_vect1->vht.doze_not_allowed = rx_vect1_leg.doze_not_allowed;
++ rx_vect1->vht.short_gi = rx_vect1_leg.short_gi;
++ rx_vect1->vht.sounding = rx_vect1_leg.sounding;
++ rx_vect1->vht.stbc = rx_vect1_leg.stbc;
++ rx_vect1->vht.group_id = rx_vect1_leg.group_id;
++ rx_vect1->vht.partial_aid = rx_vect1_leg.partial_aid;
++ rx_vect1->vht.first_user = rx_vect1_leg.first_user;
++ break;
++ }
++
++ if (!rx_vect2)
++ return;
++
++ // Copy the received vector 2 locally
++ memcpy(&rx_vect2_leg, rx_vect2, sizeof(struct rx_vector_2_old));
++
++ // Reset it
++ memset(rx_vect2, 0, sizeof(struct rx_vector_2));
++
++ rx_vect2->rcpi1 = rx_vect2_leg.rcpi;
++ rx_vect2->rcpi2 = rx_vect2_leg.rcpi;
++ rx_vect2->rcpi3 = rx_vect2_leg.rcpi;
++ rx_vect2->rcpi4 = rx_vect2_leg.rcpi;
++
++ rx_vect2->evm1 = rx_vect2_leg.evm1;
++ rx_vect2->evm2 = rx_vect2_leg.evm2;
++ rx_vect2->evm3 = rx_vect2_leg.evm3;
++ rx_vect2->evm4 = rx_vect2_leg.evm4;
++}
++
++/**
++ * rwnx_rx_statistic - save some statistics about received frames
++ *
++ * @rwnx_hw: main driver data.
++ * @hw_rxhdr: Rx Hardware descriptor of the received frame.
++ * @sta: STA that sent the frame.
++ */
++static void rwnx_rx_statistic(struct rwnx_hw *rwnx_hw, struct hw_rxhdr *hw_rxhdr,
++ struct rwnx_sta *sta)
++{
++#if 1//def CONFIG_RWNX_DEBUGFS
++ struct rwnx_stats *stats = &rwnx_hw->stats;
++ struct rwnx_rx_rate_stats *rate_stats = &sta->stats.rx_rate;
++ struct rx_vector_1 *rxvect = &hw_rxhdr->hwvect.rx_vect1;
++ int mpdu, ampdu, mpdu_prev, rate_idx;
++
++ /* save complete hwvect */
++ sta->stats.last_rx = hw_rxhdr->hwvect;
++
++ /* update ampdu rx stats */
++ mpdu = hw_rxhdr->hwvect.mpdu_cnt;
++ ampdu = hw_rxhdr->hwvect.ampdu_cnt;
++ mpdu_prev = stats->ampdus_rx_map[ampdu];
++
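++ /* mpdu_cnt is the position of this MPDU within its A-MPDU: when the
++ counter restarts, the size of the previous A-MPDU is added to the
++ ampdus_rx histogram, otherwise any gap is counted as missed MPDUs */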
++ if (mpdu_prev < mpdu ) {
++ stats->ampdus_rx_miss += mpdu - mpdu_prev - 1;
++ } else {
++ stats->ampdus_rx[mpdu_prev]++;
++ }
++ stats->ampdus_rx_map[ampdu] = mpdu;
++
++ /* update rx rate statistic */
++ if (!rate_stats->size)
++ return;
++
++ if (rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM) {
++ int mcs;
++ int bw = rxvect->ch_bw;
++ int sgi;
++ int nss;
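++ /* the per-STA rx rate histogram is a flat table: compute the index
++ from the PHY format, nss, mcs, bandwidth and guard interval */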
++ switch (rxvect->format_mod) {
++ case FORMATMOD_HT_MF:
++ case FORMATMOD_HT_GF:
++ mcs = rxvect->ht.mcs % 8;
++ nss = rxvect->ht.mcs / 8;
++ sgi = rxvect->ht.short_gi;
++ rate_idx = N_CCK + N_OFDM + nss * 32 + mcs * 4 + bw * 2 + sgi;
++ break;
++ case FORMATMOD_VHT:
++ mcs = rxvect->vht.mcs;
++ nss = rxvect->vht.nss;
++ sgi = rxvect->vht.short_gi;
++ rate_idx = N_CCK + N_OFDM + N_HT + nss * 80 + mcs * 8 + bw * 2 + sgi;
++ break;
++ case FORMATMOD_HE_SU:
++ mcs = rxvect->he.mcs;
++ nss = rxvect->he.nss;
++ sgi = rxvect->he.gi_type;
++ rate_idx = N_CCK + N_OFDM + N_HT + N_VHT + nss * 144 + mcs * 12 + bw * 3 + sgi;
++ break;
++ default:
++ mcs = rxvect->he.mcs;
++ nss = rxvect->he.nss;
++ sgi = rxvect->he.gi_type;
++ rate_idx = N_CCK + N_OFDM + N_HT + N_VHT + N_HE_SU
++ + nss * 216 + mcs * 18 + rxvect->he.ru_size * 3 + sgi;
++ break;
++ }
++ } else {
++ int idx = legrates_lut[rxvect->leg_rate];
++ if (idx < 4) {
++ rate_idx = idx * 2 + rxvect->pre_type;
++ } else {
++ rate_idx = N_CCK + idx - 4;
++ }
++ }
++ if (rate_idx < rate_stats->size) {
++ if (!rate_stats->table[rate_idx])
++ rate_stats->rate_cnt++;
++ rate_stats->table[rate_idx]++;
++ rate_stats->cpt++;
++ } else {
++ wiphy_err(rwnx_hw->wiphy, "RX: Invalid index conversion => %d/%d\n",
++ rate_idx, rate_stats->size);
++ }
++#endif
++}
++
++/**
++ * rwnx_rx_data_skb - Process one data frame
++ *
++ * @rwnx_hw: main driver data
++ * @rwnx_vif: vif that received the buffer
++ * @skb: skb received
++ * @rxhdr: HW rx descriptor
++ * @return: true if buffer has been forwarded to upper layer
++ *
++ * If the buffer is an A-MSDU, it is first split into a list of skbs.
++ * Then each skb may be:
++ * - forwarded to the upper layer
++ * - resent on the wireless interface
++ *
++ * When the vif is a STA interface, every skb is only forwarded to the upper layer.
++ * When the vif is an AP interface, multicast skbs are forwarded and resent, whereas
++ * skbs destined to other STAs of the BSS are only resent.
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
++#define RAISE_RX_SOFTIRQ() \
++ cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
++#endif /* LINUX_VERSION_CODE */
++
++void rwnx_rx_data_skb_resend(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, struct hw_rxhdr *rxhdr)
++{
++ struct sk_buff *rx_skb = skb;
++ //bool amsdu = rxhdr->flags_is_amsdu;
++ const struct ethhdr *eth;
++ struct sk_buff *skb_copy;
++
++ rx_skb->dev = rwnx_vif->ndev;
++ skb_reset_mac_header(rx_skb);
++ eth = eth_hdr(rx_skb);
++
++ //printk("resend\n");
++ /* resend pkt on wireless interface */
++ /* always copy the buffer to get enough headroom for the tx header */
++ skb_copy = skb_copy_expand(rx_skb, sizeof(struct rwnx_txhdr) +
++ RWNX_SWTXHDR_ALIGN_SZ + 3 + 24 + 8, 0, GFP_ATOMIC);
++
++ if (skb_copy) {
++ int res;
++ skb_copy->protocol = htons(ETH_P_802_3);
++ skb_reset_network_header(skb_copy);
++ skb_reset_mac_header(skb_copy);
++
++ rwnx_vif->is_resending = true;
++ res = dev_queue_xmit(skb_copy);
++ rwnx_vif->is_resending = false;
++ /* note: buffer is always consumed by dev_queue_xmit */
++ if (res == NET_XMIT_DROP) {
++ rwnx_vif->net_stats.rx_dropped++;
++ rwnx_vif->net_stats.tx_dropped++;
++ } else if (res != NET_XMIT_SUCCESS) {
++ netdev_err(rwnx_vif->ndev,
++ "Failed to re-send buffer to driver (res=%d)",
++ res);
++ rwnx_vif->net_stats.tx_errors++;
++ }
++ } else {
++ netdev_err(rwnx_vif->ndev, "Failed to copy skb");
++ }
++}
++#ifdef AICWF_RX_REORDER
++static void rwnx_rx_data_skb_forward(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, struct hw_rxhdr *rxhdr)
++{
++ struct sk_buff *rx_skb;
++
++ rx_skb = skb;
++ rx_skb->dev = rwnx_vif->ndev;
++ skb_reset_mac_header(rx_skb);
++
++ /* Update statistics */
++ rwnx_vif->net_stats.rx_packets++;
++ rwnx_vif->net_stats.rx_bytes += rx_skb->len;
++
++ //printk("forward\n");
++#ifdef CONFIG_BR_SUPPORT
++ void *br_port = NULL;
++
++ if (1) {//(check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
++ /* Insert NAT2.5 RX here! */
++ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
++ br_port = rwnx_vif->ndev->br_port;
++ #else
++ rcu_read_lock();
++ br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data);
++ rcu_read_unlock();
++ #endif
++
++ if (br_port) {
++ int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb);
++
++ if (nat25_handle_frame(rwnx_vif, rx_skb) == -1) {
++ /* priv->ext_stats.rx_data_drops++; */
++ /* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */
++ /* return FAIL; */
++
++ }
++ }
++ }
++#endif /* CONFIG_BR_SUPPORT */
++
++ rwnx_skb_align_8bytes(rx_skb);
++
++ rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev);
++ memset(rx_skb->cb, 0, sizeof(rx_skb->cb));
++ REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX);
++ #ifdef CONFIG_RX_NETIF_RECV_SKB //modify by aic
++ local_bh_disable();
++ netif_receive_skb(rx_skb);
++ local_bh_enable();
++ #else
++ if (in_interrupt()) {
++ netif_rx(rx_skb);
++ } else {
++ /*
++ * If the receive is not processed inside an ISR, ksoftirqd must be woken
++ * explicitly to service the NET_RX_SOFTIRQ. In 2.6 kernels this is handled
++ * by netif_rx_ni(), but in earlier kernels we need to do it manually.
++ */
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
++ netif_rx_ni(rx_skb);
++ #else
++ ulong flags;
++ netif_rx(rx_skb);
++ local_irq_save(flags);
++ RAISE_RX_SOFTIRQ();
++ local_irq_restore(flags);
++ #endif
++ }
++ #endif
++ REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX);
++
++ rwnx_hw->stats.last_rx = jiffies;
++}
++#endif
++
++
++static bool rwnx_rx_data_skb(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, struct hw_rxhdr *rxhdr)
++{
++ struct sk_buff_head list;
++ struct sk_buff *rx_skb;
++ bool amsdu = rxhdr->flags_is_amsdu;
++ bool resend = false, forward = true;
++
++ skb->dev = rwnx_vif->ndev;
++
++ __skb_queue_head_init(&list);
++
++ if (amsdu) {
++ int count;
++ ieee80211_amsdu_to_8023s(skb, &list, rwnx_vif->ndev->dev_addr,
++ RWNX_VIF_TYPE(rwnx_vif), 0, NULL, NULL, false);
++
++ count = skb_queue_len(&list);
++ if (count > ARRAY_SIZE(rwnx_hw->stats.amsdus_rx))
++ count = ARRAY_SIZE(rwnx_hw->stats.amsdus_rx);
++ rwnx_hw->stats.amsdus_rx[count - 1]++;
++ } else {
++ rwnx_hw->stats.amsdus_rx[0]++;
++ __skb_queue_head(&list, skb);
++ }
++
++ if (((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP) ||
++ (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_AP_VLAN) ||
++ (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_GO)) &&
++ !(rwnx_vif->ap.flags & RWNX_AP_ISOLATE)) {
++ const struct ethhdr *eth;
++ rx_skb = skb_peek(&list);
++ skb_reset_mac_header(rx_skb);
++ eth = eth_hdr(rx_skb);
++
++ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
++ /* broadcast pkts need to be forwarded to the upper layer and resent
++ on the wireless interface */
++ resend = true;
++ } else {
++ /* unicast pkt for a STA inside the BSS, no need to forward to the upper
++ layer, simply resend on the wireless interface */
++ if (rxhdr->flags_dst_idx != RWNX_INVALID_STA)
++ {
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[rxhdr->flags_dst_idx];
++ if (sta->valid && (sta->vlan_idx == rwnx_vif->vif_index))
++ {
++ forward = false;
++ resend = true;
++ }
++ }
++ }
++ } else if (RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) {
++ const struct ethhdr *eth;
++ rx_skb = skb_peek(&list);
++ skb_reset_mac_header(rx_skb);
++ eth = eth_hdr(rx_skb);
++
++ if (!is_multicast_ether_addr(eth->h_dest)) {
++ /* unicast pkt for a STA inside the BSS, no need to forward to the upper
++ layer, simply resend on the wireless interface */
++ if (rxhdr->flags_dst_idx != RWNX_INVALID_STA)
++ {
++ forward = false;
++ resend = true;
++ }
++ }
++ }
++
++ while (!skb_queue_empty(&list)) {
++ rx_skb = __skb_dequeue(&list);
++
++ /* resend pkt on wireless interface */
++ if (resend) {
++ struct sk_buff *skb_copy;
++ /* always need to copy the buffer when forward=0 to get enough headroom for the tx header */
++ skb_copy = skb_copy_expand(rx_skb, sizeof(struct rwnx_txhdr) +
++ RWNX_SWTXHDR_ALIGN_SZ + 3 + 24 + 8, 0, GFP_ATOMIC);
++
++ if (skb_copy) {
++ int res;
++ skb_copy->protocol = htons(ETH_P_802_3);
++ skb_reset_network_header(skb_copy);
++ skb_reset_mac_header(skb_copy);
++
++ rwnx_vif->is_resending = true;
++ res = dev_queue_xmit(skb_copy);
++ rwnx_vif->is_resending = false;
++ /* note: buffer is always consumed by dev_queue_xmit */
++ if (res == NET_XMIT_DROP) {
++ rwnx_vif->net_stats.rx_dropped++;
++ rwnx_vif->net_stats.tx_dropped++;
++ } else if (res != NET_XMIT_SUCCESS) {
++ netdev_err(rwnx_vif->ndev,
++ "Failed to re-send buffer to driver (res=%d)",
++ res);
++ rwnx_vif->net_stats.tx_errors++;
++ }
++ } else {
++ netdev_err(rwnx_vif->ndev, "Failed to copy skb");
++ }
++ }
++
++ /* forward pkt to upper layer */
++ if (forward) {
++#ifdef CONFIG_BR_SUPPORT
++ void *br_port = NULL;
++
++
++ /* Update statistics */
++ rwnx_vif->net_stats.rx_packets++;
++ rwnx_vif->net_stats.rx_bytes += rx_skb->len;
++
++ if (1) {//(check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
++ /* Insert NAT2.5 RX here! */
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
++ br_port = rwnx_vif->ndev->br_port;
++#else
++ rcu_read_lock();
++ br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data);
++ rcu_read_unlock();
++#endif
++
++ if (br_port) {
++ int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb);
++
++ if (nat25_handle_frame(rwnx_vif, rx_skb) == -1) {
++ /* priv->ext_stats.rx_data_drops++; */
++ /* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */
++ /* return FAIL; */
++ }
++ }
++ }
++#endif /* CONFIG_BR_SUPPORT */
++
++ rwnx_skb_align_8bytes(rx_skb);
++
++ rx_skb->protocol = eth_type_trans(rx_skb, rwnx_vif->ndev);
++
++#ifdef AICWF_ARP_OFFLOAD
++ if(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT)
++ arpoffload_proc(rx_skb, rwnx_vif);
++#endif
++ memset(rx_skb->cb, 0, sizeof(rx_skb->cb));
++ REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX);
++ #ifdef CONFIG_RX_NETIF_RECV_SKB //modify by aic
++ local_bh_disable();
++ netif_receive_skb(rx_skb);
++ local_bh_enable();
++ #else
++ if (in_interrupt()) {
++ netif_rx(rx_skb);
++ } else {
++ /*
++ * If the receive is not processed inside an ISR, ksoftirqd must be woken
++ * explicitly to service the NET_RX_SOFTIRQ. In 2.6 kernels this is handled
++ * by netif_rx_ni(), but in earlier kernels we need to do it manually.
++ */
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
++ netif_rx_ni(rx_skb);
++ #else
++ ulong flags;
++ netif_rx(rx_skb);
++ local_irq_save(flags);
++ RAISE_RX_SOFTIRQ();
++ local_irq_restore(flags);
++ #endif
++ }
++ #endif
++ REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_IEEE80211RX);
++
++ rwnx_hw->stats.last_rx = jiffies;
++ }
++ }
++
++ return forward;
++}
++
++#ifdef CONFIG_HE_FOR_OLD_KERNEL
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
++const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
++ const u8 *match, int match_len,
++ int match_offset)
++{
++ const struct element *elem;
++
++ /* match_offset can't be smaller than 2, unless match_len is
++ * zero, in which case match_offset must be zero as well.
++ */
++ if (WARN_ON((match_len && match_offset < 2) ||
++ (!match_len && match_offset)))
++ return NULL;
++
++ for_each_element_id(elem, eid, ies, len) {
++ if (elem->datalen >= match_offset - 2 + match_len &&
++ !memcmp(elem->data + match_offset - 2, match, match_len))
++ return (void *)elem;
++ }
++
++ return NULL;
++}
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
++
++static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8* ies, int len)
++{
++ return cfg80211_find_ie_match(WLAN_EID_EXTENSION, ies, len,
++ &ext_eid, 1, 2);
++}
++#endif
++
++#endif
++
++
++/**
++ * rwnx_rx_mgmt - Process one 802.11 management frame
++ *
++ * @rwnx_hw: main driver data
++ * @rwnx_vif: vif to upload the buffer to
++ * @skb: skb received
++ * @rxhdr: HW rx descriptor
++ *
++ * Forward the management frame to a given interface.
++ */
++static void rwnx_rx_mgmt(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, struct hw_rxhdr *hw_rxhdr)
++{
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
++ struct rx_vector_1 *rxvect = &hw_rxhdr->hwvect.rx_vect1;
++#ifdef CONFIG_HE_FOR_OLD_KERNEL
++ struct ieee80211_he_cap_elem *he;
++#endif
++
++ //printk("rwnx_rx_mgmt\n");
++#if (defined CONFIG_HE_FOR_OLD_KERNEL) || (defined CONFIG_VHT_FOR_OLD_KERNEL)
++ struct aic_sta *sta = &rwnx_hw->aic_table[rwnx_vif->ap.aic_index];
++ const u8* ie;
++ u32 len;
++
++ if (ieee80211_is_assoc_req(mgmt->frame_control) && rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) {
++ printk("ASSOC_REQ: sta_idx %d MAC %pM\n", rwnx_vif->ap.aic_index, mgmt->sa);
++ sta->sta_idx = rwnx_vif->ap.aic_index;
++ len = skb->len - (mgmt->u.assoc_req.variable - skb->data);
++
++ #ifdef CONFIG_HE_FOR_OLD_KERNEL
++ ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, mgmt->u.assoc_req.variable, len);
++ if (ie && ie[1] >= sizeof(*he) + 1) {
++ printk("assoc_req: find he\n");
++ sta->he = true;
++ }
++ else {
++ printk("assoc_req: no find he\n");
++ sta->he = false;
++ }
++ #endif
++
++ #ifdef CONFIG_VHT_FOR_OLD_KERNEL
++ struct ieee80211_vht_cap *vht;
++ ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.assoc_req.variable, len);
++ if (ie && ie[1] >= sizeof(*vht)) {
++ printk("assoc_req: find vht\n");
++ sta->vht = true;
++ } else {
++ printk("assoc_req: no find vht\n");
++ sta->vht = false;
++ }
++ #endif
++ }
++#endif
++
++ if (ieee80211_is_beacon(mgmt->frame_control)) {
++ if ((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_MESH_POINT) &&
++ hw_rxhdr->flags_new_peer) {
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0))
++ cfg80211_notify_new_peer_candidate(rwnx_vif->ndev, mgmt->sa,
++ mgmt->u.beacon.variable,
++ skb->len - offsetof(struct ieee80211_mgmt,
++ u.beacon.variable),
++ GFP_ATOMIC);
++#else
++ /* TODO: the value of parameter sig_dbm needs to be confirmed */
++ cfg80211_notify_new_peer_candidate(rwnx_vif->ndev, mgmt->sa,
++ mgmt->u.beacon.variable,
++ skb->len - offsetof(struct ieee80211_mgmt,
++ u.beacon.variable),
++ rxvect->rssi1, GFP_ATOMIC);
++#endif
++ } else {
++
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++ cfg80211_report_obss_beacon(rwnx_hw->wiphy, skb->data, skb->len,
++ hw_rxhdr->phy_info.phy_prim20_freq,
++ rxvect->rssi1, GFP_KERNEL);
++ #else
++ cfg80211_report_obss_beacon(rwnx_hw->wiphy, skb->data, skb->len,
++ hw_rxhdr->phy_info.phy_prim20_freq,
++ rxvect->rssi1);
++ #endif
++ }
++ } else if ((ieee80211_is_deauth(mgmt->frame_control) ||
++ ieee80211_is_disassoc(mgmt->frame_control)) &&
++ (mgmt->u.deauth.reason_code == WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA ||
++ mgmt->u.deauth.reason_code == WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA)) {
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) // TODO: process unprot mgmt
++ cfg80211_rx_unprot_mlme_mgmt(rwnx_vif->ndev, skb->data, skb->len);
++ #endif
++ } else if ((RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION) &&
++ (ieee80211_is_action(mgmt->frame_control) &&
++ (mgmt->u.action.category == 6))) {
++ struct cfg80211_ft_event_params ft_event;
++ ft_event.target_ap = (uint8_t *)&mgmt->u.action + ETH_ALEN + 2;
++ ft_event.ies = (uint8_t *)&mgmt->u.action + ETH_ALEN * 2 + 2;
++ ft_event.ies_len = skb->len - (ft_event.ies - (uint8_t *)mgmt);
++ ft_event.ric_ies = NULL;
++ ft_event.ric_ies_len = 0;
++ cfg80211_ft_event(rwnx_vif->ndev, &ft_event);
++ } else {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
++ cfg80211_rx_mgmt(&rwnx_vif->wdev, hw_rxhdr->phy_info.phy_prim20_freq,
++ rxvect->rssi1, skb->data, skb->len, 0);
++#else
++ cfg80211_rx_mgmt(rwnx_vif->ndev, hw_rxhdr->phy_info.phy_prim20_freq,
++ rxvect->rssi1, skb->data, skb->len, 0);
++#endif
++ }
++}
++
++/**
++ * rwnx_rx_mgmt_any - Process one 802.11 management frame
++ *
++ * @rwnx_hw: main driver data
++ * @skb: skb received
++ * @rxhdr: HW rx descriptor
++ *
++ * Process the management frame and free the corresponding skb.
++ * If vif is not specified in the rx descriptor, the frame is uploaded
++ * on all active vifs.
++ */
++static void rwnx_rx_mgmt_any(struct rwnx_hw *rwnx_hw, struct sk_buff *skb,
++ struct hw_rxhdr *hw_rxhdr)
++{
++ struct rwnx_vif *rwnx_vif;
++ int vif_idx = hw_rxhdr->flags_vif_idx;
++#ifdef CREATE_TRACE_POINTS
++ trace_mgmt_rx(hw_rxhdr->phy_info.phy_prim20_freq, vif_idx,
++ hw_rxhdr->flags_sta_idx, (struct ieee80211_mgmt *)skb->data);
++#endif
++
++ if (vif_idx == RWNX_INVALID_VIF) {
++ list_for_each_entry(rwnx_vif, &rwnx_hw->vifs, list) {
++ if (! rwnx_vif->up)
++ continue;
++ rwnx_rx_mgmt(rwnx_hw, rwnx_vif, skb, hw_rxhdr);
++ }
++ } else {
++ rwnx_vif = rwnx_rx_get_vif(rwnx_hw, vif_idx);
++ if (rwnx_vif)
++ rwnx_rx_mgmt(rwnx_hw, rwnx_vif, skb, hw_rxhdr);
++ }
++
++ dev_kfree_skb(skb);
++}
++
++/**
++ * rwnx_rx_rtap_hdrlen - Return radiotap header length
++ *
++ * @rxvect: Rx vector used to fill the radiotap header
++ * @has_vend_rtap: boolean indicating if vendor specific data is present
++ *
++ * Compute the length of the radiotap header based on @rxvect and vendor
++ * specific data (if any).
++ */
++static u8 rwnx_rx_rtap_hdrlen(struct rx_vector_1 *rxvect,
++ bool has_vend_rtap)
++{
++ u8 rtap_len;
++
++ /* Compute radiotap header length */
++ rtap_len = sizeof(struct ieee80211_radiotap_header) + 8;
++
++ // Check for multiple antennas
++ if (hweight32(rxvect->antenna_set) > 1)
++ // antenna and antenna signal fields
++ rtap_len += 4 * hweight8(rxvect->antenna_set);
++
++ // TSFT
++ if (!has_vend_rtap) {
++ rtap_len = ALIGN(rtap_len, 8);
++ rtap_len += 8;
++ }
++
++ // IEEE80211_HW_SIGNAL_DBM
++ rtap_len++;
++
++ // Check if single antenna
++ if (hweight32(rxvect->antenna_set) == 1)
++ rtap_len++; //Single antenna
++
++ // padding for RX FLAGS
++ rtap_len = ALIGN(rtap_len, 2);
++
++ // Check for HT frames
++ if ((rxvect->format_mod == FORMATMOD_HT_MF) ||
++ (rxvect->format_mod == FORMATMOD_HT_GF))
++ rtap_len += 3;
++
++ // Check for AMPDU
++ if (!(has_vend_rtap) && ((rxvect->format_mod >= FORMATMOD_VHT) ||
++ ((rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM) &&
++ (rxvect->ht.aggregation)))) {
++ rtap_len = ALIGN(rtap_len, 4);
++ rtap_len += 8;
++ }
++
++ // Check for VHT frames
++ if (rxvect->format_mod == FORMATMOD_VHT) {
++ rtap_len = ALIGN(rtap_len, 2);
++ rtap_len += 12;
++ }
++
++ // Check for HE frames
++ if (rxvect->format_mod == FORMATMOD_HE_SU) {
++ rtap_len = ALIGN(rtap_len, 2);
++ rtap_len += sizeof(struct ieee80211_radiotap_he);
++ }
++
++ // Check for multiple antennas
++ if (hweight32(rxvect->antenna_set) > 1) {
++ // antenna and antenna signal fields
++ rtap_len += 2 * hweight8(rxvect->antenna_set);
++ }
++
++ // Check for vendor specific data
++ if (has_vend_rtap) {
++ /* vendor presence bitmap */
++ rtap_len += 4;
++ /* alignment for fixed 6-byte vendor data header */
++ rtap_len = ALIGN(rtap_len, 2);
++ }
++
++ return rtap_len;
++}
++
++/**
++ * rwnx_rx_add_rtap_hdr - Add radiotap header to sk_buff
++ *
++ * @rwnx_hw: main driver data
++ * @skb: skb received (will include the radiotap header)
++ * @rxvect: Rx vector
++ * @phy_info: Information regarding the phy
++ * @hwvect: HW Info (NULL if vendor specific data is available)
++ * @rtap_len: Length of the radiotap header
++ * @vend_rtap_len: radiotap vendor length (0 if not present)
++ * @vend_it_present: radiotap vendor present
++ *
++ * Builds a radiotap header and add it to @skb.
++ */
++static void rwnx_rx_add_rtap_hdr(struct rwnx_hw* rwnx_hw,
++ struct sk_buff *skb,
++ struct rx_vector_1 *rxvect,
++ struct phy_channel_info_desc *phy_info,
++ struct hw_vect *hwvect,
++ int rtap_len,
++ u8 vend_rtap_len,
++ u32 vend_it_present)
++{
++ struct ieee80211_radiotap_header *rtap;
++ u8 *pos, rate_idx;
++ __le32 *it_present;
++ u32 it_present_val = 0;
++ bool fec_coding = false;
++ bool short_gi = false;
++ bool stbc = false;
++ bool aggregation = false;
++
++ rtap = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
++ memset((u8*) rtap, 0, rtap_len);
++
++ rtap->it_version = 0;
++ rtap->it_pad = 0;
++ rtap->it_len = cpu_to_le16(rtap_len + vend_rtap_len);
++
++ it_present = &rtap->it_present;
++
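++	/*
++	 * With more than one RX chain, one extra it_present word is chained per
++	 * antenna: each word keeps the EXT/namespace bits set so parsers continue
++	 * to the next word, which advertises ANTENNA + DBM_ANTSIGNAL for that chain.
++	 */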
++ // Check for multiple antennas
++ if (hweight32(rxvect->antenna_set) > 1) {
++ int chain;
++ unsigned long chains = rxvect->antenna_set;
++
++ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
++ it_present_val |=
++ BIT(IEEE80211_RADIOTAP_EXT) |
++ BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
++ put_unaligned_le32(it_present_val, it_present);
++ it_present++;
++ it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
++ BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
++ }
++ }
++
++ // Check if vendor specific data is present
++ if (vend_rtap_len) {
++ it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
++ BIT(IEEE80211_RADIOTAP_EXT);
++ put_unaligned_le32(it_present_val, it_present);
++ it_present++;
++ it_present_val = vend_it_present;
++ }
++
++ put_unaligned_le32(it_present_val, it_present);
++ pos = (void *)(it_present + 1);
++
++ // IEEE80211_RADIOTAP_TSFT
++ if (hwvect) {
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
++ // padding
++ while ((pos - (u8 *)rtap) & 7)
++ *pos++ = 0;
++ put_unaligned_le64((((u64)le32_to_cpu(hwvect->tsf_hi) << 32) +
++ (u64)le32_to_cpu(hwvect->tsf_lo)), pos);
++ pos += 8;
++ }
++
++ // IEEE80211_RADIOTAP_FLAGS
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_FLAGS);
++ if (hwvect && (!hwvect->frm_successful_rx))
++ *pos |= IEEE80211_RADIOTAP_F_BADFCS;
++ if (!rxvect->pre_type
++ && (rxvect->format_mod <= FORMATMOD_NON_HT_DUP_OFDM))
++ *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
++ pos++;
++
++ // IEEE80211_RADIOTAP_RATE
++ // check for HT, VHT or HE frames
++ if (rxvect->format_mod >= FORMATMOD_HE_SU) {
++ rate_idx = rxvect->he.mcs;
++ fec_coding = rxvect->he.fec;
++ stbc = rxvect->he.stbc;
++ aggregation = true;
++ *pos = 0;
++ } else if (rxvect->format_mod == FORMATMOD_VHT) {
++ rate_idx = rxvect->vht.mcs;
++ fec_coding = rxvect->vht.fec;
++ short_gi = rxvect->vht.short_gi;
++ stbc = rxvect->vht.stbc;
++ aggregation = true;
++ *pos = 0;
++ } else if (rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM) {
++ rate_idx = rxvect->ht.mcs;
++ fec_coding = rxvect->ht.fec;
++ short_gi = rxvect->ht.short_gi;
++ stbc = rxvect->ht.stbc;
++ aggregation = rxvect->ht.aggregation;
++ *pos = 0;
++ } else {
++ struct ieee80211_supported_band* band =
++ rwnx_hw->wiphy->bands[phy_info->phy_band];
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
++ BUG_ON((rate_idx = legrates_lut[rxvect->leg_rate]) == -1);
++ if (phy_info->phy_band == NL80211_BAND_5GHZ)
++ rate_idx -= 4; /* rwnx_ratetable_5ghz[0].hw_value == 4 */
++ *pos = DIV_ROUND_UP(band->bitrates[rate_idx].bitrate, 5);
++ }
++ pos++;
++
++ // IEEE80211_RADIOTAP_CHANNEL
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
++ put_unaligned_le16(phy_info->phy_prim20_freq, pos);
++ pos += 2;
++
++ if (phy_info->phy_band == NL80211_BAND_5GHZ)
++ put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, pos);
++ else if (rxvect->format_mod > FORMATMOD_NON_HT_DUP_OFDM)
++ put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, pos);
++ else
++ put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, pos);
++ pos += 2;
++
++ if (hweight32(rxvect->antenna_set) == 1) {
++ // IEEE80211_RADIOTAP_DBM_ANTSIGNAL
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
++ *pos++ = rxvect->rssi1;
++
++ // IEEE80211_RADIOTAP_ANTENNA
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_ANTENNA);
++ *pos++ = rxvect->antenna_set;
++ }
++
++ // IEEE80211_RADIOTAP_LOCK_QUALITY is missing
++ // IEEE80211_RADIOTAP_DB_ANTNOISE is missing
++
++ // IEEE80211_RADIOTAP_RX_FLAGS
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RX_FLAGS);
++ // 2 byte alignment
++ if ((pos - (u8 *)rtap) & 1)
++ *pos++ = 0;
++ put_unaligned_le16(0, pos);
++ //Right now, we only support fcs error (no RX_FLAG_FAILED_PLCP_CRC)
++ pos += 2;
++
++ // Check if HT
++ if ((rxvect->format_mod == FORMATMOD_HT_MF)
++ || (rxvect->format_mod == FORMATMOD_HT_GF)) {
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
++ *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
++ IEEE80211_RADIOTAP_MCS_HAVE_GI |
++ IEEE80211_RADIOTAP_MCS_HAVE_BW;
++ *pos = 0;
++ if (short_gi)
++ *pos |= IEEE80211_RADIOTAP_MCS_SGI;
++ if (rxvect->ch_bw == PHY_CHNL_BW_40)
++ *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
++ if (rxvect->format_mod == FORMATMOD_HT_GF)
++ *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
++ if (fec_coding)
++ *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
++ *pos++ |= stbc << 5;
++ #else
++ *pos++ |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
++ #endif
++ *pos++ = rate_idx;
++ }
++
++ // check for HT or VHT frames
++ if (aggregation && hwvect) {
++ // 4 byte alignment
++ while ((pos - (u8 *)rtap) & 3)
++ pos++;
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
++ put_unaligned_le32(hwvect->ampdu_cnt, pos);
++ pos += 4;
++ put_unaligned_le32(0, pos);
++ pos += 4;
++ }
++
++ // Check for VHT frames
++ if (rxvect->format_mod == FORMATMOD_VHT) {
++ u16 vht_details = IEEE80211_RADIOTAP_VHT_KNOWN_GI |
++ IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
++ u8 vht_nss = rxvect->vht.nss + 1;
++
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
++
++ if ((rxvect->ch_bw == PHY_CHNL_BW_160)
++ && phy_info->phy_center2_freq)
++ vht_details &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
++ put_unaligned_le16(vht_details, pos);
++ pos += 2;
++
++ // flags
++ if (short_gi)
++ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
++ if (stbc)
++ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
++ pos++;
++
++ // bandwidth
++ if (rxvect->ch_bw == PHY_CHNL_BW_40)
++ *pos++ = 1;
++ else if (rxvect->ch_bw == PHY_CHNL_BW_80)
++ *pos++ = 4;
++ else if ((rxvect->ch_bw == PHY_CHNL_BW_160)
++ && phy_info->phy_center2_freq)
++ *pos++ = 0; //80P80
++ else if (rxvect->ch_bw == PHY_CHNL_BW_160)
++ *pos++ = 11;
++ else // 20 MHz
++ *pos++ = 0;
++
++ // MCS/NSS
++ *pos = (rate_idx << 4) | vht_nss;
++ pos += 4;
++ if (fec_coding)
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
++ *pos |= 0x01;
++ #else
++ *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
++ #endif
++ pos++;
++ // group ID
++ pos++;
++ // partial_aid
++ pos += 2;
++ }
++
++ // Check for HE frames
++ if (rxvect->format_mod == FORMATMOD_HE_SU) {
++ struct ieee80211_radiotap_he he = {0}; /* zero-init: data3/data5/data6 are or-ed in below */
++ #define HE_PREP(f, val) cpu_to_le16(FIELD_PREP(IEEE80211_RADIOTAP_HE_##f, val))
++ #define D1_KNOWN(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_##f##_KNOWN)
++ #define D2_KNOWN(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_##f##_KNOWN)
++
++ he.data1 = D1_KNOWN(DATA_MCS) | D1_KNOWN(BSS_COLOR) | D1_KNOWN(BEAM_CHANGE) |
++ D1_KNOWN(UL_DL) | D1_KNOWN(CODING) | D1_KNOWN(STBC) |
++ D1_KNOWN(BW_RU_ALLOC) | D1_KNOWN(DOPPLER) | D1_KNOWN(DATA_DCM);
++ he.data2 = D2_KNOWN(GI) | D2_KNOWN(TXBF);
++
++ if (stbc) {
++ he.data6 |= HE_PREP(DATA6_NSTS, 2);
++ he.data3 |= HE_PREP(DATA3_STBC, 1);
++ } else {
++ he.data6 |= HE_PREP(DATA6_NSTS, rxvect->he.nss);
++ }
++
++ he.data3 |= HE_PREP(DATA3_BSS_COLOR, rxvect->he.bss_color);
++ he.data3 |= HE_PREP(DATA3_BEAM_CHANGE, rxvect->he.beam_change);
++ he.data3 |= HE_PREP(DATA3_UL_DL, rxvect->he.uplink_flag);
++ he.data3 |= HE_PREP(DATA3_BSS_COLOR, rxvect->he.bss_color);
++ he.data3 |= HE_PREP(DATA3_DATA_MCS, rxvect->he.mcs);
++ he.data3 |= HE_PREP(DATA3_DATA_DCM, rxvect->he.dcm);
++ he.data3 |= HE_PREP(DATA3_CODING, rxvect->he.fec);
++
++ he.data5 |= HE_PREP(DATA5_GI, rxvect->he.gi_type);
++ he.data5 |= HE_PREP(DATA5_TXBF, rxvect->he.beamformed);
++ he.data5 |= HE_PREP(DATA5_LTF_SIZE, rxvect->he.he_ltf_type + 1);
++
++ switch (rxvect->ch_bw) {
++ case PHY_CHNL_BW_20:
++ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
++ break;
++ case PHY_CHNL_BW_40:
++ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
++ break;
++ case PHY_CHNL_BW_80:
++ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
++ break;
++ case PHY_CHNL_BW_160:
++ he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
++ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
++ break;
++ default:
++ WARN_ONCE(1, "Invalid SU BW %d\n", rxvect->ch_bw);
++ }
++
++ he.data6 |= HE_PREP(DATA6_DOPPLER, rxvect->he.doppler);
++
++ /* ensure 2 byte alignment */
++ while ((pos - (u8 *)rtap) & 1)
++ pos++;
++ rtap->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
++ memcpy(pos, &he, sizeof(he));
++ pos += sizeof(he);
++ }
++
++ // Rx Chains
++ if (hweight32(rxvect->antenna_set) > 1) {
++ int chain;
++ unsigned long chains = rxvect->antenna_set;
++ u8 rssis[4] = {rxvect->rssi1, rxvect->rssi1, rxvect->rssi1, rxvect->rssi1};
++
++ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
++ *pos++ = rssis[chain];
++ *pos++ = chain;
++ }
++ }
++}
++
++/**
++ * rwnx_rx_monitor - Build radiotap header for skb and send it to netdev
++ *
++ * @rwnx_hw: main driver data
++ * @rwnx_vif: vif that received the buffer
++ * @skb: sk_buff received
++ * @hw_rxhdr_ptr: Pointer to HW RX header
++ * @rtap_len: Radiotap Header length
++ *
++ * Add radiotap header to the received skb and send it to netdev
++ */
++static int rwnx_rx_monitor(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, struct hw_rxhdr *hw_rxhdr_ptr,
++ u8 rtap_len)
++{
++ skb->dev = rwnx_vif->ndev;
++
++ if (rwnx_vif->wdev.iftype != NL80211_IFTYPE_MONITOR) {
++ netdev_err(rwnx_vif->ndev, "not a monitor vif\n");
++ return -1;
++ }
++
++ /* Add RadioTap Header */
++ rwnx_rx_add_rtap_hdr(rwnx_hw, skb, &hw_rxhdr_ptr->hwvect.rx_vect1,
++ &hw_rxhdr_ptr->phy_info, &hw_rxhdr_ptr->hwvect,
++ rtap_len, 0, 0);
++
++ skb_reset_mac_header(skb);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->pkt_type = PACKET_OTHERHOST;
++ skb->protocol = htons(ETH_P_802_2);
++
++ local_bh_disable();
++ netif_receive_skb(skb);
++ local_bh_enable();
++
++ return 0;
++}
++
++#ifdef AICWF_ARP_OFFLOAD
++void arpoffload_proc(struct sk_buff *skb, struct rwnx_vif *rwnx_vif)
++{
++ struct iphdr *iphead = (struct iphdr *)(skb->data);
++ struct udphdr *udph;
++ struct DHCPInfo *dhcph;
++
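++	/*
++	 * Walk IP -> UDP -> DHCP: when a DHCP offer/ack sent from the server
++	 * port (67) to the client port (68) matches our MAC address, scan the
++	 * options for a DHCP ACK and then ask the firmware to enable ARP
++	 * offload for the leased address (yiaddr), passing a flag that depends
++	 * on the group cipher in use.
++	 */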
++ if(skb->protocol == htons(ETH_P_IP)) { // IP
++ if(iphead->protocol == IPPROTO_UDP) { // UDP
++ udph = (struct udphdr *)((u8 *)iphead + (iphead->ihl << 2));
++ if((udph->source == __constant_htons(SERVER_PORT))
++ && (udph->dest == __constant_htons(CLIENT_PORT))) { // DHCP offer/ack
++ AICWFDBG(LOGDEBUG, "dhcp: offer / ack\n");
++ dhcph = (struct DHCPInfo *)((u8 *)udph + sizeof(struct udphdr));
++ if(dhcph->cookie == htonl(DHCP_MAGIC) && dhcph->op == 2 &&
++ !memcmp(dhcph->chaddr, rwnx_vif->ndev->dev_addr, 6)) { // match magic word
++ u32 length = ntohs(udph->len) - sizeof(struct udphdr) - offsetof(struct DHCPInfo, options);
++ u16 offset = 0;
++ u8 *option = dhcph->options;
++ while (option[offset]!= DHCP_OPTION_END && offset<length) {
++ if (option[offset] == DHCP_OPTION_MESSAGE_TYPE) {
++ if (option[offset+2] == DHCP_ACK) {
++ AICWFDBG(LOGDEBUG, "dhcp: ack\n");
++ dhcped = 1;
++ if(rwnx_vif->sta.group_cipher_type == WLAN_CIPHER_SUITE_CCMP)
++ rwnx_send_arpoffload_en_req(rwnx_vif->rwnx_hw, rwnx_vif, dhcph->yiaddr, 1);
++ else
++ rwnx_send_arpoffload_en_req(rwnx_vif->rwnx_hw, rwnx_vif, dhcph->yiaddr, 0);
++ }
++ }
++ offset += 2 + option[offset+1];
++ }
++ }
++ }
++ }
++ }
++}
++#endif
++
++#ifdef AICWF_RX_REORDER
++void reord_rxframe_free(spinlock_t *lock, struct list_head *q, struct list_head *list)
++{
++ spin_lock_bh(lock);
++ list_add(list, q);
++ spin_unlock_bh(lock);
++}
++
++struct recv_msdu *reord_rxframe_alloc(spinlock_t *lock, struct list_head *q)
++{
++ struct recv_msdu *rxframe;
++
++ spin_lock_bh(lock);
++ if (list_empty(q)) {
++ spin_unlock_bh(lock);
++ return NULL;
++ }
++ rxframe = list_entry(q->next, struct recv_msdu, rxframe_list);
++ list_del_init(q->next);
++ spin_unlock_bh(lock);
++ return rxframe;
++}
++
++struct reord_ctrl_info *reord_init_sta(struct aicwf_rx_priv* rx_priv, const u8 *mac_addr)
++{
++ u8 i = 0;
++ struct reord_ctrl *preorder_ctrl = NULL;
++ struct reord_ctrl_info *reord_info;
++ struct aicwf_bus *bus_if;
++
++ if (rx_priv == NULL) {
++ printk("bad rx_priv!\n");
++ return NULL;
++ }
++
++#ifdef AICWF_SDIO_SUPPORT
++ bus_if = rx_priv->sdiodev->bus_if;
++#else
++ bus_if = rx_priv->usbdev->bus_if;
++#endif
++
++ if (bus_if->state == BUS_DOWN_ST) {
++ printk("bad bus state!\n");
++ return NULL;
++ }
++
++ reord_info = kmalloc(sizeof(struct reord_ctrl_info), GFP_ATOMIC);
++ if (!reord_info)
++ return NULL;
++
++ memcpy(reord_info->mac_addr, mac_addr, ETH_ALEN);
++ for (i=0; i < 8; i++) {
++ preorder_ctrl = &reord_info->preorder_ctrl[i];
++ preorder_ctrl->enable = true;
++ preorder_ctrl->ind_sn = 0xffff;
++ preorder_ctrl->wsize_b = AICWF_REORDER_WINSIZE;
++ preorder_ctrl->rx_priv= rx_priv;
++ INIT_LIST_HEAD(&preorder_ctrl->reord_list);
++ spin_lock_init(&preorder_ctrl->reord_list_lock);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++ init_timer(&preorder_ctrl->reord_timer);
++ preorder_ctrl->reord_timer.data = (ulong) preorder_ctrl;
++ preorder_ctrl->reord_timer.function = reord_timeout_handler;
++#else
++ timer_setup(&preorder_ctrl->reord_timer, reord_timeout_handler, 0);
++#endif
++ INIT_WORK(&preorder_ctrl->reord_timer_work, reord_timeout_worker);
++ }
++
++ return reord_info;
++}
++
++int reord_flush_tid(struct aicwf_rx_priv *rx_priv, struct sk_buff *skb, u8 tid)
++{
++ struct reord_ctrl_info *reord_info;
++ struct reord_ctrl *preorder_ctrl;
++ struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)rx_priv->rwnx_vif;
++ struct ethhdr *eh = (struct ethhdr *)(skb->data);
++ u8 *mac;
++ unsigned long flags;
++ u8 found = 0;
++ struct list_head *phead, *plist;
++ struct recv_msdu *prframe;
++ int ret;
++
++ if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
++ mac = eh->h_dest;
++ else if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO))
++ mac = eh->h_source;
++ else {
++ printk("error mode:%d!\n", rwnx_vif->wdev.iftype);
++ dev_kfree_skb(skb);
++ return -1;
++ }
++
++ spin_lock_bh(&rx_priv->stas_reord_lock);
++ list_for_each_entry(reord_info, &rx_priv->stas_reord_list, list) {
++ if (!memcmp(mac, reord_info->mac_addr, ETH_ALEN)) {
++ found = 1;
++ preorder_ctrl = &reord_info->preorder_ctrl[tid];
++ break;
++ }
++ }
++ if (!found) {
++ spin_unlock_bh(&rx_priv->stas_reord_lock);
++ return 0;
++ }
++ spin_unlock_bh(&rx_priv->stas_reord_lock);
++
++ if(preorder_ctrl->enable == false)
++ return 0;
++ spin_lock_irqsave(&preorder_ctrl->reord_list_lock, flags);
++ phead = &preorder_ctrl->reord_list;
++ while (1) {
++ if (list_empty(phead)) {
++ break;
++ }
++ plist = phead->next;
++ prframe = list_entry(plist, struct recv_msdu, reord_pending_list);
++ reord_single_frame_ind(rx_priv, prframe);
++ list_del_init(&(prframe->reord_pending_list));
++ }
++
++ AICWFDBG(LOGINFO, "flush:tid=%d", tid);
++ preorder_ctrl->enable = false;
++ spin_unlock_irqrestore(&preorder_ctrl->reord_list_lock, flags);
++ if (timer_pending(&preorder_ctrl->reord_timer))
++ ret = del_timer_sync(&preorder_ctrl->reord_timer);
++ cancel_work_sync(&preorder_ctrl->reord_timer_work);
++
++ return 0;
++}
++
++void reord_deinit_sta(struct aicwf_rx_priv* rx_priv, struct reord_ctrl_info *reord_info)
++{
++ u8 i = 0;
++ //unsigned long flags;
++ struct reord_ctrl *preorder_ctrl = NULL;
++ int ret;
++
++ if (rx_priv == NULL) {
++ txrx_err("bad rx_priv!\n");
++ return;
++ }
++
++ AICWFDBG(LOGINFO, "%s\n", __func__);
++
++ for (i=0; i < 8; i++) {
++ struct recv_msdu *req, *next;
++ preorder_ctrl = &reord_info->preorder_ctrl[i];
++ if(preorder_ctrl->enable){
++ preorder_ctrl->enable = false;
++ if (timer_pending(&preorder_ctrl->reord_timer)) {
++ ret = del_timer_sync(&preorder_ctrl->reord_timer);
++ }
++ cancel_work_sync(&preorder_ctrl->reord_timer_work);
++ }
++
++ spin_lock_bh(&preorder_ctrl->reord_list_lock);
++ list_for_each_entry_safe(req, next, &preorder_ctrl->reord_list, reord_pending_list) {
++ list_del_init(&req->reord_pending_list);
++ if(req->pkt != NULL)
++ dev_kfree_skb(req->pkt);
++ req->pkt = NULL;
++ reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &req->rxframe_list);
++ }
++
++ AICWFDBG(LOGINFO, "reord dinit in_irq():%d in_atomic:%d in_softirq:%d\r\n", (int)in_irq()
++ ,(int)in_atomic(), (int)in_softirq());
++ spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++ }
++
++ spin_lock_bh(&rx_priv->stas_reord_lock);
++ list_del(&reord_info->list);
++ spin_unlock_bh(&rx_priv->stas_reord_lock);
++ kfree(reord_info);
++}
++
++int reord_single_frame_ind(struct aicwf_rx_priv *rx_priv, struct recv_msdu *prframe)
++{
++ struct list_head *rxframes_freequeue = NULL;
++ struct sk_buff *skb = NULL;
++ struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)rx_priv->rwnx_vif;
++
++ rxframes_freequeue = &rx_priv->rxframes_freequeue;
++ skb = prframe->pkt;
++ if (skb == NULL) {
++ txrx_err("skb is NULL\n");
++ return -1;
++ }
++
++ if(!prframe->forward) {
++ //printk("single: %d not forward: drop\n", prframe->seq_num);
++ dev_kfree_skb(skb);
++ prframe->pkt = NULL;
++ reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list);
++ return 0;
++ }
++
++ skb->data = prframe->rx_data;
++ skb_set_tail_pointer(skb, prframe->len);
++ skb->len = prframe->len;
++
++ rwnx_vif->net_stats.rx_packets++;
++ rwnx_vif->net_stats.rx_bytes += skb->len;
++ //printk("netif sn=%d, len=%d\n", precv_frame->attrib.seq_num, skb->len);
++
++#ifdef CONFIG_BR_SUPPORT
++ void *br_port = NULL;
++
++ if (1) {//(check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
++ /* Insert NAT2.5 RX here! */
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
++ br_port = rwnx_vif->ndev->br_port;
++#else
++ rcu_read_lock();
++ br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data);
++ rcu_read_unlock();
++#endif
++
++ if (br_port) {
++ int nat25_handle_frame(struct rwnx_vif *vif, struct sk_buff *skb);
++
++ if (nat25_handle_frame(rwnx_vif, skb) == -1) {
++ /* priv->ext_stats.rx_data_drops++; */
++ /* DEBUG_ERR("RX DROP: nat25_handle_frame fail!\n"); */
++ /* return FAIL; */
++ }
++ }
++ }
++#endif /* CONFIG_BR_SUPPORT */
++
++ skb->dev = rwnx_vif->ndev;
++
++#if 0
++ if(test_log_flag){
++ if(skb->data[42] == 0x80){
++ printk("AIDEN : SN:%d R_SN:%d pid:%d\r\n",
++ prframe->seq_num, (skb->data[44] << 8| skb->data[45]), current->pid);
++ }
++ }
++#endif
++ rwnx_skb_align_8bytes(skb);
++
++ skb->protocol = eth_type_trans(skb, rwnx_vif->ndev);
++
++#ifdef AICWF_ARP_OFFLOAD
++ if(RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_STATION || RWNX_VIF_TYPE(rwnx_vif) == NL80211_IFTYPE_P2P_CLIENT) {
++ arpoffload_proc(skb, rwnx_vif);
++ }
++#endif
++ memset(skb->cb, 0, sizeof(skb->cb));
++
++#ifdef CONFIG_RX_NETIF_RECV_SKB//AIDEN test
++ local_bh_disable();
++ netif_receive_skb(skb);
++ local_bh_enable();
++#else
++ if (in_interrupt()) {
++ netif_rx(skb);
++ } else {
++ /*
++ * If the receive is not processed inside an ISR, ksoftirqd must be woken
++ * explicitly to service NET_RX_SOFTIRQ. On 2.6 and later kernels this is
++ * handled by netif_rx_ni(); on earlier kernels we do it manually.
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
++ netif_rx_ni(skb);
++#else
++ ulong flags;
++ netif_rx(skb);
++ local_irq_save(flags);
++ RAISE_RX_SOFTIRQ();
++ local_irq_restore(flags);
++#endif
++ }
++#endif//CONFIG_RX_NETIF_RECV_SKB
++ prframe->pkt = NULL;
++ reord_rxframe_free(&rx_priv->freeq_lock, rxframes_freequeue, &prframe->rxframe_list);
++
++ return 0;
++}
++
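++/*
++ * reord_rxframes_process - advance ind_sn over the frames that are now in
++ * order (optionally forcing it to the head of the list first). Returns true
++ * when a frame with a higher sequence number is still buffered, i.e. a hole
++ * remains and the caller should (re)arm the reorder timer.
++ */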
++bool reord_rxframes_process(struct aicwf_rx_priv *rx_priv, struct reord_ctrl *preorder_ctrl, int bforced)
++{
++ struct list_head *phead, *plist;
++ struct recv_msdu *prframe;
++ bool bPktInBuf = false;
++
++ if (bforced == true) {
++ phead = &preorder_ctrl->reord_list;
++ if (list_empty(phead)) {
++ return false;
++ }
++
++ plist = phead->next;
++ prframe = list_entry(plist, struct recv_msdu, reord_pending_list);
++ preorder_ctrl->ind_sn = prframe->seq_num;
++ }
++
++ phead = &preorder_ctrl->reord_list;
++ if (list_empty(phead)) {
++ return bPktInBuf;
++ }
++
++ list_for_each_entry(prframe, phead, reord_pending_list) {
++ if (!SN_LESS(preorder_ctrl->ind_sn, prframe->seq_num)) {
++ if (SN_EQUAL(preorder_ctrl->ind_sn, prframe->seq_num)) {
++ preorder_ctrl->ind_sn = (preorder_ctrl->ind_sn + 1) & 0xFFF;
++ }
++ } else {
++ bPktInBuf = true;
++ break;
++ }
++ }
++
++ return bPktInBuf;
++}
++
++void reord_rxframes_ind(struct aicwf_rx_priv *rx_priv,
++ struct reord_ctrl *preorder_ctrl)
++{
++ struct list_head *phead, *plist;
++ struct recv_msdu *prframe;
++
++ //spin_lock_bh(&preorder_ctrl->reord_list_lock);//AIDEN
++ phead = &preorder_ctrl->reord_list;
++ while (1) {
++ //spin_lock_bh(&preorder_ctrl->reord_list_lock);
++ if (list_empty(phead)) {
++ //spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++ break;
++ }
++
++ plist = phead->next;
++ prframe = list_entry(plist, struct recv_msdu, reord_pending_list);
++
++ if (!SN_LESS(preorder_ctrl->ind_sn, prframe->seq_num)) {
++ list_del_init(&(prframe->reord_pending_list));
++ //spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++ reord_single_frame_ind(rx_priv, prframe);
++ } else {
++ //spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++ break;
++ }
++ }
++ //spin_unlock_bh(&preorder_ctrl->reord_list_lock);//AIDEN
++}
++
++int reorder_timeout = REORDER_UPDATE_TIME;
++module_param(reorder_timeout, int, 0660);
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++void reord_timeout_handler (ulong data)
++#else
++void reord_timeout_handler (struct timer_list *t)
++#endif
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++ struct reord_ctrl *preorder_ctrl = (struct reord_ctrl *)data;
++#else
++ struct reord_ctrl *preorder_ctrl = from_timer(preorder_ctrl, t, reord_timer);
++#endif
++
++ AICWFDBG(LOGTRACE, "%s Enter \r\n", __func__);
++
++
++ if (g_rwnx_plat->usbdev->state == USB_DOWN_ST) {
++ usb_err("bus is down\n");
++ return;
++ }
++#if 0//AIDEN
++ struct aicwf_rx_priv *rx_priv = preorder_ctrl->rx_priv;
++
++ if (reord_rxframes_process(rx_priv, preorder_ctrl, true)==true) {
++ mod_timer(&preorder_ctrl->reord_timer, jiffies + msecs_to_jiffies(reorder_timeout/*REORDER_UPDATE_TIME*/));
++ }
++#endif
++
++ if(!work_pending(&preorder_ctrl->reord_timer_work))
++ schedule_work(&preorder_ctrl->reord_timer_work);
++
++}
++
++void reord_timeout_worker(struct work_struct *work)
++{
++ struct reord_ctrl *preorder_ctrl = container_of(work, struct reord_ctrl, reord_timer_work);
++ struct aicwf_rx_priv *rx_priv = preorder_ctrl->rx_priv;
++
++ spin_lock_bh(&preorder_ctrl->reord_list_lock);
++#if 1//AIDEN
++ if (reord_rxframes_process(rx_priv, preorder_ctrl, true)==true) {
++ mod_timer(&preorder_ctrl->reord_timer, jiffies + msecs_to_jiffies(reorder_timeout/*REORDER_UPDATE_TIME*/));
++ }
++#endif
++
++ reord_rxframes_ind(rx_priv, preorder_ctrl);
++ spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++
++ return ;
++}
++
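++/*
++ * reord_process_unit - entry point of the RX reorder path for one MSDU.
++ * Wraps the skb in a recv_msdu, looks up (or creates) the per-STA reorder
++ * state for this TID, then either indicates the frame immediately (reorder
++ * disabled or sequence number outside the window) or enqueues it and flushes
++ * every in-order frame, arming the reorder timer while a hole remains.
++ */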
++int reord_process_unit(struct aicwf_rx_priv *rx_priv, struct sk_buff *skb, u16 seq_num, u8 tid, u8 forward)
++{
++ int ret=0;
++ u8 *mac;
++ struct recv_msdu *pframe;
++ struct reord_ctrl *preorder_ctrl;
++ struct reord_ctrl_info *reord_info;
++ struct rwnx_vif *rwnx_vif = (struct rwnx_vif *)rx_priv->rwnx_vif;
++ struct ethhdr *eh = (struct ethhdr *)(skb->data);
++ //u8 *da = eh->h_dest;
++ //u8 is_mcast = ((*da) & 0x01)? 1 : 0;
++
++ if (rwnx_vif == NULL || skb->len <= 14) {
++ dev_kfree_skb(skb);
++ return -1;
++ }
++
++ pframe = reord_rxframe_alloc(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue);
++ if (!pframe) {
++ dev_kfree_skb(skb);
++ return -1;
++ }
++
++ INIT_LIST_HEAD(&pframe->reord_pending_list);
++ pframe->seq_num = seq_num;
++ pframe->tid = tid;
++ pframe->rx_data = skb->data;
++ pframe->len = skb->len;
++ pframe->pkt = skb;
++ pframe->forward = forward;
++ preorder_ctrl = pframe->preorder_ctrl;
++
++#if 0
++ if ((ntohs(eh->h_proto) == ETH_P_PAE) || is_mcast){
++ printk("%s AIDEN pframe->seq_num:%d is bcast or mcast\r\n", __func__, pframe->seq_num);
++ ret = reord_single_frame_ind(rx_priv, pframe);
++ return ret;
++ }
++#endif
++
++ if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
++ mac = eh->h_dest;
++ else if((rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO))
++ mac = eh->h_source;
++ else {
++ dev_kfree_skb(skb);
++ return -1;
++ }
++
++ spin_lock_bh(&rx_priv->stas_reord_lock);
++ list_for_each_entry(reord_info, &rx_priv->stas_reord_list, list) {
++ if (!memcmp(mac, reord_info->mac_addr, ETH_ALEN)) {
++ preorder_ctrl = &reord_info->preorder_ctrl[pframe->tid];
++ break;
++ }
++ }
++
++
++ if (&reord_info->list == &rx_priv->stas_reord_list) {//first time???
++ reord_info = reord_init_sta(rx_priv, mac);
++ //reord_info has 8 preorder_ctrl,
++ //There is a one-to-one matched preorder_ctrl and tid
++ if (!reord_info) {
++ spin_unlock_bh(&rx_priv->stas_reord_lock);
++ dev_kfree_skb(skb);
++ return -1;
++ }
++ list_add_tail(&reord_info->list, &rx_priv->stas_reord_list);
++ preorder_ctrl = &reord_info->preorder_ctrl[pframe->tid];
++ } else {
++ if(preorder_ctrl->enable == false) {
++ preorder_ctrl->enable = true;
++ preorder_ctrl->ind_sn = 0xffff;
++ preorder_ctrl->wsize_b = AICWF_REORDER_WINSIZE;
++ preorder_ctrl->rx_priv= rx_priv;
++ }
++ }
++ spin_unlock_bh(&rx_priv->stas_reord_lock);
++
++ if (preorder_ctrl->enable == false) {
++ preorder_ctrl->ind_sn = pframe->seq_num;
++ spin_lock_bh(&preorder_ctrl->reord_list_lock);//AIDEN
++ reord_single_frame_ind(rx_priv, pframe);
++ spin_unlock_bh(&preorder_ctrl->reord_list_lock);//AIDEN
++ preorder_ctrl->ind_sn = (preorder_ctrl->ind_sn + 1)%4096;
++ return 0;
++ }
++
++ spin_lock_bh(&preorder_ctrl->reord_list_lock);
++ if (reord_need_check(preorder_ctrl, pframe->seq_num)) {
++ if(pframe->rx_data[42] == 0x80){ // looks like an RTP packet
++ if(pframe->seq_num == preorder_ctrl->ind_sn){
++ printk("%s pframe->seq_num1:%d \r\n", __func__, pframe->seq_num);
++ reord_single_frame_ind(rx_priv, pframe); // no need to reorder
++ }else{
++ printk("%s free pframe->seq_num:%d \r\n", __func__, pframe->seq_num);
++ if (pframe->pkt){
++ dev_kfree_skb(pframe->pkt);
++ pframe->pkt = NULL;
++ }
++ reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &pframe->rxframe_list);
++ }
++ }else{
++ //printk("%s pframe->seq_num2:%d \r\n", __func__, pframe->seq_num);
++ reord_single_frame_ind(rx_priv, pframe); // no need to reorder
++ }
++
++ spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++ return 0;
++ }
++
++ if (reord_rxframe_enqueue(preorder_ctrl, pframe)) {
++ spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++ goto fail;
++ }
++
++ if (reord_rxframes_process(rx_priv, preorder_ctrl, false) == true) {
++ if (!timer_pending(&preorder_ctrl->reord_timer)) {//if this timer not countdown, mod_timer timer to start count down
++ ret = mod_timer(&preorder_ctrl->reord_timer, jiffies + msecs_to_jiffies(reorder_timeout/*REORDER_UPDATE_TIME*/));
++ }
++ } else {
++ if(timer_pending(&preorder_ctrl->reord_timer)) {
++ ret = del_timer(&preorder_ctrl->reord_timer);
++ }
++ }
++
++ reord_rxframes_ind(rx_priv, preorder_ctrl);
++
++ spin_unlock_bh(&preorder_ctrl->reord_list_lock);
++
++
++
++ return 0;
++
++fail:
++ if (pframe->pkt){
++ dev_kfree_skb(pframe->pkt);
++ pframe->pkt = NULL;
++ }
++ reord_rxframe_free(&rx_priv->freeq_lock, &rx_priv->rxframes_freequeue, &pframe->rxframe_list);
++ return ret;
++}
++
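++/*
++ * reord_need_check - decide whether a frame must go through the reorder queue.
++ * ind_sn is the next expected sequence number, wend the last sequence number
++ * inside the reorder window (ind_sn + wsize - 1, modulo 4096). Frames older
++ * than ind_sn return non-zero and are indicated directly by the caller;
++ * in-window or beyond-window frames advance ind_sn/the window and return 0.
++ */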
++int reord_need_check(struct reord_ctrl *preorder_ctrl, u16 seq_num)
++{
++ u8 wsize = preorder_ctrl->wsize_b;
++ u16 wend = (preorder_ctrl->ind_sn + wsize -1) & 0xFFF;//0xFFF: 12 bits for seq num
++
++ //first time: wend = 0 + 64 - 1= 63
++
++ if (preorder_ctrl->ind_sn == 0xFFFF) {//first time
++ preorder_ctrl->ind_sn = seq_num;
++ }
++
++ if( SN_LESS(seq_num, preorder_ctrl->ind_sn)) {//first time seq_num = preorder_ctrl->ind_sn
++ return -1; // no need to reorder
++ }
++
++ if (SN_EQUAL(seq_num, preorder_ctrl->ind_sn)) {
++ preorder_ctrl->ind_sn = (preorder_ctrl->ind_sn + 1) & 0xFFF;
++ } else if (SN_LESS(wend, seq_num)) {
++ if (seq_num >= (wsize-1))
++ preorder_ctrl->ind_sn = seq_num-(wsize-1);
++ else
++ preorder_ctrl->ind_sn = 0xFFF - (wsize - (seq_num + 1)) + 1;
++ }
++
++ return 0;
++}
++
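++/*
++ * reord_rxframe_enqueue - insert a frame into the pending list, which is kept
++ * sorted by ascending sequence number. Returns -1 when a frame with the same
++ * sequence number is already queued (the caller then frees the duplicate).
++ */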
++int reord_rxframe_enqueue(struct reord_ctrl *preorder_ctrl, struct recv_msdu *prframe)
++{
++ struct list_head *preord_list = &preorder_ctrl->reord_list;
++ struct list_head *phead, *plist;
++ struct recv_msdu *pnextrframe;
++
++ // if the list is empty, plist == phead and the new frame is simply appended
++ phead = preord_list;
++ plist = phead->next;
++
++ while(phead != plist) {
++ pnextrframe = list_entry(plist, struct recv_msdu, reord_pending_list);
++ if(SN_LESS(pnextrframe->seq_num, prframe->seq_num)) {
++ plist = plist->next;
++ continue;
++ } else if(SN_EQUAL(pnextrframe->seq_num, prframe->seq_num)) {
++ return -1;
++ } else {
++ break;
++ }
++ }
++
++ // insert prframe before plist, keeping the list sorted by ascending sequence number
++ list_add_tail(&(prframe->reord_pending_list), plist);
++
++ return 0;
++}
++#endif /* AICWF_RX_REORDER */
++
++void remove_sec_hdr_mgmt_frame(struct hw_rxhdr *hw_rxhdr,struct sk_buff *skb)
++{
++ u8 hdr_len = 24;
++ u8 mgmt_header[24] = {0};
++
++ if(!hw_rxhdr->hwvect.ga_frame){
++ if(((skb->data[0] & 0x0C) == 0) && (skb->data[1] & 0x40) == 0x40){ //protect management frame
++ printk("frame type %x\n",skb->data[0]);
++ if(hw_rxhdr->hwvect.decr_status == RWNX_RX_HD_DECR_CCMP128){
++ memcpy(mgmt_header,skb->data,hdr_len);
++ skb_pull(skb,8);
++ memcpy(skb->data,mgmt_header,hdr_len);
++ hw_rxhdr->hwvect.len -= 8;
++ }
++ else {
++ printk("unsupport decr_status:%d\n",hw_rxhdr->hwvect.decr_status);
++ }
++ }
++ }
++}
++
++u8 rwnx_rxdataind_aicwf(struct rwnx_hw *rwnx_hw, void *hostid, void *rx_priv)
++{
++ struct hw_rxhdr *hw_rxhdr;
++ struct rxdesc_tag *rxdesc = NULL;
++ struct rwnx_vif *rwnx_vif;
++ struct sk_buff *skb = hostid;
++ int msdu_offset = sizeof(struct hw_rxhdr) + 2;
++ u16_l status = 0;
++ u8 hdr_len = 24;
++ u8 ra[MAC_ADDR_LEN] = {0};
++ u8 ta[MAC_ADDR_LEN] = {0};
++ u8 ether_type[2] = {0};
++ u8 pull_len = 0;
++ u8 tid = 0;
++ u8 is_qos = 0;
++#ifdef AICWF_RX_REORDER
++ struct aicwf_rx_priv *rx_priv_tmp;
++ u16 seq_num = 0;
++ bool resend = false;
++ bool forward = false;
++#endif
++
++ REG_SW_SET_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND);
++ hw_rxhdr = (struct hw_rxhdr *)skb->data;
++
++#ifdef AICWF_RX_REORDER
++ if(hw_rxhdr->is_monitor_vif) {
++ status = RX_STAT_MONITOR;
++ //printk("monitor rx\n");
++ }
++#endif
++
++ if(hw_rxhdr->flags_upload)
++ status |= RX_STAT_FORWARD;
++
++ /* Check if we need to delete the buffer */
++ if (status & RX_STAT_DELETE) {
++ /* Remove the SK buffer from the rxbuf_elems table */
++ #if 0
++ rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb);
++ #endif
++ /* Free the buffer */
++ dev_kfree_skb(skb);
++ goto end;
++ }
++
++ /* Check if we need to forward the buffer coming from a monitor interface */
++ if (status & RX_STAT_MONITOR) {
++ struct sk_buff *skb_monitor = NULL;
++ struct hw_rxhdr hw_rxhdr_copy;
++ u8 rtap_len;
++ u16 frm_len = 0;
++
++ //Check if monitor interface exists and is open
++ rwnx_vif = rwnx_rx_get_vif(rwnx_hw, rwnx_hw->monitor_vif);
++ if (!rwnx_vif) {
++ dev_err(rwnx_hw->dev, "Received monitor frame but there is no monitor interface open\n");
++ goto check_len_update;
++ }
++
++ rwnx_rx_vector_convert(rwnx_hw,
++ &hw_rxhdr->hwvect.rx_vect1,
++ &hw_rxhdr->hwvect.rx_vect2);
++ rtap_len = rwnx_rx_rtap_hdrlen(&hw_rxhdr->hwvect.rx_vect1, false);
++
++ if (status == RX_STAT_MONITOR)
++ {
++ /* Remove the SK buffer from the rxbuf_elems table. It will also
++ unmap the buffer and then sync the buffer for the cpu */
++ //rwnx_ipc_rxbuf_elem_pull(rwnx_hw, skb);
++ skb->data += (msdu_offset + 2); // sdio/usb word align
++
++ //Save frame length
++ frm_len = le32_to_cpu(hw_rxhdr->hwvect.len);
++
++ // Reserve space for frame
++ skb->len = frm_len;
++
++ //Check if there is enough space to add the radiotap header
++ if (skb_headroom(skb) > rtap_len) {
++
++ skb_monitor = skb;
++
++ //Duplicate the HW Rx Header to override with the radiotap header
++ memcpy(&hw_rxhdr_copy, hw_rxhdr, sizeof(hw_rxhdr_copy));
++
++ hw_rxhdr = &hw_rxhdr_copy;
++ } else {
++ //Duplicate the skb and extend the headroom
++ skb_monitor = skb_copy_expand(skb, rtap_len, 0, GFP_ATOMIC);
++
++ //Reset original skb->data pointer
++ skb->data = (void*) hw_rxhdr;
++ }
++ } else {
++ #ifdef CONFIG_RWNX_MON_DATA
++ skb_monitor = skb_copy_expand(skb, rtap_len, 0, GFP_ATOMIC);
++ skb_monitor->data += (msdu_offset + 2); // sdio/usb word align
++
++ //Save frame length
++ frm_len = le32_to_cpu(hw_rxhdr->hwvect.len);
++ #endif
++ }
++
++ skb_reset_tail_pointer(skb_monitor);
++ skb_monitor->len = 0;
++ skb_put(skb_monitor, frm_len);
++
++ if (rwnx_rx_monitor(rwnx_hw, rwnx_vif, skb_monitor, hw_rxhdr, rtap_len))
++ dev_kfree_skb(skb_monitor);
++
++ if (status == RX_STAT_MONITOR) {
++ if (skb_monitor != skb) {
++ dev_kfree_skb(skb);
++ }
++ }
++ }
++
++check_len_update:
++ /* Check if we need to update the length */
++ if (status & RX_STAT_LEN_UPDATE) {
++ if (rxdesc){
++ hw_rxhdr->hwvect.len = rxdesc->frame_len;
++ }
++
++ if (status & RX_STAT_ETH_LEN_UPDATE) {
++ /* Update Length Field inside the Ethernet Header */
++ struct ethhdr *hdr = (struct ethhdr *)((u8 *)hw_rxhdr + msdu_offset);
++
++ if (rxdesc){
++ hdr->h_proto = htons(rxdesc->frame_len - sizeof(struct ethhdr));
++ }
++ }
++
++ goto end;
++ }
++
++ /* Check if it must be discarded after informing upper layer */
++ if (status & RX_STAT_SPURIOUS) {
++ struct ieee80211_hdr *hdr;
++
++ hdr = (struct ieee80211_hdr *)(skb->data + msdu_offset);
++ rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx);
++ if (rwnx_vif) {
++ cfg80211_rx_spurious_frame(rwnx_vif->ndev, hdr->addr2, GFP_ATOMIC);
++ }
++ goto end;
++ }
++
++ /* Check if we need to forward the buffer */
++ if (status & RX_STAT_FORWARD) {
++ rwnx_rx_vector_convert(rwnx_hw,
++ &hw_rxhdr->hwvect.rx_vect1,
++ &hw_rxhdr->hwvect.rx_vect2);
++ skb_pull(skb, msdu_offset + 2); // +2 since sdio aligns 58->60
++
++ if(skb->data[1] & 0x80)//htc
++ hdr_len += 4;
++
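++ /*
++ * For data frames (type 0x08), strip the 802.11 MAC header plus any
++ * security header and rebuild a plain 802.3 header from the receiver/
++ * transmitter addresses and the ethertype extracted below.
++ */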
++ if((skb->data[0] & 0x0f)==0x08) {
++ if((skb->data[0] & 0x80) == 0x80) {//qos data
++ hdr_len += 2;//802.11 mac header len
++ tid = skb->data[24] & 0x0F;
++ is_qos = 1;
++ }
++
++ if((skb->data[1] & 0x3) == 0x1) {// to ds
++ memcpy(ra, &skb->data[16], MAC_ADDR_LEN);//destination addr
++ memcpy(ta, &skb->data[10], MAC_ADDR_LEN);//source addr
++ } else if((skb->data[1] & 0x3) == 0x2) { //from ds
++ memcpy(ta, &skb->data[16], MAC_ADDR_LEN);//destination addr
++ memcpy(ra, &skb->data[4], MAC_ADDR_LEN);//BSSID
++ }
++
++ pull_len += (hdr_len + 8);
++#ifdef AICWF_RX_REORDER
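++ /* bytes 22-23 hold the Sequence Control field; keep its 12-bit sequence number */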
++ seq_num = ((skb->data[22]&0xf0)>>4) | (skb->data[23]<<4);
++#endif
++ switch(hw_rxhdr->hwvect.decr_status)
++ {
++ case RWNX_RX_HD_DECR_CCMP128:
++ pull_len += 8;//ccmp_header
++ //skb_pull(&skb->data[skb->len-8], 8); //ccmp_mic_len
++ memcpy(ether_type, &skb->data[hdr_len + 6 + 8], 2);
++ break;
++ case RWNX_RX_HD_DECR_TKIP:
++ pull_len += 8;//tkip_header
++ memcpy(ether_type, &skb->data[hdr_len + 6 + 8], 2);
++ break;
++ case RWNX_RX_HD_DECR_WEP:
++ pull_len += 4;//wep_header
++ memcpy(ether_type, &skb->data[hdr_len + 6 + 4], 2);
++ break;
++ case RWNX_RX_HD_DECR_WAPI:
++ pull_len += 18;//wapi_header
++ memcpy(ether_type, &skb->data[hdr_len + 6 + 18], 2);
++ break;
++
++ default:
++ memcpy(ether_type, &skb->data[hdr_len + 6], 2);
++ break;
++ }
++
++ skb_pull(skb, pull_len);
++ skb_push(skb, 14);
++ //fill 802.3 header
++ memcpy(skb->data, ra, MAC_ADDR_LEN);//destination addr
++ memcpy(&skb->data[6], ta, MAC_ADDR_LEN);//source addr
++ memcpy(&skb->data[12], ether_type, 2);//802.3 type
++ }
++
++ if (hw_rxhdr->flags_is_80211_mpdu) {
++ remove_sec_hdr_mgmt_frame(hw_rxhdr,skb);
++ rwnx_rx_mgmt_any(rwnx_hw, skb, hw_rxhdr);
++ } else {
++ rwnx_vif = rwnx_rx_get_vif(rwnx_hw, hw_rxhdr->flags_vif_idx);
++
++ if (!rwnx_vif) {
++ dev_err(rwnx_hw->dev, "Frame received but no active vif (%d)",
++ hw_rxhdr->flags_vif_idx);
++ dev_kfree_skb(skb);
++ goto end;
++ }
++
++ if (hw_rxhdr->flags_sta_idx != RWNX_INVALID_STA) {
++ struct rwnx_sta *sta;
++
++ sta = &rwnx_hw->sta_table[hw_rxhdr->flags_sta_idx];
++ rwnx_rx_statistic(rwnx_hw, hw_rxhdr, sta);
++
++ if (sta->vlan_idx != rwnx_vif->vif_index) {
++ rwnx_vif = rwnx_hw->vif_table[sta->vlan_idx];
++ if (!rwnx_vif) {
++ dev_kfree_skb(skb);
++ goto end;
++ }
++ }
++
++ if (hw_rxhdr->flags_is_4addr && !rwnx_vif->use_4addr) {
++ cfg80211_rx_unexpected_4addr_frame(rwnx_vif->ndev,
++ sta->mac_addr, GFP_ATOMIC);
++ }
++ }
++
++ skb->priority = 256 + tid;//hw_rxhdr->flags_user_prio;
++
++#ifdef AICWF_RX_REORDER
++ rx_priv_tmp = rx_priv;
++ rx_priv_tmp->rwnx_vif = (void *)rwnx_vif;
++
++ if( (rwnx_vif->wdev.iftype == NL80211_IFTYPE_STATION) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) ){
++ if(is_qos && hw_rxhdr->flags_need_reord){
++ reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 1);
++ }else if(is_qos && !hw_rxhdr->flags_need_reord) {
++ reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid);
++ if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr))
++ dev_kfree_skb(skb);
++ }
++ else {
++ if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr))
++ dev_kfree_skb(skb);
++ }
++ } else if( (rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP) || (rwnx_vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) ) {
++ #if 1
++ const struct ethhdr *eth;
++ resend = false;
++ forward = true;
++ skb_reset_mac_header(skb);
++ eth = eth_hdr(skb);
++ //printk("da:%pM, %x,%x, len=%d\n", eth->h_dest, skb->data[12], skb->data[13], skb->len);
++
++ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
++ /* broadcast pkt needs to be forwarded to the upper layer and resent
++ on the wireless interface */
++ resend = true;
++ } else {
++ /* unicast pkt for a STA inside the BSS: no need to forward to the
++ upper layer, simply resend on the wireless interface */
++ if (hw_rxhdr->flags_dst_idx != RWNX_INVALID_STA) {
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[hw_rxhdr->flags_dst_idx];
++ if (sta->valid && (sta->vlan_idx == rwnx_vif->vif_index)) {
++ resend = true;
++ forward = false;
++ }
++ }
++ }
++
++ if(resend){
++ rwnx_rx_data_skb_resend(rwnx_hw, rwnx_vif, skb, hw_rxhdr);
++ }
++
++ if(forward) {
++ if (is_qos && hw_rxhdr->flags_need_reord)
++ reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 1);
++ else if (is_qos && !hw_rxhdr->flags_need_reord) {
++ reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid);
++ rwnx_rx_data_skb_forward(rwnx_hw, rwnx_vif, skb, hw_rxhdr);
++ } else
++ rwnx_rx_data_skb_forward(rwnx_hw, rwnx_vif, skb, hw_rxhdr);
++ } else if(resend) {
++ if (is_qos && hw_rxhdr->flags_need_reord)
++ reord_process_unit((struct aicwf_rx_priv *)rx_priv, skb, seq_num, tid, 0);
++ else if (is_qos && !hw_rxhdr->flags_need_reord) {
++ reord_flush_tid((struct aicwf_rx_priv *)rx_priv, skb, tid);
++ dev_kfree_skb(skb);
++ }
++ } else
++ dev_kfree_skb(skb);
++ #else
++ if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr))
++ dev_kfree_skb(skb);
++ #endif
++ }
++#else
++ if (!rwnx_rx_data_skb(rwnx_hw, rwnx_vif, skb, hw_rxhdr))
++ dev_kfree_skb(skb);
++#endif
++ }
++ }
++
++end:
++ REG_SW_CLEAR_PROFILING(rwnx_hw, SW_PROF_RWNXDATAIND);
++ return 0;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_rx.h
+@@ -0,0 +1,402 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_rx.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#ifndef _RWNX_RX_H_
++#define _RWNX_RX_H_
++
++#include "aicwf_txrxif.h"
++
++#define SERVER_PORT 67
++#define CLIENT_PORT 68
++#define DHCP_MAGIC 0x63825363
++#define DHCP_ACK 5
++#define DHCP_OPTION_MESSAGE_TYPE 53 /* RFC 2132 9.6, important for DHCP */
++#define DHCP_OPTION_END 255
++
++enum rx_status_bits
++{
++ /// The buffer can be forwarded to the networking stack
++ RX_STAT_FORWARD = 1 << 0,
++ /// A new buffer has to be allocated
++ RX_STAT_ALLOC = 1 << 1,
++ /// The buffer has to be deleted
++ RX_STAT_DELETE = 1 << 2,
++ /// The length of the buffer has to be updated
++ RX_STAT_LEN_UPDATE = 1 << 3,
++ /// The length in the Ethernet header has to be updated
++ RX_STAT_ETH_LEN_UPDATE = 1 << 4,
++ /// Simple copy
++ RX_STAT_COPY = 1 << 5,
++ /// Spurious frame (inform upper layer and discard)
++ RX_STAT_SPURIOUS = 1 << 6,
++ /// packet for monitor interface
++ RX_STAT_MONITOR = 1 << 7,
++};
++
++
++/*
++ * Decryption status subfields.
++ * {
++ */
++#define RWNX_RX_HD_DECR_UNENC 0 // ENCRYPTION TYPE NONE
++#define RWNX_RX_HD_DECR_WEP 1 // ENCRYPTION TYPE WEP
++#define RWNX_RX_HD_DECR_TKIP 2 // ENCRYPTION TYPE TKIP
++#define RWNX_RX_HD_DECR_CCMP128 3 // ENCRYPTION TYPE CCMP128
++#define RWNX_RX_HD_DECR_CCMP256 4 // ENCRYPTION TYPE CCMP256
++#define RWNX_RX_HD_DECR_GCMP128 5 // ENCRYPTION TYPE GCMP128
++#define RWNX_RX_HD_DECR_GCMP256 6 // ENCRYPTION TYPE GCMP256
++#define RWNX_RX_HD_DECR_WAPI 7 // ENCRYPTION TYPE WAPI
++// @}
++
++//#ifdef CONFIG_RWNX_MON_DATA
++#if 0
++#define RX_MACHDR_BACKUP_LEN 64
++#endif
++
++struct rx_vector_1_old {
++ /** Receive Vector 1a */
++ u32 leg_length :12;
++ u32 leg_rate : 4;
++ u32 ht_length :16;
++
++ /** Receive Vector 1b */
++ u32 _ht_length : 4; // FIXME
++ u32 short_gi : 1;
++ u32 stbc : 2;
++ u32 smoothing : 1;
++ u32 mcs : 7;
++ u32 pre_type : 1;
++ u32 format_mod : 3;
++ u32 ch_bw : 2;
++ u32 n_sts : 3;
++ u32 lsig_valid : 1;
++ u32 sounding : 1;
++ u32 num_extn_ss : 2;
++ u32 aggregation : 1;
++ u32 fec_coding : 1;
++ u32 dyn_bw : 1;
++ u32 doze_not_allowed : 1;
++
++ /** Receive Vector 1c */
++ u32 antenna_set : 8;
++ u32 partial_aid : 9;
++ u32 group_id : 6;
++ u32 first_user : 1;
++ s32 rssi1 : 8;
++
++ /** Receive Vector 1d */
++ s32 rssi2 : 8;
++ s32 rssi3 : 8;
++ s32 rssi4 : 8;
++ u32 reserved_1d : 8;
++};
++
++struct rx_leg_vect
++{
++ u8 dyn_bw_in_non_ht : 1;
++ u8 chn_bw_in_non_ht : 2;
++ u8 rsvd_nht : 4;
++ u8 lsig_valid : 1;
++} __packed;
++
++struct rx_ht_vect
++{
++ u16 sounding : 1;
++ u16 smoothing : 1;
++ u16 short_gi : 1;
++ u16 aggregation : 1;
++ u16 stbc : 1;
++ u16 num_extn_ss : 2;
++ u16 lsig_valid : 1;
++ u16 mcs : 7;
++ u16 fec : 1;
++ u16 length :16;
++} __packed;
++
++struct rx_vht_vect
++{
++ u8 sounding : 1;
++ u8 beamformed : 1;
++ u8 short_gi : 1;
++ u8 rsvd_vht1 : 1;
++ u8 stbc : 1;
++ u8 doze_not_allowed : 1;
++ u8 first_user : 1;
++ u8 rsvd_vht2 : 1;
++ u16 partial_aid : 9;
++ u16 group_id : 6;
++ u16 rsvd_vht3 : 1;
++ u32 mcs : 4;
++ u32 nss : 3;
++ u32 fec : 1;
++ u32 length :20;
++ u32 rsvd_vht4 : 4;
++} __packed;
++
++struct rx_he_vect
++{
++ u8 sounding : 1;
++ u8 beamformed : 1;
++ u8 gi_type : 2;
++ u8 stbc : 1;
++ u8 rsvd_he1 : 3;
++
++ u8 uplink_flag : 1;
++ u8 beam_change : 1;
++ u8 dcm : 1;
++ u8 he_ltf_type : 2;
++ u8 doppler : 1;
++ u8 rsvd_he2 : 2;
++
++ u8 bss_color : 6;
++ u8 rsvd_he3 : 2;
++
++ u8 txop_duration : 7;
++ u8 rsvd_he4 : 1;
++
++ u8 pe_duration : 4;
++ u8 spatial_reuse : 4;
++
++ u8 sig_b_comp_mode : 1;
++ u8 dcm_sig_b : 1;
++ u8 mcs_sig_b : 3;
++ u8 ru_size : 3;
++
++ u32 mcs : 4;
++ u32 nss : 3;
++ u32 fec : 1;
++ u32 length :20;
++ u32 rsvd_he6 : 4;
++} __packed;
++
++struct rx_vector_1 {
++ u8 format_mod : 4;
++ u8 ch_bw : 3;
++ u8 pre_type : 1;
++ u8 antenna_set : 8;
++ s32 rssi_leg : 8;
++ u32 leg_length :12;
++ u32 leg_rate : 4;
++ s32 rssi1 : 8;
++
++ union
++ {
++ struct rx_leg_vect leg;
++ struct rx_ht_vect ht;
++ struct rx_vht_vect vht;
++ struct rx_he_vect he;
++ };
++} __packed;
++
++struct rx_vector_2_old {
++ /** Receive Vector 2a */
++ u32 rcpi : 8;
++ u32 evm1 : 8;
++ u32 evm2 : 8;
++ u32 evm3 : 8;
++
++ /** Receive Vector 2b */
++ u32 evm4 : 8;
++ u32 reserved2b_1 : 8;
++ u32 reserved2b_2 : 8;
++ u32 reserved2b_3 : 8;
++
++};
++
++struct rx_vector_2 {
++ /** Receive Vector 2a */
++ u32 rcpi1 : 8;
++ u32 rcpi2 : 8;
++ u32 rcpi3 : 8;
++ u32 rcpi4 : 8;
++
++ /** Receive Vector 2b */
++ u32 evm1 : 8;
++ u32 evm2 : 8;
++ u32 evm3 : 8;
++ u32 evm4 : 8;
++};
++
++struct phy_channel_info_desc {
++ /** PHY channel information 1 */
++ u32 phy_band : 8;
++ u32 phy_channel_type : 8;
++ u32 phy_prim20_freq : 16;
++ /** PHY channel information 2 */
++ u32 phy_center1_freq : 16;
++ u32 phy_center2_freq : 16;
++};
++
++struct hw_vect {
++ /** Total length for the MPDU transfer */
++ u32 len :16;
++
++ u32 reserved : 8;//data type is included
++ /** AMPDU Status Information */
++ u32 mpdu_cnt : 6;
++ u32 ampdu_cnt : 2;
++
++ /** TSF Low */
++ __le32 tsf_lo;
++ /** TSF High */
++ __le32 tsf_hi;
++
++ /** Receive Vector 1 */
++ struct rx_vector_1 rx_vect1;
++ /** Receive Vector 2 */
++ struct rx_vector_2 rx_vect2;
++
++ /** Status **/
++ u32 rx_vect2_valid : 1;
++ u32 resp_frame : 1;
++ /** Decryption Status */
++ u32 decr_status : 3;
++ u32 rx_fifo_oflow : 1;
++
++ /** Frame Unsuccessful */
++ u32 undef_err : 1;
++ u32 phy_err : 1;
++ u32 fcs_err : 1;
++ u32 addr_mismatch : 1;
++ u32 ga_frame : 1;
++ u32 current_ac : 2;
++
++ u32 frm_successful_rx : 1;
++ /** Descriptor Done */
++ u32 desc_done_rx : 1;
++ /** Key Storage RAM Index */
++ u32 key_sram_index : 10;
++ /** Key Storage RAM Index Valid */
++ u32 key_sram_v : 1;
++ u32 type : 2;
++ u32 subtype : 4;
++};
++
++//#ifdef CONFIG_RWNX_MON_DATA
++#if 0
++/// MAC header backup descriptor
++struct mon_machdrdesc
++{
++ /// Length of the buffer
++ u32 buf_len;
++ /// Buffer containing mac header, LLC and SNAP
++ u8 buffer[RX_MACHDR_BACKUP_LEN];
++};
++#endif
++
++struct hw_rxhdr {
++ /** RX vector */
++ struct hw_vect hwvect;
++
++ /** PHY channel information */
++ struct phy_channel_info_desc phy_info;
++
++ /** RX flags */
++ u32 flags_is_amsdu : 1;
++ u32 flags_is_80211_mpdu: 1;
++ u32 flags_is_4addr : 1;
++ u32 flags_new_peer : 1;
++#if defined(AICWF_SDIO_SUPPORT) || defined(AICWF_USB_SUPPORT)
++ u32 flags_user_prio : 1; // aic: fw no longer fills this field
++ u32 flags_need_reord : 1;
++ u32 flags_upload : 1;
++#else
++ u32 flags_user_prio : 3;
++#endif
++#ifndef AICWF_RX_REORDER
++ u32 flags_rsvd0 : 1;
++#else
++ u32 is_monitor_vif : 1;
++#endif
++ u32 flags_vif_idx : 8; // 0xFF if invalid VIF index
++ u32 flags_sta_idx : 8; // 0xFF if invalid STA index
++ u32 flags_dst_idx : 8; // 0xFF if unknown destination STA
++//#ifdef CONFIG_RWNX_MON_DATA
++#if 0
++ /// MAC header backup descriptor (used only for MSDU when there is a monitor and a data interface)
++ struct mon_machdrdesc mac_hdr_backup;
++#endif
++ /** Pattern indicating if the buffer is available for the driver */
++ u32 pattern;
++};
++
++extern const u8 legrates_lut[];
++extern u16 legrates_lut_rate[];
++extern u16 tx_legrates_lut_rate[];
++
++struct DHCPInfo {
++ u8 op;
++ u8 htype;
++ u8 hlen;
++ u8 hops;
++ u32 xid;
++ u16 secs;
++ u16 flags;
++ u32 ciaddr;
++ u32 yiaddr;
++ u32 siaddr;
++ u32 giaddr;
++ u8 chaddr[16];
++ u8 sname[64];
++ u8 file[128];
++ u32 cookie;
++ u8 options[308]; /* 312 - cookie */
++};
++
++u8 rwnx_unsup_rx_vec_ind(void *pthis, void *hostid);
++u8 rwnx_rxdataind(void *pthis, void *hostid);
++u8 rwnx_rxdataind_aicwf(struct rwnx_hw *rwnx_hw, void *hostid, void *rx_priv);
++int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv);
++#ifdef CONFIG_USB_MSG_IN_EP
++int aicwf_process_msg_rxframes(struct aicwf_rx_priv *rx_priv);
++#endif
++
++#ifdef AICWF_ARP_OFFLOAD
++void arpoffload_proc(struct sk_buff *skb, struct rwnx_vif *rwnx_vif);
++#endif
++#ifdef AICWF_RX_REORDER
++struct recv_msdu *reord_rxframe_alloc(spinlock_t *lock, struct list_head *q);
++void reord_rxframe_free(spinlock_t *lock, struct list_head *q, struct list_head *list);
++struct reord_ctrl_info *reord_init_sta( struct aicwf_rx_priv *rx_priv, const u8 *mac_addr);
++void reord_deinit_sta(struct aicwf_rx_priv *rx_priv, struct reord_ctrl_info *reord_info);
++int reord_need_check(struct reord_ctrl *preorder_ctrl, u16 seq_num);
++int reord_rxframe_enqueue(struct reord_ctrl *preorder_ctrl, struct recv_msdu *prframe);
++void reord_timeout_worker(struct work_struct *work);
++int reord_single_frame_ind(struct aicwf_rx_priv *rx_priv, struct recv_msdu *prframe);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
++void reord_timeout_handler (ulong data);
++#else
++void reord_timeout_handler (struct timer_list *t);
++#endif
++
++#endif
++
++#ifdef CONFIG_HE_FOR_OLD_KERNEL
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 197)
++struct element {
++ u8 id;
++ u8 datalen;
++ u8 data[];
++};
++/* element iteration helpers */
++#define for_each_element(_elem, _data, _datalen) \
++ for (_elem = (const struct element *)(_data); \
++ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
++ (int)sizeof(*_elem) && \
++ (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \
++ (int)sizeof(*_elem) + _elem->datalen; \
++ _elem = (const struct element *)(_elem->data + _elem->datalen))
++
++#define for_each_element_id(element, _id, data, datalen) \
++ for_each_element(element, data, datalen) \
++ if (element->id == (_id))
++#endif
++#endif
++
++#endif /* _RWNX_RX_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_strs.c
+@@ -0,0 +1,261 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_strs.c
++ *
++ * @brief Miscellaneous debug strings
++ *
++ * Copyright (C) RivieraWaves 2014-2019
++ *
++ ******************************************************************************
++ */
++
++#include "lmac_msg.h"
++
++static const char *const rwnx_mmid2str[MSG_I(MM_MAX)] = {
++ [MSG_I(MM_RESET_REQ)] = "MM_RESET_REQ",
++ [MSG_I(MM_RESET_CFM)] = "MM_RESET_CFM",
++ [MSG_I(MM_START_REQ)] = "MM_START_REQ",
++ [MSG_I(MM_START_CFM)] = "MM_START_CFM",
++ [MSG_I(MM_VERSION_REQ)] = "MM_VERSION_REQ",
++ [MSG_I(MM_VERSION_CFM)] = "MM_VERSION_CFM",
++ [MSG_I(MM_ADD_IF_REQ)] = "MM_ADD_IF_REQ",
++ [MSG_I(MM_ADD_IF_CFM)] = "MM_ADD_IF_CFM",
++ [MSG_I(MM_REMOVE_IF_REQ)] = "MM_REMOVE_IF_REQ",
++ [MSG_I(MM_REMOVE_IF_CFM)] = "MM_REMOVE_IF_CFM",
++ [MSG_I(MM_STA_ADD_REQ)] = "MM_STA_ADD_REQ",
++ [MSG_I(MM_STA_ADD_CFM)] = "MM_STA_ADD_CFM",
++ [MSG_I(MM_STA_DEL_REQ)] = "MM_STA_DEL_REQ",
++ [MSG_I(MM_STA_DEL_CFM)] = "MM_STA_DEL_CFM",
++ [MSG_I(MM_SET_FILTER_REQ)] = "MM_SET_FILTER_REQ",
++ [MSG_I(MM_SET_FILTER_CFM)] = "MM_SET_FILTER_CFM",
++ [MSG_I(MM_SET_CHANNEL_REQ)] = "MM_SET_CHANNEL_REQ",
++ [MSG_I(MM_SET_CHANNEL_CFM)] = "MM_SET_CHANNEL_CFM",
++ [MSG_I(MM_SET_DTIM_REQ)] = "MM_SET_DTIM_REQ",
++ [MSG_I(MM_SET_DTIM_CFM)] = "MM_SET_DTIM_CFM",
++ [MSG_I(MM_SET_BEACON_INT_REQ)] = "MM_SET_BEACON_INT_REQ",
++ [MSG_I(MM_SET_BEACON_INT_CFM)] = "MM_SET_BEACON_INT_CFM",
++ [MSG_I(MM_SET_BASIC_RATES_REQ)] = "MM_SET_BASIC_RATES_REQ",
++ [MSG_I(MM_SET_BASIC_RATES_CFM)] = "MM_SET_BASIC_RATES_CFM",
++ [MSG_I(MM_SET_BSSID_REQ)] = "MM_SET_BSSID_REQ",
++ [MSG_I(MM_SET_BSSID_CFM)] = "MM_SET_BSSID_CFM",
++ [MSG_I(MM_SET_EDCA_REQ)] = "MM_SET_EDCA_REQ",
++ [MSG_I(MM_SET_EDCA_CFM)] = "MM_SET_EDCA_CFM",
++ [MSG_I(MM_SET_MODE_REQ)] = "MM_SET_MODE_REQ",
++ [MSG_I(MM_SET_MODE_CFM)] = "MM_SET_MODE_CFM",
++ [MSG_I(MM_SET_VIF_STATE_REQ)] = "MM_SET_VIF_STATE_REQ",
++ [MSG_I(MM_SET_VIF_STATE_CFM)] = "MM_SET_VIF_STATE_CFM",
++ [MSG_I(MM_SET_SLOTTIME_REQ)] = "MM_SET_SLOTTIME_REQ",
++ [MSG_I(MM_SET_SLOTTIME_CFM)] = "MM_SET_SLOTTIME_CFM",
++ [MSG_I(MM_SET_IDLE_REQ)] = "MM_SET_IDLE_REQ",
++ [MSG_I(MM_SET_IDLE_CFM)] = "MM_SET_IDLE_CFM",
++ [MSG_I(MM_KEY_ADD_REQ)] = "MM_KEY_ADD_REQ",
++ [MSG_I(MM_KEY_ADD_CFM)] = "MM_KEY_ADD_CFM",
++ [MSG_I(MM_KEY_DEL_REQ)] = "MM_KEY_DEL_REQ",
++ [MSG_I(MM_KEY_DEL_CFM)] = "MM_KEY_DEL_CFM",
++ [MSG_I(MM_BA_ADD_REQ)] = "MM_BA_ADD_REQ",
++ [MSG_I(MM_BA_ADD_CFM)] = "MM_BA_ADD_CFM",
++ [MSG_I(MM_BA_DEL_REQ)] = "MM_BA_DEL_REQ",
++ [MSG_I(MM_BA_DEL_CFM)] = "MM_BA_DEL_CFM",
++ [MSG_I(MM_PRIMARY_TBTT_IND)] = "MM_PRIMARY_TBTT_IND",
++ [MSG_I(MM_SECONDARY_TBTT_IND)] = "MM_SECONDARY_TBTT_IND",
++ [MSG_I(MM_SET_POWER_REQ)] = "MM_SET_POWER_REQ",
++ [MSG_I(MM_SET_POWER_CFM)] = "MM_SET_POWER_CFM",
++ [MSG_I(MM_DBG_TRIGGER_REQ)] = "MM_DBG_TRIGGER_REQ",
++ [MSG_I(MM_SET_PS_MODE_REQ)] = "MM_SET_PS_MODE_REQ",
++ [MSG_I(MM_SET_PS_MODE_CFM)] = "MM_SET_PS_MODE_CFM",
++ [MSG_I(MM_CHAN_CTXT_ADD_REQ)] = "MM_CHAN_CTXT_ADD_REQ",
++ [MSG_I(MM_CHAN_CTXT_ADD_CFM)] = "MM_CHAN_CTXT_ADD_CFM",
++ [MSG_I(MM_CHAN_CTXT_DEL_REQ)] = "MM_CHAN_CTXT_DEL_REQ",
++ [MSG_I(MM_CHAN_CTXT_DEL_CFM)] = "MM_CHAN_CTXT_DEL_CFM",
++ [MSG_I(MM_CHAN_CTXT_LINK_REQ)] = "MM_CHAN_CTXT_LINK_REQ",
++ [MSG_I(MM_CHAN_CTXT_LINK_CFM)] = "MM_CHAN_CTXT_LINK_CFM",
++ [MSG_I(MM_CHAN_CTXT_UNLINK_REQ)] = "MM_CHAN_CTXT_UNLINK_REQ",
++ [MSG_I(MM_CHAN_CTXT_UNLINK_CFM)] = "MM_CHAN_CTXT_UNLINK_CFM",
++ [MSG_I(MM_CHAN_CTXT_UPDATE_REQ)] = "MM_CHAN_CTXT_UPDATE_REQ",
++ [MSG_I(MM_CHAN_CTXT_UPDATE_CFM)] = "MM_CHAN_CTXT_UPDATE_CFM",
++ [MSG_I(MM_CHAN_CTXT_SCHED_REQ)] = "MM_CHAN_CTXT_SCHED_REQ",
++ [MSG_I(MM_CHAN_CTXT_SCHED_CFM)] = "MM_CHAN_CTXT_SCHED_CFM",
++ [MSG_I(MM_BCN_CHANGE_REQ)] = "MM_BCN_CHANGE_REQ",
++ [MSG_I(MM_BCN_CHANGE_CFM)] = "MM_BCN_CHANGE_CFM",
++ [MSG_I(MM_TIM_UPDATE_REQ)] = "MM_TIM_UPDATE_REQ",
++ [MSG_I(MM_TIM_UPDATE_CFM)] = "MM_TIM_UPDATE_CFM",
++ [MSG_I(MM_CONNECTION_LOSS_IND)] = "MM_CONNECTION_LOSS_IND",
++ [MSG_I(MM_CHANNEL_SWITCH_IND)] = "MM_CHANNEL_SWITCH_IND",
++ [MSG_I(MM_CHANNEL_PRE_SWITCH_IND)] = "MM_CHANNEL_PRE_SWITCH_IND",
++ [MSG_I(MM_REMAIN_ON_CHANNEL_REQ)] = "MM_REMAIN_ON_CHANNEL_REQ",
++ [MSG_I(MM_REMAIN_ON_CHANNEL_CFM)] = "MM_REMAIN_ON_CHANNEL_CFM",
++ [MSG_I(MM_REMAIN_ON_CHANNEL_EXP_IND)] = "MM_REMAIN_ON_CHANNEL_EXP_IND",
++ [MSG_I(MM_PS_CHANGE_IND)] = "MM_PS_CHANGE_IND",
++ [MSG_I(MM_TRAFFIC_REQ_IND)] = "MM_TRAFFIC_REQ_IND",
++ [MSG_I(MM_SET_PS_OPTIONS_REQ)] = "MM_SET_PS_OPTIONS_REQ",
++ [MSG_I(MM_SET_PS_OPTIONS_CFM)] = "MM_SET_PS_OPTIONS_CFM",
++ [MSG_I(MM_P2P_VIF_PS_CHANGE_IND)] = "MM_P2P_VIF_PS_CHANGE_IND",
++ [MSG_I(MM_CSA_COUNTER_IND)] = "MM_CSA_COUNTER_IND",
++ [MSG_I(MM_CHANNEL_SURVEY_IND)] = "MM_CHANNEL_SURVEY_IND",
++ [MSG_I(MM_SET_P2P_NOA_REQ)] = "MM_SET_P2P_NOA_REQ",
++ [MSG_I(MM_SET_P2P_OPPPS_REQ)] = "MM_SET_P2P_OPPPS_REQ",
++ [MSG_I(MM_SET_P2P_NOA_CFM)] = "MM_SET_P2P_NOA_CFM",
++ [MSG_I(MM_SET_P2P_OPPPS_CFM)] = "MM_SET_P2P_OPPPS_CFM",
++ [MSG_I(MM_CFG_RSSI_REQ)] = "MM_CFG_RSSI_REQ",
++ [MSG_I(MM_RSSI_STATUS_IND)] = "MM_RSSI_STATUS_IND",
++ [MSG_I(MM_CSA_FINISH_IND)] = "MM_CSA_FINISH_IND",
++ [MSG_I(MM_CSA_TRAFFIC_IND)] = "MM_CSA_TRAFFIC_IND",
++ [MSG_I(MM_MU_GROUP_UPDATE_REQ)] = "MM_MU_GROUP_UPDATE_REQ",
++ [MSG_I(MM_MU_GROUP_UPDATE_CFM)] = "MM_MU_GROUP_UPDATE_CFM",
++
++ [MSG_I(MM_SET_ARPOFFLOAD_REQ)] = "MM_SET_ARPOFFLOAD_REQ",
++ [MSG_I(MM_SET_ARPOFFLOAD_CFM)] = "MM_SET_ARPOFFLOAD_CFM",
++ [MSG_I(MM_SET_AGG_DISABLE_REQ)] = "MM_SET_AGG_DISABLE_REQ",
++ [MSG_I(MM_SET_AGG_DISABLE_CFM)] = "MM_SET_AGG_DISABLE_CFM",
++ [MSG_I(MM_SET_COEX_REQ)] = "MM_SET_COEX_REQ",
++ [MSG_I(MM_SET_COEX_CFM)] = "MM_SET_COEX_CFM",
++ [MSG_I(MM_SET_RF_CONFIG_REQ)] = "MM_SET_RF_CONFIG_REQ",
++ [MSG_I(MM_SET_RF_CONFIG_CFM)] = "MM_SET_RF_CONFIG_CFM",
++ [MSG_I(MM_SET_RF_CALIB_REQ)] = "MM_SET_RF_CALIB_REQ",
++ [MSG_I(MM_SET_RF_CALIB_CFM)] = "MM_SET_RF_CALIB_CFM",
++
++ [MSG_I(MM_GET_MAC_ADDR_REQ)] = "MM_GET_MAC_ADDR_REQ",
++ [MSG_I(MM_GET_MAC_ADDR_CFM)] = "MM_GET_MAC_ADDR_CFM",
++ [MSG_I(MM_GET_STA_INFO_REQ)] = "MM_GET_STA_INFO_REQ",
++ [MSG_I(MM_GET_STA_INFO_CFM)] = "MM_GET_STA_INFO_CFM",
++ [MSG_I(MM_SET_TXPWR_IDX_LVL_REQ)] = "MM_SET_TXPWR_IDX_LVL_REQ",
++ [MSG_I(MM_SET_TXPWR_IDX_LVL_CFM)] = "MM_SET_TXPWR_IDX_LVL_CFM",
++ [MSG_I(MM_SET_TXPWR_OFST_REQ)] = "MM_SET_TXPWR_OFST_REQ",
++ [MSG_I(MM_SET_TXPWR_OFST_CFM)] = "MM_SET_TXPWR_OFST_CFM",
++ [MSG_I(MM_SET_STACK_START_REQ)] = "MM_SET_STACK_START_REQ",
++ [MSG_I(MM_SET_STACK_START_CFM)] = "MM_SET_STACK_START_CFM",
++ [MSG_I(MM_APM_STALOSS_IND)] = "MM_APM_STALOSS_IND",
++ [MSG_I(MM_SET_VENDOR_HWCONFIG_REQ)] = "MM_SET_VENDOR_HWCONFIG_REQ",
++ [MSG_I(MM_SET_VENDOR_HWCONFIG_CFM)] = "MM_SET_VENDOR_HWCONFIG_CFM",
++ [MSG_I(MM_GET_FW_VERSION_REQ)] = "MM_GET_FW_VERSION_REQ",
++ [MSG_I(MM_GET_FW_VERSION_CFM)] = "MM_GET_FW_VERSION_CFM",
++};
++
++static const char *const rwnx_dbgid2str[MSG_I(DBG_MAX)] = {
++ [MSG_I(DBG_MEM_READ_REQ)] = "DBG_MEM_READ_REQ",
++ [MSG_I(DBG_MEM_READ_CFM)] = "DBG_MEM_READ_CFM",
++ [MSG_I(DBG_MEM_WRITE_REQ)] = "DBG_MEM_WRITE_REQ",
++ [MSG_I(DBG_MEM_WRITE_CFM)] = "DBG_MEM_WRITE_CFM",
++ [MSG_I(DBG_SET_MOD_FILTER_REQ)] = "DBG_SET_MOD_FILTER_REQ",
++ [MSG_I(DBG_SET_MOD_FILTER_CFM)] = "DBG_SET_MOD_FILTER_CFM",
++ [MSG_I(DBG_SET_SEV_FILTER_REQ)] = "DBG_SET_SEV_FILTER_REQ",
++ [MSG_I(DBG_SET_SEV_FILTER_CFM)] = "DBG_SET_SEV_FILTER_CFM",
++ [MSG_I(DBG_ERROR_IND)] = "DBG_ERROR_IND",
++ [MSG_I(DBG_GET_SYS_STAT_REQ)] = "DBG_GET_SYS_STAT_REQ",
++ [MSG_I(DBG_GET_SYS_STAT_CFM)] = "DBG_GET_SYS_STAT_CFM",
++};
++
++static const char *const rwnx_scanid2str[MSG_I(SCAN_MAX)] = {
++ [MSG_I(SCAN_START_REQ)] = "SCAN_START_REQ",
++ [MSG_I(SCAN_START_CFM)] = "SCAN_START_CFM",
++ [MSG_I(SCAN_DONE_IND)] = "SCAN_DONE_IND",
++};
++
++static const char *const rwnx_tdlsid2str[MSG_I(TDLS_MAX)] = {
++ [MSG_I(TDLS_CHAN_SWITCH_CFM)] = "TDLS_CHAN_SWITCH_CFM",
++ [MSG_I(TDLS_CHAN_SWITCH_REQ)] = "TDLS_CHAN_SWITCH_REQ",
++ [MSG_I(TDLS_CHAN_SWITCH_IND)] = "TDLS_CHAN_SWITCH_IND",
++ [MSG_I(TDLS_CHAN_SWITCH_BASE_IND)] = "TDLS_CHAN_SWITCH_BASE_IND",
++ [MSG_I(TDLS_CANCEL_CHAN_SWITCH_REQ)] = "TDLS_CANCEL_CHAN_SWITCH_REQ",
++ [MSG_I(TDLS_CANCEL_CHAN_SWITCH_CFM)] = "TDLS_CANCEL_CHAN_SWITCH_CFM",
++ [MSG_I(TDLS_PEER_PS_IND)] = "TDLS_PEER_PS_IND",
++ [MSG_I(TDLS_PEER_TRAFFIC_IND_REQ)] = "TDLS_PEER_TRAFFIC_IND_REQ",
++ [MSG_I(TDLS_PEER_TRAFFIC_IND_CFM)] = "TDLS_PEER_TRAFFIC_IND_CFM",
++};
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++static const char *const rwnx_scanuid2str[MSG_I(SCANU_MAX)] = {
++ [MSG_I(SCANU_START_REQ)] = "SCANU_START_REQ",
++ [MSG_I(SCANU_START_CFM)] = "SCANU_START_CFM",
++ [MSG_I(SCANU_JOIN_REQ)] = "SCANU_JOIN_REQ",
++ [MSG_I(SCANU_JOIN_CFM)] = "SCANU_JOIN_CFM",
++ [MSG_I(SCANU_RESULT_IND)] = "SCANU_RESULT_IND",
++ [MSG_I(SCANU_FAST_REQ)] = "SCANU_FAST_REQ",
++ [MSG_I(SCANU_FAST_CFM)] = "SCANU_FAST_CFM",
++ [MSG_I(SCANU_VENDOR_IE_REQ)] = "SCANU_VENDOR_IE_REQ",
++ [MSG_I(SCANU_VENDOR_IE_CFM)] = "SCANU_VENDOR_IE_CFM",
++ [MSG_I(SCANU_START_CFM_ADDTIONAL)] = "SCANU_START_CFM_ADDTIONAL",
++ [MSG_I(SCANU_CANCEL_REQ)] = "SCANU_CANCEL_REQ",
++ [MSG_I(SCANU_CANCEL_CFM)] = "SCANU_CANCEL_CFM",
++};
++
++static const char *const rwnx_meid2str[MSG_I(ME_MAX)] = {
++ [MSG_I(ME_CONFIG_REQ)] = "ME_CONFIG_REQ",
++ [MSG_I(ME_CONFIG_CFM)] = "ME_CONFIG_CFM",
++ [MSG_I(ME_CHAN_CONFIG_REQ)] = "ME_CHAN_CONFIG_REQ",
++ [MSG_I(ME_CHAN_CONFIG_CFM)] = "ME_CHAN_CONFIG_CFM",
++ [MSG_I(ME_SET_CONTROL_PORT_REQ)] = "ME_SET_CONTROL_PORT_REQ",
++ [MSG_I(ME_SET_CONTROL_PORT_CFM)] = "ME_SET_CONTROL_PORT_CFM",
++ [MSG_I(ME_TKIP_MIC_FAILURE_IND)] = "ME_TKIP_MIC_FAILURE_IND",
++ [MSG_I(ME_STA_ADD_REQ)] = "ME_STA_ADD_REQ",
++ [MSG_I(ME_STA_ADD_CFM)] = "ME_STA_ADD_CFM",
++ [MSG_I(ME_STA_DEL_REQ)] = "ME_STA_DEL_REQ",
++ [MSG_I(ME_STA_DEL_CFM)] = "ME_STA_DEL_CFM",
++ [MSG_I(ME_TX_CREDITS_UPDATE_IND)] = "ME_TX_CREDITS_UPDATE_IND",
++ [MSG_I(ME_RC_STATS_REQ)] = "ME_RC_STATS_REQ",
++ [MSG_I(ME_RC_STATS_CFM)] = "ME_RC_STATS_CFM",
++ [MSG_I(ME_RC_SET_RATE_REQ)] = "ME_RC_SET_RATE_REQ",
++ [MSG_I(ME_TRAFFIC_IND_REQ)] = "ME_TRAFFIC_IND_REQ",
++ [MSG_I(ME_TRAFFIC_IND_CFM)] = "ME_TRAFFIC_IND_CFM",
++ [MSG_I(ME_SET_PS_MODE_REQ)] = "ME_SET_PS_MODE_REQ",
++ [MSG_I(ME_SET_PS_MODE_CFM)] = "ME_SET_PS_MODE_CFM",
++};
++
++static const char *const rwnx_smid2str[MSG_I(SM_MAX)] = {
++ [MSG_I(SM_CONNECT_REQ)] = "SM_CONNECT_REQ",
++ [MSG_I(SM_CONNECT_CFM)] = "SM_CONNECT_CFM",
++ [MSG_I(SM_CONNECT_IND)] = "SM_CONNECT_IND",
++ [MSG_I(SM_DISCONNECT_REQ)] = "SM_DISCONNECT_REQ",
++ [MSG_I(SM_DISCONNECT_CFM)] = "SM_DISCONNECT_CFM",
++ [MSG_I(SM_DISCONNECT_IND)] = "SM_DISCONNECT_IND",
++ [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_IND)] = "SM_EXTERNAL_AUTH_REQUIRED_IND",
++ [MSG_I(SM_EXTERNAL_AUTH_REQUIRED_RSP)] = "SM_EXTERNAL_AUTH_REQUIRED_RSP",
++};
++
++static const char *const rwnx_apmid2str[MSG_I(APM_MAX)] = {
++ [MSG_I(APM_START_REQ)] = "APM_START_REQ",
++ [MSG_I(APM_START_CFM)] = "APM_START_CFM",
++ [MSG_I(APM_STOP_REQ)] = "APM_STOP_REQ",
++ [MSG_I(APM_STOP_CFM)] = "APM_STOP_CFM",
++ [MSG_I(APM_START_CAC_REQ)] = "APM_START_CAC_REQ",
++ [MSG_I(APM_START_CAC_CFM)] = "APM_START_CAC_CFM",
++ [MSG_I(APM_STOP_CAC_REQ)] = "APM_STOP_CAC_REQ",
++ [MSG_I(APM_STOP_CAC_CFM)] = "APM_STOP_CAC_CFM",
++ [MSG_I(APM_SET_BEACON_IE_REQ)] = "APM_SET_BEACON_IE_REQ",
++ [MSG_I(APM_SET_BEACON_IE_CFM)] = "APM_SET_BEACON_IE_CFM",
++};
++
++static const char *const rwnx_meshid2str[MSG_I(MESH_MAX)] = {
++ [MSG_I(MESH_START_REQ)] = "MESH_START_REQ",
++ [MSG_I(MESH_START_CFM)] = "MESH_START_CFM",
++ [MSG_I(MESH_STOP_REQ)] = "MESH_STOP_REQ",
++ [MSG_I(MESH_STOP_CFM)] = "MESH_STOP_CFM",
++ [MSG_I(MESH_UPDATE_REQ)] = "MESH_UPDATE_REQ",
++ [MSG_I(MESH_UPDATE_CFM)] = "MESH_UPDATE_CFM",
++ [MSG_I(MESH_PATH_CREATE_REQ)] = "MESH_PATH_CREATE_REQ",
++ [MSG_I(MESH_PATH_CREATE_CFM)] = "MESH_PATH_CREATE_CFM",
++ [MSG_I(MESH_PATH_UPDATE_REQ)] = "MESH_PATH_UPDATE_REQ",
++ [MSG_I(MESH_PATH_UPDATE_CFM)] = "MESH_PATH_UPDATE_CFM",
++ [MSG_I(MESH_PROXY_ADD_REQ)] = "MESH_PROXY_ADD_REQ",
++ [MSG_I(MESH_PEER_UPDATE_IND)] = "MESH_PEER_UPDATE_IND",
++ [MSG_I(MESH_PATH_UPDATE_IND)] = "MESH_PATH_UPDATE_IND",
++ [MSG_I(MESH_PROXY_UPDATE_IND)] = "MESH_PROXY_UPDATE_IND",
++};
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++const char *const *rwnx_id2str[TASK_LAST_EMB + 1] = {
++ [TASK_MM] = rwnx_mmid2str,
++ [TASK_DBG] = rwnx_dbgid2str,
++ [TASK_SCAN] = rwnx_scanid2str,
++ [TASK_TDLS] = rwnx_tdlsid2str,
++#ifdef CONFIG_RWNX_FULLMAC
++ [TASK_SCANU] = rwnx_scanuid2str,
++ [TASK_ME] = rwnx_meid2str,
++ [TASK_SM] = rwnx_smid2str,
++ [TASK_APM] = rwnx_apmid2str,
++ [TASK_MESH] = rwnx_meshid2str,
++#endif
++};
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_strs.h
+@@ -0,0 +1,31 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_strs.h
++ *
++ * @brief Miscellaneous debug strings
++ *
++ * Copyright (C) RivieraWaves 2014-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef _RWNX_STRS_H_
++#define _RWNX_STRS_H_
++
++#ifdef CONFIG_RWNX_FHOST
++
++#define RWNX_ID2STR(tag) "Cmd"
++
++#else
++#include "lmac_msg.h"
++
++#define RWNX_ID2STR(tag) (((MSG_T(tag) < ARRAY_SIZE(rwnx_id2str)) && \
++ (rwnx_id2str[MSG_T(tag)]) && \
++ ((rwnx_id2str[MSG_T(tag)])[MSG_I(tag)])) ? \
++ (rwnx_id2str[MSG_T(tag)])[MSG_I(tag)] : "unknown")
++
++extern const char *const *rwnx_id2str[TASK_LAST_EMB + 1];
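++
++/*
++ * Typical (illustrative) use: translate a message id into a readable name
++ * when tracing, e.g. printk("msg %s\n", RWNX_ID2STR(msg->id)); where msg->id
++ * stands for any message tag. Out-of-range tags, tasks without a string
++ * table, or holes in a table all resolve to "unknown" thanks to the checks
++ * in the macro above.
++ */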
++#endif /* CONFIG_RWNX_FHOST */
++
++#endif /* _RWNX_STRS_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tdls.c
+@@ -0,0 +1,796 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_tdls.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++/**
++ * INCLUDE FILES
++ ******************************************************************************
++ */
++
++#include "rwnx_tdls.h"
++#include "rwnx_compat.h"
++
++/**
++ * FUNCTION DEFINITIONS
++ ******************************************************************************
++ */
++
++static u16
++rwnx_get_tdls_sta_capab(struct rwnx_vif *rwnx_vif, u16 status_code)
++{
++ u16 capab = 0;
++
++ /* The capability will be 0 when sending a failure code */
++ if (status_code != 0)
++ return capab;
++
++ if (rwnx_vif->sta.ap->band != NL80211_BAND_2GHZ)
++ return capab;
++
++ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
++ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
++
++ return capab;
++}
++
++static int
++rwnx_tdls_prepare_encap_data(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ const u8 *peer, u8 action_code, u8 dialog_token,
++ u16 status_code, struct sk_buff *skb)
++{
++ struct ieee80211_tdls_data *tf;
++ tf = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_data) - sizeof(tf->u));
++
++ // set eth header
++ memcpy(tf->da, peer, ETH_ALEN);
++ memcpy(tf->sa, rwnx_hw->wiphy->perm_addr, ETH_ALEN);
++ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
++
++ // set common TDLS info
++ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
++ tf->category = WLAN_CATEGORY_TDLS;
++ tf->action_code = action_code;
++
++ // set action specific TDLS info
++ switch (action_code) {
++ case WLAN_TDLS_SETUP_REQUEST:
++ skb_put(skb, sizeof(tf->u.setup_req));
++ tf->u.setup_req.dialog_token = dialog_token;
++ tf->u.setup_req.capability =
++ cpu_to_le16(rwnx_get_tdls_sta_capab(rwnx_vif, status_code));
++ break;
++
++ case WLAN_TDLS_SETUP_RESPONSE:
++ skb_put(skb, sizeof(tf->u.setup_resp));
++ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
++ tf->u.setup_resp.dialog_token = dialog_token;
++ tf->u.setup_resp.capability =
++ cpu_to_le16(rwnx_get_tdls_sta_capab(rwnx_vif, status_code));
++ break;
++
++ case WLAN_TDLS_SETUP_CONFIRM:
++ skb_put(skb, sizeof(tf->u.setup_cfm));
++ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
++ tf->u.setup_cfm.dialog_token = dialog_token;
++ break;
++
++ case WLAN_TDLS_TEARDOWN:
++ skb_put(skb, sizeof(tf->u.teardown));
++ tf->u.teardown.reason_code = cpu_to_le16(status_code);
++ break;
++
++ case WLAN_TDLS_DISCOVERY_REQUEST:
++ skb_put(skb, sizeof(tf->u.discover_req));
++ tf->u.discover_req.dialog_token = dialog_token;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++rwnx_prep_tdls_direct(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ const u8 *peer, u8 action_code, u8 dialog_token,
++ u16 status_code, struct sk_buff *skb)
++{
++ struct ieee80211_mgmt *mgmt;
++
++ mgmt = (void *)skb_put(skb, 24);
++ memset(mgmt, 0, 24);
++ memcpy(mgmt->da, peer, ETH_ALEN);
++ memcpy(mgmt->sa, rwnx_hw->wiphy->perm_addr, ETH_ALEN);
++ memcpy(mgmt->bssid, rwnx_vif->sta.ap->mac_addr, ETH_ALEN);
++
++ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
++ IEEE80211_STYPE_ACTION);
++
++ switch (action_code) {
++ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
++ skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
++ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
++ mgmt->u.action.u.tdls_discover_resp.action_code = WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
++ mgmt->u.action.u.tdls_discover_resp.dialog_token = dialog_token;
++ mgmt->u.action.u.tdls_discover_resp.capability =
++ cpu_to_le16(rwnx_get_tdls_sta_capab(rwnx_vif, status_code));
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int
++rwnx_add_srates_ie(struct rwnx_hw *rwnx_hw, struct sk_buff *skb)
++{
++ u8 i, rates, *pos;
++ int rate;
++ struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ];
++
++ rates = 8;
++
++ if (skb_tailroom(skb) < rates + 2)
++ return -ENOMEM;
++
++ pos = skb_put(skb, rates + 2);
++ *pos++ = WLAN_EID_SUPP_RATES;
++ *pos++ = rates;
++ for (i = 0; i < rates; i++) {
++ rate = rwnx_band_2GHz->bitrates[i].bitrate;
++ rate = DIV_ROUND_UP(rate, 5);
++ *pos++ = (u8)rate;
++ }
++
++ return 0;
++}
++
++static int
++rwnx_add_ext_srates_ie(struct rwnx_hw *rwnx_hw, struct sk_buff *skb)
++{
++ u8 i, exrates, *pos;
++ int rate;
++ struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ];
++
++ exrates = rwnx_band_2GHz->n_bitrates - 8;
++
++ if (skb_tailroom(skb) < exrates + 2)
++ return -ENOMEM;
++
++ pos = skb_put(skb, exrates + 2);
++ *pos++ = WLAN_EID_EXT_SUPP_RATES;
++ *pos++ = exrates;
++ for (i = 8; i < (8+exrates); i++) {
++ rate = rwnx_band_2GHz->bitrates[i].bitrate;
++ rate = DIV_ROUND_UP(rate, 5);
++ *pos++ = (u8)rate;
++ }
++
++ return 0;
++}
++
++static void
++rwnx_tdls_add_supp_channels(struct rwnx_hw *rwnx_hw, struct sk_buff *skb)
++{
++ /*
++ * Add possible channels for TDLS. These are channels that are allowed
++ * to be active.
++ */
++ u8 subband_cnt = 0;
++ u8 *pos_subband;
++ u8 *pos = skb_put(skb, 2);
++ struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ];
++ //#ifdef USE_5G
++ struct ieee80211_supported_band *rwnx_band_5GHz = rwnx_hw->wiphy->bands[NL80211_BAND_5GHZ];
++ //#endif
++
++ *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
++
++ /*
++ * 5GHz and 2GHz channel numbers can overlap. Ignore this for now, as
++ * this doesn't happen in real-world scenarios.
++ */
++
++ /* 2GHz, with 5MHz spacing */
++ pos_subband = skb_put(skb, 2);
++ if (rwnx_band_2GHz->n_channels > 0)
++ {
++ *pos_subband++ = ieee80211_frequency_to_channel(rwnx_band_2GHz->channels[0].center_freq);
++ *pos_subband++ = rwnx_band_2GHz->n_channels;
++ subband_cnt++;
++ }
++
++ /* 5GHz, with 20MHz spacing */
++ pos_subband = skb_put(skb, 2);
++ //#ifdef USE_5G
++ if(rwnx_hw->band_5g_support){
++ if (rwnx_band_5GHz->n_channels > 0)
++ {
++ *pos_subband++ = ieee80211_frequency_to_channel(rwnx_band_5GHz->channels[0].center_freq);
++ *pos_subband++ = rwnx_band_5GHz->n_channels;
++ subband_cnt++;
++ }
++ }
++ //#endif
++ /* length */
++ *pos = 2 * subband_cnt;
++}
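++
++/*
++ * The Supported Channels IE built above is a list of (first channel, number
++ * of channels) pairs, one pair per subband. For example (illustrative), a
++ * 2.4 GHz band whose first channel is 1 with 13 channels and a 5 GHz band
++ * starting at channel 36 with 25 channels yield a 4-byte payload holding the
++ * pairs (1, 13) and (36, 25).
++ */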
++
++static void
++rwnx_tdls_add_ext_capab(struct rwnx_hw *rwnx_hw, struct sk_buff *skb)
++{
++ u8 *pos = (void *)skb_put(skb, 7);
++ bool chan_switch = rwnx_hw->wiphy->features &
++ NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
++
++ *pos++ = WLAN_EID_EXT_CAPABILITY;
++ *pos++ = 5; /* len */
++ *pos++ = 0x0;
++ *pos++ = 0x0;
++ *pos++ = 0x0;
++ *pos++ = WLAN_EXT_CAPA4_TDLS_BUFFER_STA |
++ (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0);
++ *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
++}
++
++static void
++rwnx_add_wmm_info_ie(struct sk_buff *skb, u8 qosinfo)
++{
++ u8 *pos = (void *)skb_put(skb, 9);
++
++ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
++ *pos++ = 7; /* len */
++ *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
++ *pos++ = 0x50;
++ *pos++ = 0xf2;
++ *pos++ = 2; /* WME */
++ *pos++ = 0; /* WME info */
++ *pos++ = 1; /* WME ver */
++ *pos++ = qosinfo; /* U-APSD not in use */
++}
++
++/* translate numbering in the WMM parameter IE to the mac80211 notation */
++static u8 rwnx_ac_from_wmm(int ac)
++{
++ switch (ac) {
++ default:
++ WARN_ON_ONCE(1);
++ case 0:
++ return AC_BE;
++ case 1:
++ return AC_BK;
++ case 2:
++ return AC_VI;
++ case 3:
++ return AC_VO;
++ }
++}
++
++static void
++rwnx_add_wmm_param_ie(struct sk_buff *skb, u8 acm_bits, u32 *ac_params)
++{
++ struct ieee80211_wmm_param_ie *wmm;
++ int i, j;
++ u8 cw_min, cw_max;
++ bool acm;
++
++ wmm = (void *)skb_put(skb, sizeof(struct ieee80211_wmm_param_ie));
++ memset(wmm, 0, sizeof(*wmm));
++
++ wmm->element_id = WLAN_EID_VENDOR_SPECIFIC;
++ wmm->len = sizeof(*wmm) - 2;
++
++ wmm->oui[0] = 0x00; /* Microsoft OUI 00:50:F2 */
++ wmm->oui[1] = 0x50;
++ wmm->oui[2] = 0xf2;
++ wmm->oui_type = 2; /* WME */
++ wmm->oui_subtype = 1; /* WME param */
++ wmm->version = 1; /* WME ver */
++ wmm->qos_info = 0; /* U-APSD not in use */
++
++ /*
++ * Use the EDCA parameters defined for the BSS, or default if the AP
++ * doesn't support it, as mandated by 802.11-2012 section 10.22.4
++ */
++ for (i = 0; i < AC_MAX; i++) {
++ j = rwnx_ac_from_wmm(i);
++ cw_min = (ac_params[j] & 0xF0) >> 4;
++ cw_max = (ac_params[j] & 0xF00) >> 8;
++ acm = (acm_bits & (1 << j)) != 0;
++
++ wmm->ac[i].aci_aifsn = (i << 5) | (acm << 4) | (ac_params[j] & 0xF);
++ wmm->ac[i].cw = (cw_max << 4) | cw_min;
++ wmm->ac[i].txop_limit = (ac_params[j] & 0x0FFFF000) >> 12;
++ }
++}
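++
++/*
++ * Layout assumed for each ac_params[] word by the extraction above
++ * (derived from the masks and shifts used in rwnx_add_wmm_param_ie):
++ *   bits  0..3  : AIFSN
++ *   bits  4..7  : ECWmin (CWmin exponent)
++ *   bits  8..11 : ECWmax (CWmax exponent)
++ *   bits 12..27 : TXOP limit
++ * For example, a word of 0x002F5424 unpacks as AIFSN=4, ECWmin=2, ECWmax=4,
++ * TXOP=0x2F5.
++ */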
++
++static void
++rwnx_tdls_add_oper_classes(struct rwnx_vif *rwnx_vif, struct sk_buff *skb)
++{
++ u8 *pos;
++ u8 op_class;
++ struct cfg80211_chan_def chan_def;
++ struct ieee80211_channel chan;
++
++ chan.band = rwnx_vif->sta.ap->band;
++ chan.center_freq = rwnx_vif->sta.ap->center_freq;
++ chan_def.chan = &chan;
++ chan_def.width = rwnx_vif->sta.ap->width;
++ chan_def.center_freq1 = rwnx_vif->sta.ap->center_freq1;
++ chan_def.center_freq2 = rwnx_vif->sta.ap->center_freq2;
++
++ if (!ieee80211_chandef_to_operating_class(&chan_def, &op_class))
++ return;
++
++ pos = skb_put(skb, 4);
++ *pos++ = WLAN_EID_SUPPORTED_REGULATORY_CLASSES;
++ *pos++ = 2; /* len */
++
++ // current op class
++ *pos++ = op_class;
++ *pos++ = op_class; /* give current operating class as alternate too */
++
++ // need to add 5GHz classes?
++}
++
++static void
++rwnx_ie_build_ht_cap(struct sk_buff *skb, struct ieee80211_sta_ht_cap *ht_cap,
++ u16 cap)
++{
++ u8 *pos;
++ __le16 tmp;
++
++ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
++ *pos++ = WLAN_EID_HT_CAPABILITY;
++ *pos++ = sizeof(struct ieee80211_ht_cap);
++ memset(pos, 0, sizeof(struct ieee80211_ht_cap));
++
++ /* capability flags */
++ tmp = cpu_to_le16(cap);
++ memcpy(pos, &tmp, sizeof(u16));
++ pos += sizeof(u16);
++
++ /* AMPDU parameters */
++ *pos++ = ht_cap->ampdu_factor |
++ (ht_cap->ampdu_density <<
++ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
++
++ /* MCS set */
++ memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs));
++ pos += sizeof(ht_cap->mcs);
++
++ /* extended capabilities */
++ pos += sizeof(__le16);
++
++ /* BF capabilities */
++ pos += sizeof(__le32);
++
++ /* antenna selection */
++ pos += sizeof(u8);
++}
++
++static void
++rwnx_ie_build_vht_cap(struct sk_buff *skb, struct ieee80211_sta_vht_cap *vht_cap,
++ u32 cap)
++{
++ u8 *pos;
++ __le32 tmp;
++
++ pos = skb_put(skb, 14);
++
++ *pos++ = WLAN_EID_VHT_CAPABILITY;
++ *pos++ = sizeof(struct ieee80211_vht_cap);
++ memset(pos, 0, sizeof(struct ieee80211_vht_cap));
++
++ /* capability flags */
++ tmp = cpu_to_le32(cap);
++ memcpy(pos, &tmp, sizeof(u32));
++ pos += sizeof(u32);
++
++ /* VHT MCS set */
++ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs));
++ pos += sizeof(vht_cap->vht_mcs);
++}
++
++static void
++rwnx_tdls_add_bss_coex_ie(struct sk_buff *skb)
++{
++ u8 *pos = (void *)skb_put(skb, 3);
++
++ *pos++ = WLAN_EID_BSS_COEX_2040;
++ *pos++ = 1; /* len */
++
++ *pos++ = WLAN_BSS_COEX_INFORMATION_REQUEST;
++}
++
++static void
++rwnx_tdls_add_link_ie(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, const u8 *peer,
++ bool initiator)
++{
++ struct ieee80211_tdls_lnkie *lnkid;
++ const u8 *init_addr, *rsp_addr;
++
++ if (initiator) {
++ init_addr = rwnx_hw->wiphy->perm_addr;
++ rsp_addr = peer;
++ } else {
++ init_addr = peer;
++ rsp_addr = rwnx_hw->wiphy->perm_addr;
++ }
++
++ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
++
++ lnkid->ie_type = WLAN_EID_LINK_ID;
++ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
++
++ memcpy(lnkid->bssid, rwnx_vif->sta.ap->mac_addr, ETH_ALEN);
++ memcpy(lnkid->init_sta, init_addr, ETH_ALEN);
++ memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN);
++}
++
++static void
++rwnx_tdls_add_aid_ie(struct rwnx_vif *rwnx_vif, struct sk_buff *skb)
++{
++ u8 *pos = (void *)skb_put(skb, 4);
++
++ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0)
++ *pos++ = WLAN_EID_AID;
++ #else
++ *pos++ = 197;
++ #endif
++ *pos++ = 2; /* len */
++ *pos++ = rwnx_vif->sta.ap->aid;
++}
++
++static u8 *
++rwnx_ie_build_ht_oper(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
++ u16 prot_mode)
++{
++ struct ieee80211_ht_operation *ht_oper;
++ /* Build HT Information */
++ *pos++ = WLAN_EID_HT_OPERATION;
++ *pos++ = sizeof(struct ieee80211_ht_operation);
++ ht_oper = (struct ieee80211_ht_operation *)pos;
++ ht_oper->primary_chan = ieee80211_frequency_to_channel(
++ rwnx_vif->sta.ap->center_freq);
++ switch (rwnx_vif->sta.ap->width) {
++ case NL80211_CHAN_WIDTH_160:
++ case NL80211_CHAN_WIDTH_80P80:
++ case NL80211_CHAN_WIDTH_80:
++ case NL80211_CHAN_WIDTH_40:
++ if (rwnx_vif->sta.ap->center_freq1 > rwnx_vif->sta.ap->center_freq)
++ ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
++ else
++ ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
++ break;
++ default:
++ ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
++ break;
++ }
++ if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
++ rwnx_vif->sta.ap->width != NL80211_CHAN_WIDTH_20_NOHT &&
++ rwnx_vif->sta.ap->width != NL80211_CHAN_WIDTH_20)
++ ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
++
++ ht_oper->operation_mode = cpu_to_le16(prot_mode);
++ ht_oper->stbc_param = 0x0000;
++
++ /* It seems that Basic MCS set and Supported MCS set
++ are identical for the first 10 bytes */
++ memset(&ht_oper->basic_set, 0, 16);
++ memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10);
++
++ return pos + sizeof(struct ieee80211_ht_operation);
++}
++
++static u8 *
++rwnx_ie_build_vht_oper(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
++ u16 prot_mode)
++{
++ struct ieee80211_vht_operation *vht_oper;
++ /* Build VHT Information */
++ *pos++ = WLAN_EID_VHT_OPERATION;
++ *pos++ = sizeof(struct ieee80211_vht_operation);
++ vht_oper = (struct ieee80211_vht_operation *)pos;
++
++ switch (rwnx_vif->sta.ap->width) {
++ case NL80211_CHAN_WIDTH_80:
++ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; // Channel Width
++ CCFS0(vht_oper) =
++ ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq); // Channel Center Frequency Segment 0
++ CCFS1(vht_oper) = 0; // Channel Center Frequency Segment 1 (N.A.)
++ break;
++ case NL80211_CHAN_WIDTH_160:
++ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ; // Channel Width
++ CCFS0(vht_oper) =
++ ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq); // Channel Center Frequency Segment 0
++ CCFS1(vht_oper) = 0; // Channel Center Frequency Segment 1 (N.A.)
++ break;
++ case NL80211_CHAN_WIDTH_80P80:
++ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ; // Channel Width
++ CCFS0(vht_oper) =
++ ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq1); // Channel Center Frequency Segment 0
++ CCFS1(vht_oper) =
++ ieee80211_frequency_to_channel(rwnx_vif->sta.ap->center_freq2); // Channel Center Frequency Segment 1
++ break;
++ default:
++ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
++ CCFS0(vht_oper) = 0;
++ CCFS1(vht_oper) = 0;
++ break;
++ }
++
++ vht_oper->basic_mcs_set = cpu_to_le16(rwnx_hw->mod_params->mcs_map);
++
++ return pos + sizeof(struct ieee80211_vht_operation);
++
++}
++
++static void
++rwnx_tdls_add_setup_start_ies(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, const u8 *peer,
++ u8 action_code, bool initiator,
++ const u8 *extra_ies, size_t extra_ies_len)
++{
++ enum nl80211_band band = rwnx_vif->sta.ap->band;
++ struct ieee80211_supported_band *sband;
++ struct ieee80211_sta_ht_cap ht_cap;
++ struct ieee80211_sta_vht_cap vht_cap;
++ size_t offset = 0, noffset;
++ u8 *pos;
++
++ rcu_read_lock();
++
++ rwnx_add_srates_ie(rwnx_hw, skb);
++ rwnx_add_ext_srates_ie(rwnx_hw, skb);
++ rwnx_tdls_add_supp_channels(rwnx_hw, skb);
++ rwnx_tdls_add_ext_capab(rwnx_hw, skb);
++
++ /* add the QoS element if we support it */
++ if (/*local->hw.queues >= IEEE80211_NUM_ACS &&*/
++ action_code != WLAN_PUB_ACTION_TDLS_DISCOVER_RES)
++ rwnx_add_wmm_info_ie(skb, 0); /* no U-APSD */
++
++ rwnx_tdls_add_oper_classes(rwnx_vif, skb);
++
++ /*
++ * with TDLS we can switch channels, and HT-caps are not necessarily
++ * the same on all bands. The specification limits the setup to a
++ * single HT-cap, so use the current band for now.
++ */
++ sband = rwnx_hw->wiphy->bands[band];
++ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
++ if (((action_code == WLAN_TDLS_SETUP_REQUEST) ||
++ (action_code == WLAN_TDLS_SETUP_RESPONSE) ||
++ (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) &&
++ ht_cap.ht_supported /* (!sta || sta->sta.ht_cap.ht_supported)*/) {
++ rwnx_ie_build_ht_cap(skb, &ht_cap, ht_cap.cap);
++ }
++
++ if (ht_cap.ht_supported &&
++ (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
++ rwnx_tdls_add_bss_coex_ie(skb);
++
++ rwnx_tdls_add_link_ie(rwnx_hw, rwnx_vif, skb, peer, initiator);
++
++ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
++ if (vht_cap.vht_supported) {
++ rwnx_tdls_add_aid_ie(rwnx_vif, skb);
++ rwnx_ie_build_vht_cap(skb, &vht_cap, vht_cap.cap);
++ // Operating mode Notification (optional)
++ }
++
++ /* add any remaining IEs */
++ if (extra_ies_len) {
++ noffset = extra_ies_len;
++ pos = skb_put(skb, noffset - offset);
++ memcpy(pos, extra_ies + offset, noffset - offset);
++ }
++
++ rcu_read_unlock();
++}
++
++
++static void
++rwnx_tdls_add_setup_cfm_ies(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, const u8 *peer, bool initiator,
++ const u8 *extra_ies, size_t extra_ies_len)
++{
++ struct ieee80211_supported_band *sband;
++ enum nl80211_band band = rwnx_vif->sta.ap->band;
++ struct ieee80211_sta_ht_cap ht_cap;
++ struct ieee80211_sta_vht_cap vht_cap;
++
++ size_t offset = 0, noffset;
++ struct rwnx_sta *sta, *ap_sta;
++ u8 *pos;
++
++ rcu_read_lock();
++
++ sta = rwnx_get_sta(rwnx_hw, peer);
++ ap_sta = rwnx_vif->sta.ap;
++ if (WARN_ON_ONCE(!sta || !ap_sta)) {
++ rcu_read_unlock();
++ return;
++ }
++
++ /* add the QoS param IE if both the peer and we support it */
++ if (sta->qos)
++ rwnx_add_wmm_param_ie(skb, ap_sta->acm, ap_sta->ac_param);
++
++ /* if HT support is only added in TDLS, we need an HT-operation IE */
++ sband = rwnx_hw->wiphy->bands[band];
++ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
++ if (ht_cap.ht_supported && !ap_sta->ht && sta->ht) {
++ pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
++ /* send an empty HT operation IE */
++ rwnx_ie_build_ht_oper(rwnx_hw, rwnx_vif, pos, &ht_cap, 0);
++ }
++
++ rwnx_tdls_add_link_ie(rwnx_hw, rwnx_vif, skb, peer, initiator);
++
++ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
++ if (vht_cap.vht_supported && !ap_sta->vht && sta->vht) {
++ pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
++ rwnx_ie_build_vht_oper(rwnx_hw, rwnx_vif, pos, &ht_cap, 0);
++ // Operating mode Notification (optional)
++ }
++
++ /* add any remaining IEs */
++ if (extra_ies_len) {
++ noffset = extra_ies_len;
++ pos = skb_put(skb, noffset - offset);
++ memcpy(pos, extra_ies + offset, noffset - offset);
++ }
++
++ rcu_read_unlock();
++}
++
++static void
++rwnx_tdls_add_ies(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb, const u8 *peer,
++ u8 action_code, u16 status_code,
++ bool initiator, const u8 *extra_ies,
++ size_t extra_ies_len, u8 oper_class,
++ struct cfg80211_chan_def *chandef)
++{
++ switch (action_code) {
++ case WLAN_TDLS_SETUP_REQUEST:
++ case WLAN_TDLS_SETUP_RESPONSE:
++ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
++ if (status_code == 0)
++ rwnx_tdls_add_setup_start_ies(rwnx_hw, rwnx_vif, skb, peer, action_code,
++ initiator, extra_ies, extra_ies_len);
++ break;
++ case WLAN_TDLS_SETUP_CONFIRM:
++ if (status_code == 0)
++ rwnx_tdls_add_setup_cfm_ies(rwnx_hw, rwnx_vif, skb, peer, initiator,
++ extra_ies, extra_ies_len);
++ break;
++
++ case WLAN_TDLS_TEARDOWN:
++ case WLAN_TDLS_DISCOVERY_REQUEST:
++ if (extra_ies_len)
++ memcpy(skb_put(skb, extra_ies_len), extra_ies,
++ extra_ies_len);
++ if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN)
++ rwnx_tdls_add_link_ie(rwnx_hw, rwnx_vif, skb, peer, initiator);
++ break;
++ }
++}
++
++int
++rwnx_tdls_send_mgmt_packet_data(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ const u8 *peer, u8 action_code, u8 dialog_token,
++ u16 status_code, u32 peer_capability, bool initiator,
++ const u8 *extra_ies, size_t extra_ies_len, u8 oper_class,
++ struct cfg80211_chan_def *chandef)
++{
++ struct sk_buff *skb;
++ int ret = 0;
++ struct ieee80211_supported_band *rwnx_band_2GHz = rwnx_hw->wiphy->bands[NL80211_BAND_2GHZ];
++ //#ifdef USE_5G
++ struct ieee80211_supported_band *rwnx_band_5GHz = rwnx_hw->wiphy->bands[NL80211_BAND_5GHZ];
++ //#endif
++ int channels = rwnx_band_2GHz->n_channels;
++
++ if (rwnx_hw->band_5g_support){
++ channels += rwnx_band_5GHz->n_channels;
++ }
++
++ skb = netdev_alloc_skb(rwnx_vif->ndev,
++ sizeof(struct ieee80211_tdls_data) + // ethhdr + TDLS info
++ 10 + /* supported rates */
++ 6 + /* extended supported rates */
++ (2 + channels) + /* supported channels */
++ //#ifdef USE_5G
++ //(2 + rwnx_band_2GHz->n_channels + rwnx_band_5GHz->n_channels) + /* supported channels */
++ //#else
++ //(2 + rwnx_band_2GHz->n_channels) + /* supported channels */
++ //#endif
++ sizeof(struct ieee_types_extcap) +
++ sizeof(struct ieee80211_wmm_param_ie) +
++ 4 + /* oper classes */
++ 28 + //sizeof(struct ieee80211_ht_cap) +
++ sizeof(struct ieee_types_bss_co_2040) +
++ sizeof(struct ieee80211_tdls_lnkie) +
++ (2 + sizeof(struct ieee80211_vht_cap)) +
++ 4 + /*AID*/
++ (2 + sizeof(struct ieee80211_ht_operation)) +
++ extra_ies_len);
++
++ if (!skb)
++ return 0;
++
++ switch (action_code) {
++ case WLAN_TDLS_SETUP_REQUEST:
++ case WLAN_TDLS_SETUP_RESPONSE:
++ case WLAN_TDLS_SETUP_CONFIRM:
++ case WLAN_TDLS_TEARDOWN:
++ case WLAN_TDLS_DISCOVERY_REQUEST:
++ ret = rwnx_tdls_prepare_encap_data(rwnx_hw, rwnx_vif, peer, action_code,
++ dialog_token, status_code, skb);
++ break;
++
++ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
++ ret = rwnx_prep_tdls_direct(rwnx_hw, rwnx_vif, peer, action_code,
++ dialog_token, status_code, skb);
++ break;
++
++ default:
++ ret = -ENOTSUPP;
++ break;
++ }
++
++ if (ret < 0)
++ goto fail;
++
++ rwnx_tdls_add_ies(rwnx_hw, rwnx_vif, skb, peer, action_code, status_code,
++ initiator, extra_ies, extra_ies_len, oper_class, chandef);
++
++ if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
++ u64 cookie;
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ struct cfg80211_mgmt_tx_params params;
++
++ params.len = skb->len;
++ params.buf = skb->data;
++ ret = rwnx_start_mgmt_xmit(rwnx_vif, NULL, &params, false, &cookie);
++ #else
++ ret = rwnx_start_mgmt_xmit(rwnx_vif, NULL, NULL, false, 0, skb->data, skb->len, false, false, &cookie);
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
++
++ return ret;
++ }
++
++ switch (action_code) {
++ case WLAN_TDLS_SETUP_REQUEST:
++ case WLAN_TDLS_SETUP_RESPONSE:
++ case WLAN_TDLS_SETUP_CONFIRM:
++ skb->priority = 2;
++ break;
++ default:
++ skb->priority = 5;
++ break;
++ }
++
++ ret = rwnx_select_txq(rwnx_vif, skb);
++ ret = rwnx_start_xmit(skb, rwnx_vif->ndev);
++
++ return ret;
++
++fail:
++ dev_kfree_skb(skb);
++ return ret;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tdls.h
+@@ -0,0 +1,54 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_tdls.h
++ *
++ * @brief TDLS function declarations
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef RWNX_TDLS_H_
++#define RWNX_TDLS_H_
++
++#include "rwnx_defs.h"
++
++struct ieee_types_header {
++ u8 element_id;
++ u8 len;
++} __packed;
++
++struct ieee_types_bss_co_2040 {
++ struct ieee_types_header ieee_hdr;
++ u8 bss_2040co;
++} __packed;
++
++struct ieee_types_extcap {
++ struct ieee_types_header ieee_hdr;
++ u8 ext_capab[8];
++} __packed;
++
++struct ieee_types_vht_cap {
++ struct ieee_types_header ieee_hdr;
++ struct ieee80211_vht_cap vhtcap;
++} __packed;
++
++struct ieee_types_vht_oper {
++ struct ieee_types_header ieee_hdr;
++ struct ieee80211_vht_operation vhtoper;
++} __packed;
++
++struct ieee_types_aid {
++ struct ieee_types_header ieee_hdr;
++ u16 aid;
++} __packed;
++
++int rwnx_tdls_send_mgmt_packet_data(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ const u8 *peer, u8 action_code, u8 dialog_token,
++ u16 status_code, u32 peer_capability, bool initiator,
++ const u8 *extra_ies, size_t extra_ies_len, u8 oper_class,
++ struct cfg80211_chan_def *chandef);
++
++#endif /* RWNX_TDLS_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_testmode.c
+@@ -0,0 +1,226 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_testmode.c
++ *
++ * @brief Test mode function definitions
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#include <net/mac80211.h>
++#include <net/netlink.h>
++
++#include "rwnx_testmode.h"
++#include "rwnx_msg_tx.h"
++#include "rwnx_dini.h"
++#include "reg_access.h"
++
++/*
++ * This function handles the user application commands for register access.
++ *
++ * It retrieves the command ID carried in RWNX_TM_ATTR_COMMAND and calls the
++ * corresponding handler.
++ *
++ * If the command ID is unknown, -ENOSYS is returned; -ENOMSG is returned if the
++ * mandatory fields (RWNX_TM_ATTR_REG_OFFSET, RWNX_TM_ATTR_REG_VALUE32)
++ * are missing. Otherwise 0 is returned, indicating that the command executed successfully.
++ *
++ * If RWNX_TM_ATTR_COMMAND is RWNX_TM_CMD_APP2DEV_REG_READ, the register read
++ * value is returned with RWNX_TM_ATTR_REG_VALUE32.
++ *
++ * @hw: ieee80211_hw object that represents the device
++ * @tb: general message fields from the user space
++ */
++int rwnx_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
++{
++ struct rwnx_hw *rwnx_hw = hw->priv;
++ u32 mem_addr, val32;
++ struct sk_buff *skb;
++ int status = 0;
++
++ /* First check if register address is there */
++ if (!tb[RWNX_TM_ATTR_REG_OFFSET]) {
++ printk("Error finding register offset\n");
++ return -ENOMSG;
++ }
++
++ mem_addr = nla_get_u32(tb[RWNX_TM_ATTR_REG_OFFSET]);
++
++ switch (nla_get_u32(tb[RWNX_TM_ATTR_COMMAND])) {
++ case RWNX_TM_CMD_APP2DEV_REG_READ:
++ {
++ struct dbg_mem_read_cfm mem_read_cfm;
++
++ /*** Send the command to the LMAC ***/
++ if ((status = rwnx_send_dbg_mem_read_req(rwnx_hw, mem_addr, &mem_read_cfm)))
++ return status;
++
++ /* Allocate the answer message */
++ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
++ if (!skb) {
++ printk("Error allocating memory\n");
++ return -ENOMEM;
++ }
++
++ val32 = mem_read_cfm.memdata;
++ if (nla_put_u32(skb, RWNX_TM_ATTR_REG_VALUE32, val32))
++ goto nla_put_failure;
++
++ /* Send the answer to upper layer */
++ status = cfg80211_testmode_reply(skb);
++ if (status < 0)
++ printk("Error sending msg : %d\n", status);
++ }
++ break;
++
++ case RWNX_TM_CMD_APP2DEV_REG_WRITE:
++ {
++ if (!tb[RWNX_TM_ATTR_REG_VALUE32]) {
++ printk("Error finding value to write\n");
++ return -ENOMSG;
++ } else {
++ val32 = nla_get_u32(tb[RWNX_TM_ATTR_REG_VALUE32]);
++ /* Send the command to the LMAC */
++ if ((status = rwnx_send_dbg_mem_write_req(rwnx_hw, mem_addr, val32)))
++ return status;
++ }
++ }
++ break;
++
++ default:
++ printk("Unknown testmode register command ID\n");
++ return -ENOSYS;
++ }
++
++ return status;
++
++nla_put_failure:
++ kfree_skb(skb);
++ return -EMSGSIZE;
++}
++
++/*
++ * This function handles the user application commands for Debug filter settings.
++ *
++ * @hw: ieee80211_hw object that represents the device
++ * @tb: general message fields from the user space
++ */
++int rwnx_testmode_dbg_filter(struct ieee80211_hw *hw, struct nlattr **tb)
++{
++ struct rwnx_hw *rwnx_hw = hw->priv;
++ u32 filter;
++ int status = 0;
++
++ /* First check if the filter is there */
++ if (!tb[RWNX_TM_ATTR_REG_FILTER]) {
++ printk("Error finding filter value\n");
++ return -ENOMSG;
++ }
++
++ filter = nla_get_u32(tb[RWNX_TM_ATTR_REG_FILTER]);
++ RWNX_DBG("testmode debug filter, setting: 0x%x\n", filter);
++
++ switch (nla_get_u32(tb[RWNX_TM_ATTR_COMMAND])) {
++ case RWNX_TM_CMD_APP2DEV_SET_DBGMODFILTER:
++ {
++ /* Send the command to the LMAC */
++ if ((status = rwnx_send_dbg_set_mod_filter_req(rwnx_hw, filter)))
++ return status;
++ }
++ break;
++ case RWNX_TM_CMD_APP2DEV_SET_DBGSEVFILTER:
++ {
++ /* Send the command to the LMAC */
++ if ((status = rwnx_send_dbg_set_sev_filter_req(rwnx_hw, filter)))
++ return status;
++ }
++ break;
++
++ default:
++ printk("Unknown testmode register command ID\n");
++ return -ENOSYS;
++ }
++
++ return status;
++}
++
++/*
++ * This function handles the user application commands for register access without using
++ * the normal LMAC messaging path.
++ * In this case register access is done through direct PCI BAR windows, which can be used
++ * to access registers even when the LMAC FW is stuck.
++ *
++ * @hw: ieee80211_hw object that represents the device
++ * @tb: general message fields from the user space
++ */
++int rwnx_testmode_reg_dbg(struct ieee80211_hw *hw, struct nlattr **tb)
++{
++ struct rwnx_hw *rwnx_hw = hw->priv;
++ struct rwnx_plat *rwnx_plat = rwnx_hw->plat;
++ u32 mem_addr;
++ struct sk_buff *skb;
++ int status = 0;
++ volatile unsigned int reg_value = 0;
++ unsigned int offset;
++
++ /* First check if register address is there */
++ if (!tb[RWNX_TM_ATTR_REG_OFFSET]) {
++ printk("Error finding register offset\n");
++ return -ENOMSG;
++ }
++
++ mem_addr = nla_get_u32(tb[RWNX_TM_ATTR_REG_OFFSET]);
++ offset = mem_addr & 0x00FFFFFF;
++
++ switch (nla_get_u32(tb[RWNX_TM_ATTR_COMMAND])) {
++ case RWNX_TM_CMD_APP2DEV_REG_READ_DBG:
++ {
++ /*** Read the register directly through the PCI BAR window ***/
++ reg_value = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, offset);
++
++ /* Allocate the answer message */
++ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
++ if (!skb) {
++ printk("Error allocating memory\n");
++ return -ENOMEM;
++ }
++
++ if (nla_put_u32(skb, RWNX_TM_ATTR_REG_VALUE32, reg_value))
++ goto nla_put_failure;
++
++ /* Send the answer to upper layer */
++ status = cfg80211_testmode_reply(skb);
++ if (status < 0)
++ printk("Error sending msg : %d\n", status);
++ }
++ break;
++
++ case RWNX_TM_CMD_APP2DEV_REG_WRITE_DBG:
++ {
++ if (!tb[RWNX_TM_ATTR_REG_VALUE32]) {
++ printk("Error finding value to write\n");
++ return -ENOMSG;
++ } else {
++ reg_value = nla_get_u32(tb[RWNX_TM_ATTR_REG_VALUE32]);
++
++ /* Write the register directly through the PCI BAR window */
++ RWNX_REG_WRITE(reg_value, rwnx_plat, RWNX_ADDR_SYSTEM,
++ offset);
++ }
++ }
++ break;
++
++ default:
++ printk("Unknown testmode register command ID\n");
++ return -ENOSYS;
++ }
++
++ return status;
++
++nla_put_failure:
++ kfree_skb(skb);
++ return -EMSGSIZE;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_testmode.h
+@@ -0,0 +1,64 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_testmode.h
++ *
++ * @brief Test mode function declarations
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++
++#ifndef RWNX_TESTMODE_H_
++#define RWNX_TESTMODE_H_
++
++#include <net/mac80211.h>
++#include <net/netlink.h>
++
++/* Commands from user space to kernel space (RWNX_TM_CMD_APP2DEV_XX) and
++ * from kernel space to user space (RWNX_TM_CMD_DEV2APP_XX).
++ * The command ID is carried with RWNX_TM_ATTR_COMMAND.
++ */
++enum rwnx_tm_cmd_t {
++ /* commands from user application to access register */
++ RWNX_TM_CMD_APP2DEV_REG_READ = 1,
++ RWNX_TM_CMD_APP2DEV_REG_WRITE,
++
++ /* commands from user application to select the Debug levels */
++ RWNX_TM_CMD_APP2DEV_SET_DBGMODFILTER,
++ RWNX_TM_CMD_APP2DEV_SET_DBGSEVFILTER,
++
++ /* commands to access registers without sending messages to the LMAC layer;
++ * these must be used when the LMAC FW is stuck. */
++ RWNX_TM_CMD_APP2DEV_REG_READ_DBG,
++ RWNX_TM_CMD_APP2DEV_REG_WRITE_DBG,
++
++ RWNX_TM_CMD_MAX,
++};
++
++enum rwnx_tm_attr_t {
++ RWNX_TM_ATTR_NOT_APPLICABLE = 0,
++
++ RWNX_TM_ATTR_COMMAND,
++
++ /* When RWNX_TM_ATTR_COMMAND is RWNX_TM_CMD_APP2DEV_REG_XXX,
++ * the mandatory fields are:
++ * RWNX_TM_ATTR_REG_OFFSET for the offset of the target register;
++ * RWNX_TM_ATTR_REG_VALUE32 for the value */
++ RWNX_TM_ATTR_REG_OFFSET,
++ RWNX_TM_ATTR_REG_VALUE32,
++
++ /* When RWNX_TM_ATTR_COMMAND is RWNX_TM_CMD_APP2DEV_SET_DBGXXXFILTER,
++ * the mandatory field is RWNX_TM_ATTR_REG_FILTER. */
++ RWNX_TM_ATTR_REG_FILTER,
++
++ RWNX_TM_ATTR_MAX,
++};
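++
++/*
++ * Illustrative command/attribute pairings (see rwnx_testmode.c):
++ *  - RWNX_TM_CMD_APP2DEV_REG_READ: needs RWNX_TM_ATTR_REG_OFFSET; the read
++ *    value is returned in RWNX_TM_ATTR_REG_VALUE32
++ *  - RWNX_TM_CMD_APP2DEV_REG_WRITE: needs RWNX_TM_ATTR_REG_OFFSET and
++ *    RWNX_TM_ATTR_REG_VALUE32
++ *  - RWNX_TM_CMD_APP2DEV_SET_DBGMODFILTER / _SET_DBGSEVFILTER: need
++ *    RWNX_TM_ATTR_REG_FILTER
++ */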
++
++/***********************************************************************/
++int rwnx_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb);
++int rwnx_testmode_dbg_filter(struct ieee80211_hw *hw, struct nlattr **tb);
++int rwnx_testmode_reg_dbg(struct ieee80211_hw *hw, struct nlattr **tb);
++
++#endif /* RWNX_TESTMODE_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tx.c
+@@ -0,0 +1,2143 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_tx.c
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#include <linux/dma-mapping.h>
++#include <linux/etherdevice.h>
++#include <net/sock.h>
++
++#include "rwnx_defs.h"
++#include "rwnx_tx.h"
++#include "rwnx_msg_tx.h"
++#include "rwnx_mesh.h"
++#include "rwnx_events.h"
++#include "rwnx_compat.h"
++#include "aicwf_txrxif.h"
++#ifdef CONFIG_RWNX_MON_XMIT
++#include <net/ieee80211_radiotap.h>
++#endif
++
++/******************************************************************************
++ * Power Save functions
++ *****************************************************************************/
++/**
++ * rwnx_set_traffic_status - Inform FW if traffic is available for STA in PS
++ *
++ * @rwnx_hw: Driver main data
++ * @sta: Sta in PS mode
++ * @available: whether traffic is buffered for the STA
++ * @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID)
++ */
++void rwnx_set_traffic_status(struct rwnx_hw *rwnx_hw,
++ struct rwnx_sta *sta,
++ bool available,
++ u8 ps_id)
++{
++ if (sta->tdls.active) {
++ rwnx_send_tdls_peer_traffic_ind_req(rwnx_hw,
++ rwnx_hw->vif_table[sta->vif_idx]);
++ } else {
++ bool uapsd = (ps_id != LEGACY_PS_ID);
++ rwnx_send_me_traffic_ind(rwnx_hw, sta->sta_idx, uapsd, available);
++#ifdef CREATE_TRACE_POINTS
++ trace_ps_traffic_update(sta->sta_idx, available, uapsd);
++#endif
++ }
++}
++
++/**
++ * rwnx_ps_bh_enable - Enable/disable PS mode for one STA
++ *
++ * @rwnx_hw: Driver main data
++ * @sta: Sta which enters/leaves PS mode
++ * @enable: PS mode status
++ *
++ * This function will enable/disable PS mode for one STA.
++ * When enabling PS mode:
++ * - Stop all STA's txq for RWNX_TXQ_STOP_STA_PS reason
++ * - Count how many buffers are already ready for this STA
++ * - For BC/MC sta, update all queued SKB to use hw_queue BCMC
++ * - Update TIM if some packets are ready
++ *
++ * When disabling PS mode:
++ * - Start all STA's txq for RWNX_TXQ_STOP_STA_PS reason
++ * - For BC/MC sta, update all queued SKB to use hw_queue AC_BE
++ * - Update TIM if some packets are ready (otherwise the fw will not update
++ * the TIM in the beacon for this STA)
++ *
++ * All counter/skb updates are protected from TX path by taking tx_lock
++ *
++ * NOTE: _bh_ in function name indicates that this function is called
++ * from a bottom_half tasklet.
++ */
++void rwnx_ps_bh_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ bool enable)
++{
++ struct rwnx_txq *txq;
++
++ if (enable) {
++#ifdef CREATE_TRACE_POINTS
++ trace_ps_enable(sta);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ sta->ps.active = true;
++ sta->ps.sp_cnt[LEGACY_PS_ID] = 0;
++ sta->ps.sp_cnt[UAPSD_ID] = 0;
++ rwnx_txq_sta_stop(sta, RWNX_TXQ_STOP_STA_PS, rwnx_hw);
++
++ if (is_multicast_sta(sta->sta_idx)) {
++ txq = rwnx_txq_sta_get(sta, 0, rwnx_hw);
++ sta->ps.pkt_ready[LEGACY_PS_ID] = skb_queue_len(&txq->sk_list);
++ sta->ps.pkt_ready[UAPSD_ID] = 0;
++ //txq->hwq = &rwnx_hw->hwq[RWNX_HWQ_BCMC];
++ } else {
++ int i;
++ sta->ps.pkt_ready[LEGACY_PS_ID] = 0;
++ sta->ps.pkt_ready[UAPSD_ID] = 0;
++ foreach_sta_txq(sta, txq, i, rwnx_hw) {
++ sta->ps.pkt_ready[txq->ps_id] += skb_queue_len(&txq->sk_list);
++ }
++ }
++
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++
++ //if (sta->ps.pkt_ready[LEGACY_PS_ID])
++ // rwnx_set_traffic_status(rwnx_hw, sta, true, LEGACY_PS_ID);
++
++ //if (sta->ps.pkt_ready[UAPSD_ID])
++ // rwnx_set_traffic_status(rwnx_hw, sta, true, UAPSD_ID);
++ } else {
++#ifdef CREATE_TRACE_POINTS
++ trace_ps_disable(sta->sta_idx);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ sta->ps.active = false;
++
++ if (is_multicast_sta(sta->sta_idx)) {
++ txq = rwnx_txq_sta_get(sta, 0, rwnx_hw);
++ txq->hwq = &rwnx_hw->hwq[RWNX_HWQ_BE];
++ txq->push_limit = 0;
++ } else {
++ int i;
++ foreach_sta_txq(sta, txq, i, rwnx_hw) {
++ txq->push_limit = 0;
++ }
++ }
++
++ rwnx_txq_sta_start(sta, RWNX_TXQ_STOP_STA_PS, rwnx_hw);
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++
++ //if (sta->ps.pkt_ready[LEGACY_PS_ID])
++ // rwnx_set_traffic_status(rwnx_hw, sta, false, LEGACY_PS_ID);
++
++ //if (sta->ps.pkt_ready[UAPSD_ID])
++ // rwnx_set_traffic_status(rwnx_hw, sta, false, UAPSD_ID);
++
++ tasklet_schedule(&rwnx_hw->task);
++ }
++}
++
++/**
++ * rwnx_ps_bh_traffic_req - Handle traffic request for STA in PS mode
++ *
++ * @rwnx_hw: Driver main data
++ * @sta: Sta in PS mode for which traffic is requested
++ * @pkt_req: number of pkt to push
++ * @ps_id: type of PS data requested (@LEGACY_PS_ID or @UAPSD_ID)
++ *
++ * This function will make sure that @pkt_req packets are pushed to the fw
++ * while the STA is in PS mode.
++ * If the request is 0, send all traffic.
++ * If the request is greater than the number of available packets, reduce the request.
++ * Note: the request will also be reduced if txq credits are not available.
++ *
++ * All counter updates are protected from TX path by taking tx_lock
++ *
++ * NOTE: _bh_ in function name indicates that this function is called
++ * from the bottom_half tasklet.
++ */
++void rwnx_ps_bh_traffic_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ u16 pkt_req, u8 ps_id)
++{
++ int pkt_ready_all;
++ struct rwnx_txq *txq;
++ int schedule = 0;
++
++ //if (WARN(!sta->ps.active, "sta %pM is not in Power Save mode",
++ // sta->mac_addr))
++ // return;
++ if(!sta->ps.active) {
++ //AICWFDBG(LOGERROR, "sta(%d) %pM is not in Power Save mode", sta->sta_idx, sta->mac_addr);
++ return;
++ }
++#ifdef CREATE_TRACE_POINTS
++ trace_ps_traffic_req(sta, pkt_req, ps_id);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++
++ /* Fw may ask to stop a service period with PS_SP_INTERRUPTED. This only
++ happens for a p2p-go interface if a NOA starts during a service period */
++ if ((pkt_req == PS_SP_INTERRUPTED) && (ps_id == UAPSD_ID)) {
++ int tid;
++ sta->ps.sp_cnt[ps_id] = 0;
++ foreach_sta_txq(sta, txq, tid, rwnx_hw) {
++ txq->push_limit = 0;
++ }
++ goto done;
++ }
++
++ pkt_ready_all = (sta->ps.pkt_ready[ps_id] - sta->ps.sp_cnt[ps_id]);
++
++ /* Don't start an SP until the previous one is finished, or if we don't have
++ packets ready (which must not happen for U-APSD) */
++ if (sta->ps.sp_cnt[ps_id] || pkt_ready_all <= 0) {
++ goto done;
++ }
++
++ /* Adapt request to what is available. */
++ if (pkt_req == 0 || pkt_req > pkt_ready_all) {
++ pkt_req = pkt_ready_all;
++ }
++
++ /* Reset the SP counter */
++ sta->ps.sp_cnt[ps_id] = 0;
++ schedule = 1;
++
++ /* "dispatch" the request between txq */
++ if (is_multicast_sta(sta->sta_idx)) {
++ txq = rwnx_txq_sta_get(sta, 0, rwnx_hw);
++ //if (txq->credits <= 0)
++ // goto done;
++ if (pkt_req > txq->credits)
++ pkt_req = txq->credits;
++ txq->push_limit = pkt_req;
++ sta->ps.sp_cnt[ps_id] = pkt_req;
++ rwnx_txq_add_to_hw_list(txq);
++ } else {
++ int i, tid;
++
++ foreach_sta_txq_prio(sta, txq, tid, i, rwnx_hw) {
++ u16 txq_len = skb_queue_len(&txq->sk_list);
++
++ if (txq->ps_id != ps_id)
++ continue;
++
++ if (txq_len > txq->credits)
++ txq_len = txq->credits;
++
++ if (txq_len == 0)
++ continue;
++
++ if (txq_len < pkt_req) {
++ /* Not enough pkt queued in this txq, add this
++ txq to hwq list and process next txq */
++ pkt_req -= txq_len;
++ txq->push_limit = txq_len;
++ sta->ps.sp_cnt[ps_id] += txq_len;
++ rwnx_txq_add_to_hw_list(txq);
++ } else {
++ /* Enough pkt in this txq to complete the request
++ add this txq to hwq list and stop processing txq */
++ txq->push_limit = pkt_req;
++ sta->ps.sp_cnt[ps_id] += pkt_req;
++ rwnx_txq_add_to_hw_list(txq);
++ break;
++ }
++ }
++ }
++
++ done:
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++ if(schedule)
++ tasklet_schedule(&rwnx_hw->task);
++}
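++
++/*
++ * Worked example (illustrative) of the clamping above: with 5 packets ready
++ * for a given ps_id, a request of pkt_req=0 pushes all 5 and a request of
++ * pkt_req=8 is reduced to 5; each txq's share is further capped by its
++ * available credits before the txq is added to the hwq list.
++ */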
++
++/******************************************************************************
++ * TX functions
++ *****************************************************************************/
++#define PRIO_STA_NULL 0xAA
++
++static const int rwnx_down_hwq2tid[3] = {
++ [RWNX_HWQ_BK] = 2,
++ [RWNX_HWQ_BE] = 3,
++ [RWNX_HWQ_VI] = 5,
++};
++
++static void rwnx_downgrade_ac(struct rwnx_sta *sta, struct sk_buff *skb)
++{
++ int8_t ac = rwnx_tid2hwq[skb->priority];
++
++ if (WARN((ac > RWNX_HWQ_VO),
++ "Unexepcted ac %d for skb before downgrade", ac))
++ ac = RWNX_HWQ_VO;
++
++ while (sta->acm & BIT(ac)) {
++ if (ac == RWNX_HWQ_BK) {
++ skb->priority = 1;
++ return;
++ }
++ ac--;
++ skb->priority = rwnx_down_hwq2tid[ac];
++ }
++}
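++
++/*
++ * Illustrative example of the downgrade above: a voice frame (AC_VO) sent to
++ * a BSS where admission control is mandatory for both AC_VO and AC_VI ends
++ * up on AC_BE (tid 3); if every AC down to AC_BK required admission, the
++ * frame would settle on AC_BK with tid 1.
++ */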
++
++u16 rwnx_select_txq(struct rwnx_vif *rwnx_vif, struct sk_buff *skb)
++{
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct wireless_dev *wdev = &rwnx_vif->wdev;
++ struct rwnx_sta *sta = NULL;
++ struct rwnx_txq *txq;
++ u16 netdev_queue;
++ bool tdls_mgmgt_frame = false;
++ int nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX;
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC;
++ }
++
++ switch (wdev->iftype) {
++ case NL80211_IFTYPE_STATION:
++ case NL80211_IFTYPE_P2P_CLIENT:
++ {
++ struct ethhdr *eth;
++ eth = (struct ethhdr *)skb->data;
++ if (eth->h_proto == cpu_to_be16(ETH_P_TDLS)) {
++ tdls_mgmgt_frame = true;
++ }
++ if ((rwnx_vif->tdls_status == TDLS_LINK_ACTIVE) &&
++ (rwnx_vif->sta.tdls_sta != NULL) &&
++ (memcmp(eth->h_dest, rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0))
++ sta = rwnx_vif->sta.tdls_sta;
++ else
++ sta = rwnx_vif->sta.ap;
++ break;
++ }
++ case NL80211_IFTYPE_AP_VLAN:
++ if (rwnx_vif->ap_vlan.sta_4a) {
++ sta = rwnx_vif->ap_vlan.sta_4a;
++ break;
++ }
++
++ /* AP_VLAN interface is not used for a 4A STA,
++ fall back to searching the sta among all of the AP's clients */
++ rwnx_vif = rwnx_vif->ap_vlan.master;
++ case NL80211_IFTYPE_AP:
++ case NL80211_IFTYPE_P2P_GO:
++ {
++ struct rwnx_sta *cur;
++ struct ethhdr *eth = (struct ethhdr *)skb->data;
++
++ if (is_multicast_ether_addr(eth->h_dest)) {
++ sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index];
++ } else {
++ spin_lock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++ list_for_each_entry(cur, &rwnx_vif->ap.sta_list, list) {
++ if (!memcmp(cur->mac_addr, eth->h_dest, ETH_ALEN)) {
++ sta = cur;
++ break;
++ }
++ }
++ spin_unlock_bh(&rwnx_vif->rwnx_hw->cb_lock);
++ }
++
++ break;
++ }
++ case NL80211_IFTYPE_MESH_POINT:
++ {
++ struct ethhdr *eth = (struct ethhdr *)skb->data;
++
++ if (!rwnx_vif->is_resending) {
++ /*
++ * If the ethernet source address is not the address of a mesh wireless interface, we are a proxy for
++ * this address and have to inform the HW
++ */
++ if (memcmp(&eth->h_source[0], &rwnx_vif->ndev->perm_addr[0], ETH_ALEN)) {
++ /* Check if LMAC is already informed */
++ if (!rwnx_get_mesh_proxy_info(rwnx_vif, (u8 *)&eth->h_source, true)) {
++ rwnx_send_mesh_proxy_add_req(rwnx_hw, rwnx_vif, (u8 *)&eth->h_source);
++ }
++ }
++ }
++
++ if (is_multicast_ether_addr(eth->h_dest)) {
++ sta = &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index];
++ } else {
++ /* Path to be used */
++ struct rwnx_mesh_path *p_mesh_path = NULL;
++ struct rwnx_mesh_path *p_cur_path;
++ /* Check if destination is proxied by a peer Mesh STA */
++ struct rwnx_mesh_proxy *p_mesh_proxy = rwnx_get_mesh_proxy_info(rwnx_vif, (u8 *)&eth->h_dest, false);
++ /* Mesh Target address */
++ struct mac_addr *p_tgt_mac_addr;
++
++ if (p_mesh_proxy) {
++ p_tgt_mac_addr = &p_mesh_proxy->proxy_addr;
++ } else {
++ p_tgt_mac_addr = (struct mac_addr *)&eth->h_dest;
++ }
++
++ /* Look for path with provided target address */
++ list_for_each_entry(p_cur_path, &rwnx_vif->ap.mpath_list, list) {
++ if (!memcmp(&p_cur_path->tgt_mac_addr, p_tgt_mac_addr, ETH_ALEN)) {
++ p_mesh_path = p_cur_path;
++ break;
++ }
++ }
++
++ if (p_mesh_path) {
++ sta = p_mesh_path->p_nhop_sta;
++ } else {
++ rwnx_send_mesh_path_create_req(rwnx_hw, rwnx_vif, (u8 *)p_tgt_mac_addr);
++ }
++ }
++
++ break;
++ }
++ default:
++ break;
++ }
++
++ if (sta && sta->qos)
++ {
++ if (tdls_mgmgt_frame) {
++ skb_set_queue_mapping(skb, NX_STA_NDEV_IDX(skb->priority, sta->sta_idx));
++ } else {
++ /* use the data classifier to determine what 802.1d tag the
++ * data frame has */
++ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
++ skb->priority = cfg80211_classify8021d(skb) & IEEE80211_QOS_CTL_TAG1D_MASK;
++ #else
++ skb->priority = cfg80211_classify8021d(skb, NULL) & IEEE80211_QOS_CTL_TAG1D_MASK;
++ #endif
++ }
++ if (sta->acm)
++ rwnx_downgrade_ac(sta, skb);
++
++ txq = rwnx_txq_sta_get(sta, skb->priority, rwnx_hw);
++ netdev_queue = txq->ndev_idx;
++ }
++ else if (sta)
++ {
++ skb->priority = 0xFF;
++ txq = rwnx_txq_sta_get(sta, 0, rwnx_hw);
++ netdev_queue = txq->ndev_idx;
++ }
++ else
++ {
++ /* This packet will be dropped in the xmit function, but we still need to
++ select an active queue so that xmit gets called. As this is most likely
++ to happen on an AP interface, select the BCMC queue
++ (TODO: select another queue if the BCMC queue is stopped) */
++ skb->priority = PRIO_STA_NULL;
++ netdev_queue = nx_bcmc_txq_ndev_idx;
++ }
++
++#ifndef CONFIG_ONE_TXQ
++ BUG_ON(netdev_queue >= NX_NB_NDEV_TXQ);
++#endif
++
++ return netdev_queue;
++}
++
++/**
++ * rwnx_set_more_data_flag - Update MORE_DATA flag in tx sw desc
++ *
++ * @rwnx_hw: Driver main data
++ * @sw_txhdr: Header for pkt to be pushed
++ *
++ * If STA is in PS mode
++ * - Set EOSP in case the packet is the last of the UAPSD service period
++ * - Set MORE_DATA flag if more pkt are ready for this sta
++ * - Update TIM if this is the last pkt buffered for this sta
++ *
++ * note: tx_lock already taken.
++ */
++static inline void rwnx_set_more_data_flag(struct rwnx_hw *rwnx_hw,
++ struct rwnx_sw_txhdr *sw_txhdr)
++{
++ struct rwnx_sta *sta = sw_txhdr->rwnx_sta;
++ struct rwnx_vif *vif = sw_txhdr->rwnx_vif;
++ struct rwnx_txq *txq = sw_txhdr->txq;
++
++ if (unlikely(sta->ps.active)) {
++ sta->ps.pkt_ready[txq->ps_id]--;
++ sta->ps.sp_cnt[txq->ps_id]--;
++#ifdef CREATE_TRACE_POINTS
++ trace_ps_push(sta);
++#endif
++ if (((txq->ps_id == UAPSD_ID) || (vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) || (sta->tdls.active))
++ && !sta->ps.sp_cnt[txq->ps_id]) {
++ sw_txhdr->desc.host.flags |= TXU_CNTRL_EOSP;
++ }
++
++ if (sta->ps.pkt_ready[txq->ps_id]) {
++ sw_txhdr->desc.host.flags |= TXU_CNTRL_MORE_DATA;
++ } else {
++ rwnx_set_traffic_status(rwnx_hw, sta, false, txq->ps_id);
++ }
++ }
++}
++
++/**
++ * rwnx_get_tx_priv - Get STA and tid for one skb
++ *
++ * @rwnx_vif: vif ptr
++ * @skb: skb
++ * @tid: pointer updated with the tid to use for this skb
++ *
++ * @return: pointer to the destination STA (may be NULL)
++ *
++ * The skb has already been parsed in the rwnx_select_queue function;
++ * simply re-read the information from the skb.
++ */
++static struct rwnx_sta *rwnx_get_tx_priv(struct rwnx_vif *rwnx_vif,
++ struct sk_buff *skb,
++ u8 *tid)
++{
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_sta *sta;
++ int sta_idx;
++ int nx_remote_sta_max = NX_REMOTE_STA_MAX;
++ int nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX;
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_remote_sta_max = NX_REMOTE_STA_MAX_FOR_OLD_IC;
++ nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC;
++ }
++
++
++ *tid = skb->priority;
++ if (unlikely(skb->priority == PRIO_STA_NULL)) {
++ return NULL;
++ } else {
++ int ndev_idx = skb_get_queue_mapping(skb);
++
++ if (ndev_idx == nx_bcmc_txq_ndev_idx)
++ sta_idx = nx_remote_sta_max + master_vif_idx(rwnx_vif);
++ else
++ sta_idx = ndev_idx / NX_NB_TID_PER_STA;
++
++ sta = &rwnx_hw->sta_table[sta_idx];
++ }
++
++ return sta;
++}
++
++/**
++ * rwnx_prep_tx - Prepare buffer for DMA transmission
++ *
++ * @rwnx_hw: Driver main data
++ * @txhdr: Tx descriptor
++ *
++ * Maps hw_txhdr and buffer data for transmission via DMA.
++ * - Data buffer will be downloaded by the embedded side.
++ * - hw_txhdr will be uploaded by the embedded side when the buffer has been
++ * transmitted over the air.
++ */
++static int rwnx_prep_tx(struct rwnx_hw *rwnx_hw, struct rwnx_txhdr *txhdr)
++{
++#if 0
++ struct rwnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
++ struct rwnx_hw_txhdr *hw_txhdr = &txhdr->hw_hdr;
++ dma_addr_t dma_addr;
++
++ /* MAP (and sync) memory for DMA */
++ dma_addr = dma_map_single(rwnx_hw->dev, hw_txhdr,
++ sw_txhdr->map_len, DMA_BIDIRECTIONAL);
++ if (WARN_ON(dma_mapping_error(rwnx_hw->dev, dma_addr)))
++ return -1;
++
++ sw_txhdr->dma_addr = dma_addr;
++#endif
++ return 0;
++}
++
++/**
++ * rwnx_tx_push - Push one packet to fw
++ *
++ * @rwnx_hw: Driver main data
++ * @txhdr: tx desc of the buffer to push
++ * @flags: push flags (see @rwnx_push_flags)
++ *
++ * Push one packet to the fw. The SW descriptor of the packet has already
++ * been updated; only the MORE_DATA flag will be set here if needed.
++ */
++void rwnx_tx_push(struct rwnx_hw *rwnx_hw, struct rwnx_txhdr *txhdr, int flags)
++{
++ struct rwnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
++ struct sk_buff *skb = sw_txhdr->skb;
++ struct rwnx_txq *txq = sw_txhdr->txq;
++ u16 hw_queue = txq->hwq->id;
++ int user = 0;
++
++ lockdep_assert_held(&rwnx_hw->tx_lock);
++
++ //printk("rwnx_tx_push\n");
++ /* RETRY flag is not always set so retest here */
++ if (txq->nb_retry) {
++ flags |= RWNX_PUSH_RETRY;
++ txq->nb_retry--;
++ if (txq->nb_retry == 0) {
++ WARN(skb != txq->last_retry_skb,
++ "last retry buffer is not the expected one");
++ txq->last_retry_skb = NULL;
++ }
++ } else if (!(flags & RWNX_PUSH_RETRY)) {
++ txq->pkt_sent++;
++ }
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ if (txq->amsdu == sw_txhdr) {
++ WARN((flags & RWNX_PUSH_RETRY), "End A-MSDU on a retry");
++ rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
++ txq->amsdu = NULL;
++ } else if (!(flags & RWNX_PUSH_RETRY) &&
++ !(sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)) {
++ rwnx_hw->stats.amsdus[0].done++;
++ }
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++
++ /* Wait here to update hw_queue, as for multicast STA hwq may change
++ between queue and push (because of PS) */
++ sw_txhdr->hw_queue = hw_queue;
++
++ //sw_txhdr->desc.host.packet_addr = hw_queue; //use packet_addr field for hw_txq
++ sw_txhdr->desc.host.ac = hw_queue; //use ac field for hw_txq
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ /* MU group is only selected during hwq processing */
++ sw_txhdr->desc.host.mumimo_info = txq->mumimo_info;
++ user = RWNX_TXQ_POS_ID(txq);
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++
++ if (sw_txhdr->rwnx_sta) {
++ /* only for AP mode */
++ rwnx_set_more_data_flag(rwnx_hw, sw_txhdr);
++ }
++#ifdef CREATE_TRACE_POINTS
++ trace_push_desc(skb, sw_txhdr, flags);
++#endif
++ #if 0
++ txq->credits--;
++ #endif
++ txq->pkt_pushed[user]++;
++ //printk("txq->credits=%d\n",txq->credits);
++ #if 0
++ if (txq->credits <= 0)
++ rwnx_txq_stop(txq, RWNX_TXQ_STOP_FULL);
++ #endif
++
++ if (txq->push_limit)
++ txq->push_limit--;
++#if 0
++ rwnx_ipc_txdesc_push(rwnx_hw, &sw_txhdr->desc, skb, hw_queue, user);
++#else
++#ifdef AICWF_SDIO_SUPPORT
++ if( ((sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) && \
++ ((*(skb->data+sw_txhdr->headroom)==0xd0) || (*(skb->data+sw_txhdr->headroom)==0x10) || (*(skb->data+sw_txhdr->headroom)==0x30))) || \
++ (sw_txhdr->desc.host.ethertype == 0x8e88) ) { // 0xd0:Action, 0x10:AssocRsp, 0x30:ReassocRsp, 0x8e88:EAPOL (byte-swapped 0x888E)
++ sw_txhdr->need_cfm = 1;
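++ /* Note (assumed from the encoding below): bit 31 of status_desc_addr
++ * flags that a TX confirmation is expected, while the low bits carry the
++ * free txdesc index used to match that confirmation. */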
++ sw_txhdr->desc.host.status_desc_addr = ((1<<31) | rwnx_hw->sdio_env.txdesc_free_idx[0]);
++ aicwf_sdio_host_txdesc_push(&(rwnx_hw->sdio_env), 0, (long)skb);
++ AICWFDBG(LOGINFO, "need cfm ethertype:%8x,user_idx=%d, skb=%p sta_idx:%d\n",
++ sw_txhdr->desc.host.ethertype,
++ rwnx_hw->sdio_env.txdesc_free_idx[0],
++ skb,
++ sw_txhdr->desc.host.staid);
++ } else {
++ sw_txhdr->need_cfm = 0;
++ if (sw_txhdr->raw_frame) {
++ sw_txhdr->desc.host.flags |= TXU_CNTRL_MGMT;
++ }
++ if (sw_txhdr->fixed_rate) {
++ sw_txhdr->desc.host.status_desc_addr = (0x01UL << 30) | sw_txhdr->rate_config;
++ } else {
++ sw_txhdr->desc.host.status_desc_addr = 0;
++ }
++
++ sw_txhdr->rwnx_vif->net_stats.tx_packets++;
++ sw_txhdr->rwnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len;
++ rwnx_hw->stats.last_tx = jiffies;
++ }
++ aicwf_frame_tx((void *)(rwnx_hw->sdiodev), skb);
++#endif
++#ifdef AICWF_USB_SUPPORT
++ if( ((sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) && \
++ ((*(skb->data+sw_txhdr->headroom)==0xd0) || (*(skb->data+sw_txhdr->headroom)==0x10) || (*(skb->data+sw_txhdr->headroom)==0x30))) || \
++ (sw_txhdr->desc.host.ethertype == 0x8e88) || (sw_txhdr->desc.host.ethertype == 0xb488)) {
++ // 0xd0:Action, 0x10:AssocRsp, 0x30:ReassocRsp, 0x8e88:EAPOL, 0xb488:WAPI (byte-swapped ethertypes)
++ sw_txhdr->need_cfm = 1;
++ sw_txhdr->desc.host.status_desc_addr = ((1<<31) | rwnx_hw->usb_env.txdesc_free_idx[0]);
++ aicwf_usb_host_txdesc_push(&(rwnx_hw->usb_env), 0, (long)(skb));
++ AICWFDBG(LOGINFO, "need cfm ethertype:%8x,user_idx=%d, skb=%p sta_idx:%d\n",
++ sw_txhdr->desc.host.ethertype,
++ rwnx_hw->usb_env.txdesc_free_idx[0],
++ skb,
++ sw_txhdr->desc.host.staid);
++ } else {
++ sw_txhdr->need_cfm = 0;
++ if (sw_txhdr->raw_frame) {
++ sw_txhdr->desc.host.flags |= TXU_CNTRL_MGMT;
++ }
++ if (sw_txhdr->fixed_rate) {
++ sw_txhdr->desc.host.status_desc_addr = (0x01UL << 30) | sw_txhdr->rate_config;
++ } else {
++ sw_txhdr->desc.host.status_desc_addr = 0;
++ }
++
++ sw_txhdr->rwnx_vif->net_stats.tx_packets++;
++ sw_txhdr->rwnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len;
++ rwnx_hw->stats.last_tx = jiffies;
++ }
++ aicwf_frame_tx((void *)(rwnx_hw->usbdev), skb);
++#endif
++#endif
++ #if 0
++ txq->hwq->credits[user]--;
++ #endif
++ rwnx_hw->stats.cfm_balance[hw_queue]++;
++}
++
++
++
++/**
++ * rwnx_tx_retry - Push an AMPDU pkt that needs to be retried
++ *
++ * @rwnx_hw: Driver main data
++ * @skb: pkt to re-push
++ * @txhdr: tx desc of the pkt to re-push
++ * @sw_retry: Indicates if the fw decided to retry this buffer
++ * (i.e. it has never been transmitted over the air)
++ *
++ * Called when a packet needs to be repushed to the firmware.
++ * First update sw descriptor and then queue it in the retry list.
++ */
++static void rwnx_tx_retry(struct rwnx_hw *rwnx_hw, struct sk_buff *skb,
++ struct rwnx_txhdr *txhdr, bool sw_retry)
++{
++ struct rwnx_sw_txhdr *sw_txhdr = txhdr->sw_hdr;
++ struct tx_cfm_tag *cfm = &txhdr->hw_hdr.cfm;
++ struct rwnx_txq *txq = sw_txhdr->txq;
++ int peek_off = offsetof(struct rwnx_hw_txhdr, cfm);
++ int peek_len = sizeof(((struct rwnx_hw_txhdr *)0)->cfm);
++
++ if (!sw_retry) {
++ /* update sw desc */
++ #if 0
++ sw_txhdr->desc.host.sn = cfm->sn;
++ sw_txhdr->desc.host.pn[0] = cfm->pn[0];
++ sw_txhdr->desc.host.pn[1] = cfm->pn[1];
++ sw_txhdr->desc.host.pn[2] = cfm->pn[2];
++ sw_txhdr->desc.host.pn[3] = cfm->pn[3];
++ sw_txhdr->desc.host.timestamp = cfm->timestamp;
++ #endif
++ sw_txhdr->desc.host.flags |= TXU_CNTRL_RETRY;
++
++ #ifdef CONFIG_RWNX_AMSDUS_TX
++ if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU)
++ rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].failed++;
++ #endif
++ }
++
++ /* MORE_DATA will be re-set if needed when pkt will be repushed */
++ sw_txhdr->desc.host.flags &= ~TXU_CNTRL_MORE_DATA;
++
++ cfm->status.value = 0;
++ dma_sync_single_for_device(rwnx_hw->dev, sw_txhdr->dma_addr + peek_off,
++ peek_len, DMA_BIDIRECTIONAL);
++
++ txq->credits++;
++ if (txq->credits > 0)
++ rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL);
++
++ /* Queue the buffer */
++ rwnx_txq_queue_skb(skb, txq, rwnx_hw, true);
++}
++
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++/* return size of subframe (including header) */
++static inline int rwnx_amsdu_subframe_length(struct ethhdr *eth, int eth_len)
++{
++ /* The ethernet header is replaced with an amsdu header of the same size;
++ only need to check if an LLC/SNAP header will be added */
++ int len = eth_len;
++
++ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
++ len += sizeof(rfc1042_header) + 2;
++ }
++
++ return len;
++}
++
++static inline bool rwnx_amsdu_is_aggregable(struct sk_buff *skb)
++{
++ /* need to add some check on buffer to see if it can be aggregated ? */
++ return true;
++}
++
++
++/**
++ * rwnx_amsdu_del_subframe_header - remove AMSDU header
++ *
++ * amsdu_txhdr: amsdu tx descriptor
++ *
++ * Move the ethernet header back to the "beginning" of the data buffer
++ * (it was moved in @rwnx_amsdu_add_subframe_header).
++ */
++static void rwnx_amsdu_del_subframe_header(struct rwnx_amsdu_txhdr *amsdu_txhdr)
++{
++ struct sk_buff *skb = amsdu_txhdr->skb;
++ struct ethhdr *eth;
++ u8 *pos;
++
++ pos = skb->data;
++ pos += sizeof(struct rwnx_amsdu_txhdr);
++ eth = (struct ethhdr*)pos;
++ pos += amsdu_txhdr->pad + sizeof(struct ethhdr);
++
++ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
++ pos += sizeof(rfc1042_header) + 2;
++ }
++
++ memmove(pos, eth, sizeof(*eth));
++ skb_pull(skb, (pos - skb->data));
++}
++
++/**
++ * rwnx_amsdu_add_subframe_header - Add AMSDU header and link subframe
++ *
++ * @rwnx_hw Driver main data
++ * @skb Buffer to aggregate
++ * @sw_txhdr Tx descriptor for the first A-MSDU subframe
++ *
++ * return 0 on success, -1 otherwise
++ *
++ * This function adds the A-MSDU header and LLC/SNAP header in the buffer
++ * and updates the sw_txhdr of the first subframe to link this buffer.
++ * If an error happens, the buffer will be queued as a normal buffer.
++ *
++ *
++ * Before After
++ * +-------------+ +-------------+
++ * | HEADROOM | | HEADROOM |
++ * | | +-------------+ <- data
++ * | | | amsdu_txhdr |
++ * | | | * pad size |
++ * | | +-------------+
++ * | | | ETH hdr | keep original eth hdr
++ * | | | | to restore it once transmitted
++ * | | +-------------+ <- packet_addr[x]
++ * | | | Pad |
++ * | | +-------------+
++ * data -> +-------------+ | AMSDU HDR |
++ * | ETH hdr | +-------------+
++ * | | | LLC/SNAP |
++ * +-------------+ +-------------+
++ * | DATA | | DATA |
++ * | | | |
++ * +-------------+ +-------------+
++ *
++ * Called with tx_lock held
++ */
++static int rwnx_amsdu_add_subframe_header(struct rwnx_hw *rwnx_hw,
++ struct sk_buff *skb,
++ struct rwnx_sw_txhdr *sw_txhdr)
++{
++ struct rwnx_amsdu *amsdu = &sw_txhdr->amsdu;
++ struct rwnx_amsdu_txhdr *amsdu_txhdr;
++ struct ethhdr *amsdu_hdr, *eth = (struct ethhdr *)skb->data;
++ int headroom_need, map_len, msdu_len;
++ dma_addr_t dma_addr;
++ u8 *pos, *map_start;
++
++ msdu_len = skb->len - sizeof(*eth);
++ headroom_need = sizeof(*amsdu_txhdr) + amsdu->pad +
++ sizeof(*amsdu_hdr);
++ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
++ headroom_need += sizeof(rfc1042_header) + 2;
++ msdu_len += sizeof(rfc1042_header) + 2;
++ }
++
++ /* we should have enough headroom (checked in xmit) */
++ if (WARN_ON(skb_headroom(skb) < headroom_need)) {
++ return -1;
++ }
++
++ /* allocate headroom */
++ pos = skb_push(skb, headroom_need);
++ amsdu_txhdr = (struct rwnx_amsdu_txhdr *)pos;
++ pos += sizeof(*amsdu_txhdr);
++
++ /* move eth header */
++ memmove(pos, eth, sizeof(*eth));
++ eth = (struct ethhdr *)pos;
++ pos += sizeof(*eth);
++
++ /* Add padding from previous subframe */
++ map_start = pos;
++ memset(pos, 0, amsdu->pad);
++ pos += amsdu->pad;
++
++ /* Add AMSDU hdr */
++ amsdu_hdr = (struct ethhdr *)pos;
++ memcpy(amsdu_hdr->h_dest, eth->h_dest, ETH_ALEN);
++ memcpy(amsdu_hdr->h_source, eth->h_source, ETH_ALEN);
++ amsdu_hdr->h_proto = htons(msdu_len);
++ pos += sizeof(*amsdu_hdr);
++
++ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
++ memcpy(pos, rfc1042_header, sizeof(rfc1042_header));
++ pos += sizeof(rfc1042_header);
++ }
++
++ /* MAP (and sync) memory for DMA */
++ map_len = msdu_len + amsdu->pad + sizeof(*amsdu_hdr);
++ dma_addr = dma_map_single(rwnx_hw->dev, map_start, map_len,
++ DMA_BIDIRECTIONAL);
++ if (WARN_ON(dma_mapping_error(rwnx_hw->dev, dma_addr))) {
++ pos -= sizeof(*eth);
++ memmove(pos, eth, sizeof(*eth));
++ skb_pull(skb, headroom_need);
++ return -1;
++ }
++
++ /* update amsdu_txhdr */
++ amsdu_txhdr->map_len = map_len;
++ amsdu_txhdr->dma_addr = dma_addr;
++ amsdu_txhdr->skb = skb;
++ amsdu_txhdr->pad = amsdu->pad;
++ amsdu_txhdr->msdu_len = msdu_len;
++
++ /* update rwnx_sw_txhdr (of the first subframe) */
++ BUG_ON(amsdu->nb != sw_txhdr->desc.host.packet_cnt);
++ sw_txhdr->desc.host.packet_addr[amsdu->nb] = dma_addr;
++ sw_txhdr->desc.host.packet_len[amsdu->nb] = map_len;
++ sw_txhdr->desc.host.packet_cnt++;
++ amsdu->nb++;
++
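++ /* Illustrative note: AMSDU_PADDING() rounds this subframe's length up to a
++ * 4-byte boundary, e.g. a 66-byte subframe leaves 2 bytes of padding to be
++ * inserted before the next subframe. */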
++ amsdu->pad = AMSDU_PADDING(map_len - amsdu->pad);
++ list_add_tail(&amsdu_txhdr->list, &amsdu->hdrs);
++ amsdu->len += map_len;
++
++ rwnx_ipc_sta_buffer(rwnx_hw, sw_txhdr->txq->sta,
++ sw_txhdr->txq->tid, msdu_len);
++
++ trace_amsdu_subframe(sw_txhdr);
++ return 0;
++}
++
++/**
++ * rwnx_amsdu_add_subframe - Add this buffer as an A-MSDU subframe if possible
++ *
++ * @rwnx_hw Driver main data
++ * @skb Buffer to aggregate if possible
++ * @sta Destination STA
++ * @txq sta's txq used for this buffer
++ *
++ * Try to aggregate the buffer in an A-MSDU. If it succeeds then the
++ * buffer is added as a new A-MSDU subframe with AMSDU and LLC/SNAP
++ * headers added (so FW won't have to modify this subframe).
++ *
++ * To be added as subframe :
++ * - sta must allow amsdu
++ * - buffer must be aggregable (to be defined)
++ * - at least one other aggregable buffer is pending in the queue
++ * or an a-msdu (with enough free space) is currently in progress
++ *
++ * returns true if the buffer has been added as an A-MSDU subframe, false otherwise
++ *
++ */
++static bool rwnx_amsdu_add_subframe(struct rwnx_hw *rwnx_hw, struct sk_buff *skb,
++ struct rwnx_sta *sta, struct rwnx_txq *txq)
++{
++ bool res = false;
++ struct ethhdr *eth;
++
++ /* immediately return if amsdu are not allowed for this sta */
++ if (!txq->amsdu_len || rwnx_hw->mod_params->amsdu_maxnb < 2 ||
++ !rwnx_amsdu_is_aggregable(skb)
++ )
++ return false;
++
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ if (txq->amsdu) {
++ /* aggregation already in progress, add this buffer if enough space is
++ available, otherwise end the current amsdu */
++ struct rwnx_sw_txhdr *sw_txhdr = txq->amsdu;
++ eth = (struct ethhdr *)(skb->data);
++
++ if (((sw_txhdr->amsdu.len + sw_txhdr->amsdu.pad +
++ rwnx_amsdu_subframe_length(eth, skb->len)) > txq->amsdu_len) ||
++ rwnx_amsdu_add_subframe_header(rwnx_hw, skb, sw_txhdr)) {
++ txq->amsdu = NULL;
++ goto end;
++ }
++
++ if (sw_txhdr->amsdu.nb >= rwnx_hw->mod_params->amsdu_maxnb) {
++ rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
++ /* max number of subframes reached */
++ txq->amsdu = NULL;
++ }
++ } else {
++ /* Check if a new amsdu can be started with the previous buffer
++ (if any) and this one */
++ struct sk_buff *skb_prev = skb_peek_tail(&txq->sk_list);
++ struct rwnx_txhdr *txhdr;
++ struct rwnx_sw_txhdr *sw_txhdr;
++ int len1, len2;
++
++ if (!skb_prev || !rwnx_amsdu_is_aggregable(skb_prev))
++ goto end;
++
++ txhdr = (struct rwnx_txhdr *)skb_prev->data;
++ sw_txhdr = txhdr->sw_hdr;
++ if ((sw_txhdr->amsdu.len) ||
++ (sw_txhdr->desc.host.flags & TXU_CNTRL_RETRY))
++ /* previous buffer is already a complete amsdu or a retry */
++ goto end;
++
++ eth = (struct ethhdr *)(skb_prev->data + sw_txhdr->headroom);
++ len1 = rwnx_amsdu_subframe_length(eth, (sw_txhdr->frame_len +
++ sizeof(struct ethhdr)));
++
++ eth = (struct ethhdr *)(skb->data);
++ len2 = rwnx_amsdu_subframe_length(eth, skb->len);
++
++ if (len1 + AMSDU_PADDING(len1) + len2 > txq->amsdu_len)
++ /* not enough space to aggregate those two buffers */
++ goto end;
++
++ /* Add subframe header.
++ Note: Fw will take care of adding the AMSDU header for the first
++ subframe while generating the 802.11 MAC header */
++ INIT_LIST_HEAD(&sw_txhdr->amsdu.hdrs);
++ sw_txhdr->amsdu.len = len1;
++ sw_txhdr->amsdu.nb = 1;
++ sw_txhdr->amsdu.pad = AMSDU_PADDING(len1);
++ if (rwnx_amsdu_add_subframe_header(rwnx_hw, skb, sw_txhdr))
++ goto end;
++
++ sw_txhdr->desc.host.flags |= TXU_CNTRL_AMSDU;
++
++ if (sw_txhdr->amsdu.nb < rwnx_hw->mod_params->amsdu_maxnb)
++ txq->amsdu = sw_txhdr;
++ else
++ rwnx_hw->stats.amsdus[sw_txhdr->amsdu.nb - 1].done++;
++ }
++
++ res = true;
++
++ end:
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++ return res;
++}
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++
++#ifdef CONFIG_BR_SUPPORT
++int aic_br_client_tx(struct rwnx_vif *vif, struct sk_buff **pskb)
++{
++ struct sk_buff *skb = *pskb;
++
++ /* if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE) */
++ {
++ void dhcp_flag_bcast(struct rwnx_vif *vif, struct sk_buff *skb);
++ int res, is_vlan_tag = 0, i, do_nat25 = 1;
++ unsigned short vlan_hdr = 0;
++ void *br_port = NULL;
++
++ /* mac_clone_handle_frame(priv, skb); */
++
++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
++ br_port = vif->ndev->br_port;
++#else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */
++ rcu_read_lock();
++ br_port = rcu_dereference(vif->ndev->rx_handler_data);
++ rcu_read_unlock();
++#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */
++#ifdef BR_SUPPORT_DEBUG
++ printk("SA=%pM, br_mac=%pM, type=0x%x, da[0]=%x, scdb=%pM, vif_type=%d\n", skb->data + MACADDRLEN, vif->br_mac, *((unsigned short *)(skb->data + MACADDRLEN * 2)),
++ skb->data[0], vif->scdb_mac,RWNX_VIF_TYPE(vif));
++#endif
++ spin_lock_bh(&vif->br_ext_lock);
++ if (!(skb->data[0] & 1) &&
++ br_port &&
++ memcmp(skb->data + MACADDRLEN, vif->br_mac, MACADDRLEN) &&
++ *((unsigned short *)(skb->data + MACADDRLEN * 2)) != __constant_htons(ETH_P_8021Q) &&
++ *((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP) &&
++ !memcmp(vif->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN) && vif->scdb_entry) {
++ memcpy(skb->data + MACADDRLEN, vif->ndev->dev_addr, MACADDRLEN);
++ vif->scdb_entry->ageing_timer = jiffies;
++ spin_unlock_bh(&vif->br_ext_lock);
++ } else
++ /* if (!priv->pmib->ethBrExtInfo.nat25_disable) */
++ {
++ /* if (priv->dev->br_port &&
++ * !memcmp(skb->data+MACADDRLEN, priv->br_mac, MACADDRLEN)) { */
++#if 1
++ if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q)) {
++ is_vlan_tag = 1;
++ vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
++ for (i = 0; i < 6; i++)
++ *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
++ skb_pull(skb, 4);
++ }
++ /* if SA == br_mac && skb== IP => copy SIP to br_ip ?? why */
++ if (!memcmp(skb->data + MACADDRLEN, vif->br_mac, MACADDRLEN) &&
++ (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)))
++ memcpy(vif->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
++
++ if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)) {
++ if (memcmp(vif->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN)) {
++ #if 1
++ void *scdb_findEntry(struct rwnx_vif *vif, unsigned char *macAddr, unsigned char *ipAddr);
++
++ vif->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(vif,
++ skb->data + MACADDRLEN, skb->data + WLAN_ETHHDR_LEN + 12);
++ if (vif->scdb_entry != NULL) {
++ memcpy(vif->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN);
++ memcpy(vif->scdb_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
++ vif->scdb_entry->ageing_timer = jiffies;
++ do_nat25 = 0;
++ }
++ #endif
++ } else {
++ if (vif->scdb_entry) {
++ vif->scdb_entry->ageing_timer = jiffies;
++ do_nat25 = 0;
++ } else {
++ memset(vif->scdb_mac, 0, MACADDRLEN);
++ memset(vif->scdb_ip, 0, 4);
++ }
++ }
++ }
++ spin_unlock_bh(&vif->br_ext_lock);
++#endif /* 1 */
++ if (do_nat25) {
++ #if 1
++ int nat25_db_handle(struct rwnx_vif *vif, struct sk_buff *skb, int method);
++ if (nat25_db_handle(vif, skb, NAT25_CHECK) == 0) {
++ struct sk_buff *newskb;
++
++ if (is_vlan_tag) {
++ skb_push(skb, 4);
++ for (i = 0; i < 6; i++)
++ *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
++ *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
++ *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
++ }
++
++ newskb = skb_copy(skb, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
++ if (newskb == NULL) {
++ /* priv->ext_stats.tx_drops++; */
++ printk("TX DROP: skb_copy fail!\n");
++ /* goto stop_proc; */
++ return -1;
++ }
++ dev_kfree_skb_any(skb);
++
++ *pskb = skb = newskb;
++ if (is_vlan_tag) {
++ vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
++ for (i = 0; i < 6; i++)
++ *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
++ skb_pull(skb, 4);
++ }
++ }
++
++ if (skb_is_nonlinear(skb))
++ printk("%s(): skb_is_nonlinear!!\n", __FUNCTION__);
++
++
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
++ res = skb_linearize(skb, GFP_ATOMIC);
++#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */
++ res = skb_linearize(skb);
++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */
++ if (res < 0) {
++ printk("TX DROP: skb_linearize fail!\n");
++ /* goto free_and_stop; */
++ return -1;
++ }
++
++ res = nat25_db_handle(vif, skb, NAT25_INSERT);
++ if (res < 0) {
++ if (res == -2) {
++ /* priv->ext_stats.tx_drops++; */
++ printk("TX DROP: nat25_db_handle fail!\n");
++ /* goto free_and_stop; */
++ return -1;
++
++ }
++ /* we just print warning message and let it go */
++ /* DEBUG_WARN("%s()-%d: nat25_db_handle INSERT Warning!\n", __FUNCTION__, __LINE__); */
++ /* return -1; */ /* return -1 will cause system crash on 2011/08/30! */
++ return 0;
++ }
++ #endif
++ }
++
++ memcpy(skb->data + MACADDRLEN, vif->ndev->dev_addr, MACADDRLEN);
++
++ dhcp_flag_bcast(vif, skb);
++
++ if (is_vlan_tag) {
++ skb_push(skb, 4);
++ for (i = 0; i < 6; i++)
++ *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
++ *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
++ *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
++ }
++ }
++#if 0
++ else {
++ if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q))
++ is_vlan_tag = 1;
++
++ if (is_vlan_tag) {
++ if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A_VALN(skb->data))
++ memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
++ } else {
++ if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A(skb->data))
++ memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
++ }
++ }
++#endif /* 0 */
++
++ /* check if SA is equal to our MAC */
++ if (memcmp(skb->data + MACADDRLEN, vif->ndev->dev_addr, MACADDRLEN)) {
++ /* priv->ext_stats.tx_drops++; */
++ printk("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n",
++ skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]);
++ /* goto free_and_stop; */
++ return -1;
++ }
++ }
++ printk("%s:exit\n",__func__);
++ return 0;
++}
++#endif /* CONFIG_BR_SUPPORT */
++
++
++/**
++ * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
++ * struct net_device *dev);
++ * Called when a packet needs to be transmitted.
++ * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
++ * (can also return NETDEV_TX_LOCKED if NETIF_F_LLTX)
++ *
++ * - Initialize the descriptor for this pkt (stored in skb before data)
++ * - Push the pkt in the corresponding Txq
++ * - If possible (i.e. credit available and not in PS) the pkt is pushed
++ * to fw
++ */
++netdev_tx_t rwnx_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct rwnx_vif *rwnx_vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_txhdr *txhdr;
++ struct rwnx_sw_txhdr *sw_txhdr;
++ struct txdesc_api *desc;
++ struct rwnx_sta *sta;
++ struct rwnx_txq *txq;
++ int headroom;
++ int max_headroom;
++ int hdr_pads;
++
++ u16 frame_len;
++ u16 frame_oft;
++ u8 tid;
++
++ struct ethhdr eth_t;
++
++#ifdef CONFIG_ONE_TXQ
++ skb->queue_mapping = rwnx_select_txq(rwnx_vif, skb);
++#endif
++
++ memcpy(&eth_t, skb->data, sizeof(struct ethhdr));
++
++ sk_pacing_shift_update(skb->sk, rwnx_hw->tcp_pacing_shift);
++ max_headroom = sizeof(struct rwnx_txhdr);
++
++ /* check whether the current skb can be used */
++ if (skb_shared(skb) || (skb_headroom(skb) < max_headroom) ||
++ (skb_cloned(skb) && (dev->priv_flags & IFF_BRIDGE_PORT))) {
++ struct sk_buff *newskb = skb_copy_expand(skb, max_headroom, 0,
++ GFP_ATOMIC);
++ if (unlikely(newskb == NULL))
++ goto free;
++
++ dev_kfree_skb_any(skb);
++
++ skb = newskb;
++ }
++
++ /* Get the STA id and TID information */
++ sta = rwnx_get_tx_priv(rwnx_vif, skb, &tid);
++ if (!sta)
++ goto free;
++
++ txq = rwnx_txq_sta_get(sta, tid, rwnx_hw);
++ if (txq->idx == TXQ_INACTIVE)
++ goto free;
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ if (rwnx_amsdu_add_subframe(rwnx_hw, skb, sta, txq))
++ return NETDEV_TX_OK;
++#endif
++
++#ifdef CONFIG_BR_SUPPORT
++ if (1) {//(check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
++ void *br_port = NULL;
++
++ #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
++ br_port = rwnx_vif->ndev->br_port;
++ #else
++ rcu_read_lock();
++ br_port = rcu_dereference(rwnx_vif->ndev->rx_handler_data);
++ rcu_read_unlock();
++ #endif
++
++ if (br_port) {
++ s32 res = aic_br_client_tx(rwnx_vif, &skb);
++ if (res == -1) {
++ goto free;
++ }
++ }
++ }
++#endif /* CONFIG_BR_SUPPORT */
++
++ /* Retrieve the pointer to the Ethernet data */
++ // eth = (struct ethhdr *)skb->data;
++
++ skb_pull(skb, 14);
++ //hdr_pads = RWNX_SWTXHDR_ALIGN_PADS((long)eth);
++ hdr_pads = RWNX_SWTXHDR_ALIGN_PADS((long)skb->data);
++ headroom = sizeof(struct rwnx_txhdr) + hdr_pads;
++
++ skb_push(skb, headroom);
++
++ txhdr = (struct rwnx_txhdr *)skb->data;
++ sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC);
++
++ if (unlikely(sw_txhdr == NULL))
++ goto free;
++ txhdr->sw_hdr = sw_txhdr;
++ desc = &sw_txhdr->desc;
++
++ frame_len = (u16)skb->len - headroom;// - sizeof(*eth);
++
++ sw_txhdr->txq = txq;
++ sw_txhdr->frame_len = frame_len;
++ sw_txhdr->rwnx_sta = sta;
++ sw_txhdr->rwnx_vif = rwnx_vif;
++ sw_txhdr->skb = skb;
++ sw_txhdr->headroom = headroom;
++ sw_txhdr->map_len = skb->len - offsetof(struct rwnx_txhdr, hw_hdr);
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ sw_txhdr->amsdu.len = 0;
++ sw_txhdr->amsdu.nb = 0;
++#endif
++ sw_txhdr->raw_frame = 0;
++ sw_txhdr->fixed_rate = 0;
++ // Fill-in the descriptor
++ memcpy(&desc->host.eth_dest_addr, eth_t.h_dest, ETH_ALEN);
++ memcpy(&desc->host.eth_src_addr, eth_t.h_source, ETH_ALEN);
++ desc->host.ethertype = eth_t.h_proto;
++ desc->host.staid = sta->sta_idx;
++ desc->host.tid = tid;
++ if (unlikely(rwnx_vif->wdev.iftype == NL80211_IFTYPE_AP_VLAN))
++ desc->host.vif_idx = rwnx_vif->ap_vlan.master->vif_index;
++ else
++ desc->host.vif_idx = rwnx_vif->vif_index;
++
++ if (rwnx_vif->use_4addr && (sta->sta_idx < NX_REMOTE_STA_MAX))
++ desc->host.flags = TXU_CNTRL_USE_4ADDR;
++ else
++ desc->host.flags = 0;
++
++ if ((rwnx_vif->tdls_status == TDLS_LINK_ACTIVE) &&
++ rwnx_vif->sta.tdls_sta &&
++ (memcmp(desc->host.eth_dest_addr.array, rwnx_vif->sta.tdls_sta->mac_addr, ETH_ALEN) == 0)) {
++ desc->host.flags |= TXU_CNTRL_TDLS;
++ rwnx_vif->sta.tdls_sta->tdls.last_tid = desc->host.tid;
++ //rwnx_vif->sta.tdls_sta->tdls.last_sn = desc->host.sn;
++ }
++
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MESH_POINT) {
++ if (rwnx_vif->is_resending) {
++ desc->host.flags |= TXU_CNTRL_MESH_FWD;
++ }
++ }
++
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ desc->host.packet_len[0] = frame_len;
++#else
++ desc->host.packet_len = frame_len;
++#endif
++
++ txhdr->hw_hdr.cfm.status.value = 0;
++
++ if (unlikely(rwnx_prep_tx(rwnx_hw, txhdr))) {
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
++ skb_pull(skb, headroom);
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_BUSY;
++ }
++
++ /* Fill-in TX descriptor */
++ frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr)
++ + hdr_pads;// + sizeof(*eth);
++ #if 0
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft;
++ desc->host.packet_cnt = 1;
++#else
++ desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft;
++#endif
++#endif
++ desc->host.status_desc_addr = sw_txhdr->dma_addr;
++
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false))
++ rwnx_hwq_process(rwnx_hw, txq->hwq);
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++
++ return NETDEV_TX_OK;
++
++free:
++ dev_kfree_skb_any(skb);
++
++ return NETDEV_TX_OK;
++}
++
++/**
++ * rwnx_start_mgmt_xmit - Transmit a management frame
++ *
++ * @vif: Vif that send the frame
++ * @sta: Destination of the frame. May be NULL if the destination is unknown
++ * to the AP.
++ * @params: Mgmt frame parameters
++ * @offchan: Indicate whether the frame must be sent via the offchan TXQ.
++ * (is it redundant with params->offchan ?)
++ * @cookie: updated with a unique value to identify the frame with upper layer
++ *
++ */
++
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta,
++ struct cfg80211_mgmt_tx_params *params, bool offchan,
++ u64 *cookie)
++#else
++int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta,
++ struct ieee80211_channel *channel, bool offchan,
++ unsigned int wait, const u8* buf, size_t len,
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
++ bool no_cck,
++ #endif
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++ bool dont_wait_for_ack,
++ #endif
++ u64 *cookie)
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
++{
++ struct rwnx_hw *rwnx_hw = vif->rwnx_hw;
++ struct rwnx_txhdr *txhdr;
++ struct rwnx_sw_txhdr *sw_txhdr;
++ struct txdesc_api *desc;
++ struct sk_buff *skb;
++ u16 frame_len, headroom, frame_oft;
++ u8 *data;
++ int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX;
++ struct rwnx_txq *txq;
++ bool robust;
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++ const u8 *buf = params->buf;
++ size_t len = params->len;
++ bool no_cck = params->no_cck;
++ #endif
++ headroom = sizeof(struct rwnx_txhdr);
++ frame_len = len;
++
++ //----------------------------------------------------------------------
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC;
++ }
++
++ /* Set TID and Queues indexes */
++ if (sta) {
++ txq = rwnx_txq_sta_get(sta, 8, rwnx_hw);
++ } else {
++ if (offchan)
++ txq = &rwnx_hw->txq[nx_off_chan_txq_idx];
++ else
++ txq = rwnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE);
++ }
++
++ /* Ensure that TXQ is active */
++ if (txq->idx == TXQ_INACTIVE) {
++ netdev_dbg(vif->ndev, "TXQ inactive\n");
++ return -EBUSY;
++ }
++
++ /*
++ * Create a SK Buff object that will contain the provided data
++ */
++ skb = dev_alloc_skb(headroom + frame_len);
++
++ if (!skb) {
++ return -ENOMEM;
++ }
++
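++ /* Note: the skb pointer itself serves as the cookie; the same value is
++ * reported back through cfg80211_mgmt_tx_status() from rwnx_txdatacfm()
++ * once the frame is confirmed. */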
++ *cookie = (unsigned long)skb;
++
++ /*
++ * Move skb->data pointer in order to reserve room for rwnx_txhdr
++ * headroom value will be equal to sizeof(struct rwnx_txhdr)
++ */
++ skb_reserve(skb, headroom);
++
++ /*
++ * Extend the buffer data area in order to contain the provided packet;
++ * the len value (for the skb) will be equal to params->len
++ */
++ data = skb_put(skb, frame_len);
++ /* Copy the provided data */
++ memcpy(data, buf, frame_len);
++
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
++ robust = ieee80211_is_robust_mgmt_frame(skb);
++#else
++ if (skb->len < 25)
++ robust = false;
++ else
++ robust = ieee80211_is_robust_mgmt_frame((void *)skb->data);
++#endif
++
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
++ /* Update CSA counter if present */
++ if (unlikely(params->n_csa_offsets) &&
++ vif->wdev.iftype == NL80211_IFTYPE_AP &&
++ vif->ap.csa) {
++ int i;
++
++ data = skb->data;
++ for (i = 0; i < params->n_csa_offsets ; i++) {
++ data[params->csa_offsets[i]] = vif->ap.csa->count;
++ }
++ }
++ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) */
++
++ /*
++ * Go back to the beginning of the allocated data area;
++ * the skb->data pointer will move backward
++ */
++ skb_push(skb, headroom);
++
++ //----------------------------------------------------------------------
++
++ /* Fill the TX Header */
++ txhdr = (struct rwnx_txhdr *)skb->data;
++
++ txhdr->hw_hdr.cfm.status.value = 0;
++
++ //----------------------------------------------------------------------
++
++ /* Fill the SW TX Header */
++ sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC);
++
++ if (unlikely(sw_txhdr == NULL)) {
++ dev_kfree_skb(skb);
++ return -ENOMEM;
++ }
++
++
++ txhdr->sw_hdr = sw_txhdr;
++
++ sw_txhdr->txq = txq;
++ sw_txhdr->frame_len = frame_len;
++ sw_txhdr->rwnx_sta = sta;
++ sw_txhdr->rwnx_vif = vif;
++ sw_txhdr->skb = skb;
++ sw_txhdr->headroom = headroom;
++ sw_txhdr->map_len = skb->len - offsetof(struct rwnx_txhdr, hw_hdr);
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ sw_txhdr->amsdu.len = 0;
++ sw_txhdr->amsdu.nb = 0;
++#endif
++ sw_txhdr->raw_frame = 0;
++ sw_txhdr->fixed_rate = 0;
++ //----------------------------------------------------------------------
++
++ /* Fill the Descriptor to be provided to the MAC SW */
++ desc = &sw_txhdr->desc;
++
++ desc->host.ethertype = 0;
++ desc->host.staid = (sta) ? sta->sta_idx : 0xFF;
++ desc->host.vif_idx = vif->vif_index;
++ desc->host.tid = 0xFF;
++ desc->host.flags = TXU_CNTRL_MGMT;
++ if (robust)
++ desc->host.flags |= TXU_CNTRL_MGMT_ROBUST;
++
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ desc->host.packet_len[0] = frame_len;
++#else
++ desc->host.packet_len = frame_len;
++#endif
++
++ if (no_cck) {
++ desc->host.flags |= TXU_CNTRL_MGMT_NO_CCK;
++ }
++
++ /* Get DMA Address */
++ if (unlikely(rwnx_prep_tx(rwnx_hw, txhdr))) {
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
++ dev_kfree_skb(skb);
++ return -EBUSY;
++ }
++
++ frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr);
++ #if 0
++#ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft;
++ desc->host.packet_cnt = 1;
++#else
++ desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft;
++#endif
++ #endif
++ desc->host.status_desc_addr = sw_txhdr->dma_addr;
++
++ //----------------------------------------------------------------------
++
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ AICWFDBG(LOGDEBUG, "%s sta:%p skb:%p desc->host.staid:%d \r\n", __func__, sta, skb, desc->host.staid);
++ if (rwnx_txq_queue_skb(skb, txq, rwnx_hw, false))
++ rwnx_hwq_process(rwnx_hw, txq->hwq);
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++
++ return 0;
++}
++
++#ifdef CONFIG_RWNX_MON_XMIT
++/**
++ * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
++ * struct net_device *dev);
++ * Called when a packet needs to be transmitted.
++ * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
++ * (can also return NETDEV_TX_LOCKED if NETIF_F_LLTX)
++ *
++ * - Initialize the descriptor for this pkt (stored in skb before data)
++ * - Push the pkt in the corresponding Txq
++ * - If possible (i.e. credit available and not in PS) the pkt is pushed
++ * to fw
++ */
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
++#define IEEE80211_RADIOTAP_MCS_HAVE_STBC 0x20
++#define IEEE80211_RADIOTAP_MCS_STBC_MASK 0x60
++#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT 5
++#endif
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0))
++#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01
++#endif
++
++netdev_tx_t rwnx_start_monitor_if_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ int rtap_len, ret, idx, tmp_len;
++ struct ieee80211_radiotap_header *rtap_hdr; // net/ieee80211_radiotap.h
++ struct ieee80211_radiotap_iterator iterator; // net/cfg80211.h
++ u8_l *rtap_buf = (u8_l *)skb->data;
++ u8_l rate;
++
++ struct rwnx_vif *vif = netdev_priv(dev);
++ struct rwnx_hw *rwnx_hw = vif->rwnx_hw;
++ struct rwnx_txhdr *txhdr;
++ struct rwnx_sw_txhdr *sw_txhdr;
++ struct txdesc_api *desc;
++ struct rwnx_sta *sta;
++ struct rwnx_txq *txq;
++ u16_l frame_len, headroom, frame_oft;
++ u8_l tid, rate_fmt = FORMATMOD_NON_HT, rate_idx = 0, txsig_bw = PHY_CHNL_BW_20;
++ u8_l *pframe, *data;
++ bool robust;
++ struct sk_buff *skb_mgmt;
++ bool offchan = false;
++ int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX;
++
++ rtap_hdr = (struct ieee80211_radiotap_header*)(rtap_buf);
++ rtap_len = ieee80211_get_radiotap_len(rtap_buf);
++ frame_len = skb->len;
++
++ printk("rwnx_start_monitor_if_xmit, skb_len=%d, rtap_len=%d\n", skb->len, rtap_len);
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC;
++ }
++
++
++ if (unlikely(rtap_hdr->it_version))
++ goto free_tag;
++
++ if (unlikely(skb->len < rtap_len))
++ goto free_tag;
++
++ if (unlikely(rtap_len < sizeof(struct ieee80211_radiotap_header)))
++ goto free_tag;
++
++ frame_len -= rtap_len;
++ pframe = rtap_buf + rtap_len;
++
++ // Parse radiotap for injection items and overwrite attribs as needed
++ ret = ieee80211_radiotap_iterator_init(&iterator, rtap_hdr, rtap_len, NULL);
++ while (!ret) {
++ ret = ieee80211_radiotap_iterator_next(&iterator);
++ if (ret) {
++ continue;
++ }
++ switch (iterator.this_arg_index) {
++ case IEEE80211_RADIOTAP_RATE:
++ // This is basic 802.11b/g rate; use MCS/VHT for higher rates
++ rate = *iterator.this_arg;
++ printk("rate=0x%x\n", rate);
++ for (idx = 0; idx < HW_RATE_MAX; idx++) {
++ if ((rate * 5) == tx_legrates_lut_rate[idx]) {
++ break;
++ }
++ }
++ if (idx < HW_RATE_MAX) {
++ rate_idx = idx;
++ } else {
++ printk("invalid radiotap rate: %d\n", rate);
++ }
++ break;
++
++ case IEEE80211_RADIOTAP_TX_FLAGS: {
++ u16_l txflags = get_unaligned_le16(iterator.this_arg);
++ printk("txflags=0x%x\n", txflags);
++ if ((txflags & IEEE80211_RADIOTAP_F_TX_NOACK) == 0) {
++ printk(" TX_NOACK\n");
++ }
++ if (txflags & 0x0010) { // Use preconfigured seq num
++ // NOTE: this is currently ignored due to qos_en=_FALSE and HW seq num override
++ printk(" GetSequence\n");
++ }
++ }
++ break;
++
++ case IEEE80211_RADIOTAP_MCS: {
++ u8_l mcs_have = iterator.this_arg[0];
++ printk("mcs_have=0x%x\n", mcs_have);
++ rate_fmt = FORMATMOD_HT_MF;
++ if (mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_BW) {
++ u8_l bw = (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_BW_MASK);
++ u8_l ch_offset = 0;
++ if (bw == IEEE80211_RADIOTAP_MCS_BW_40) {
++ txsig_bw = PHY_CHNL_BW_40;
++ } else if (bw == IEEE80211_RADIOTAP_MCS_BW_20L) {
++ bw = IEEE80211_RADIOTAP_MCS_BW_20;
++ ch_offset = 1; // CHNL_OFFSET_LOWER;
++ } else if (bw == IEEE80211_RADIOTAP_MCS_BW_20U) {
++ bw = IEEE80211_RADIOTAP_MCS_BW_20;
++ ch_offset = 2; // CHNL_OFFSET_UPPER;
++ }
++ printk(" bw=%d, ch_offset=%d\n", bw, ch_offset);
++ }
++ if (mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_MCS) {
++ u8_l fixed_rate = iterator.this_arg[2] & 0x7f;
++ if (fixed_rate > 31) {
++ fixed_rate = 0;
++ }
++ rate_idx = fixed_rate;
++ printk(" fixed_rate=0x%x\n", fixed_rate);
++ }
++ if ((mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_GI) && (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_SGI)) {
++ printk(" sgi\n");
++ }
++ if ((mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_FEC) && (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_FEC_LDPC)) {
++ printk(" ldpc\n");
++ }
++ if (mcs_have & IEEE80211_RADIOTAP_MCS_HAVE_STBC) {
++ u8 stbc = (iterator.this_arg[1] & IEEE80211_RADIOTAP_MCS_STBC_MASK) >> IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
++ printk(" stbc=0x%x\n", stbc);
++ }
++ }
++ break;
++
++ case IEEE80211_RADIOTAP_VHT: {
++ unsigned int mcs, nss;
++ u8 known = iterator.this_arg[0];
++ u8 flags = iterator.this_arg[2];
++ rate_fmt = FORMATMOD_VHT;
++ printk("known=0x%x, flags=0x%x\n", known, flags);
++ // NOTE: this code currently only supports 1SS for radiotap defined rates
++ if ((known & IEEE80211_RADIOTAP_VHT_KNOWN_STBC) && (flags & IEEE80211_RADIOTAP_VHT_FLAG_STBC)) {
++ printk(" stbc\n");
++ }
++ if ((known & IEEE80211_RADIOTAP_VHT_KNOWN_GI) && (flags & IEEE80211_RADIOTAP_VHT_FLAG_SGI)) {
++ printk(" sgi\n");
++ }
++ if (known & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
++ u8_l bw = iterator.this_arg[3] & 0x1F;
++ printk(" bw=0x%x\n",bw);
++ // NOTE: there are various L and U, but we just use straight 20/40/80
++ // since it's not clear how to set CHNL_OFFSET_LOWER/_UPPER with different
++ // sideband sizes/configurations. TODO.
++ // Also, any 160 is treated as 80 due to lack of WIDTH_160.
++ txsig_bw = PHY_CHNL_BW_40;
++ if (bw == 0) {
++ txsig_bw = PHY_CHNL_BW_20;
++ printk(" 20M\n");
++ } else if (bw >=1 && bw <= 3) {
++ printk(" 40M\n");
++ } else if (bw >=4 && bw <= 10) {
++ printk(" 80M\n");
++ } else if (bw >= 11 && bw <= 25) {
++ printk(" 160M\n");
++ }
++ }
++ // User 0
++ nss = iterator.this_arg[4] & 0x0F; // Number of spatial streams
++ printk(" nss=0x%x\n", nss);
++ if (nss > 0) {
++ if (nss > 4) nss = 4;
++ mcs = (iterator.this_arg[4]>>4) & 0x0F; // MCS rate index
++ if (mcs > 8) mcs = 9;
++ rate_idx = mcs;
++ printk(" mcs=0x%x\n", mcs);
++ if (iterator.this_arg[8] & IEEE80211_RADIOTAP_CODING_LDPC_USER0) {
++ printk(" ldpc\n");
++ }
++ }
++ }
++ break;
++
++ case IEEE80211_RADIOTAP_HE: {
++ u16 data1 = ((u16)iterator.this_arg[1] << 8) | iterator.this_arg[0];
++ u16 data2 = ((u16)iterator.this_arg[3] << 8) | iterator.this_arg[2];
++ u16 data3 = ((u16)iterator.this_arg[5] << 8) | iterator.this_arg[4];
++ u16 data5 = ((u16)iterator.this_arg[9] << 8) | iterator.this_arg[8];
++ u8 fmt_he = data1 & IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK;
++ if (fmt_he == IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU) {
++ rate_fmt = FORMATMOD_HE_MU;
++ } else if (fmt_he == IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU) {
++ rate_fmt = FORMATMOD_HE_ER;
++ } else {
++ rate_fmt = FORMATMOD_HE_SU;
++ }
++ if (data1 & IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN) {
++ u8 mcs = (data3 & IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS) >> 8;
++ if (mcs > 11) mcs = 11;
++ rate_idx = mcs;
++ }
++ if (data1 & IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN) {
++ u8 bw = data5 & IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC;
++ txsig_bw = (bw == IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ) ? PHY_CHNL_BW_20 : PHY_CHNL_BW_40;
++ }
++ if (data2 & IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN) {
++ u8 gi = (data5 & IEEE80211_RADIOTAP_HE_DATA5_GI) >> 4;
++ printk(" gi: %d\n", gi);
++ }
++ }
++ break;
++
++ default:
++ printk("unparsed arg: 0x%x\n",iterator.this_arg_index);
++ break;
++ }
++ }
++
++ #if 0
++ // dump buffer
++ tmp_len = 128;
++ if (skb->len < 128) {
++ tmp_len = skb->len;
++ }
++ for (idx = 0; idx < tmp_len; idx+=16) {
++ printk("[%04X] %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", idx,
++ rtap_buf[idx+0],rtap_buf[idx+1],rtap_buf[idx+2],rtap_buf[idx+3],
++ rtap_buf[idx+4],rtap_buf[idx+5],rtap_buf[idx+6],rtap_buf[idx+7],
++ rtap_buf[idx+8],rtap_buf[idx+9],rtap_buf[idx+10],rtap_buf[idx+11],
++ rtap_buf[idx+12],rtap_buf[idx+13],rtap_buf[idx+14],rtap_buf[idx+15]);
++ }
++ #endif
++
++ /* Get the STA id and TID information */
++ sta = rwnx_get_tx_priv(vif, skb, &tid);
++ //if (!sta) {
++ // printk("sta=null, tid=0x%x\n", tid);
++ //}
++ /* Set TID and Queues indexes */
++ if (sta) {
++ txq = rwnx_txq_sta_get(sta, 8, rwnx_hw);
++ } else {
++ if (offchan)
++ txq = &rwnx_hw->txq[nx_off_chan_txq_idx];
++ else
++ txq = rwnx_txq_vif_get(vif, NX_UNK_TXQ_TYPE);
++ }
++ if (txq->idx == TXQ_INACTIVE) {
++ printk("TXQ_INACTIVE\n");
++ goto free_tag;
++ }
++ // prepare to xmit
++ headroom = sizeof(struct rwnx_txhdr);
++ skb_mgmt = dev_alloc_skb(headroom + frame_len);
++ if (!skb_mgmt) {
++ printk("skb_mgmt alloc fail\n");
++ goto free_tag;
++ }
++ skb_reserve(skb_mgmt, headroom);
++ data = skb_put(skb_mgmt, frame_len);
++ /* Copy the provided data */
++ memcpy(data, pframe, frame_len);
++ robust = ieee80211_is_robust_mgmt_frame(
++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0))
++ (void*)skb_mgmt
++#else
++ skb_mgmt
++#endif
++ );
++ skb_push(skb_mgmt, headroom);
++ /* Fill the TX Header */
++ txhdr = (struct rwnx_txhdr *)skb_mgmt->data;
++ txhdr->hw_hdr.cfm.status.value = 0;
++ /* Fill the SW TX Header */
++ sw_txhdr = kmem_cache_alloc(rwnx_hw->sw_txhdr_cache, GFP_ATOMIC);
++ if (unlikely(sw_txhdr == NULL)) {
++ dev_kfree_skb(skb_mgmt);
++ printk("sw_txhdr alloc fail\n");
++ goto free_tag;
++ }
++ txhdr->sw_hdr = sw_txhdr;
++ sw_txhdr->txq = txq;
++ sw_txhdr->frame_len = frame_len;
++ sw_txhdr->rwnx_sta = sta;
++ sw_txhdr->rwnx_vif = vif;
++ sw_txhdr->skb = skb_mgmt;
++ sw_txhdr->headroom = headroom;
++ sw_txhdr->map_len = skb_mgmt->len - offsetof(struct rwnx_txhdr, hw_hdr);
++ sw_txhdr->raw_frame = 1;
++ sw_txhdr->fixed_rate = 1;
++ sw_txhdr->rate_config = ((rate_fmt << FORMAT_MOD_TX_RCX_OFT) & FORMAT_MOD_TX_RCX_MASK) |
++ ((txsig_bw << BW_TX_RCX_OFT) & BW_TX_RCX_MASK) |
++ ((rate_idx << MCS_INDEX_TX_RCX_OFT) & MCS_INDEX_TX_RCX_MASK); // from radiotap
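++ /* Illustrative example: an HT injection at MCS 7 over 40 MHz would have
++ * rate_fmt = FORMATMOD_HT_MF, rate_idx = 7 and txsig_bw = PHY_CHNL_BW_40,
++ * which the shifts/masks above pack into a single fixed-rate word. */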
++ /* Fill the Descriptor to be provided to the MAC SW */
++ desc = &sw_txhdr->desc;
++ desc->host.staid = (sta) ? sta->sta_idx : 0xFF;
++ desc->host.vif_idx = vif->vif_index;
++ desc->host.tid = 0xFF;
++ desc->host.flags = TXU_CNTRL_MGMT;
++ if (robust) {
++ desc->host.flags |= TXU_CNTRL_MGMT_ROBUST;
++ }
++ frame_oft = sizeof(struct rwnx_txhdr) - offsetof(struct rwnx_txhdr, hw_hdr);
++ #if 0
++ #ifdef CONFIG_RWNX_SPLIT_TX_BUF
++ desc->host.packet_addr[0] = sw_txhdr->dma_addr + frame_oft;
++ desc->host.packet_len[0] = frame_len;
++ desc->host.packet_cnt = 1;
++ #else
++ desc->host.packet_addr = sw_txhdr->dma_addr + frame_oft;
++ desc->host.packet_len = frame_len;
++ #endif
++ #else
++ desc->host.packet_len = frame_len;
++ #endif
++
++ desc->host.status_desc_addr = sw_txhdr->dma_addr;
++
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ if (rwnx_txq_queue_skb(skb_mgmt, txq, rwnx_hw, false))
++ rwnx_hwq_process(rwnx_hw, txq->hwq);
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++
++free_tag:
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
++}
++#endif
++
++/**
++ * rwnx_txdatacfm - FW callback for TX confirmation
++ *
++ * called with tx_lock hold
++ */
++int rwnx_txdatacfm(void *pthis, void *host_id)
++{
++ struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)pthis;
++ struct sk_buff *skb = host_id;
++ struct rwnx_txhdr *txhdr;
++ union rwnx_hw_txstatus rwnx_txst;
++ struct rwnx_sw_txhdr *sw_txhdr;
++ struct rwnx_hwq *hwq;
++ struct rwnx_txq *txq;
++ u16 headroom;
++ //int peek_off = offsetof(struct rwnx_hw_txhdr, cfm);
++ //int peek_len = sizeof(((struct rwnx_hw_txhdr *)0)->cfm);
++
++ txhdr = (struct rwnx_txhdr *)skb->data;
++ sw_txhdr = txhdr->sw_hdr;
++
++ /* Read status in the TX control header */
++ rwnx_txst = txhdr->hw_hdr.cfm.status;
++
++ /* Check status in the header. If status is null, it means that the buffer
++ * was not transmitted and we have to return immediately */
++ if (rwnx_txst.value == 0) {
++ return -1;
++ }
++
++#ifdef AICWF_USB_SUPPORT
++ if (rwnx_hw->usbdev->state == USB_DOWN_ST) {
++ headroom = sw_txhdr->headroom;
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
++ skb_pull(skb, headroom);
++ consume_skb(skb);
++ return 0;
++ }
++#endif
++#ifdef AICWF_SDIO_SUPPORT
++ if(rwnx_hw->sdiodev->bus_if->state == BUS_DOWN_ST) {
++ headroom = sw_txhdr->headroom;
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
++ skb_pull(skb, headroom);
++ consume_skb(skb);
++ return 0;
++ }
++#endif
++
++ txq = sw_txhdr->txq;
++ /* don't use txq->hwq as it may have changed between push and confirm */
++ hwq = &rwnx_hw->hwq[sw_txhdr->hw_queue];
++ rwnx_txq_confirm_any(rwnx_hw, txq, hwq, sw_txhdr);
++
++ /* Update txq and HW queue credits */
++ if (sw_txhdr->desc.host.flags & TXU_CNTRL_MGMT) {
++ trace_printk("done=%d retry_required=%d sw_retry_required=%d acknowledged=%d\n",
++ rwnx_txst.tx_done, rwnx_txst.retry_required,
++ rwnx_txst.sw_retry_required, rwnx_txst.acknowledged);
++#ifdef CREATE_TRACE_POINTS
++ trace_mgmt_cfm(sw_txhdr->rwnx_vif->vif_index,
++ (sw_txhdr->rwnx_sta) ? sw_txhdr->rwnx_sta->sta_idx : 0xFF,
++ rwnx_txst.acknowledged);
++#endif
++ /* Confirm transmission to CFG80211 */
++ cfg80211_mgmt_tx_status(&sw_txhdr->rwnx_vif->wdev,
++ (unsigned long)skb,
++ (skb->data + sw_txhdr->headroom),
++ sw_txhdr->frame_len,
++ rwnx_txst.acknowledged,
++ GFP_ATOMIC);
++ } else if ((txq->idx != TXQ_INACTIVE) &&
++ (rwnx_txst.retry_required || rwnx_txst.sw_retry_required)) {
++ bool sw_retry = (rwnx_txst.sw_retry_required) ? true : false;
++
++ /* Reset the status */
++ txhdr->hw_hdr.cfm.status.value = 0;
++
++ /* The confirmed packet was part of an AMPDU and not acked
++ * correctly, so reinject it in the TX path to be retried */
++ rwnx_tx_retry(rwnx_hw, skb, txhdr, sw_retry);
++ return 0;
++ }
++#ifdef CREATE_TRACE_POINTS
++ trace_skb_confirm(skb, txq, hwq, &txhdr->hw_hdr.cfm);
++#endif
++ /* The STA may have disconnected (and its txq stopped) while buffers were
++ stored in the fw. In this case do nothing when they are returned */
++ if (txq->idx != TXQ_INACTIVE) {
++ #if 0
++ if (txhdr->hw_hdr.cfm.credits) {
++ txq->credits += txhdr->hw_hdr.cfm.credits;
++ if (txq->credits <= 0)
++ rwnx_txq_stop(txq, RWNX_TXQ_STOP_FULL);
++ else if (txq->credits > 0)
++ rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL);
++ }
++ #endif
++
++ /* continue service period */
++ if (unlikely(txq->push_limit && !rwnx_txq_is_full(txq))) {
++ rwnx_txq_add_to_hw_list(txq);
++ }
++ }
++
++ if (txhdr->hw_hdr.cfm.ampdu_size &&
++ txhdr->hw_hdr.cfm.ampdu_size < IEEE80211_MAX_AMPDU_BUF)
++ rwnx_hw->stats.ampdus_tx[txhdr->hw_hdr.cfm.ampdu_size - 1]++;
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ txq->amsdu_len = txhdr->hw_hdr.cfm.amsdu_size;
++#endif
++
++ /* Update statistics */
++ sw_txhdr->rwnx_vif->net_stats.tx_packets++;
++ sw_txhdr->rwnx_vif->net_stats.tx_bytes += sw_txhdr->frame_len;
++
++ /* Release SKBs */
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ if (sw_txhdr->desc.host.flags & TXU_CNTRL_AMSDU) {
++ struct rwnx_amsdu_txhdr *amsdu_txhdr;
++ list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
++ rwnx_amsdu_del_subframe_header(amsdu_txhdr);
++ consume_skb(amsdu_txhdr->skb);
++ }
++ }
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++
++ skb_pull(skb, sw_txhdr->headroom); /* read headroom before freeing sw_txhdr */
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
++ consume_skb(skb);
++
++ return 0;
++}
++
++/**
++ * rwnx_txq_credit_update - Update credit for one txq
++ *
++ * @rwnx_hw: Driver main data
++ * @sta_idx: STA idx
++ * @tid: TID
++ * @update: offset to apply in txq credits
++ *
++ * Called when fw send ME_TX_CREDITS_UPDATE_IND message.
++ * Apply @update to txq credits, and stop/start the txq if needed
++ */
++void rwnx_txq_credit_update(struct rwnx_hw *rwnx_hw, int sta_idx, u8 tid,
++ s8 update)
++{
++ struct rwnx_sta *sta = &rwnx_hw->sta_table[sta_idx];
++ struct rwnx_txq *txq;
++
++ txq = rwnx_txq_sta_get(sta, tid, rwnx_hw);
++
++ spin_lock_bh(&rwnx_hw->tx_lock);
++
++ if (txq->idx != TXQ_INACTIVE) {
++ //txq->credits += update;
++#ifdef CREATE_TRACE_POINTS
++ trace_credit_update(txq, update);
++#endif
++ if (txq->credits <= 0)
++ rwnx_txq_stop(txq, RWNX_TXQ_STOP_FULL);
++ else
++ rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL);
++ }
++
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_tx.h
+@@ -0,0 +1,198 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_tx.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++#ifndef _RWNX_TX_H_
++#define _RWNX_TX_H_
++
++#include <linux/ieee80211.h>
++#include <net/cfg80211.h>
++#include <linux/netdevice.h>
++#include "lmac_types.h"
++#include "ipc_shared.h"
++#include "rwnx_txq.h"
++#include "hal_desc.h"
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
++#define IEEE80211_NUM_TIDS 16
++#endif
++
++#define RWNX_HWQ_BK 0
++#define RWNX_HWQ_BE 1
++#define RWNX_HWQ_VI 2
++#define RWNX_HWQ_VO 3
++#define RWNX_HWQ_BCMC 4
++#define RWNX_HWQ_NB NX_TXQ_CNT
++#define RWNX_HWQ_ALL_ACS (RWNX_HWQ_BK | RWNX_HWQ_BE | RWNX_HWQ_VI | RWNX_HWQ_VO)
++#define RWNX_HWQ_ALL_ACS_BIT ( BIT(RWNX_HWQ_BK) | BIT(RWNX_HWQ_BE) | \
++ BIT(RWNX_HWQ_VI) | BIT(RWNX_HWQ_VO) )
++
++#define RWNX_TX_LIFETIME_MS 1000
++#define RWNX_TX_MAX_RATES NX_TX_MAX_RATES
++
++#define RWNX_SWTXHDR_ALIGN_SZ 4
++#define RWNX_SWTXHDR_ALIGN_MSK (RWNX_SWTXHDR_ALIGN_SZ - 1)
++#define RWNX_SWTXHDR_ALIGN_PADS(x) \
++ ((RWNX_SWTXHDR_ALIGN_SZ - ((x) & RWNX_SWTXHDR_ALIGN_MSK)) \
++ & RWNX_SWTXHDR_ALIGN_MSK)
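++/* Illustrative example: RWNX_SWTXHDR_ALIGN_PADS(x) returns the number of pad
++ * bytes (0..3) needed to round x up to a 4-byte boundary, e.g. an address
++ * ending in ...2 yields 2 bytes of padding. */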
++#if RWNX_SWTXHDR_ALIGN_SZ & RWNX_SWTXHDR_ALIGN_MSK
++#error bad RWNX_SWTXHDR_ALIGN_SZ
++#endif
++
++#define AMSDU_PADDING(x) ((4 - ((x) & 0x3)) & 0x3)
++
++#define TXU_CNTRL_RETRY BIT(0)
++#define TXU_CNTRL_MORE_DATA BIT(2)
++#define TXU_CNTRL_MGMT BIT(3)
++#define TXU_CNTRL_MGMT_NO_CCK BIT(4)
++#define TXU_CNTRL_AMSDU BIT(6)
++#define TXU_CNTRL_MGMT_ROBUST BIT(7)
++#define TXU_CNTRL_USE_4ADDR BIT(8)
++#define TXU_CNTRL_EOSP BIT(9)
++#define TXU_CNTRL_MESH_FWD BIT(10)
++#define TXU_CNTRL_TDLS BIT(11)
++
++extern const int rwnx_tid2hwq[IEEE80211_NUM_TIDS];
++
++/**
++ * struct rwnx_amsdu_txhdr - Structure added in skb headroom (instead of
++ * rwnx_txhdr) for amsdu subframe buffer (except for the first subframe
++ * that has a normal rwnx_txhdr)
++ *
++ * @list List of other amsdu subframe (rwnx_sw_txhdr.amsdu.hdrs)
++ * @map_len Length to be downloaded for this subframe
++ * @dma_addr Buffer address from the embedded side's point of view
++ * @skb skb
++ * @pad padding added before this subframe
++ * (only used when the amsdu must be dismantled)
++ * @msdu_len Size, in bytes, of the MSDU (without padding nor amsdu header)
++ */
++struct rwnx_amsdu_txhdr {
++ struct list_head list;
++ size_t map_len;
++ dma_addr_t dma_addr;
++ struct sk_buff *skb;
++ u16 pad;
++ u16 msdu_len;
++};
++
++/**
++ * struct rwnx_amsdu - Structure to manage creation of an A-MSDU, updated
++ * only in the first subframe of an A-MSDU
++ *
++ * @hdrs List of subframes (struct rwnx_amsdu_txhdr)
++ * @len Current size for this A-MSDU (doesn't take padding into account)
++ * 0 means that no amsdu is in progress
++ * @nb Number of subframes in the amsdu
++ * @pad Padding to add before adding a new subframe
++ */
++struct rwnx_amsdu {
++ struct list_head hdrs;
++ u16 len;
++ u8 nb;
++ u8 pad;
++};
++
++/**
++ * struct rwnx_sw_txhdr - Software part of tx header
++ *
++ * @rwnx_sta sta to which this buffer is addressed
++ * @rwnx_vif vif that sent the buffer
++ * @txq pointer to TXQ used to send the buffer
++ * @hw_queue Index of the HWQ used to push the buffer.
++ * May be different than txq->hwq->id on confirmation.
++ * @frame_len Size of the frame (does not include the MAC header)
++ * (Only used to update stat, can't we use skb->len instead ?)
++ * @headroom Headroom added in skb to add rwnx_txhdr
++ * (Only used to remove it before freeing skb, is it needed ?)
++ * @amsdu Description of amsdu whose first subframe is this buffer
++ * (amsdu.nb = 0 means this buffer is not part of amsdu)
++ * @skb skb received from transmission
++ * @map_len Length mapped for DMA (only rwnx_hw_txhdr and data are mapped)
++ * @dma_addr DMA address after mapping
++ * @desc Buffer description that will be copied in shared mem for FW
++ */
++struct rwnx_sw_txhdr {
++ struct rwnx_sta *rwnx_sta;
++ struct rwnx_vif *rwnx_vif;
++ struct rwnx_txq *txq;
++ u8 hw_queue;
++ u16 frame_len;
++ u16 headroom;
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ struct rwnx_amsdu amsdu;
++#endif
++ u32 need_cfm;
++ struct sk_buff *skb;
++
++ size_t map_len;
++ dma_addr_t dma_addr;
++ struct txdesc_api desc;
++ u8 raw_frame;
++ u8 fixed_rate;
++ u16 rate_config;
++};
++
++/**
++ * struct rwnx_txhdr - Structure to control transmission of a packet
++ * (Added in skb headroom)
++ *
++ * @sw_hdr: Information from driver
++ * @cache_guard:
++ * @hw_hdr: Information for/from hardware
++ */
++struct rwnx_txhdr {
++ struct rwnx_sw_txhdr *sw_hdr;
++ char cache_guard[L1_CACHE_BYTES];
++ struct rwnx_hw_txhdr hw_hdr;
++};
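++/*
++ * Illustrative sketch of the skb once rwnx_txhdr has been added in the
++ * headroom (what the struct above implies, field sizes not to scale):
++ *
++ *   skb->data -> +--------------------------+
++ *                | struct rwnx_txhdr        |
++ *                |   sw_hdr (pointer)       |
++ *                |   cache_guard            |
++ *                |   hw_hdr (cfm from HW)   |
++ *                +--------------------------+
++ *                | frame data               |
++ *                +--------------------------+
++ */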
++
++u16 rwnx_select_txq(struct rwnx_vif *rwnx_vif, struct sk_buff *skb);
++netdev_tx_t rwnx_start_xmit(struct sk_buff *skb, struct net_device *dev);
++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
++int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta,
++ struct cfg80211_mgmt_tx_params *params, bool offchan,
++ u64 *cookie);
++#else
++int rwnx_start_mgmt_xmit(struct rwnx_vif *vif, struct rwnx_sta *sta,
++ struct ieee80211_channel *channel, bool offchan,
++ unsigned int wait, const u8* buf, size_t len,
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
++ bool no_cck,
++ #endif
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
++ bool dont_wait_for_ack,
++ #endif
++ u64 *cookie);
++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
++#ifdef CONFIG_RWNX_MON_XMIT
++int rwnx_start_monitor_if_xmit(struct sk_buff *skb, struct net_device *dev);
++#endif
++int rwnx_txdatacfm(void *pthis, void *host_id);
++
++struct rwnx_hw;
++struct rwnx_sta;
++void rwnx_set_traffic_status(struct rwnx_hw *rwnx_hw,
++ struct rwnx_sta *sta,
++ bool available,
++ u8 ps_id);
++void rwnx_ps_bh_enable(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ bool enable);
++void rwnx_ps_bh_traffic_req(struct rwnx_hw *rwnx_hw, struct rwnx_sta *sta,
++ u16 pkt_req, u8 ps_id);
++
++void rwnx_switch_vif_sta_txq(struct rwnx_sta *sta, struct rwnx_vif *old_vif,
++ struct rwnx_vif *new_vif);
++
++int rwnx_dbgfs_print_sta(char *buf, size_t size, struct rwnx_sta *sta,
++ struct rwnx_hw *rwnx_hw);
++void rwnx_txq_credit_update(struct rwnx_hw *rwnx_hw, int sta_idx, u8 tid,
++ s8 update);
++void rwnx_tx_push(struct rwnx_hw *rwnx_hw, struct rwnx_txhdr *txhdr, int flags);
++
++#endif /* _RWNX_TX_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.c
+@@ -0,0 +1,1364 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_txq.c
++ *
++ * Copyright (C) RivieraWaves 2016-2019
++ *
++ ******************************************************************************
++ */
++
++#include "rwnx_defs.h"
++#include "rwnx_tx.h"
++#include "ipc_host.h"
++#include "rwnx_events.h"
++#include "rwnx_gki.h"
++
++/******************************************************************************
++ * Utils functions
++ *****************************************************************************/
++#ifdef CONFIG_RWNX_FULLMAC
++const int nx_tid_prio[NX_NB_TID_PER_STA] = {7, 6, 5, 4, 3, 0, 2, 1};
++
++static inline int rwnx_txq_sta_idx(struct rwnx_sta *sta, u8 tid)
++{
++ if (is_multicast_sta(sta->sta_idx)){
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ return NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC + sta->vif_idx;
++ }else{
++ return NX_FIRST_VIF_TXQ_IDX + sta->vif_idx;
++ }
++ }else{
++ return (sta->sta_idx * NX_NB_TXQ_PER_STA) + tid;
++ }
++}
++
++static inline int rwnx_txq_vif_idx(struct rwnx_vif *vif, u8 type)
++{
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ return NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC + master_vif_idx(vif) + (type * NX_VIRT_DEV_MAX);
++ }else{
++ return NX_FIRST_VIF_TXQ_IDX + master_vif_idx(vif) + (type * NX_VIRT_DEV_MAX);
++ }
++}
++
++struct rwnx_txq *rwnx_txq_sta_get(struct rwnx_sta *sta, u8 tid,
++ struct rwnx_hw * rwnx_hw)
++{
++ if (tid >= NX_NB_TXQ_PER_STA)
++ tid = 0;
++
++ return &rwnx_hw->txq[rwnx_txq_sta_idx(sta, tid)];
++}
++
++struct rwnx_txq *rwnx_txq_vif_get(struct rwnx_vif *vif, u8 type)
++{
++ if (type > NX_UNK_TXQ_TYPE)
++ type = NX_BCMC_TXQ_TYPE;
++
++ return &vif->rwnx_hw->txq[rwnx_txq_vif_idx(vif, type)];
++}
++
++static inline struct rwnx_sta *rwnx_txq_2_sta(struct rwnx_txq *txq)
++{
++ return txq->sta;
++}
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++
++/******************************************************************************
++ * Init/Deinit functions
++ *****************************************************************************/
++/**
++ * rwnx_txq_init - Initialize a TX queue
++ *
++ * @txq: TX queue to be initialized
++ * @idx: TX queue index
++ * @status: TX queue initial status
++ * @hwq: Associated HW queue
++ * @tid: TID associated to this TX queue
++ * @sta: STA this queue belongs to (may be NULL for VIF queues)
++ * @ndev: Net device this queue belongs to
++ * (may be null for non netdev txq)
++ *
++ * Each queue is initialized with @NX_TXQ_INITIAL_CREDITS credits.
++ */
++static void rwnx_txq_init(struct rwnx_txq *txq, int idx, u8 status,
++ struct rwnx_hwq *hwq, int tid,
++#ifdef CONFIG_RWNX_FULLMAC
++ struct rwnx_sta *sta, struct net_device *ndev
++#endif
++ )
++{
++ int i;
++ int nx_first_unk_txq_idx = NX_FIRST_UNK_TXQ_IDX;
++ int nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX;
++ int nx_first_vif_txq_idx = NX_FIRST_VIF_TXQ_IDX;
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_first_unk_txq_idx = NX_FIRST_UNK_TXQ_IDX_FOR_OLD_IC;
++ nx_bcmc_txq_ndev_idx = NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC;
++ nx_first_vif_txq_idx = NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC;
++ }
++
++
++ txq->idx = idx;
++ txq->status = status;
++ txq->credits = NX_TXQ_INITIAL_CREDITS;
++ txq->pkt_sent = 0;
++ skb_queue_head_init(&txq->sk_list);
++ txq->last_retry_skb = NULL;
++ txq->nb_retry = 0;
++ txq->hwq = hwq;
++ txq->sta = sta;
++ for (i = 0; i < CONFIG_USER_MAX ; i++)
++ txq->pkt_pushed[i] = 0;
++ txq->push_limit = 0;
++ txq->tid = tid;
++#ifdef CONFIG_MAC80211_TXQ
++ txq->nb_ready_mac80211 = 0;
++#endif
++#ifdef CONFIG_RWNX_FULLMAC
++ txq->ps_id = LEGACY_PS_ID;
++ if (idx < nx_first_vif_txq_idx) {
++ int sta_idx = sta->sta_idx;
++ int tid = idx - (sta_idx * NX_NB_TXQ_PER_STA);
++ if (tid < NX_NB_TID_PER_STA)
++ txq->ndev_idx = NX_STA_NDEV_IDX(tid, sta_idx);
++ else
++ txq->ndev_idx = NDEV_NO_TXQ;
++ } else if (idx < nx_first_unk_txq_idx) {
++ txq->ndev_idx = nx_bcmc_txq_ndev_idx;
++ } else {
++ txq->ndev_idx = NDEV_NO_TXQ;
++ }
++ txq->ndev = ndev;
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ txq->amsdu = NULL;
++ txq->amsdu_len = 0;
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++#endif /* CONFIG_RWNX_FULLMAC */
++}
++
++/**
++ * rwnx_txq_flush - Flush all buffers queued for a TXQ
++ *
++ * @rwnx_hw: main driver data
++ * @txq: txq to flush
++ */
++void rwnx_txq_flush(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq)
++{
++ struct sk_buff *skb;
++
++
++ while((skb = skb_dequeue(&txq->sk_list)) != NULL) {
++ struct rwnx_sw_txhdr *sw_txhdr = ((struct rwnx_txhdr *)skb->data)->sw_hdr;
++
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ if (sw_txhdr->desc.host.packet_cnt > 1) {
++ struct rwnx_amsdu_txhdr *amsdu_txhdr;
++ list_for_each_entry(amsdu_txhdr, &sw_txhdr->amsdu.hdrs, list) {
++ //dma_unmap_single(rwnx_hw->dev, amsdu_txhdr->dma_addr,
++ // amsdu_txhdr->map_len, DMA_TO_DEVICE);
++ dev_kfree_skb_any(amsdu_txhdr->skb);
++ }
++ }
++#endif
++ kmem_cache_free(rwnx_hw->sw_txhdr_cache, sw_txhdr);
++ //dma_unmap_single(rwnx_hw->dev, sw_txhdr->dma_addr, sw_txhdr->map_len,
++ // DMA_TO_DEVICE);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ dev_kfree_skb_any(skb);
++#endif /* CONFIG_RWNX_FULLMAC */
++ }
++}
++
++/**
++ * rwnx_txq_deinit - De-initialize a TX queue
++ *
++ * @rwnx_hw: Driver main data
++ * @txq: TX queue to be de-initialized
++ * Any buffer stuck in a queue will be freed.
++ */
++static void rwnx_txq_deinit(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq)
++{
++ if (txq->idx == TXQ_INACTIVE)
++ return;
++
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ rwnx_txq_del_from_hw_list(txq);
++ txq->idx = TXQ_INACTIVE;
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++
++ rwnx_txq_flush(rwnx_hw, txq);
++}
++
++/**
++ * rwnx_txq_vif_init - Initialize all TXQ linked to a vif
++ *
++ * @rwnx_hw: main driver data
++ * @rwnx_vif: Pointer on VIF
++ * @status: Initial txq status
++ *
++ * Softmac : 1 VIF TXQ per HWQ
++ *
++ * Fullmac : 1 VIF TXQ for BC/MC
++ * 1 VIF TXQ for MGMT to unknown STA
++ */
++void rwnx_txq_vif_init(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ u8 status)
++{
++ struct rwnx_txq *txq;
++ int idx;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE);
++ idx = rwnx_txq_vif_idx(rwnx_vif, NX_BCMC_TXQ_TYPE);
++ rwnx_txq_init(txq, idx, status, &rwnx_hw->hwq[RWNX_HWQ_BE], 0,
++ &rwnx_hw->sta_table[rwnx_vif->ap.bcmc_index], rwnx_vif->ndev);
++
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ idx = rwnx_txq_vif_idx(rwnx_vif, NX_UNK_TXQ_TYPE);
++ rwnx_txq_init(txq, idx, status, &rwnx_hw->hwq[RWNX_HWQ_VO], TID_MGT,
++ NULL, rwnx_vif->ndev);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++}
++
++/**
++ * rwnx_txq_vif_deinit - Deinitialize all TXQ linked to a vif
++ *
++ * @rwnx_hw: main driver data
++ * @rwnx_vif: Pointer on VIF
++ */
++void rwnx_txq_vif_deinit(struct rwnx_hw * rwnx_hw, struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_txq *txq;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE);
++ rwnx_txq_deinit(rwnx_hw, txq);
++
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ rwnx_txq_deinit(rwnx_hw, txq);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++}
++
++
++/**
++ * rwnx_txq_sta_init - Initialize TX queues for a STA
++ *
++ * @rwnx_hw: Main driver data
++ * @rwnx_sta: STA for which tx queues need to be initialized
++ * @status: Initial txq status
++ *
++ * This function initializes all the TXQs associated to a STA.
++ * Softmac : 1 TXQ per TID
++ *
++ * Fullmac : 1 TXQ per TID (limited to 8)
++ * 1 TXQ for MGMT
++ */
++void rwnx_txq_sta_init(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta,
++ u8 status)
++{
++ struct rwnx_txq *txq;
++ int tid, idx;
++
++#ifdef CONFIG_RWNX_FULLMAC
++ struct rwnx_vif *rwnx_vif = rwnx_hw->vif_table[rwnx_sta->vif_idx];
++ idx = rwnx_txq_sta_idx(rwnx_sta, 0);
++
++ foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
++ rwnx_txq_init(txq, idx, status, &rwnx_hw->hwq[rwnx_tid2hwq[tid]], tid,
++ rwnx_sta, rwnx_vif->ndev);
++ txq->ps_id = rwnx_sta->uapsd_tids & (1 << tid) ? UAPSD_ID : LEGACY_PS_ID;
++ idx++;
++ }
++
++#endif /* CONFIG_RWNX_FULLMAC*/
++}
++
++/**
++ * rwnx_txq_sta_deinit - Deinitialize TX queues for a STA
++ *
++ * @rwnx_hw: Main driver data
++ * @rwnx_sta: STA for which tx queues need to be deinitialized
++ */
++void rwnx_txq_sta_deinit(struct rwnx_hw *rwnx_hw, struct rwnx_sta *rwnx_sta)
++{
++ struct rwnx_txq *txq;
++ int tid;
++
++ foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
++ rwnx_txq_deinit(rwnx_hw, txq);
++ }
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++/**
++ * rwnx_txq_unk_vif_init - Initialize TXQ for unknown STA linked to a vif
++ *
++ * @rwnx_vif: Pointer on VIF
++ */
++void rwnx_txq_unk_vif_init(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_txq *txq;
++ int idx;
++
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ idx = rwnx_txq_vif_idx(rwnx_vif, NX_UNK_TXQ_TYPE);
++ rwnx_txq_init(txq, idx, 0, &rwnx_hw->hwq[RWNX_HWQ_VO], TID_MGT, NULL, rwnx_vif->ndev);
++}
++
++/**
++ * rwnx_txq_unk_vif_deinit - Deinitialize TXQ for unknown STA linked to a vif
++ *
++ * @rwnx_vif: Pointer on VIF
++ */
++void rwnx_txq_unk_vif_deinit(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_txq *txq;
++
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ rwnx_txq_deinit(rwnx_vif->rwnx_hw, txq);
++}
++
++/**
++ * rwnx_txq_offchan_init - Initialize TX queue for transmission on an offchannel
++ *
++ * @vif: Interface for which the queue has to be initialized
++ *
++ * NOTE: Offchannel txq is only active for the duration of the ROC
++ */
++void rwnx_txq_offchan_init(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++ struct rwnx_txq *txq;
++ int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX;
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC;
++ }
++
++
++ txq = &rwnx_hw->txq[nx_off_chan_txq_idx];
++ rwnx_txq_init(txq, nx_off_chan_txq_idx, RWNX_TXQ_STOP_CHAN,
++ &rwnx_hw->hwq[RWNX_HWQ_VO], TID_MGT, NULL, rwnx_vif->ndev);
++}
++
++/**
++ * rwnx_txq_offchan_deinit - Deinitialize TX queue for offchannel
++ *
++ * @rwnx_vif: Pointer on VIF
++ *
++ * This function deinitializes the offchannel txq.
++ * Any buffer stuck in the queue will be freed.
++ */
++void rwnx_txq_offchan_deinit(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_txq *txq;
++ int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX;
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC;
++ }
++
++ txq = &rwnx_vif->rwnx_hw->txq[nx_off_chan_txq_idx];
++ rwnx_txq_deinit(rwnx_vif->rwnx_hw, txq);
++}
++
++
++/**
++ * rwnx_txq_tdls_vif_init - Initialize TXQ vif for TDLS
++ *
++ * @rwnx_vif: Pointer on VIF
++ */
++void rwnx_txq_tdls_vif_init(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++
++ if (!(rwnx_hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
++ return;
++
++ rwnx_txq_unk_vif_init(rwnx_vif);
++}
++
++/**
++ * rwnx_txq_tdls_vif_deinit - Deinitialize TXQ vif for TDLS
++ *
++ * @rwnx_vif: Pointer on VIF
++ */
++void rwnx_txq_tdls_vif_deinit(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_hw *rwnx_hw = rwnx_vif->rwnx_hw;
++
++ if (!(rwnx_hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
++ return;
++
++ rwnx_txq_unk_vif_deinit(rwnx_vif);
++}
++#endif
++
++/******************************************************************************
++ * Start/Stop functions
++ *****************************************************************************/
++/**
++ * rwnx_txq_add_to_hw_list - Add TX queue to a HW queue schedule list.
++ *
++ * @txq: TX queue to add
++ *
++ * Add the TX queue if not already present in the HW queue list.
++ * To be called with tx_lock held
++ */
++void rwnx_txq_add_to_hw_list(struct rwnx_txq *txq)
++{
++ if (!(txq->status & RWNX_TXQ_IN_HWQ_LIST)) {
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_add_to_hw(txq);
++#endif
++ txq->status |= RWNX_TXQ_IN_HWQ_LIST;
++ list_add_tail(&txq->sched_list, &txq->hwq->list);
++ txq->hwq->need_processing = true;
++ }
++}
++
++/**
++ * rwnx_txq_del_from_hw_list - Delete TX queue from a HW queue schedule list.
++ *
++ * @txq: TX queue to delete
++ *
++ * Remove the TX queue from the HW queue list if present.
++ * To be called with tx_lock held
++ */
++void rwnx_txq_del_from_hw_list(struct rwnx_txq *txq)
++{
++ if (txq->status & RWNX_TXQ_IN_HWQ_LIST) {
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_del_from_hw(txq);
++#endif
++ txq->status &= ~RWNX_TXQ_IN_HWQ_LIST;
++ list_del(&txq->sched_list);
++ }
++}
++
++/**
++ * rwnx_txq_skb_ready - Check if skb are available for the txq
++ *
++ * @txq: Pointer on txq
++ * @return True if there are buffers ready to be pushed on this txq,
++ * false otherwise
++ */
++static inline bool rwnx_txq_skb_ready(struct rwnx_txq *txq)
++{
++#ifdef CONFIG_MAC80211_TXQ
++ if (txq->nb_ready_mac80211 != NOT_MAC80211_TXQ)
++ return ((txq->nb_ready_mac80211 > 0) || !skb_queue_empty(&txq->sk_list));
++ else
++#endif
++ return !skb_queue_empty(&txq->sk_list);
++}
++
++/**
++ * rwnx_txq_start - Try to Start one TX queue
++ *
++ * @txq: TX queue to start
++ * @reason: reason why the TX queue is started (among RWNX_TXQ_STOP_xxx)
++ *
++ * Re-start the TX queue for one reason.
++ * If after this the txq is no longer stopped and some buffers are ready,
++ * the TX queue is also added to HW queue list.
++ * To be called with tx_lock held
++ */
++void rwnx_txq_start(struct rwnx_txq *txq, u16 reason)
++{
++ BUG_ON(txq==NULL);
++ if (txq->idx != TXQ_INACTIVE && (txq->status & reason))
++ {
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_start(txq, reason);
++#endif
++ txq->status &= ~reason;
++ if (!rwnx_txq_is_stopped(txq) && rwnx_txq_skb_ready(txq))
++ rwnx_txq_add_to_hw_list(txq);
++ }
++}
++
++/**
++ * rwnx_txq_stop - Stop one TX queue
++ *
++ * @txq: TX queue to stop
++ * @reason: reason why the TX queue is stopped (among RWNX_TXQ_STOP_xxx)
++ *
++ * Stop the TX queue. It will remove the TX queue from HW queue list
++ * To be called with tx_lock held
++ */
++void rwnx_txq_stop(struct rwnx_txq *txq, u16 reason)
++{
++ BUG_ON(txq==NULL);
++ if (txq->idx != TXQ_INACTIVE)
++ {
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_stop(txq, reason);
++#endif
++ txq->status |= reason;
++ rwnx_txq_del_from_hw_list(txq);
++ }
++}
++
++
++/**
++ * rwnx_txq_sta_start - Start all the TX queues linked to a STA
++ *
++ * @sta: STA whose TX queues must be re-started
++ * @reason: Reason why the TX queue are restarted (among RWNX_TXQ_STOP_xxx)
++ * @rwnx_hw: Driver main data
++ *
++ * This function will re-start all the TX queues of the STA for the reason
++ * specified. It can be:
++ * - RWNX_TXQ_STOP_STA_PS: the STA is no longer in power save mode
++ * - RWNX_TXQ_STOP_VIF_PS: the VIF is in power save mode (p2p absence)
++ * - RWNX_TXQ_STOP_CHAN: the STA's VIF is now on the current active channel
++ *
++ * Any TX queue with buffers ready and not stopped for other reasons will be
++ * added to the HW queue list
++ * To be called with tx_lock held
++ */
++void rwnx_txq_sta_start(struct rwnx_sta *rwnx_sta, u16 reason
++#ifdef CONFIG_RWNX_FULLMAC
++ , struct rwnx_hw *rwnx_hw
++#endif
++ )
++{
++ struct rwnx_txq *txq;
++ int tid;
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_sta_start(rwnx_sta->sta_idx);
++#endif
++ foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
++ rwnx_txq_start(txq, reason);
++ }
++}
++
++
++/**
++ * rwnx_txq_sta_stop - Stop all the TX queues linked to a STA
++ *
++ * @sta: STA whose TX queues must be stopped
++ * @reason: Reason why the TX queue are stopped (among RWNX_TX_STOP_xxx)
++ * @rwnx_hw: Driver main data
++ *
++ * This function will stop all the TX queues of the STA for the reason
++ * specified. It can be:
++ * - RWNX_TXQ_STOP_STA_PS: the STA is in power save mode
++ * - RWNX_TXQ_STOP_VIF_PS: the VIF is in power save mode (p2p absence)
++ * - RWNX_TXQ_STOP_CHAN: the STA's VIF is not on the current active channel
++ *
++ * Any TX queue present in a HW queue list will be removed from this list.
++ * To be called with tx_lock held
++ */
++void rwnx_txq_sta_stop(struct rwnx_sta *rwnx_sta, u16 reason
++#ifdef CONFIG_RWNX_FULLMAC
++ , struct rwnx_hw *rwnx_hw
++#endif
++ )
++{
++ struct rwnx_txq *txq;
++ int tid;
++
++ if (!rwnx_sta)
++ return;
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_sta_stop(rwnx_sta->sta_idx);
++#endif
++ foreach_sta_txq(rwnx_sta, txq, tid, rwnx_hw) {
++ rwnx_txq_stop(txq, reason);
++ }
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_txq_tdls_sta_start(struct rwnx_vif *rwnx_vif, u16 reason,
++ struct rwnx_hw *rwnx_hw)
++{
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_vif_start(rwnx_vif->vif_index);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++
++ if (rwnx_vif->sta.tdls_sta)
++ rwnx_txq_sta_start(rwnx_vif->sta.tdls_sta, reason, rwnx_hw);
++
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++}
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_txq_tdls_sta_stop(struct rwnx_vif *rwnx_vif, u16 reason,
++ struct rwnx_hw *rwnx_hw)
++{
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_vif_stop(rwnx_vif->vif_index);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++
++ if (rwnx_vif->sta.tdls_sta)
++ rwnx_txq_sta_stop(rwnx_vif->sta.tdls_sta, reason, rwnx_hw);
++
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++}
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++static inline
++void rwnx_txq_vif_for_each_sta(struct rwnx_hw *rwnx_hw, struct rwnx_vif *rwnx_vif,
++ void (*f)(struct rwnx_sta *, u16, struct rwnx_hw *),
++ u16 reason)
++{
++
++ switch (RWNX_VIF_TYPE(rwnx_vif)) {
++ case NL80211_IFTYPE_STATION:
++ case NL80211_IFTYPE_P2P_CLIENT:
++ {
++ if (rwnx_vif->tdls_status == TDLS_LINK_ACTIVE)
++ f(rwnx_vif->sta.tdls_sta, reason, rwnx_hw);
++ if (!WARN_ON(rwnx_vif->sta.ap == NULL))
++ f(rwnx_vif->sta.ap, reason, rwnx_hw);
++ break;
++ }
++ case NL80211_IFTYPE_AP_VLAN:
++ rwnx_vif = rwnx_vif->ap_vlan.master;
++ case NL80211_IFTYPE_AP:
++ case NL80211_IFTYPE_MESH_POINT:
++ case NL80211_IFTYPE_P2P_GO:
++ {
++ struct rwnx_sta *sta;
++ list_for_each_entry(sta, &rwnx_vif->ap.sta_list, list) {
++ f(sta, reason, rwnx_hw);
++ }
++ break;
++ }
++ default:
++ BUG();
++ break;
++ }
++}
++
++#endif
++
++/**
++ * rwnx_txq_vif_start - START TX queues of all STA associated to the vif
++ * and vif's TXQ
++ *
++ * @vif: Interface to start
++ * @reason: Start reason (RWNX_TXQ_STOP_CHAN or RWNX_TXQ_STOP_VIF_PS)
++ * @rwnx_hw: Driver main data
++ *
++ * Iterate over all the STA associated to the vif and re-start them for the
++ * reason @reason
++ * Take tx_lock
++ */
++void rwnx_txq_vif_start(struct rwnx_vif *rwnx_vif, u16 reason,
++ struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_txq *txq;
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_vif_start(rwnx_vif->vif_index);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ //Reject if monitor interface
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
++ goto end;
++
++ if (rwnx_vif->roc_tdls && rwnx_vif->sta.tdls_sta && rwnx_vif->sta.tdls_sta->tdls.chsw_en) {
++ rwnx_txq_sta_start(rwnx_vif->sta.tdls_sta, reason, rwnx_hw);
++ }
++ if (!rwnx_vif->roc_tdls) {
++ rwnx_txq_vif_for_each_sta(rwnx_hw, rwnx_vif, rwnx_txq_sta_start, reason);
++ }
++
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE);
++ rwnx_txq_start(txq, reason);
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ rwnx_txq_start(txq, reason);
++
++end:
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++}
++
++
++/**
++ * rwnx_txq_vif_stop - STOP TX queues of all STA associated to the vif
++ *
++ * @vif: Interface to stop
++ * @arg: Stop reason (RWNX_TXQ_STOP_CHAN or RWNX_TXQ_STOP_VIF_PS)
++ * @rwnx_hw: Driver main data
++ *
++ * Iterate over all the STA associated to the vif and stop them for the
++ * reason RWNX_TXQ_STOP_CHAN or RWNX_TXQ_STOP_VIF_PS
++ * Take tx_lock
++ */
++void rwnx_txq_vif_stop(struct rwnx_vif *rwnx_vif, u16 reason,
++ struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_txq *txq;
++
++ //RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_vif_stop(rwnx_vif->vif_index);
++#endif
++ spin_lock_bh(&rwnx_hw->tx_lock);
++
++#ifdef CONFIG_RWNX_FULLMAC
++ //Reject if monitor interface
++ if (rwnx_vif->wdev.iftype == NL80211_IFTYPE_MONITOR)
++ goto end;
++
++ rwnx_txq_vif_for_each_sta(rwnx_hw, rwnx_vif, rwnx_txq_sta_stop, reason);
++
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_BCMC_TXQ_TYPE);
++ rwnx_txq_stop(txq, reason);
++ txq = rwnx_txq_vif_get(rwnx_vif, NX_UNK_TXQ_TYPE);
++ rwnx_txq_stop(txq, reason);
++
++end:
++#endif /* CONFIG_RWNX_FULLMAC*/
++
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++}
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++/**
++ * rwnx_txq_offchan_start - START TX queue for offchannel frame
++ *
++ * @rwnx_hw: Driver main data
++ */
++void rwnx_txq_offchan_start(struct rwnx_hw *rwnx_hw)
++{
++ struct rwnx_txq *txq;
++ int nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX;
++
++ if((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8801) ||
++ ((g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DC ||
++ g_rwnx_plat->usbdev->chipid == PRODUCT_ID_AIC8800DW) && chip_id < 3)){
++ nx_off_chan_txq_idx = NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC;
++ }
++
++
++ txq = &rwnx_hw->txq[nx_off_chan_txq_idx];
++ spin_lock_bh(&rwnx_hw->tx_lock);
++ rwnx_txq_start(txq, RWNX_TXQ_STOP_CHAN);
++ spin_unlock_bh(&rwnx_hw->tx_lock);
++}
++
++/**
++ * rwnx_txq_sta_switch_vif - Associate TXQ linked to a STA to a new vif
++ *
++ * @sta: STA whose txq must be switched
++ * @old_vif: Vif currently associated to the STA (may no longer be active)
++ * @new_vif: vif which should be associated to the STA for now on
++ *
++ * This function will switch the vif (i.e. the netdev) associated to all STA's
++ * TXQ. This is used when AP_VLAN interface are created.
++ * If one STA is associated to an AP_vlan vif, it will be moved from the master
++ * AP vif to the AP_vlan vif.
++ * If an AP_vlan vif is removed, then the STA will be moved back to the master AP vif.
++ *
++ */
++void rwnx_txq_sta_switch_vif(struct rwnx_sta *sta, struct rwnx_vif *old_vif,
++ struct rwnx_vif *new_vif)
++{
++ struct rwnx_hw *rwnx_hw = new_vif->rwnx_hw;
++ struct rwnx_txq *txq;
++ int i;
++
++ /* start TXQ on the new interface, and update ndev field in txq */
++ if (!netif_carrier_ok(new_vif->ndev))
++ netif_carrier_on(new_vif->ndev);
++ txq = rwnx_txq_sta_get(sta, 0, rwnx_hw);
++ for (i = 0; i < NX_NB_TID_PER_STA; i++, txq++) {
++ txq->ndev = new_vif->ndev;
++ netif_wake_subqueue(txq->ndev, txq->ndev_idx);
++ }
++}
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/******************************************************************************
++ * TXQ queue/schedule functions
++ *****************************************************************************/
++/**
++ * rwnx_txq_queue_skb - Queue a buffer in a TX queue
++ *
++ * @skb: Buffer to queue
++ * @txq: TX Queue in which the buffer must be added
++ * @rwnx_hw: Driver main data
++ * @retry: Should it be queued in the retry list
++ *
++ * @return: Return 1 if the txq has been added to the hwq list, 0 otherwise
++ *
++ * Add a buffer in the buffer list of the TX queue
++ * and add this TX queue in the HW queue list if the txq is not stopped.
++ * If this is a retry packet it is added after the last retry packet or at the
++ * beginning if there is no retry packet queued.
++ *
++ * If the STA is in PS mode and this is the first packet queued for this txq
++ * update TIM.
++ *
++ * To be called with tx_lock held
++ */
++int rwnx_txq_queue_skb(struct sk_buff *skb, struct rwnx_txq *txq,
++ struct rwnx_hw *rwnx_hw, bool retry)
++{
++#ifndef CONFIG_ONE_TXQ
++ unsigned long flags;
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++ if (unlikely(txq->sta && txq->sta->ps.active)) {
++ txq->sta->ps.pkt_ready[txq->ps_id]++;
++#ifdef CREATE_TRACE_POINT
++ trace_ps_queue(txq->sta);
++#endif
++ if (txq->sta->ps.pkt_ready[txq->ps_id] == 1) {
++ rwnx_set_traffic_status(rwnx_hw, txq->sta, true, txq->ps_id);
++ }
++ }
++#endif
++
++ if (!retry) {
++ /* add buffer in the sk_list */
++ skb_queue_tail(&txq->sk_list, skb);
++ } else {
++ if (txq->last_retry_skb)
++ rwnx_skb_append(txq->last_retry_skb, skb, &txq->sk_list);
++ else
++ skb_queue_head(&txq->sk_list, skb);
++
++ txq->last_retry_skb = skb;
++ txq->nb_retry++;
++ }
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_queue_skb(skb, txq, retry);
++#endif
++ /* Flowctrl corresponding netdev queue if needed */
++#ifdef CONFIG_RWNX_FULLMAC
++#ifndef CONFIG_ONE_TXQ
++ /* If too many buffer are queued for this TXQ stop netdev queue */
++ spin_lock_irqsave(&rwnx_hw->usbdev->tx_flow_lock, flags);
++ if ((txq->ndev_idx != NDEV_NO_TXQ) && !rwnx_hw->usbdev->tbusy && ((skb_queue_len(&txq->sk_list) > RWNX_NDEV_FLOW_CTRL_STOP))) {
++ txq->status |= RWNX_TXQ_NDEV_FLOW_CTRL;
++ netif_stop_subqueue(txq->ndev, txq->ndev_idx);
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_flowctrl_stop(txq);
++#endif
++ }
++ spin_unlock_irqrestore(&rwnx_hw->usbdev->tx_flow_lock, flags);
++
++#endif /* CONFIG_ONE_TXQ */
++#else /* ! CONFIG_RWNX_FULLMAC */
++
++ if (!retry && ++txq->hwq->len == txq->hwq->len_stop) {
++#ifdef CREATE_TRACE_POINTS
++ trace_hwq_flowctrl_stop(txq->hwq->id);
++#endif
++ ieee80211_stop_queue(rwnx_hw->hw, txq->hwq->id);
++ rwnx_hw->stats.queues_stops++;
++ }
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++ /* add it in the hwq list if not stopped and not yet present */
++ if (!rwnx_txq_is_stopped(txq)) {
++ rwnx_txq_add_to_hw_list(txq);
++ return 1;
++ }
++
++ return 0;
++}
++
++/**
++ * rwnx_txq_confirm_any - Process buffer confirmed by fw
++ *
++ * @rwnx_hw: Driver main data
++ * @txq: TX Queue
++ * @hwq: HW Queue
++ * @sw_txhdr: software descriptor of the confirmed packet
++ *
++ * Process a buffer returned by the fw. It doesn't check buffer status
++ * and only does systematic counter update:
++ * - hw credit
++ * - buffer pushed to fw
++ *
++ * To be called with tx_lock held
++ */
++void rwnx_txq_confirm_any(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq,
++ struct rwnx_hwq *hwq, struct rwnx_sw_txhdr *sw_txhdr)
++{
++ int user = 0;
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ int group_id;
++
++ user = RWNX_MUMIMO_INFO_POS_ID(sw_txhdr->desc.host.mumimo_info);
++ group_id = RWNX_MUMIMO_INFO_GROUP_ID(sw_txhdr->desc.host.mumimo_info);
++
++ if ((txq->idx != TXQ_INACTIVE) &&
++ (txq->pkt_pushed[user] == 1) &&
++ (txq->status & RWNX_TXQ_STOP_MU_POS))
++ rwnx_txq_start(txq, RWNX_TXQ_STOP_MU_POS);
++
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++
++ if (txq->pkt_pushed[user])
++ txq->pkt_pushed[user]--;
++
++ hwq->need_processing = true;
++ rwnx_hw->stats.cfm_balance[hwq->id]--;
++}
++
++/******************************************************************************
++ * HWQ processing
++ *****************************************************************************/
++static inline
++bool rwnx_txq_take_mu_lock(struct rwnx_hw *rwnx_hw)
++{
++ bool res = false;
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ if (rwnx_hw->mod_params->mutx)
++ res = (down_trylock(&rwnx_hw->mu.lock) == 0);
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++ return res;
++}
++
++static inline
++void rwnx_txq_release_mu_lock(struct rwnx_hw *rwnx_hw)
++{
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ up(&rwnx_hw->mu.lock);
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++}
++
++static inline
++void rwnx_txq_set_mu_info(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq,
++ int group_id, int pos)
++{
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ trace_txq_select_mu_group(txq, group_id, pos);
++ if (group_id) {
++ txq->mumimo_info = group_id | (pos << 6);
++ rwnx_mu_set_active_group(rwnx_hw, group_id);
++ } else
++ txq->mumimo_info = 0;
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++}
++
++static inline
++s8 rwnx_txq_get_credits(struct rwnx_txq *txq)
++{
++ s8 cred = txq->credits;
++ /* if destination is in PS mode, push_limit indicates the maximum
++ number of packet that can be pushed on this txq. */
++ if (txq->push_limit && (cred > txq->push_limit)) {
++ cred = txq->push_limit;
++ }
++ return cred;
++}
++
++/**
++ * skb_queue_extract - Extract buffer from skb list
++ *
++ * @list: List of skb to extract from
++ * @head: List of skb to append to
++ * @nb_elt: Number of skb to extract
++ *
++ * Extract the first @nb_elt buffers of @list and append them to @head.
++ * It is assumed that:
++ * - @list contains more than @nb_elt elements
++ * - There is no need to take @list nor @head lock to modify them
++ */
++static inline void skb_queue_extract(struct sk_buff_head *list,
++ struct sk_buff_head *head, int nb_elt)
++{
++ int i;
++ struct sk_buff *first, *last, *ptr;
++
++ first = ptr = list->next;
++ for (i = 0; i < nb_elt; i++) {
++ ptr = ptr->next;
++ }
++ last = ptr->prev;
++
++ /* unlink nb_elt in list */
++ list->qlen -= nb_elt;
++ list->next = ptr;
++ ptr->prev = (struct sk_buff *)list;
++
++ /* append nb_elt at end of head */
++ head->qlen += nb_elt;
++ last->next = (struct sk_buff *)head;
++ head->prev->next = first;
++ first->prev = head->prev;
++ head->prev = last;
++}
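++/*
++ * Worked example: with list = [A, B, C, D] (qlen 4) and nb_elt = 2,
++ * skb_queue_extract() leaves list = [C, D] (qlen 2) and appends [A, B]
++ * at the tail of head, preserving their order.
++ */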
++
++
++#ifdef CONFIG_MAC80211_TXQ
++/**
++ * rwnx_txq_mac80211_dequeue - Dequeue buffer from mac80211 txq and
++ * add them to push list
++ *
++ * @rwnx_hw: Main driver data
++ * @sk_list: List of buffer to push (initialized without lock)
++ * @txq: TXQ to dequeue buffers from
++ * @max: Max number of buffer to dequeue
++ *
++ * Dequeue buffer from mac80211 txq, prepare them for transmission and chain them
++ * to the list of buffer to push.
++ *
++ * @return true if no more buffer are queued in mac80211 txq and false otherwise.
++ */
++static bool rwnx_txq_mac80211_dequeue(struct rwnx_hw *rwnx_hw,
++ struct sk_buff_head *sk_list,
++ struct rwnx_txq *txq, int max)
++{
++ struct ieee80211_txq *mac_txq;
++ struct sk_buff *skb;
++ unsigned long mac_txq_len;
++
++ if (txq->nb_ready_mac80211 == NOT_MAC80211_TXQ)
++ return true;
++
++ mac_txq = container_of((void *)txq, struct ieee80211_txq, drv_priv);
++
++ for (; max > 0; max--) {
++ skb = rwnx_tx_dequeue_prep(rwnx_hw, mac_txq);
++ if (skb == NULL)
++ return true;
++
++ __skb_queue_tail(sk_list, skb);
++ }
++
++ /* re-read mac80211 txq current length.
++ It is mainly for debug purpose to trace dropped packet. There is no
++ problems to have nb_ready_mac80211 != actual mac80211 txq length */
++ ieee80211_txq_get_depth(mac_txq, &mac_txq_len, NULL);
++ if (txq->nb_ready_mac80211 > mac_txq_len)
++ trace_txq_drop(txq, txq->nb_ready_mac80211 - mac_txq_len);
++ txq->nb_ready_mac80211 = mac_txq_len;
++
++ return (txq->nb_ready_mac80211 == 0);
++}
++#endif
++
++/**
++ * rwnx_txq_get_skb_to_push - Get list of buffer to push for one txq
++ *
++ * @rwnx_hw: main driver data
++ * @hwq: HWQ on which buffers will be pushed
++ * @txq: TXQ to get buffers from
++ * @user: user position to use
++ * @sk_list_push: list to update
++ *
++ * This function returns a list of buffers to push for one txq.
++ * It takes into account the number of credits of the HWQ for this user
++ * position and TXQ (and push_limit).
++ * This allows the list to be pushed without having to test the
++ * hwq/txq status after each push.
++ *
++ * If a MU group has been selected for this txq, it will also update the
++ * counter for the group
++ *
++ * @return true if the txq no longer has buffers ready after the ones returned,
++ * false otherwise
++ */
++static
++bool rwnx_txq_get_skb_to_push(struct rwnx_hw *rwnx_hw, struct rwnx_hwq *hwq,
++ struct rwnx_txq *txq, int user,
++ struct sk_buff_head *sk_list_push)
++{
++ int nb_ready = skb_queue_len(&txq->sk_list);
++ int credits = rwnx_txq_get_credits(txq);
++ bool res = false;
++
++ __skb_queue_head_init(sk_list_push);
++
++ if (credits >= nb_ready) {
++ skb_queue_splice_init(&txq->sk_list, sk_list_push);
++#ifdef CONFIG_MAC80211_TXQ
++ res = rwnx_txq_mac80211_dequeue(rwnx_hw, sk_list_push, txq, credits - nb_ready);
++ credits = skb_queue_len(sk_list_push);
++#else
++ res = true;
++ credits = nb_ready;
++#endif
++ } else {
++ skb_queue_extract(&txq->sk_list, sk_list_push, credits);
++
++ /* When processing PS service period (i.e. push_limit != 0), no longer
++ process this txq if the buffers extracted will complete the SP for
++ this txq */
++ if (txq->push_limit && (credits == txq->push_limit))
++ res = true;
++ }
++
++ rwnx_mu_set_active_sta(rwnx_hw, rwnx_txq_2_sta(txq), credits);
++
++ return res;
++}
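++/*
++ * Worked example: with 5 buffers queued and 3 credits,
++ * rwnx_txq_get_skb_to_push() extracts the first 3 buffers into
++ * sk_list_push and returns false (buffers remain), unless push_limit == 3
++ * in which case it returns true to end the PS service period.
++ */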
++
++/**
++ * rwnx_txq_select_user - Select User queue for a txq
++ *
++ * @rwnx_hw: main driver data
++ * @mu_lock: true if MU lock is taken
++ * @txq: TXQ to select MU group for
++ * @hwq: HWQ for the TXQ
++ * @user: Updated with user position selected
++ *
++ * @return false if it is not possible to process this txq.
++ * true otherwise
++ *
++ * This function selects the MU group to use for a TXQ.
++ * The selection is done as follow:
++ *
++ * - return immediately for STAs that don't belong to any group and select
++ * group 0 / user 0
++ *
++ * - If MU tx is disabled (by user mutx_on, or because mu group are being
++ * updated !mu_lock), select group 0 / user 0
++ *
++ * - Use the best group selected by @rwnx_mu_group_sta_select.
++ *
++ * Each time a group is selected (except for the first case where sta
++ * doesn't belong to a MU group), the function checks that no buffer is
++ * pending for this txq on another user position. If this is the case stop
++ * the txq (RWNX_TXQ_STOP_MU_POS) and return false.
++ *
++ */
++static
++bool rwnx_txq_select_user(struct rwnx_hw *rwnx_hw, bool mu_lock,
++ struct rwnx_txq *txq, struct rwnx_hwq *hwq, int *user)
++{
++ int pos = 0;
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ int id, group_id = 0;
++ struct rwnx_sta *sta = rwnx_txq_2_sta(txq);
++
++ /* for sta that belong to no group return immediately */
++ if (!sta || !sta->group_info.cnt)
++ goto end;
++
++ /* If MU is disabled, need to check user */
++ if (!rwnx_hw->mod_params->mutx_on || !mu_lock)
++ goto check_user;
++
++ /* Use the "best" group selected */
++ group_id = sta->group_info.group;
++
++ if (group_id > 0)
++ pos = rwnx_mu_group_sta_get_pos(rwnx_hw, sta, group_id);
++
++ check_user:
++ /* check that we can push on this user position */
++#if CONFIG_USER_MAX == 2
++ id = (pos + 1) & 0x1;
++ if (txq->pkt_pushed[id]) {
++ rwnx_txq_stop(txq, RWNX_TXQ_STOP_MU_POS);
++ return false;
++ }
++
++#else
++ for (id = 0 ; id < CONFIG_USER_MAX ; id++) {
++ if (id != pos && txq->pkt_pushed[id]) {
++ rwnx_txq_stop(txq, RWNX_TXQ_STOP_MU_POS);
++ return false;
++ }
++ }
++#endif
++
++ end:
++ rwnx_txq_set_mu_info(rwnx_hw, txq, group_id, pos);
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++
++ *user = pos;
++ return true;
++}
++
++
++/**
++ * rwnx_hwq_process - Process one HW queue list
++ *
++ * @rwnx_hw: Driver main data
++ * @hw_queue: HW queue index to process
++ *
++ * The function will iterate over all the TX queues linked in this HW queue
++ * list. For each TX queue, push as many buffers as possible in the HW queue.
++ * (NB: TX queue have at least 1 buffer, otherwise it wouldn't be in the list)
++ * - If TX queue no longer has buffers, remove it from the list and check next
++ * TX queue
++ * - If TX queue no longer has credits or has a push_limit (PS mode) and it
++ * is reached, remove it from the list and check next TX queue
++ * - If HW queue is full, update list head to start with the next TX queue on
++ * next call if current TX queue already pushed "too many" pkt in a row, and
++ * return
++ *
++ * To be called when HW queue list is modified:
++ * - when a buffer is pushed on a TX queue
++ * - when new credits are received
++ * - when a STA returns from Power Save mode or receives traffic request.
++ * - when Channel context change
++ *
++ * To be called with tx_lock held
++ */
++#define ALL_HWQ_MASK ((1 << CONFIG_USER_MAX) - 1)
++
++void rwnx_hwq_process(struct rwnx_hw *rwnx_hw, struct rwnx_hwq *hwq)
++{
++ struct rwnx_txq *txq, *next;
++ int user, credit_map = 0;
++ bool mu_enable;
++#ifndef CONFIG_ONE_TXQ
++ unsigned long flags;
++#endif
++#ifdef CREATE_TRACE_POINTS
++ trace_process_hw_queue(hwq);
++#endif
++ hwq->need_processing = false;
++
++ mu_enable = rwnx_txq_take_mu_lock(rwnx_hw);
++ if (!mu_enable)
++ credit_map = ALL_HWQ_MASK - 1;
++
++ list_for_each_entry_safe(txq, next, &hwq->list, sched_list) {
++ struct rwnx_txhdr *txhdr = NULL;
++ struct sk_buff_head sk_list_push;
++ struct sk_buff *skb;
++ bool txq_empty;
++#ifdef CREATE_TRACE_POINTS
++ trace_process_txq(txq);
++#endif
++ /* sanity check for debug */
++ BUG_ON(!(txq->status & RWNX_TXQ_IN_HWQ_LIST));
++ if(txq->idx == TXQ_INACTIVE){
++ printk("%s txq->idx == TXQ_INACTIVE \r\n", __func__);
++ continue;
++ }
++ BUG_ON(txq->idx == TXQ_INACTIVE);
++ BUG_ON(txq->credits <= 0);
++ BUG_ON(!rwnx_txq_skb_ready(txq));
++
++ if (!rwnx_txq_select_user(rwnx_hw, mu_enable, txq, hwq, &user))
++ continue;
++
++ txq_empty = rwnx_txq_get_skb_to_push(rwnx_hw, hwq, txq, user,
++ &sk_list_push);
++
++ while ((skb = __skb_dequeue(&sk_list_push)) != NULL) {
++ txhdr = (struct rwnx_txhdr *)skb->data;
++ rwnx_tx_push(rwnx_hw, txhdr, 0);
++ }
++
++ if (txq_empty) {
++ rwnx_txq_del_from_hw_list(txq);
++ txq->pkt_sent = 0;
++ } else if (rwnx_txq_is_scheduled(txq)) {
++ /* txq not empty,
++ - To avoid starving need to process other txq in the list
++ - For better aggregation, need to send "as many consecutive
++ pkt as possible" for he same txq
++ ==> Add counter to trigger txq switch
++ */
++ if (txq->pkt_sent > hwq->size) {
++ txq->pkt_sent = 0;
++ list_rotate_left(&hwq->list);
++ }
++ }
++
++#ifdef CONFIG_RWNX_FULLMAC
++ /* Unable to complete PS traffic request because of hwq credit */
++ if (txq->push_limit && txq->sta) {
++ if (txq->ps_id == LEGACY_PS_ID) {
++ /* for legacy PS abort SP and wait next ps-poll */
++ txq->sta->ps.sp_cnt[txq->ps_id] -= txq->push_limit;
++ txq->push_limit = 0;
++ }
++ /* for u-apsd need to complete the SP to send EOSP frame */
++ }
++#ifndef CONFIG_ONE_TXQ
++ /* restart netdev queue if number of queued buffer is below threshold */
++ spin_lock_irqsave(&rwnx_hw->usbdev->tx_flow_lock, flags);
++ if (unlikely(txq->status & RWNX_TXQ_NDEV_FLOW_CTRL) &&
++ skb_queue_len(&txq->sk_list) < RWNX_NDEV_FLOW_CTRL_RESTART) {
++ txq->status &= ~RWNX_TXQ_NDEV_FLOW_CTRL;
++ if(!rwnx_hw->usbdev->tbusy)
++ netif_wake_subqueue(txq->ndev, txq->ndev_idx);
++#ifdef CREATE_TRACE_POINTS
++ trace_txq_flowctrl_restart(txq);
++#endif
++ }
++ spin_unlock_irqrestore(&rwnx_hw->usbdev->tx_flow_lock, flags);
++#endif /* CONFIG_ONE_TXQ */
++#endif /* CONFIG_RWNX_FULLMAC */
++ }
++
++
++ if (mu_enable)
++ rwnx_txq_release_mu_lock(rwnx_hw);
++}
++
++/**
++ * rwnx_hwq_process_all - Process all HW queue list
++ *
++ * @rwnx_hw: Driver main data
++ *
++ * Loop over all HWQ, and process them if needed
++ * To be called with tx_lock hold
++ */
++void rwnx_hwq_process_all(struct rwnx_hw *rwnx_hw)
++{
++ int id;
++
++ rwnx_mu_group_sta_select(rwnx_hw);
++
++ for (id = ARRAY_SIZE(rwnx_hw->hwq) - 1; id >= 0 ; id--) {
++ if (rwnx_hw->hwq[id].need_processing) {
++ rwnx_hwq_process(rwnx_hw, &rwnx_hw->hwq[id]);
++ }
++ }
++}
++
++/**
++ * rwnx_hwq_init - Initialize all hwq structures
++ *
++ * @rwnx_hw: Driver main data
++ *
++ */
++void rwnx_hwq_init(struct rwnx_hw *rwnx_hw)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(rwnx_hw->hwq); i++) {
++ struct rwnx_hwq *hwq = &rwnx_hw->hwq[i];
++
++ hwq->id = i;
++ hwq->size = nx_txdesc_cnt[i];
++ INIT_LIST_HEAD(&hwq->list);
++
++ }
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_txq.h
+@@ -0,0 +1,397 @@
++/**
++ ****************************************************************************************
++ *
++ * @file rwnx_txq.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ****************************************************************************************
++ */
++#ifndef _RWNX_TXQ_H_
++#define _RWNX_TXQ_H_
++
++#include <linux/types.h>
++#include <linux/bitops.h>
++#include <linux/ieee80211.h>
++
++#include <net/mac80211.h>
++
++#ifdef CONFIG_RWNX_FULLMAC
++/**
++ * Fullmac TXQ configuration:
++ * - STA: 1 TXQ per TID (limited to 8)
++ * 1 TXQ for bufferable MGT frames
++ * - VIF: 1 TXQ for Multi/Broadcast +
++ * 1 TXQ for MGT for unknown STAs or non-bufferable MGT frames
++ * - 1 TXQ for offchannel transmissions
++ *
++ *
++ * Txq mapping looks like
++ * for NX_REMOTE_STA_MAX=10 and NX_VIRT_DEV_MAX=4
++ *
++ * | TXQ | NDEV_ID | VIF | STA | TID | HWQ |
++ * |-----+---------+-----+-------+------+-----|-
++ * | 0 | 0 | | 0 | 0 | 1 | 9 TXQ per STA
++ * | 1 | 1 | | 0 | 1 | 0 | (8 data + 1 mgmt)
++ * | 2 | 2 | | 0 | 2 | 0 |
++ * | 3 | 3 | | 0 | 3 | 1 |
++ * | 4 | 4 | | 0 | 4 | 2 |
++ * | 5 | 5 | | 0 | 5 | 2 |
++ * | 6 | 6 | | 0 | 6 | 3 |
++ * | 7 | 7 | | 0 | 7 | 3 |
++ * | 8 | N/A | | 0 | MGMT | 3 |
++ * |-----+---------+-----+-------+------+-----|-
++ * | ... | | | | | | Same for all STAs
++ * |-----+---------+-----+-------+------+-----|-
++ * | 90 | 80 | 0 | BC/MC | 0 | 1/4 | 1 TXQ for BC/MC per VIF
++ * | ... | | | | | |
++ * | 93 | 80 | 3 | BC/MC | 0 | 1/4 |
++ * |-----+---------+-----+-------+------+-----|-
++ * | 94 | N/A | 0 | N/A | MGMT | 3 | 1 TXQ for unknown STA per VIF
++ * | ... | | | | | |
++ * | 97 | N/A | 3 | N/A | MGMT | 3 |
++ * |-----+---------+-----+-------+------+-----|-
++ * | 98 | N/A | | N/A | MGMT | 3 | 1 TXQ for offchannel frame
++ */
++#define NX_NB_TID_PER_STA 8
++#define NX_NB_TXQ_PER_STA (NX_NB_TID_PER_STA + 1)
++#define NX_NB_TXQ_PER_VIF 2
++#define NX_NB_TXQ ((NX_NB_TXQ_PER_STA * NX_REMOTE_STA_MAX) + \
++ (NX_NB_TXQ_PER_VIF * NX_VIRT_DEV_MAX) + 1)
++
++#define NX_FIRST_VIF_TXQ_IDX (NX_REMOTE_STA_MAX * NX_NB_TXQ_PER_STA)
++#define NX_FIRST_BCMC_TXQ_IDX NX_FIRST_VIF_TXQ_IDX
++#define NX_FIRST_UNK_TXQ_IDX (NX_FIRST_BCMC_TXQ_IDX + NX_VIRT_DEV_MAX)
++
++#define NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC (NX_REMOTE_STA_MAX_FOR_OLD_IC * NX_NB_TXQ_PER_STA)
++#define NX_FIRST_BCMC_TXQ_IDX_FOR_OLD_IC NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC
++#define NX_FIRST_UNK_TXQ_IDX_FOR_OLD_IC (NX_FIRST_BCMC_TXQ_IDX_FOR_OLD_IC + NX_VIRT_DEV_MAX)
++
++
++#define NX_OFF_CHAN_TXQ_IDX (NX_FIRST_VIF_TXQ_IDX + \
++ (NX_VIRT_DEV_MAX * NX_NB_TXQ_PER_VIF))
++
++#define NX_OFF_CHAN_TXQ_IDX_FOR_OLD_IC (NX_FIRST_VIF_TXQ_IDX_FOR_OLD_IC + \
++ (NX_VIRT_DEV_MAX * NX_NB_TXQ_PER_VIF))
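++/*
++ * Worked example, using the values from the mapping table above
++ * (NX_REMOTE_STA_MAX = 10, NX_VIRT_DEV_MAX = 4):
++ *   NX_FIRST_VIF_TXQ_IDX = 10 * 9 = 90 (first BC/MC txq)
++ *   NX_FIRST_UNK_TXQ_IDX = 90 + 4 = 94 (first "unknown STA" txq)
++ *   NX_OFF_CHAN_TXQ_IDX = 90 + 4 * 2 = 98 (offchannel txq)
++ *   NX_NB_TXQ = 9 * 10 + 2 * 4 + 1 = 99
++ */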
++
++
++#define NX_BCMC_TXQ_TYPE 0
++#define NX_UNK_TXQ_TYPE 1
++
++/**
++ * Each data TXQ is a netdev queue. TXQs used to send MGT frames are not
++ * data TXQs as they do not receive buffers from the netdev interface.
++ * Need to allocate the maximum case.
++ * AP : all STAs + 1 BC/MC
++ */
++#define NX_NB_NDEV_TXQ ((NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX) + 1 )
++#define NX_NB_NDEV_TXQ_FOR_OLD_IC ((NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX_FOR_OLD_IC) + 1)
++
++#define NX_BCMC_TXQ_NDEV_IDX (NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX)
++#define NX_BCMC_TXQ_NDEV_IDX_FOR_OLD_IC (NX_NB_TID_PER_STA * NX_REMOTE_STA_MAX_FOR_OLD_IC)
++#define NX_STA_NDEV_IDX(tid, sta_idx) ((tid) + (sta_idx) * NX_NB_TID_PER_STA)
++#define NDEV_NO_TXQ 0xffff
++#if (NX_NB_NDEV_TXQ >= NDEV_NO_TXQ)
++#error("Need to increase struct rwnx_txq->ndev_idx size")
++#endif
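++/*
++ * Worked example, with NX_REMOTE_STA_MAX = 10 as in the table above:
++ *   NX_NB_NDEV_TXQ = 8 * 10 + 1 = 81 netdev queues
++ *   NX_BCMC_TXQ_NDEV_IDX = 8 * 10 = 80, matching the NDEV_ID column
++ *   shown for the BC/MC txqs.
++ */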
++
++/* stop netdev queue when number of queued buffers is greater than this */
++#define RWNX_NDEV_FLOW_CTRL_STOP 64
++/* restart netdev queue when number of queued buffers is lower than this */
++#define RWNX_NDEV_FLOW_CTRL_RESTART 64
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++#define TXQ_INACTIVE 0xffff
++#if (NX_NB_TXQ >= TXQ_INACTIVE)
++#error("Need to increase struct rwnx_txq->idx size")
++#endif
++
++#define NX_TXQ_INITIAL_CREDITS 64
++
++/**
++ * TXQ tid sorted by decreasing priority
++ */
++extern const int nx_tid_prio[NX_NB_TID_PER_STA];
++
++/**
++ * struct rwnx_hwq - Structure used to save information relative to
++ * an AC TX queue (aka HW queue)
++ * @list: List of TXQ, that have buffers ready for this HWQ
++ * @credits: available credit for the queue (i.e. nb of buffers that
++ * can be pushed to FW )
++ * @id Id of the HWQ among RWNX_HWQ_....
++ * @size size of the queue
++ * @need_processing Indicate if hwq should be processed
++ * @len number of packet ready to be pushed to fw for this HW queue
++ * @len_stop threshold to stop mac80211(i.e. netdev) queues. Stop queue when
++ * driver has more than @len_stop packets ready.
++ * @len_start threshold to wake mac80211 queues. Wake queue when driver has
++ * less than @len_start packets ready.
++ */
++struct rwnx_hwq {
++ struct list_head list;
++ u8 size;
++ u8 id;
++ bool need_processing;
++};
++
++/**
++ * enum rwnx_push_flags - Flags of pushed buffer
++ *
++ * @RWNX_PUSH_RETRY Pushing a buffer for retry
++ * @RWNX_PUSH_IMMEDIATE Pushing a buffer without queuing it first
++ */
++enum rwnx_push_flags {
++ RWNX_PUSH_RETRY = BIT(0),
++ RWNX_PUSH_IMMEDIATE = BIT(1),
++};
++
++/**
++ * enum rwnx_txq_flags - TXQ status flag
++ *
++ * @RWNX_TXQ_IN_HWQ_LIST: The queue is scheduled for transmission
++ * @RWNX_TXQ_STOP_FULL: No more credits for the queue
++ * @RWNX_TXQ_STOP_CSA: CSA is in progress
++ * @RWNX_TXQ_STOP_STA_PS: Destination sta is currently in power save mode
++ * @RWNX_TXQ_STOP_VIF_PS: Vif owning this queue is currently in power save mode
++ * @RWNX_TXQ_STOP_CHAN: Channel of this queue is not the current active channel
++ * @RWNX_TXQ_STOP_MU_POS: TXQ is stopped waiting for all the buffers pushed to
++ * fw to be confirmed
++ * @RWNX_TXQ_STOP: All possible reason to have a txq stopped
++ * @RWNX_TXQ_NDEV_FLOW_CTRL: associated netdev queue is currently stopped.
++ * Note: when a TXQ is flow-controlled it is NOT stopped
++ */
++enum rwnx_txq_flags {
++ RWNX_TXQ_IN_HWQ_LIST = BIT(0),
++ RWNX_TXQ_STOP_FULL = BIT(1),
++ RWNX_TXQ_STOP_CSA = BIT(2),
++ RWNX_TXQ_STOP_STA_PS = BIT(3),
++ RWNX_TXQ_STOP_VIF_PS = BIT(4),
++ RWNX_TXQ_STOP_CHAN = BIT(5),
++ RWNX_TXQ_STOP_MU_POS = BIT(6),
++ RWNX_TXQ_STOP = (RWNX_TXQ_STOP_FULL | RWNX_TXQ_STOP_CSA |
++ RWNX_TXQ_STOP_STA_PS | RWNX_TXQ_STOP_VIF_PS |
++ RWNX_TXQ_STOP_CHAN) ,
++ RWNX_TXQ_NDEV_FLOW_CTRL = BIT(7),
++};
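++/*
++ * Example of how the stop reasons combine: a txq stopped with both
++ * RWNX_TXQ_STOP_FULL and RWNX_TXQ_STOP_STA_PS remains stopped after
++ * rwnx_txq_start(txq, RWNX_TXQ_STOP_FULL), because RWNX_TXQ_STOP_STA_PS
++ * is still set; the txq is only rescheduled once every reason in
++ * RWNX_TXQ_STOP has been cleared.
++ */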
++
++
++/**
++ * struct rwnx_txq - Structure used to save information relative to
++ * a RA/TID TX queue
++ *
++ * @idx: Unique txq idx. Set to TXQ_INACTIVE if txq is not used.
++ * @status: bitfield of @rwnx_txq_flags.
++ * @credits: available credit for the queue (i.e. nb of buffers that
++ * can be pushed to FW).
++ * @pkt_sent: number of consecutive pkt sent without leaving HW queue list
++ * @pkt_pushed: number of pkt currently pending for transmission confirmation
++ * @sched_list: list node for HW queue schedule list (rwnx_hwq.list)
++ * @sk_list: list of buffers to push to fw
++ * @last_retry_skb: pointer on the last skb in @sk_list that is a retry.
++ * (retry skb are stored at the beginning of the list)
++ * NULL if no retry skb is queued in @sk_list
++ * @nb_retry: Number of retry packet queued.
++ * @hwq: Pointer on the associated HW queue.
++ * @push_limit: number of packet to push before removing the txq from hwq list.
++ * (we always have push_limit < skb_queue_len(sk_list))
++ * @tid: TID
++ *
++ * FULLMAC specific
++ * @ps_id: Index to use for Power save mode (LEGACY or UAPSD)
++ * @ndev_idx: txq idx from netdev point of view (NDEV_NO_TXQ for non netdev queue)
++ * @ndev: pointer to ndev of the corresponding vif
++ * @amsdu: pointer to rwnx_sw_txhdr of the first subframe of the A-MSDU.
++ * NULL if no A-MSDU frame is in construction
++ * @amsdu_len: Maximum size allowed for an A-MSDU. 0 means A-MSDU not allowed
++ */
++struct rwnx_txq {
++ u16 idx;
++ u8 status;
++ s8 credits;
++ u8 pkt_sent;
++ u8 pkt_pushed[CONFIG_USER_MAX];
++ struct list_head sched_list;
++ struct sk_buff_head sk_list;
++ struct sk_buff *last_retry_skb;
++ struct rwnx_hwq *hwq;
++ int nb_retry;
++ u8 push_limit;
++ u8 tid;
++#ifdef CONFIG_MAC80211_TXQ
++ unsigned long nb_ready_mac80211;
++#endif
++#ifdef CONFIG_RWNX_FULLMAC
++ struct rwnx_sta *sta;
++ u8 ps_id;
++ u16 ndev_idx;
++ struct net_device *ndev;
++#ifdef CONFIG_RWNX_AMSDUS_TX
++ struct rwnx_sw_txhdr *amsdu;
++ u16 amsdu_len;
++#endif /* CONFIG_RWNX_AMSDUS_TX */
++#endif /* CONFIG_RWNX_FULLMAC */
++#ifdef CONFIG_RWNX_MUMIMO_TX
++ u8 mumimo_info;
++#endif
++};
++
++struct rwnx_sta;
++struct rwnx_vif;
++struct rwnx_hw;
++struct rwnx_sw_txhdr;
++
++#ifdef CONFIG_RWNX_MUMIMO_TX
++#define RWNX_TXQ_GROUP_ID(txq) ((txq)->mumimo_info & 0x3f)
++#define RWNX_TXQ_POS_ID(txq) (((txq)->mumimo_info >> 6) & 0x3)
++#else
++#define RWNX_TXQ_GROUP_ID(txq) 0
++#define RWNX_TXQ_POS_ID(txq) 0
++#endif /* CONFIG_RWNX_MUMIMO_TX */
++
++static inline bool rwnx_txq_is_stopped(struct rwnx_txq *txq)
++{
++ return (txq->status & RWNX_TXQ_STOP);
++}
++
++static inline bool rwnx_txq_is_full(struct rwnx_txq *txq)
++{
++ return (txq->status & RWNX_TXQ_STOP_FULL);
++}
++
++static inline bool rwnx_txq_is_scheduled(struct rwnx_txq *txq)
++{
++ return (txq->status & RWNX_TXQ_IN_HWQ_LIST);
++}
++
++/**
++ * foreach_sta_txq - Macro to iterate over all TXQ of a STA in increasing
++ * TID order
++ *
++ * @sta: pointer to rwnx_sta
++ * @txq: pointer to rwnx_txq updated with the next TXQ at each iteration
++ * @tid: int updated with the TXQ tid at each iteration
++ * @rwnx_hw: main driver data
++ */
++#ifdef CONFIG_MAC80211_TXQ
++#define foreach_sta_txq(sta, txq, tid, rwnx_hw) \
++ for (tid = 0, txq = rwnx_txq_sta_get(sta, 0); \
++ tid < NX_NB_TXQ_PER_STA; \
++ tid++, txq = rwnx_txq_sta_get(sta, tid))
++
++#elif defined(CONFIG_RWNX_FULLMAC) /* CONFIG_RWNX_FULLMAC */
++#define foreach_sta_txq(sta, txq, tid, rwnx_hw) \
++ for (tid = 0, txq = rwnx_txq_sta_get(sta, 0, rwnx_hw); \
++ tid < (is_multicast_sta(sta->sta_idx) ? 1 : NX_NB_TXQ_PER_STA); \
++ tid++, txq++)
++
++#endif
++
++/**
++ * foreach_sta_txq_prio - Macro to iterate over all TXQ of a STA in
++ * decreasing priority order
++ *
++ * @sta: pointer to rwnx_sta
++ * @txq: pointer to rwnx_txq updated with the next TXQ at each iteration
++ * @tid: int updated with the TXQ tid at each iteration
++ * @i: int updated with the iteration count
++ * @rwnx_hw: main driver data
++ *
++ * Note: For fullmac, the txq for mgmt frames is skipped
++ */
++#ifdef CONFIG_RWNX_FULLMAC
++#define foreach_sta_txq_prio(sta, txq, tid, i, rwnx_hw) \
++ for (i = 0, tid = nx_tid_prio[0], txq = rwnx_txq_sta_get(sta, tid, rwnx_hw); \
++ i < NX_NB_TID_PER_STA; \
++ i++, tid = nx_tid_prio[i], txq = rwnx_txq_sta_get(sta, tid, rwnx_hw))
++#endif
++
++/**
++ * foreach_vif_txq - Macro to iterate over all TXQ of a VIF (in AC order)
++ *
++ * @vif: pointer to rwnx_vif
++ * @txq: pointer to rwnx_txq updated with the next TXQ at each iteration
++ * @ac: int updated with the TXQ ac at each iteration
++ */
++#ifdef CONFIG_MAC80211_TXQ
++#define foreach_vif_txq(vif, txq, ac) \
++ for (ac = RWNX_HWQ_BK, txq = rwnx_txq_vif_get(vif, ac); \
++ ac < NX_NB_TXQ_PER_VIF; \
++ ac++, txq = rwnx_txq_vif_get(vif, ac))
++
++#else
++#define foreach_vif_txq(vif, txq, ac) \
++ for (ac = RWNX_HWQ_BK, txq = &vif->txqs[0]; \
++ ac < NX_NB_TXQ_PER_VIF; \
++ ac++, txq++)
++#endif
++
++#ifdef CONFIG_RWNX_FULLMAC
++struct rwnx_txq *rwnx_txq_sta_get(struct rwnx_sta *sta, u8 tid,
++ struct rwnx_hw * rwnx_hw);
++struct rwnx_txq *rwnx_txq_vif_get(struct rwnx_vif *vif, u8 type);
++#endif /* CONFIG_RWNX_FULLMAC */
++
++/**
++ * rwnx_txq_vif_get_status - return status bits related to the vif
++ *
++ * @rwnx_vif: Pointer to vif structure
++ */
++static inline u8 rwnx_txq_vif_get_status(struct rwnx_vif *rwnx_vif)
++{
++ struct rwnx_txq *txq = rwnx_txq_vif_get(rwnx_vif, 0);
++ return (txq->status & (RWNX_TXQ_STOP_CHAN | RWNX_TXQ_STOP_VIF_PS));
++}
++
++void rwnx_txq_vif_init(struct rwnx_hw * rwnx_hw, struct rwnx_vif *vif,
++ u8 status);
++void rwnx_txq_vif_deinit(struct rwnx_hw * rwnx_hw, struct rwnx_vif *vif);
++void rwnx_txq_sta_init(struct rwnx_hw * rwnx_hw, struct rwnx_sta *rwnx_sta,
++ u8 status);
++void rwnx_txq_sta_deinit(struct rwnx_hw * rwnx_hw, struct rwnx_sta *rwnx_sta);
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_txq_unk_vif_init(struct rwnx_vif *rwnx_vif);
++void rwnx_txq_unk_vif_deinit(struct rwnx_vif *vif);
++void rwnx_txq_offchan_init(struct rwnx_vif *rwnx_vif);
++void rwnx_txq_offchan_deinit(struct rwnx_vif *rwnx_vif);
++void rwnx_txq_tdls_vif_init(struct rwnx_vif *rwnx_vif);
++void rwnx_txq_tdls_vif_deinit(struct rwnx_vif *vif);
++void rwnx_txq_tdls_sta_start(struct rwnx_vif *rwnx_vif, u16 reason,
++ struct rwnx_hw *rwnx_hw);
++void rwnx_txq_tdls_sta_stop(struct rwnx_vif *rwnx_vif, u16 reason,
++ struct rwnx_hw *rwnx_hw);
++#endif
++
++
++void rwnx_txq_add_to_hw_list(struct rwnx_txq *txq);
++void rwnx_txq_del_from_hw_list(struct rwnx_txq *txq);
++void rwnx_txq_stop(struct rwnx_txq *txq, u16 reason);
++void rwnx_txq_start(struct rwnx_txq *txq, u16 reason);
++void rwnx_txq_vif_start(struct rwnx_vif *vif, u16 reason,
++ struct rwnx_hw *rwnx_hw);
++void rwnx_txq_vif_stop(struct rwnx_vif *vif, u16 reason,
++ struct rwnx_hw *rwnx_hw);
++
++#ifdef CONFIG_RWNX_FULLMAC
++void rwnx_txq_sta_start(struct rwnx_sta *sta, u16 reason,
++ struct rwnx_hw *rwnx_hw);
++void rwnx_txq_sta_stop(struct rwnx_sta *sta, u16 reason,
++ struct rwnx_hw *rwnx_hw);
++void rwnx_txq_offchan_start(struct rwnx_hw *rwnx_hw);
++void rwnx_txq_sta_switch_vif(struct rwnx_sta *sta, struct rwnx_vif *old_vif,
++ struct rwnx_vif *new_vif);
++
++#endif /* CONFIG_RWNX_FULLMAC */
++
++int rwnx_txq_queue_skb(struct sk_buff *skb, struct rwnx_txq *txq,
++ struct rwnx_hw *rwnx_hw, bool retry);
++void rwnx_txq_confirm_any(struct rwnx_hw *rwnx_hw, struct rwnx_txq *txq,
++ struct rwnx_hwq *hwq, struct rwnx_sw_txhdr *sw_txhdr);
++
++
++void rwnx_hwq_init(struct rwnx_hw *rwnx_hw);
++void rwnx_hwq_process(struct rwnx_hw *rwnx_hw, struct rwnx_hwq *hwq);
++void rwnx_hwq_process_all(struct rwnx_hw *rwnx_hw);
++
++#endif /* _RWNX_TXQ_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_utils.c
+@@ -0,0 +1,39 @@
++/**
++ * rwnx_utils.c
++ *
++ * IPC utility function definitions
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ */
++#include "rwnx_utils.h"
++#include "rwnx_defs.h"
++#include "rwnx_rx.h"
++#include "rwnx_tx.h"
++#include "rwnx_msg_rx.h"
++#include "rwnx_debugfs.h"
++#include "rwnx_prof.h"
++#include "ipc_host.h"
++
++
++extern int get_testmode(void);
++extern void get_fw_path(char* fw_path);
++extern int testmode;
++extern char aic_fw_path[200];
++
++int rwnx_init_aic(struct rwnx_hw *rwnx_hw)
++{
++ RWNX_DBG(RWNX_FN_ENTRY_STR);
++#ifdef AICWF_SDIO_SUPPORT
++ aicwf_sdio_host_init(&(rwnx_hw->sdio_env), NULL, NULL, rwnx_hw);
++#else
++ aicwf_usb_host_init(&(rwnx_hw->usb_env), NULL, NULL, rwnx_hw);
++#endif
++ rwnx_cmd_mgr_init(rwnx_hw->cmd_mgr);
++
++ testmode = get_testmode();
++ memset(aic_fw_path, 0, 200);
++ get_fw_path(aic_fw_path);
++
++ return 0;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_utils.h
+@@ -0,0 +1,142 @@
++/**
++ * rwnx_utils.h
++ *
++ * IPC utility function declarations
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ */
++#ifndef _RWNX_IPC_UTILS_H_
++#define _RWNX_IPC_UTILS_H_
++
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/skbuff.h>
++
++#include "lmac_msg.h"
++#include "aicwf_debug.h"
++
++#ifdef ANDROID_PLATFORM
++#define HIGH_KERNEL_VERSION KERNEL_VERSION(5, 15, 41)
++#else
++#define HIGH_KERNEL_VERSION KERNEL_VERSION(6, 0, 0)
++#endif
++
++
++#if 0
++#ifdef CONFIG_RWNX_DBG
++/* #define RWNX_DBG(format, arg...) pr_warn(format, ## arg) */
++#define RWNX_DBG printk
++#else
++#define RWNX_DBG(a...) do {} while (0)
++#endif
++#endif
++
++
++
++enum rwnx_dev_flag {
++ RWNX_DEV_RESTARTING,
++ RWNX_DEV_STACK_RESTARTING,
++ RWNX_DEV_STARTED,
++};
++
++struct rwnx_hw;
++struct rwnx_sta;
++
++/**
++ * struct rwnx_ipc_elem - Generic IPC buffer of fixed size
++ *
++ * @addr: Host address of the buffer.
++ * @dma_addr: DMA address of the buffer.
++ */
++struct rwnx_ipc_elem {
++ void *addr;
++ dma_addr_t dma_addr;
++};
++
++/**
++ * struct rwnx_ipc_elem_pool - Generic pool of IPC buffers of fixed size
++ *
++ * @nb: Number of buffers currently allocated in the pool
++ * @buf: Array of buffers (size of array is @nb)
++ * @pool: DMA pool in which buffers have been allocated
++ */
++struct rwnx_ipc_elem_pool {
++ int nb;
++ struct rwnx_ipc_elem *buf;
++ struct dma_pool *pool;
++};
++
++/**
++ * struct rwnx_ipc_elem_var - Generic IPC buffer of variable size
++ *
++ * @addr: Host address of the buffer.
++ * @dma_addr: DMA address of the buffer.
++ * @size: Size, in bytes, of the buffer
++ */
++struct rwnx_ipc_elem_var {
++ void *addr;
++ dma_addr_t dma_addr;
++ size_t size;
++};
++
++/**
++ * struct rwnx_ipc_dbgdump_elem - IPC buffer for debug dump
++ *
++ * @mutex: Mutex to protect access to debug dump
++ * @buf: IPC buffer
++ */
++struct rwnx_ipc_dbgdump_elem {
++ struct mutex mutex;
++ struct rwnx_ipc_elem_var buf;
++};
++
++static const u32 rwnx_rxbuff_pattern = 0xCAFEFADE;
++
++/*
++ * Maximum Length of Radiotap header vendor specific data(in bytes)
++ */
++#define RADIOTAP_HDR_VEND_MAX_LEN 16
++
++/*
++ * Maximum Radiotap Header Length without vendor specific data (in bytes)
++ */
++#define RADIOTAP_HDR_MAX_LEN 80
++
++/*
++ * Unsupported HT Frame data length (in bytes)
++ */
++#define UNSUP_RX_VEC_DATA_LEN 2
++
++/**
++ * struct rwnx_ipc_skb_elem - IPC buffer for SKB element
++ *
++ * @skb: Pointer to the skb buffer allocated
++ * @dma_addr: DMA address of the skb data buffer
++ *
++ */
++struct rwnx_ipc_skb_elem {
++ struct sk_buff *skb;
++ dma_addr_t dma_addr;
++};
++
++#ifdef CONFIG_RWNX_FULLMAC
++
++/* Maximum number of rx buffer the fw may use at the same time */
++#define RWNX_RXBUFF_MAX (64 * NX_REMOTE_STA_MAX)
++
++/**
++ * struct rwnx_ipc_rxbuf_elems - IPC buffers for RX
++ *
++ * @skb: Array of buffers pushed to FW.
++ * @idx: Index of the last pushed skb (used to find the next free entry more quickly)
++ *
++ * Note: contrary to the softmac version, dma_addr is stored inside skb->cb.
++ * (cf &struct rwnx_skb_cb)
++ */
++struct rwnx_ipc_rxbuf_elems {
++ struct sk_buff *skb[RWNX_RXBUFF_MAX];
++ int idx;
++};
++
++#endif /* CONFIG_RWNX_FULLMAC */
++#endif /* _RWNX_IPC_UTILS_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_v7.c
+@@ -0,0 +1,192 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_v7.c - Support for v7 platform
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#include "rwnx_v7.h"
++#include "rwnx_defs.h"
++#include "rwnx_irqs.h"
++#include "reg_access.h"
++#include "hal_desc.h"
++
++struct rwnx_v7
++{
++ u8 *pci_bar0_vaddr;
++ u8 *pci_bar1_vaddr;
++};
++
++static int rwnx_v7_platform_enable(struct rwnx_hw *rwnx_hw)
++{
++ int ret;
++
++ /* sched_setscheduler on ONESHOT threaded irq handler for BCNs ? */
++ ret = request_irq(rwnx_hw->plat->pci_dev->irq, rwnx_irq_hdlr, 0,
++ "rwnx", rwnx_hw);
++ return ret;
++}
++
++static int rwnx_v7_platform_disable(struct rwnx_hw *rwnx_hw)
++{
++ free_irq(rwnx_hw->plat->pci_dev->irq, rwnx_hw);
++ return 0;
++}
++
++static void rwnx_v7_platform_deinit(struct rwnx_plat *rwnx_plat)
++{
++ #ifdef CONFIG_PCI
++ struct rwnx_v7 *rwnx_v7 = (struct rwnx_v7 *)rwnx_plat->priv;
++
++ pci_disable_device(rwnx_plat->pci_dev);
++ iounmap(rwnx_v7->pci_bar0_vaddr);
++ iounmap(rwnx_v7->pci_bar1_vaddr);
++ pci_release_regions(rwnx_plat->pci_dev);
++ pci_clear_master(rwnx_plat->pci_dev);
++ pci_disable_msi(rwnx_plat->pci_dev);
++ #endif
++ kfree(rwnx_plat);
++}
++
++static u8* rwnx_v7_get_address(struct rwnx_plat *rwnx_plat, int addr_name,
++ unsigned int offset)
++{
++ struct rwnx_v7 *rwnx_v7 = (struct rwnx_v7 *)rwnx_plat->priv;
++
++ if (WARN(addr_name >= RWNX_ADDR_MAX, "Invalid address %d", addr_name))
++ return NULL;
++
++ if (addr_name == RWNX_ADDR_CPU)
++ return rwnx_v7->pci_bar0_vaddr + offset;
++ else
++ return rwnx_v7->pci_bar1_vaddr + offset;
++}
++
++static void rwnx_v7_ack_irq(struct rwnx_plat *rwnx_plat)
++{
++
++}
++
++static const u32 rwnx_v7_config_reg[] = {
++ NXMAC_DEBUG_PORT_SEL_ADDR,
++ SYSCTRL_DIAG_CONF_ADDR,
++ SYSCTRL_PHYDIAG_CONF_ADDR,
++ SYSCTRL_RIUDIAG_CONF_ADDR,
++ RF_V7_DIAGPORT_CONF1_ADDR,
++};
++
++static const u32 rwnx_v7_he_config_reg[] = {
++ SYSCTRL_DIAG_CONF0,
++ SYSCTRL_DIAG_CONF1,
++ SYSCTRL_DIAG_CONF2,
++ SYSCTRL_DIAG_CONF3,
++};
++
++static int rwnx_v7_get_config_reg(struct rwnx_plat *rwnx_plat, const u32 **list)
++{
++ u32 fpga_sign;
++
++ if (!list)
++ return 0;
++
++ fpga_sign = RWNX_REG_READ(rwnx_plat, RWNX_ADDR_SYSTEM, SYSCTRL_SIGNATURE_ADDR);
++ if (__FPGA_TYPE(fpga_sign) == 0xc0ca) {
++ *list = rwnx_v7_he_config_reg;
++ return ARRAY_SIZE(rwnx_v7_he_config_reg);
++ } else {
++ *list = rwnx_v7_config_reg;
++ return ARRAY_SIZE(rwnx_v7_config_reg);
++ }
++}
++
++
++/**
++ * rwnx_v7_platform_init - Initialize the v7 platform
++ *
++ * @pci_dev: PCI device
++ * @rwnx_plat: Pointer to a struct rwnx_plat * to be populated
++ *
++ * @return 0 on success, < 0 otherwise
++ *
++ * Allocate and initialize a rwnx_plat structure for the v7 platform.
++ */
++int rwnx_v7_platform_init(struct pci_dev *pci_dev, struct rwnx_plat **rwnx_plat)
++{
++ struct rwnx_v7 *rwnx_v7;
++ u16 pci_cmd;
++ int ret = 0;
++
++ *rwnx_plat = kzalloc(sizeof(struct rwnx_plat) + sizeof(struct rwnx_v7),
++ GFP_KERNEL);
++ if (!*rwnx_plat)
++ return -ENOMEM;
++
++ rwnx_v7 = (struct rwnx_v7 *)(*rwnx_plat)->priv;
++
++ /* Hotplug fixups */
++ pci_read_config_word(pci_dev, PCI_COMMAND, &pci_cmd);
++ pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
++ pci_write_config_word(pci_dev, PCI_COMMAND, pci_cmd);
++ pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES >> 2);
++
++ if ((ret = pci_enable_device(pci_dev))) {
++ dev_err(&(pci_dev->dev), "pci_enable_device failed\n");
++ goto out_enable;
++ }
++
++ pci_set_master(pci_dev);
++
++ if ((ret = pci_request_regions(pci_dev, KBUILD_MODNAME))) {
++ dev_err(&(pci_dev->dev), "pci_request_regions failed\n");
++ goto out_request;
++ }
++
++ #ifdef CONFIG_PCI
++ if (pci_enable_msi(pci_dev))
++ {
++ dev_err(&(pci_dev->dev), "pci_enable_msi failed\n");
++ goto out_msi;
++
++ }
++ #endif
++
++ if (!(rwnx_v7->pci_bar0_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 0))) {
++ dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 0);
++ ret = -ENOMEM;
++ goto out_bar0;
++ }
++ if (!(rwnx_v7->pci_bar1_vaddr = (u8 *)pci_ioremap_bar(pci_dev, 1))) {
++ dev_err(&(pci_dev->dev), "pci_ioremap_bar(%d) failed\n", 1);
++ ret = -ENOMEM;
++ goto out_bar1;
++ }
++
++ (*rwnx_plat)->enable = rwnx_v7_platform_enable;
++ (*rwnx_plat)->disable = rwnx_v7_platform_disable;
++ (*rwnx_plat)->deinit = rwnx_v7_platform_deinit;
++ (*rwnx_plat)->get_address = rwnx_v7_get_address;
++ (*rwnx_plat)->ack_irq = rwnx_v7_ack_irq;
++ (*rwnx_plat)->get_config_reg = rwnx_v7_get_config_reg;
++
++ return 0;
++
++ out_bar1:
++ iounmap(rwnx_v7->pci_bar0_vaddr);
++ out_bar0:
++#ifdef CONFIG_PCI
++ pci_disable_msi(pci_dev);
++ out_msi:
++#endif
++ pci_release_regions(pci_dev);
++ out_request:
++#ifdef CONFIG_PCI
++ pci_clear_master(pci_dev);
++#endif
++ pci_disable_device(pci_dev);
++ out_enable:
++ kfree(*rwnx_plat);
++ return ret;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_v7.h
+@@ -0,0 +1,20 @@
++/**
++ ******************************************************************************
++ *
++ * @file rwnx_v7.h
++ *
++ * Copyright (C) RivieraWaves 2012-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _RWNX_V7_H_
++#define _RWNX_V7_H_
++
++#include <linux/pci.h>
++#include "rwnx_platform.h"
++
++int rwnx_v7_platform_init(struct pci_dev *pci_dev,
++ struct rwnx_plat **rwnx_plat);
++
++#endif /* _RWNX_V7_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_version.h
+@@ -0,0 +1,11 @@
++#ifndef _RWNX_VERSION_H_
++#define _RWNX_VERSION_H_
++
++#include "rwnx_version_gen.h"
++
++static inline void rwnx_print_version(void)
++{
++ AICWFDBG(LOGINFO, RWNX_VERS_BANNER"\n");
++}
++
++#endif /* _RWNX_VERSION_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/rwnx_version_gen.h
+@@ -0,0 +1,5 @@
++#define RWNX_VERS_REV "1a4b0054d2M (master)"
++#define RWNX_VERS_MOD "6.4.3.0"
++#define RWNX_VERS_BANNER "rwnx v6.4.3.0 - 1a4b0054d2M (master)"
++#define RELEASE_DATE "2023_0707_1001"
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/sdio_host.c
+@@ -0,0 +1,142 @@
++/**
++ * sdio_host.c
++ *
++ * SDIO host function definitions
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++
++#include "sdio_host.h"
++//#include "ipc_compat.h"
++#include "rwnx_tx.h"
++#include "rwnx_platform.h"
++
++/**
++ ****************************************************************************************
++ */
++void aicwf_sdio_host_init(struct sdio_host_env_tag *env,
++ void *cb,
++ void *shared_env_ptr,
++ void *pthis)
++{
++ // Reset the environments
++
++ // Reset the Host environment
++ memset(env, 0, sizeof(struct sdio_host_env_tag));
++ // Save the pointer to the attached object (used in callbacks)
++ env->pthis = pthis;
++}
++
++/**
++ ****************************************************************************************
++ */
++volatile struct txdesc_host *aicwf_sdio_host_txdesc_get(struct sdio_host_env_tag *env, const int queue_idx)
++{
++ // struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++ volatile struct txdesc_host *txdesc_free = NULL;
++ uint32_t used_idx = env->txdesc_used_idx[queue_idx];
++ uint32_t free_idx = env->txdesc_free_idx[queue_idx];
++
++ // ASSERT_ERR(queue_idx < SDIO_TXQUEUE_CNT);
++ // ASSERT_ERR((free_idx - used_idx) <= SDIO_TXDESC_CNT);
++
++ // Check if a free descriptor is available
++ if (free_idx != (used_idx + SDIO_TXDESC_CNT))
++ {
++ // Get the pointer to the first free descriptor
++ // txdesc_free = shared_env_ptr->txdesc[queue_idx] + (free_idx % IPC_TXDESC_CNT);
++ }
++ else
++ {
++ txdesc_free = NULL;
++ }
++
++ return txdesc_free;
++}
++
++/**
++ ****************************************************************************************
++ */
++void aicwf_sdio_host_txdesc_push(struct sdio_host_env_tag *env, const int queue_idx, const uint64_t host_id)
++{
++ //printk("push, %d, %d, 0x%llx \r\n", queue_idx, env->txdesc_free_idx[queue_idx], host_id);
++ // Save the host id in the environment
++ env->tx_host_id[queue_idx][env->txdesc_free_idx[queue_idx] % SDIO_TXDESC_CNT] = host_id;
++
++ // Increment the index
++ env->txdesc_free_idx[queue_idx]++;
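++ // Wrap the free index back to 0 before the 32-bit counter overflows;
++ // indices are consumed modulo SDIO_TXDESC_CNT, so this is harmless as
++ // long as SDIO_TXDESC_CNT is a power of two.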
++ if(env->txdesc_free_idx[queue_idx]==0x40000000)
++ env->txdesc_free_idx[queue_idx] = 0;
++}
++
++/**
++ ****************************************************************************************
++ */
++void aicwf_sdio_host_tx_cfm_handler(struct sdio_host_env_tag *env, u32 *data)
++{
++ u32 queue_idx = 0;// data[0];
++ //struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)env->pthis;
++ struct sk_buff *skb = NULL;
++ struct rwnx_txhdr *txhdr;
++
++ // TX confirmation descriptors have been received
++ // REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM);
++ //while (1)
++ {
++ // Get the used index and increase it. The increase is done before knowing
++ // whether the current buffer is confirmed, because the callback may call
++ // ipc_host_txdesc_get() when flow control is enabled, and the index must
++ // already hold its final value for the FIFO-full test to be correct
++ //uint32_t used_idx = env->txdesc_used_idx[queue_idx]++;
++ uint32_t used_idx = data[1];
++ uint32_t host_id = env->tx_host_id[queue_idx][used_idx % SDIO_TXDESC_CNT];
++
++ // Reset the host id in the array
++ env->tx_host_id[queue_idx][used_idx % SDIO_TXDESC_CNT] = 0;
++
++ // call the external function to indicate that a TX packet is freed
++ if (host_id == 0)
++ {
++ // No more confirmations, so put back the used index at its initial value
++ env->txdesc_used_idx[queue_idx] = used_idx;
++ printk("ERROR:No more confirmations\r\n");
++ //break;
++ }
++ // set the cfm status
++ skb = (struct sk_buff *)(unsigned long)host_id;
++ txhdr = (struct rwnx_txhdr *)skb->data;
++ txhdr->hw_hdr.cfm.status = (union rwnx_hw_txstatus)data[0];
++ printk("sdio_host_tx_cfm_handler:used_idx=%d, 0x%p, status=%x\r\n",used_idx, env->pthis, txhdr->hw_hdr.cfm.status.value);
++ //if (env->cb.send_data_cfm(env->pthis, host_id) != 0)
++ if (rwnx_txdatacfm(env->pthis, (void *)host_id) != 0)
++ {
++ // No more confirmations, so put back the used index at its initial value
++ env->txdesc_used_idx[queue_idx] = used_idx;
++ env->tx_host_id[queue_idx][used_idx % SDIO_TXDESC_CNT] = host_id;
++ // and exit the loop
++ printk("ERROR:rwnx_txdatacfm,\r\n");
++ // break;
++ }
++
++ }
++}
++
++int aicwf_rwnx_sdio_platform_init(struct aic_sdio_dev *sdiodev)
++{
++ struct rwnx_plat *rwnx_plat = NULL;
++ void *drvdata;
++ int ret = -ENODEV;
++
++ rwnx_plat = kzalloc(sizeof(struct rwnx_plat), GFP_KERNEL);
++
++ if (!rwnx_plat) {
++ return -ENOMEM;
++ }
++
++ rwnx_plat->sdiodev = sdiodev;
++ ret = rwnx_platform_init(rwnx_plat, &drvdata);
++
++ return ret;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/sdio_host.h
+@@ -0,0 +1,42 @@
++/**
++ * sdio_host.h
++ *
++ * SDIO host function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++
++#ifndef _SDIO_HOST_H_
++#define _SDIO_HOST_H_
++
++#include "lmac_types.h"
++#include "aicwf_sdio.h"
++
++#define SDIO_TXQUEUE_CNT NX_TXQ_CNT
++#define SDIO_TXDESC_CNT NX_TXDESC_CNT
++
++
++/// Definition of the IPC Host environment structure.
++struct sdio_host_env_tag
++{
++ // Index of the first free TX descriptor
++ uint32_t txdesc_free_idx[SDIO_TXQUEUE_CNT];
++ // Index of the first used TX descriptor
++ uint32_t txdesc_used_idx[SDIO_TXQUEUE_CNT];
++ // Array storing the currently pushed host ids, per IPC queue
++ uint64_t tx_host_id[SDIO_TXQUEUE_CNT][SDIO_TXDESC_CNT];
++
++ /// Pointer to the attached object (used in callbacks and register accesses)
++ void *pthis;
++};
++
++extern void aicwf_sdio_host_init(struct sdio_host_env_tag *env,
++ void *cb, void *shared_env_ptr, void *pthis);
++
++extern void aicwf_sdio_host_txdesc_push(struct sdio_host_env_tag *env, const int queue_idx, const uint64_t host_id);
++
++extern void aicwf_sdio_host_tx_cfm_handler(struct sdio_host_env_tag *env, u32 *data);
++extern int aicwf_rwnx_sdio_platform_init(struct aic_sdio_dev *sdiodev);
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/usb_host.c
+@@ -0,0 +1,154 @@
++/**
++ * usb_host.c
++ *
++ * USB host function definitions
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++
++#include "usb_host.h"
++//#include "ipc_compat.h"
++#include "rwnx_tx.h"
++#include "rwnx_platform.h"
++#include "aicwf_debug.h"
++
++/**
++ ****************************************************************************************
++ */
++void aicwf_usb_host_init(struct usb_host_env_tag *env,
++ void *cb,
++ void *shared_env_ptr,
++ void *pthis)
++{
++ // Reset the environments
++
++ // Reset the Host environment
++ memset(env, 0, sizeof(struct usb_host_env_tag));
++ // Save the pointer to the attached object (used in callbacks)
++ env->pthis = pthis;
++}
++
++/**
++ ****************************************************************************************
++ */
++volatile struct txdesc_host *aicwf_usb_host_txdesc_get(struct usb_host_env_tag *env, const int queue_idx)
++{
++ // struct ipc_shared_env_tag *shared_env_ptr = env->shared;
++ volatile struct txdesc_host *txdesc_free = NULL;
++ uint32_t used_idx = env->txdesc_used_idx[queue_idx];
++ uint32_t free_idx = env->txdesc_free_idx[queue_idx];
++
++ // ASSERT_ERR(queue_idx < SDIO_TXQUEUE_CNT);
++ // ASSERT_ERR((free_idx - used_idx) <= USB_TXDESC_CNT);
++
++ // Check if a free descriptor is available
++ if (free_idx != (used_idx + USB_TXDESC_CNT))
++ {
++ // Get the pointer to the first free descriptor
++ // txdesc_free = shared_env_ptr->txdesc[queue_idx] + (free_idx % IPC_TXDESC_CNT);
++ }
++ else
++ {
++ //txdesc_free = NULL;
++ }
++
++ return txdesc_free;
++}
++
++/**
++ ****************************************************************************************
++ */
++void aicwf_usb_host_txdesc_push(struct usb_host_env_tag *env, const int queue_idx, const uint64_t host_id)
++{
++ //printk("push, %d, %d, 0x%llx \r\n", queue_idx, env->txdesc_free_idx[queue_idx], host_id);
++ // Save the host id in the environment
++ env->tx_host_id[queue_idx][env->txdesc_free_idx[queue_idx] % USB_TXDESC_CNT] = host_id;
++
++ // Increment the index
++ env->txdesc_free_idx[queue_idx]++;
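++ // Wrap the free index back to 0 before the 32-bit counter overflows;
++ // indices are consumed modulo USB_TXDESC_CNT, so this is harmless as
++ // long as USB_TXDESC_CNT is a power of two.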
++ if(env->txdesc_free_idx[queue_idx]==0x40000000)
++ env->txdesc_free_idx[queue_idx] = 0;
++}
++
++/**
++ ****************************************************************************************
++ */
++void aicwf_usb_host_tx_cfm_handler(struct usb_host_env_tag *env, u32 *data)
++{
++ u32 queue_idx = 0;//data[0];
++ //struct rwnx_hw *rwnx_hw = (struct rwnx_hw *)env->pthis;
++ struct sk_buff *skb = NULL;
++ struct rwnx_txhdr *txhdr;
++ AICWFDBG(LOGTRACE, "%s Enter \n", __func__);
++ //printk("sdio_host_tx_cfm_handler, %d, 0x%08x\r\n", queue_idx, data[1]);
++ // TX confirmation descriptors have been received
++ // REG_SW_SET_PROFILING(env->pthis, SW_PROF_IRQ_E2A_TXCFM);
++ //while (1)
++ {
++ // Get the used index and increase it. The increase is done before knowing
++ // whether the current buffer is confirmed, because the callback may call
++ // ipc_host_txdesc_get() when flow control is enabled, and the index must
++ // already hold its final value for the FIFO-full test to be correct
++ //uint32_t used_idx = env->txdesc_used_idx[queue_idx]++;
++ uint32_t used_idx = data[1];
++ //uint64_t host_id = env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT];
++ unsigned long host_id = env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT];
++
++ // Reset the host id in the array
++ env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT] = 0;
++
++ // call the external function to indicate that a TX packet is freed
++ if (host_id == 0)
++ {
++ // No more confirmations, so put back the used index at its initial value
++ env->txdesc_used_idx[queue_idx] = used_idx;
++ AICWFDBG(LOGERROR, "ERROR:No more confirmations\r\n");
++ return;
++ //break;
++ }
++ AICWFDBG(LOGINFO, "usb_host_tx_cfm_handler:used_idx=%d, hostid=%p, 0x%p, status=%x\r\n",used_idx, (void *)host_id, env->pthis, data[0]);
++
++ // set the cfm status
++ skb = (struct sk_buff *)host_id;
++ txhdr = (struct rwnx_txhdr *)skb->data;
++ txhdr->hw_hdr.cfm.status = (union rwnx_hw_txstatus)data[0];
++ //txhdr->hw_hdr.status = data[1];
++ //if (env->cb.send_data_cfm(env->pthis, host_id) != 0)
++ if (rwnx_txdatacfm(env->pthis, (void *)host_id) != 0)
++ {
++ // No more confirmations, so put back the used index at its initial value
++ env->txdesc_used_idx[queue_idx] = used_idx;
++ env->tx_host_id[queue_idx][used_idx % USB_TXDESC_CNT] = host_id;
++ // and exit the loop
++ AICWFDBG(LOGERROR, "ERROR:rwnx_txdatacfm,\r\n");
++ // break;
++ }
++
++ }
++}
++
++int aicwf_rwnx_usb_platform_init(struct aic_usb_dev *usbdev)
++{
++ struct rwnx_plat *rwnx_plat = NULL;
++ void *drvdata;
++ int ret = -ENODEV;
++
++ rwnx_plat = kzalloc(sizeof(struct rwnx_plat), GFP_KERNEL);
++
++ if (!rwnx_plat)
++ return -ENOMEM;
++
++// rwnx_plat->pci_dev = pci_dev;
++ rwnx_plat->usbdev = usbdev;
++
++ ret = rwnx_platform_init(rwnx_plat, &drvdata);
++#if 0
++ pci_set_drvdata(pci_dev, drvdata);
++
++ if (ret)
++ rwnx_plat->deinit(rwnx_plat);
++#endif
++ return ret;
++}
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic8800_fdrv/usb_host.h
+@@ -0,0 +1,43 @@
++/**
++ * usb_host.h
++ *
++ * USB host function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++
++#ifndef _USB_HOST_H_
++#define _USB_HOST_H_
++
++#include "lmac_types.h"
++#include "aicwf_usb.h"
++
++#define USB_TXQUEUE_CNT NX_TXQ_CNT
++#define USB_TXDESC_CNT NX_TXDESC_CNT
++
++
++/// Definition of the IPC Host environment structure.
++struct usb_host_env_tag
++{
++ // Index of the first free TX descriptor
++ uint32_t txdesc_free_idx[USB_TXQUEUE_CNT];
++ // Index of the first used TX descriptor
++ uint32_t txdesc_used_idx[USB_TXQUEUE_CNT];
++ // Array storing the currently pushed host ids, per IPC queue
++ //uint64_t tx_host_id[USB_TXQUEUE_CNT][USB_TXDESC_CNT];
++ unsigned long tx_host_id[USB_TXQUEUE_CNT][USB_TXDESC_CNT];
++
++ /// Pointer to the attached object (used in callbacks and register accesses)
++ void *pthis;
++};
++
++extern void aicwf_usb_host_init(struct usb_host_env_tag *env,
++ void *cb, void *shared_env_ptr, void *pthis);
++
++extern void aicwf_usb_host_txdesc_push(struct usb_host_env_tag *env, const int queue_idx, const uint64_t host_id);
++
++extern void aicwf_usb_host_tx_cfm_handler(struct usb_host_env_tag *env, u32 *data);
++extern int aicwf_rwnx_usb_platform_init(struct aic_usb_dev *usbdev);
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/Kconfig
+@@ -0,0 +1,5 @@
++config AIC_LOADFW_SUPPORT
++ tristate "AIC8800 Load Firmware Support"
++ help
++ This adds support for the AIC8800 firmware loading driver.
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/Makefile
+@@ -0,0 +1,105 @@
++CONFIG_USB_SUPPORT =y
++CONFIG_RFTEST =y
++CONFIG_USB_BT =y
++CONFIG_USB_MSG_EP = y
++CONFIG_USB_NO_TRANS_DMA_MAP = n
++CONFIG_M2D_OTA_AUTO_SUPPORT = n
++CONFIG_LINK_DET_5G = y
++# Need to set fw path in BOARD_KERNEL_CMDLINE
++CONFIG_USE_FW_REQUEST = n
++CONFIG_PREALLOC_RX_SKB = n
++CONFIG_PREALLOC_TXQ = n
++
++# Platform support list
++CONFIG_PLATFORM_ROCKCHIP ?= y
++CONFIG_PLATFORM_ALLWINNER ?= n
++CONFIG_PLATFORM_AMLOGIC ?= n
++CONFIG_PLATFORM_UBUNTU ?= n
++
++CONFIG_AIC_LOADFW_SUPPORT = m
++MODULE_NAME = aic_load_fw
++# CONFIG_AIC_FW_PATH = "/vendor/etc/firmware"
++# export CONFIG_AIC_FW_PATH
++
++ifeq ($(CONFIG_USB_SUPPORT), y)
++ccflags-y += -DAICWF_USB_SUPPORT
++endif
++ccflags-$(CONFIG_RFTEST) += -DCONFIG_RFTEST
++ccflags-$(CONFIG_USB_BT) += -DCONFIG_USB_BT
++ccflags-$(CONFIG_USB_MSG_EP) += -DCONFIG_USB_MSG_EP
++ccflags-$(CONFIG_USB_NO_TRANS_DMA_MAP) += -DCONFIG_USB_NO_TRANS_DMA_MAP
++ccflags-$(CONFIG_M2D_OTA_AUTO_SUPPORT) += -DCONFIG_M2D_OTA_AUTO_SUPPORT
++ccflags-$(CONFIG_LINK_DET_5G) += -DCONFIG_LINK_DET_5G
++ccflags-$(CONFIG_USE_FW_REQUEST) += -DCONFIG_USE_FW_REQUEST
++ccflags-$(CONFIG_PREALLOC_RX_SKB) += -DCONFIG_PREALLOC_RX_SKB
++ccflags-$(CONFIG_PREALLOC_TXQ) += -DCONFIG_PREALLOC_TXQ
++
++
++obj-$(CONFIG_AIC_LOADFW_SUPPORT) := $(MODULE_NAME).o
++$(MODULE_NAME)-y := aic_bluetooth_main.o \
++ aicbluetooth.o \
++ aicwf_usb.o \
++ aic_txrxif.o \
++ aicbluetooth_cmds.o \
++ aic_compat_8800d80.o \
++ md5.o
++
++$(MODULE_NAME)-$(CONFIG_PREALLOC_RX_SKB) += aicwf_rx_prealloc.o
++$(MODULE_NAME)-$(CONFIG_PREALLOC_TXQ) += aicwf_txq_prealloc.o
++
++
++
++ifeq ($(CONFIG_PLATFORM_ROCKCHIP), y)
++ccflags-$(CONFIG_PLATFORM_ROCKCHIP) += -DCONFIG_PLATFORM_ROCKCHIP
++#KDIR := /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/kernel
++#ARCH ?= arm
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android7/RK3229_ANDROID7.1_v1.01_20170914/rk3229_Android7.1_v1.01_xml0914/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
++KDIR := /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/kernel
++ARCH ?= arm
++CROSS_COMPILE ?= /home/yaya/E/Rockchip/3229/Android9/rk3229_android9.0_box/prebuilts/gcc/linux-x86/arm/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-
++#KDIR := /home/yaya/E/Rockchip/3399/rk3399-android-10/kernel
++#ARCH ?= arm64
++#CROSS_COMPILE ?= /home/yaya/E/Rockchip/3399/rk3399-android-10/prebuilts/gcc/linux-x86/aarch64/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++endif
++
++ifeq ($(CONFIG_PLATFORM_ALLWINNER), y)
++ccflags-$(CONFIG_PLATFORM_ALLWINNER) += -DCONFIG_PLATFORM_ALLWINNER
++KDIR := /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/kernel/linux-4.9
++ARCH ?= arm64
++CROSS_COMPILE ?= /home/yaya/E/Allwinner/R818/R818/AndroidQ/lichee/out/gcc-linaro-5.3.1-2016.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-
++endif
++
++ifeq ($(CONFIG_PLATFORM_AMLOGIC), y)
++ccflags-$(CONFIG_PLATFORM_AMLOGIC) += -DCONFIG_PLATFORM_AMLOGIC
++ARCH := arm
++CROSS_COMPILE := /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin/arm-linux-androidkernel-
++KDIR := /home/yaya/D/Workspace/CyberQuantum/JinHaoYue/amls905x3/SDK/20191101-0tt-asop/android9.0/out/target/product/u202/obj/KERNEL_OBJ/
++
++endif
++
++ifeq ($(CONFIG_PLATFORM_UBUNTU), y)
++ccflags-$(CONFIG_PLATFORM_UBUNTU) += -DCONFIG_PLATFORM_UBUNTU
++KDIR := /lib/modules/$(shell uname -r)/build
++PWD := $(shell pwd)
++KVER := $(shell uname -r)
++MODDESTDIR := /lib/modules/$(KVER)/kernel/drivers/net/wireless/aic8800
++ARCH := x86_64
++CROSS_COMPILE :=
++endif
++
++
++all: modules
++modules:
++ make -C $(KDIR) M=$(PWD) ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) modules
++
++install:
++ mkdir -p $(MODDESTDIR)
++ install -p -m 644 $(MODULE_NAME).ko $(MODDESTDIR)/
++ /sbin/depmod -a ${KVER}
++
++uninstall:
++ rm -rfv $(MODDESTDIR)/$(MODULE_NAME).ko
++ /sbin/depmod -a ${KVER}
++
++clean:
++ rm -rf *.o *.ko *.o.* *.mod.* modules.* Module.* .a* .o* .*.o.* *.mod .tmp* .cache.mk .modules.order.cmd .Module.symvers.cmd
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aic_bluetooth_main.c
+@@ -0,0 +1,71 @@
++#include <linux/module.h>
++#include <linux/inetdevice.h>
++#include "aicwf_usb.h"
++#include "rwnx_version_gen.h"
++#include "aicwf_rx_prealloc.h"
++#include "aicwf_debug.h"
++#include "aicwf_txq_prealloc.h"
++
++
++#define DRV_CONFIG_FW_NAME "fw.bin"
++#define DRV_DESCRIPTION "AIC BLUETOOTH"
++#define DRV_COPYRIGHT "Copyright(c) 2015-2020 AICSemi"
++#define DRV_AUTHOR "AICSemi"
++#define DRV_VERS_MOD "1.0"
++
++int testmode = FW_NORMAL_MODE;
++int adap_test = 0;
++char paringid[100];
++int n_para = 1;
++int ble_scan_wakeup_reboot_time = 1000;
++int aicwf_dbg_level = LOGERROR|LOGINFO|LOGDEBUG|LOGTRACE;
++uint32_t ad_data_filter_mask = 0;
++
++module_param(aicwf_dbg_level, int, 0660);
++module_param(ble_scan_wakeup_reboot_time, int, 0660);
++module_param(testmode, int, 0660);
++module_param(adap_test, int, 0660);
++module_param_string(paringid, paringid, 100, 0660);
++
++
++static void aicsmac_driver_register(void)
++{
++ aicwf_usb_register();
++}
++
++static int __init aic_bluetooth_mod_init(void)
++{
++ printk("%s \n", __func__);
++ printk("RELEASE DATE:%s \r\n", RELEASE_DATE);
++#ifdef CONFIG_PREALLOC_RX_SKB
++ aicwf_prealloc_init();
++#endif
++
++ aicsmac_driver_register();
++ return 0;
++}
++
++static void __exit aic_bluetooth_mod_exit(void)
++{
++ printk("%s\n", __func__);
++ aicwf_usb_exit();
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++ aicwf_prealloc_exit();
++#endif
++
++#ifdef CONFIG_PREALLOC_TXQ
++ aicwf_prealloc_txq_free();
++#endif
++}
++
++
++module_init(aic_bluetooth_mod_init);
++module_exit(aic_bluetooth_mod_exit);
++
++MODULE_FIRMWARE(DRV_CONFIG_FW_NAME);
++MODULE_DESCRIPTION(DRV_DESCRIPTION);
++MODULE_VERSION(DRV_VERS_MOD);
++MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
++MODULE_LICENSE("GPL");
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aic_compat_8800d80.c
+@@ -0,0 +1,331 @@
++#include "aic_txrxif.h"
++#include "aicwf_usb.h"
++#include "aicbluetooth.h"
++#include "aic_compat_8800d80.h"
++#include "aicwf_debug.h"
++
++int rwnx_plat_bin_fw_upload_2(struct aic_usb_dev *usbdev, u32 fw_addr,
++ char *filename);
++int rwnx_request_firmware_common(struct aic_usb_dev *usbdev,
++ u32** buffer, const char *filename);
++void rwnx_plat_userconfig_parsing(char *buffer, int size);
++void rwnx_release_firmware_common(u32** buffer);
++
++extern int testmode;
++extern int chip_id;
++
++typedef u32 (*array2_tbl_t)[2];
++
++#define AIC_PATCH_MAGIG_NUM 0x48435450 // "PTCH"
++#define AIC_PATCH_MAGIG_NUM_2 0x50544348 // "HCTP"
++#define AIC_PATCH_BLOCK_MAX 4
++
++typedef struct {
++ uint32_t magic_num;
++ uint32_t pair_start;
++ uint32_t magic_num_2;
++ uint32_t pair_count;
++ uint32_t block_dst[AIC_PATCH_BLOCK_MAX];
++ uint32_t block_src[AIC_PATCH_BLOCK_MAX];
++ uint32_t block_size[AIC_PATCH_BLOCK_MAX]; // word count
++} aic_patch_t;
++
++
++#define AIC_PATCH_OFST(mem) ((size_t) &((aic_patch_t *)0)->mem)
++#define AIC_PATCH_ADDR(mem) ((u32) (aic_patch_str_base + AIC_PATCH_OFST(mem)))
++
++u32 patch_tbl_d80[][2] =
++{
++ #ifdef USE_5G
++ {0x00b4, 0xf3010001},
++ #else
++ {0x00b4, 0xf3010000},
++ #endif
++};
++
++//adap test
++u32 adaptivity_patch_tbl_d80[][2] = {
++ {0x000C, 0x0000320A}, //linkloss_thd
++ {0x009C, 0x00000000}, //ac_param_conf
++ {0x0154, 0x00010000}, //tx_adaptivity_en
++};
++
++u32 syscfg_tbl_masked_8800d80[][3] = {
++};
++
++u32 syscfg_tbl_8800d80[][2] = {
++ #ifdef CONFIG_PMIC_SETTING
++ {0x70001408, 0x00000000}, // stop wdg
++ #endif /* CONFIG_PMIC_SETTING */
++};
++
++extern int adap_test;
++
++int aicwf_patch_config_8800d80(struct aic_usb_dev *usb_dev)
++{
++ u32 rd_patch_addr;
++ u32 aic_patch_addr;
++ u32 config_base, aic_patch_str_base;
++ uint32_t start_addr = 0x001D7000;
++ u32 patch_addr = start_addr;
++ u32 patch_cnt = sizeof(patch_tbl_d80) / 4 / 2;
++ struct dbg_mem_read_cfm rd_patch_addr_cfm;
++ int ret = 0;
++ int cnt = 0;
++ //adap test
++ int adap_patch_cnt = 0;
++
++ if (adap_test) {
++ adap_patch_cnt = sizeof(adaptivity_patch_tbl_d80)/sizeof(u32)/2;
++ }
++
++ if (chip_id == CHIP_REV_U01) {
++ rd_patch_addr = RAM_FMAC_FW_ADDR_8800D80 + 0x0198;
++ } else {
++ rd_patch_addr = RAM_FMAC_FW_ADDR_8800D80_U02 + 0x0198;
++ }
++ aic_patch_addr = rd_patch_addr + 8;
++
++ AICWFDBG(LOGERROR, "Read FW mem: %08x\n", rd_patch_addr);
++ if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_patch_addr, &rd_patch_addr_cfm))) {
++ AICWFDBG(LOGERROR, "setting base[0x%x] rd fail: %d\n", rd_patch_addr, ret);
++ return ret;
++ }
++ AICWFDBG(LOGERROR, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata);
++ config_base = rd_patch_addr_cfm.memdata;
++
++ if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, aic_patch_addr, &rd_patch_addr_cfm))) {
++ AICWFDBG(LOGERROR, "patch_str_base[0x%x] rd fail: %d\n", aic_patch_addr, ret);
++ return ret;
++ }
++ AICWFDBG(LOGERROR, "%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata);
++ aic_patch_str_base = rd_patch_addr_cfm.memdata;
++
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(magic_num), AIC_PATCH_MAGIG_NUM))) {
++ AICWFDBG(LOGERROR, "maigic_num[0x%x] write fail: %d\n", AIC_PATCH_ADDR(magic_num), ret);
++ return ret;
++ }
++
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(magic_num_2), AIC_PATCH_MAGIG_NUM_2))) {
++ AICWFDBG(LOGERROR, "maigic_num[0x%x] write fail: %d\n", AIC_PATCH_ADDR(magic_num_2), ret);
++ return ret;
++ }
++
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(pair_start), patch_addr))) {
++ AICWFDBG(LOGERROR, "pair_start[0x%x] write fail: %d\n", AIC_PATCH_ADDR(pair_start), ret);
++ return ret;
++ }
++
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(pair_count), patch_cnt + adap_patch_cnt))) {
++ AICWFDBG(LOGERROR, "pair_count[0x%x] write fail: %d\n", AIC_PATCH_ADDR(pair_count), ret);
++ return ret;
++ }
++
++ for (cnt = 0; cnt < patch_cnt; cnt++) {
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, patch_tbl_d80[cnt][0]+config_base))) {
++ AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt);
++ return ret;
++ }
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, patch_tbl_d80[cnt][1]))) {
++ AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt+4);
++ return ret;
++ }
++ }
++
++ if (adap_test){
++ int tmp_cnt = patch_cnt + adap_patch_cnt;
++ for (cnt = patch_cnt; cnt < tmp_cnt; cnt++) {
++ int tbl_idx = cnt - patch_cnt;
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, adaptivity_patch_tbl_d80[tbl_idx][0]+config_base))) {
++ AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt);
++ return ret;
++ }
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, adaptivity_patch_tbl_d80[tbl_idx][1]))) {
++ AICWFDBG(LOGERROR, "%x write fail\n", start_addr+8*cnt+4);
++ return ret;
++ }
++ }
++ }
++
++ /*
++ * Patch blocks 0 ~ 3 are empty by default; they can be set as follows:
++ *
++ * const u32 patch_block_0[3] = {0x11223344, 0x55667788, 0xaabbccdd};
++ * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_dst[0]), 0x160000))) {
++ * printk("block_dst [0x%x] write fail: %d\n", (u32)(&aic_patch->block_dst[0]), ret);
++ * }
++ * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_src[0]), 0x307000))) {
++ * printk("block_src [0x%x] write fail: %d\n", (u32)(&aic_patch->block_src[0]), ret);
++ * }
++ * if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, (u32)(&aic_patch->block_size[0]), sizeof(patch_block_0) / sizeof(u32)))) {
++ * printk("block_size[0x%x] write fail: %d\n", (u32)(&aic_patch->block_size[0]), ret);
++ * }
++ * if ((ret = rwnx_send_dbg_mem_block_write_req(usb_dev, 0x307000, sizeof(patch_block_0), patch_block_0))) {
++ * printk("blk set fail: %d\n", ret);
++ * }
++ */
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[0]), 0))) {
++ AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[0]), ret);
++ return ret;
++ }
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[1]), 0))) {
++ AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[1]), ret);
++ return ret;
++ }
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[2]), 0))) {
++ AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[2]), ret);
++ return ret;
++ }
++ if ((ret = rwnx_send_dbg_mem_write_req(usb_dev, AIC_PATCH_ADDR(block_size[3]), 0))) {
++ AICWFDBG(LOGERROR, "block_size[0x%x] write fail: %d\n", AIC_PATCH_ADDR(block_size[3]), ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++
++#if 0
++extern char aic_fw_path[200];
++
++int rwnx_plat_userconfig_load_8800d80(struct aic_usb_dev *usb_dev){
++ int size;
++ u32 *dst=NULL;
++ char *filename = FW_USERCONFIG_NAME_8800D80;
++
++ AICWFDBG(LOGINFO, "userconfig file path:%s \r\n", filename);
++
++ /* load file */
++ size = rwnx_request_firmware_common(usb_dev, &dst, filename);
++ if (size <= 0) {
++ AICWFDBG(LOGERROR, "wrong size of firmware file\n");
++ dst = NULL;
++ return 0;
++ }
++
++ /* Copy the file on the Embedded side */
++ AICWFDBG(LOGINFO, "### Load file done: %s, size=%d\n", filename, size);
++
++ rwnx_plat_userconfig_parsing((char *)dst, size);
++
++ rwnx_release_firmware_common(&dst);
++
++ AICWFDBG(LOGINFO, "userconfig download complete\n\n");
++ return 0;
++
++}
++#endif
++int system_config_8800d80(struct aic_usb_dev *usb_dev){
++ int syscfg_num;
++ int ret, cnt;
++ const u32 mem_addr = 0x40500000;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++ ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ printk("%x rd fail: %d\n", mem_addr, ret);
++ return ret;
++ }
++ chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16);
++ printk("chip_id=%x\n", chip_id);
++ #if 1
++ syscfg_num = sizeof(syscfg_tbl_8800d80) / sizeof(u32) / 2;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl_8800d80[cnt][0], syscfg_tbl_8800d80[cnt][1]);
++ if (ret) {
++ printk("%x write fail: %d\n", syscfg_tbl_8800d80[cnt][0], ret);
++ return ret;
++ }
++ }
++ syscfg_num = sizeof(syscfg_tbl_masked_8800d80) / sizeof(u32) / 3;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_mask_write_req(usb_dev,
++ syscfg_tbl_masked_8800d80[cnt][0], syscfg_tbl_masked_8800d80[cnt][1], syscfg_tbl_masked_8800d80[cnt][2]);
++ if (ret) {
++ printk("%x mask write fail: %d\n", syscfg_tbl_masked_8800d80[cnt][0], ret);
++ return ret;
++ }
++ }
++ #endif
++ return 0;
++}
++
++int aicfw_download_fw_8800d80(struct aic_usb_dev *usb_dev){
++ if(testmode == FW_NORMAL_MODE){
++ if (chip_id != CHIP_REV_U01){
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR_8800D80_U02, FW_ADID_BASE_NAME_8800D80_U02)) {
++ return -1;
++ }
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_8800D80_U02, FW_PATCH_BASE_NAME_8800D80_U02)) {
++ return -1;
++ }
++ if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME_8800D80_U02)) {
++ return -1;
++ }
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80_U02, FW_BASE_NAME_8800D80_U02)) {
++ return -1;
++ }
++ #if 0
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_CALIBMODE_ADDR_8800D80_U02, FW_CALIBMODE_NAME_8800D80_U02)) {
++ return -1;
++ }
++ if (rwnx_send_dbg_mem_write_req(usb_dev, 0x40500048, 0x1e0000))
++ return -1;
++ #endif
++ if (aicwf_patch_config_8800d80(usb_dev)) {
++ return -1;
++ }
++ if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_FW_ADDR_8800D80_U02, HOST_START_APP_AUTO)) {
++ return -1;
++ }
++ }else {
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR_8800D80, FW_ADID_BASE_NAME_8800D80)) {
++ return -1;
++ }
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_8800D80, FW_PATCH_BASE_NAME_8800D80)) {
++ return -1;
++ }
++ if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME_8800D80)) {
++ return -1;
++ }
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_FW_ADDR_8800D80, FW_BASE_NAME_8800D80)) {
++ return -1;
++ }
++ if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_FW_ADDR_8800D80, HOST_START_APP_AUTO)) {
++ return -1;
++ }
++ }
++ }else if(testmode == FW_TEST_MODE){
++ if (chip_id != CHIP_REV_U01){
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR_8800D80_U02, FW_ADID_BASE_NAME_8800D80_U02)) {
++ return -1;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_8800D80_U02, FW_PATCH_BASE_NAME_8800D80_U02)) {
++ return -1;
++ }
++ if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME_8800D80_U02)) {
++ return -1;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80_U02, FW_RF_BASE_NAME_8800D80_U02)) {
++ AICWFDBG(LOGERROR,"%s wifi fw download fail \r\n", __func__);
++ return -1;
++ }
++ if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80_U02, HOST_START_APP_AUTO)) {
++ return -1;
++ }
++ } else {
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80, FW_RF_BASE_NAME_8800D80)) {
++ AICWFDBG(LOGERROR,"%s wifi fw download fail \r\n", __func__);
++ return -1;
++ }
++ if (rwnx_send_dbg_start_app_req(usb_dev, RAM_FMAC_RF_FW_ADDR_8800D80, HOST_START_APP_AUTO)) {
++ return -1;
++ }
++ }
++ }
++ return 0;
++}
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aic_compat_8800d80.h
+@@ -0,0 +1,37 @@
++#include <linux/types.h>
++#include "aicwf_usb.h"
++
++#define USB_DEVICE_ID_AIC_8800D80 0x8D80
++#define USB_DEVICE_ID_AIC_8800D81 0x8D81
++
++#define FW_BASE_NAME_8800D80 "fmacfw_8800d80.bin"
++#define FW_RF_BASE_NAME_8800D80 "fmacfw_rf_8800d80.bin"
++#define FW_PATCH_BASE_NAME_8800D80 "fw_patch_8800d80.bin"
++#define FW_ADID_BASE_NAME_8800D80 "fw_adid_8800d80.bin"
++#define FW_PATCH_TABLE_NAME_8800D80 "fw_patch_table_8800d80.bin"
++
++#define FW_BASE_NAME_8800D80_U02 "fmacfw_8800d80_u02.bin"
++#define FW_RF_BASE_NAME_8800D80_U02 "lmacfw_rf_8800d80_u02.bin"
++#define FW_PATCH_BASE_NAME_8800D80_U02 "fw_patch_8800d80_u02.bin"
++#define FW_ADID_BASE_NAME_8800D80_U02 "fw_adid_8800d80_u02.bin"
++#define FW_CALIBMODE_NAME_8800D80_U02 "calibmode_8800d80.bin"
++#define FW_PATCH_TABLE_NAME_8800D80_U02 "fw_patch_table_8800d80_u02.bin"
++
++#define FW_USERCONFIG_NAME_8800D80 "aic_userconfig_8800d80.txt"
++
++#define RAM_FMAC_FW_ADDR_8800D80 0x100000
++#define RAM_FMAC_RF_FW_ADDR_8800D80 0x110000
++#define FW_RAM_ADID_BASE_ADDR_8800D80 0x002017E0
++#define FW_RAM_PATCH_BASE_ADDR_8800D80 0x0020B2B0
++
++#define RAM_FMAC_FW_ADDR_8800D80_U02 0x120000
++#define RAM_FMAC_RF_FW_ADDR_8800D80_U02 0x120000
++#define FW_RAM_ADID_BASE_ADDR_8800D80_U02 0x00201940
++#define FW_RAM_CALIBMODE_ADDR_8800D80_U02 0x1e0000
++#define FW_RAM_PATCH_BASE_ADDR_8800D80_U02 0x0020B43c
++
++int aicwf_patch_config_8800d80(struct aic_usb_dev *usb_dev);
++int rwnx_plat_userconfig_load_8800d80(struct aic_usb_dev *usbdev);
++int system_config_8800d80(struct aic_usb_dev *usb_dev);
++int aicfw_download_fw_8800d80(struct aic_usb_dev *usb_dev);
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aic_txrxif.c
+@@ -0,0 +1,486 @@
++/**
++ * aic_txrxif.c
++ *
++ * bus function definitions
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#include <linux/kthread.h>
++#include <linux/netdevice.h>
++#include <linux/printk.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/completion.h>
++#include <linux/semaphore.h>
++#include <linux/debugfs.h>
++#include <linux/atomic.h>
++#include <linux/vmalloc.h>
++#include <linux/version.h>
++#ifdef CONFIG_PLATFORM_UBUNTU
++#if LINUX_VERSION_CODE > KERNEL_VERSION(4, 10, 0)
++#include <linux/sched/signal.h>
++#endif
++#endif
++#include "aic_txrxif.h"
++#include "aicbluetooth.h"
++#include "aicbluetooth_cmds.h"
++
++int aicwf_bus_init(uint bus_hdrlen, struct device *dev)
++{
++ int ret = 0;
++ struct aicwf_bus *bus_if;
++
++ if (!dev) {
++ txrx_err("device not found\n");
++ return -1;
++ }
++ bus_if = dev_get_drvdata(dev);
++ #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ bus_if->cmd_buf = usb_alloc_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #else
++ bus_if->cmd_buf = usb_buffer_alloc(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL), &bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #endif
++ #else
++ bus_if->cmd_buf = kzalloc(CMD_BUF_MAX, GFP_KERNEL);
++ #endif
++ if(!bus_if->cmd_buf) {
++ ret = -ENOMEM;
++ txrx_err("proto_attach failed\n");
++ goto fail;
++ }
++ memset(bus_if->cmd_buf, '\0', CMD_BUF_MAX);
++
++ init_completion(&bus_if->bustx_trgg);
++ init_completion(&bus_if->busrx_trgg);
++#ifdef AICWF_SDIO_SUPPORT
++ bus_if->bustx_thread = kthread_run(sdio_bustx_thread, (void *)bus_if, "aicwf_bustx_thread");
++ bus_if->busrx_thread = kthread_run(sdio_busrx_thread, (void *)bus_if->bus_priv.sdio->rx_priv, "aicwf_busrx_thread");
++#endif
++#ifdef AICWF_USB_SUPPORT
++ bus_if->bustx_thread = kthread_run(usb_bustx_thread, (void *)bus_if, "aicwf_bustx_thread");
++ bus_if->busrx_thread = kthread_run(usb_busrx_thread, (void *)bus_if->bus_priv.usb->rx_priv, "aicwf_busrx_thread");
++#endif
++
++ if (IS_ERR(bus_if->bustx_thread)) {
++ bus_if->bustx_thread = NULL;
++ txrx_err("aicwf_bustx_thread run fail\n");
++ goto fail;
++ }
++
++ if (IS_ERR(bus_if->busrx_thread)) {
++ bus_if->busrx_thread = NULL;
++ txrx_err("aicwf_bustx_thread run fail\n");
++ goto fail;
++ }
++
++ return ret;
++fail:
++ aicwf_bus_deinit(dev);
++
++ return ret;
++}
++
++void aicwf_bus_deinit(struct device *dev)
++{
++ struct aicwf_bus *bus_if;
++ struct aic_usb_dev *usbdev;
++
++ if (!dev) {
++ txrx_err("device not found\n");
++ return;
++ }
++ printk("%s", __func__);
++
++ mdelay(500);
++ bus_if = dev_get_drvdata(dev);
++ aicwf_bus_stop(bus_if);
++
++ usbdev = bus_if->bus_priv.usb;
++ usbdev->app_cmp = false;
++ aic_bt_platform_deinit(usbdev);
++
++ if (bus_if->cmd_buf) {
++ #if defined CONFIG_USB_SUPPORT && defined CONFIG_USB_NO_TRANS_DMA_MAP
++ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
++ usb_free_coherent(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #else
++ usb_buffer_free(bus_if->bus_priv.usb->udev, CMD_BUF_MAX, bus_if->cmd_buf, bus_if->bus_priv.usb->cmd_dma_trans_addr);
++ #endif
++ #else
++ kfree(bus_if->cmd_buf);
++ #endif
++ bus_if->cmd_buf = NULL;
++ }
++
++ if (bus_if->bustx_thread) {
++ complete_all(&bus_if->bustx_trgg);
++ kthread_stop(bus_if->bustx_thread);
++ bus_if->bustx_thread = NULL;
++ }
++ printk("exit %s\n", __func__);
++}
++
++void aicwf_frame_tx(void *dev, struct sk_buff *skb)
++{
++ struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev;
++ aicwf_bus_txdata(usbdev->bus_if, skb);
++}
++
++struct aicwf_tx_priv* aicwf_tx_init(void *arg)
++{
++ struct aicwf_tx_priv* tx_priv;
++
++ tx_priv = kzalloc(sizeof(struct aicwf_tx_priv), GFP_KERNEL);
++ if (!tx_priv)
++ return NULL;
++
++ tx_priv->usbdev = (struct aic_usb_dev *)arg;
++
++ atomic_set(&tx_priv->aggr_count, 0);
++ tx_priv->aggr_buf = dev_alloc_skb(MAX_AGGR_TXPKT_LEN);
++ if(!tx_priv->aggr_buf) {
++ txrx_err("Alloc bus->txdata_buf failed!\n");
++ kfree(tx_priv);
++ return NULL;
++ }
++ tx_priv->head = tx_priv->aggr_buf->data;
++ tx_priv->tail = tx_priv->aggr_buf->data;
++
++ return tx_priv;
++}
++
++void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv)
++{
++ if (tx_priv && tx_priv->aggr_buf)
++ dev_kfree_skb(tx_priv->aggr_buf);
++
++ kfree(tx_priv);
++ tx_priv = NULL;
++}
++
++static bool aicwf_another_ptk(struct sk_buff *skb)
++{
++ u8 *data;
++ u16 aggr_len = 0;
++
++ if(skb->data == NULL || skb->len == 0) {
++ return false;
++ }
++ data = skb->data;
++ aggr_len = (*skb->data | (*(skb->data + 1) << 8));
++ if(aggr_len == 0) {
++ return false;
++ }
++
++ return true;
++}
++
++int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv)
++{
++ int ret = 0;
++ unsigned long flags = 0;
++ struct sk_buff *skb = NULL;
++ u16 pkt_len = 0;
++ struct sk_buff *skb_inblock = NULL;
++ u16 aggr_len = 0, adjust_len = 0;
++ u8 *data = NULL;
++
++ while (1) {
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if(aicwf_is_framequeue_empty(&rx_priv->rxq)) {
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ break;
++ }
++ skb = aicwf_frame_dequeue(&rx_priv->rxq);
++ spin_unlock_irqrestore(&rx_priv->rxqlock,flags);
++ if (skb == NULL) {
++ txrx_err("skb_error\r\n");
++ break;
++ }
++ while(aicwf_another_ptk(skb)) {
++ data = skb->data;
++ pkt_len = (*skb->data | (*(skb->data + 1) << 8));
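++ // Each aggregated sub-frame starts with a 16-bit little-endian length,
++ // followed by a type byte at offset 2 (data vs. config).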
++
++ if((skb->data[2] & USB_TYPE_CFG) != USB_TYPE_CFG) { // type : data
++ aggr_len = pkt_len + RX_HWHRD_LEN;
++
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ skb_inblock = __dev_alloc_skb(aggr_len + CCMP_OR_WEP_INFO, GFP_KERNEL);//8 is for ccmp mic or wep icv
++ if(skb_inblock == NULL){
++ txrx_err("no more space!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++
++ skb_put(skb_inblock, aggr_len);
++ memcpy(skb_inblock->data, data, aggr_len);
++ skb_pull(skb, adjust_len);
++ }
++ else { // type : config
++ aggr_len = pkt_len;
++
++ if (aggr_len & (RX_ALIGNMENT - 1))
++ adjust_len = roundup(aggr_len, RX_ALIGNMENT);
++ else
++ adjust_len = aggr_len;
++
++ skb_inblock = __dev_alloc_skb(aggr_len+4, GFP_KERNEL);
++ if(skb_inblock == NULL){
++ txrx_err("no more space!\n");
++ aicwf_dev_skb_free(skb);
++ return -EBADE;
++ }
++
++ skb_put(skb_inblock, aggr_len+4);
++ memcpy(skb_inblock->data, data, aggr_len+4);
++ if((*(skb_inblock->data + 2) & 0x7f) == USB_TYPE_CFG_CMD_RSP)
++ rwnx_rx_handle_msg(rx_priv->usbdev, (struct ipc_e2a_msg *)(skb_inblock->data + 4));
++ skb_pull(skb, adjust_len+4);
++ }
++ }
++
++ dev_kfree_skb(skb);
++ atomic_dec(&rx_priv->rx_cnt);
++ }
++
++ return ret;
++}
++
++static struct recv_msdu *aicwf_rxframe_queue_init(struct list_head *q, int qsize)
++{
++ int i;
++ struct recv_msdu *req, *reqs;
++
++ reqs = vmalloc(qsize*sizeof(struct recv_msdu));
++ if (reqs == NULL)
++ return NULL;
++
++ req = reqs;
++ for (i = 0; i < qsize; i++) {
++ INIT_LIST_HEAD(&req->rxframe_list);
++ list_add(&req->rxframe_list, q);
++ req->len = 0;
++ req++;
++ }
++
++ return reqs;
++}
++
++struct aicwf_rx_priv *aicwf_rx_init(void *arg)
++{
++ struct aicwf_rx_priv* rx_priv;
++ rx_priv = kzalloc(sizeof(struct aicwf_rx_priv), GFP_KERNEL);
++ if (!rx_priv)
++ return NULL;
++
++ rx_priv->usbdev = (struct aic_usb_dev *)arg;
++ aicwf_frame_queue_init(&rx_priv->rxq, 1, MAX_RXQLEN);
++ spin_lock_init(&rx_priv->rxqlock);
++ atomic_set(&rx_priv->rx_cnt, 0);
++
++ INIT_LIST_HEAD(&rx_priv->rxframes_freequeue);
++ spin_lock_init(&rx_priv->freeq_lock);
++ rx_priv->recv_frames = aicwf_rxframe_queue_init(&rx_priv->rxframes_freequeue, MAX_REORD_RXFRAME);
++ if (!rx_priv->recv_frames) {
++ txrx_err("no enough buffer for free recv frame queue!\n");
++ kfree(rx_priv);
++ return NULL;
++ }
++ spin_lock_init(&rx_priv->stas_reord_lock);
++ INIT_LIST_HEAD(&rx_priv->stas_reord_list);
++
++ return rx_priv;
++}
++
++
++static void aicwf_recvframe_queue_deinit(struct list_head *q)
++{
++ struct recv_msdu *req, *next;
++
++ list_for_each_entry_safe(req, next, q, rxframe_list) {
++ list_del_init(&req->rxframe_list);
++ }
++}
++
++void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv)
++{
++ //struct reord_ctrl_info *reord_info, *tmp;
++ aicwf_frame_queue_flush(&rx_priv->rxq);
++ aicwf_recvframe_queue_deinit(&rx_priv->rxframes_freequeue);
++ if (rx_priv->recv_frames)
++ vfree(rx_priv->recv_frames);
++
++ if (rx_priv->usbdev->bus_if->busrx_thread) {
++ complete_all(&rx_priv->usbdev->bus_if->busrx_trgg);
++ send_sig(SIGTERM, rx_priv->usbdev->bus_if->busrx_thread, 1);
++ kthread_stop(rx_priv->usbdev->bus_if->busrx_thread);
++ rx_priv->usbdev->bus_if->busrx_thread = NULL;
++ }
++
++ kfree(rx_priv);
++ rx_priv = NULL;
++}
++
++bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt)
++{
++ return aicwf_frame_enq(dev, q, pkt, 0);
++}
++
++
++void aicwf_dev_skb_free(struct sk_buff *skb)
++{
++ if (!skb)
++ return;
++
++ dev_kfree_skb_any(skb);
++}
++
++static struct sk_buff *aicwf_frame_queue_penq(struct frame_queue *pq, int prio, struct sk_buff *p)
++{
++ struct sk_buff_head *q;
++
++ if (pq->queuelist[prio].qlen >= pq->qmax)
++ return NULL;
++
++ q = &pq->queuelist[prio];
++ __skb_queue_tail(q, p);
++ pq->qcnt++;
++ if (pq->hi_prio < prio)
++ pq->hi_prio = (u16)prio;
++
++ return p;
++}
++
++void aicwf_frame_queue_flush(struct frame_queue *pq)
++{
++ int prio;
++ struct sk_buff_head *q;
++ struct sk_buff *p, *next;
++
++ for (prio = 0; prio < pq->num_prio; prio++)
++ {
++ q = &pq->queuelist[prio];
++ skb_queue_walk_safe(q, p, next) {
++ skb_unlink(p, q);
++ aicwf_dev_skb_free(p);
++ pq->qcnt--;
++ }
++ }
++}
++
++void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len)
++{
++ int prio;
++
++ memset(pq, 0, offsetof(struct frame_queue, queuelist) + (sizeof(struct sk_buff_head) * num_prio));
++ pq->num_prio = (u16)num_prio;
++ pq->qmax = (u16)max_len;
++
++ for (prio = 0; prio < num_prio; prio++) {
++ skb_queue_head_init(&pq->queuelist[prio]);
++ }
++}
++
++struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out)
++{
++ int prio;
++
++ if (pq->qcnt == 0)
++ return NULL;
++
++ for (prio = 0; prio < pq->hi_prio; prio++)
++ if (!skb_queue_empty(&pq->queuelist[prio]))
++ break;
++
++ if (prio_out)
++ *prio_out = prio;
++
++ return skb_peek_tail(&pq->queuelist[prio]);
++}
++
++bool aicwf_is_framequeue_empty(struct frame_queue *pq)
++{
++ int prio, len = 0;
++
++ for (prio = 0; prio <= pq->hi_prio; prio++)
++ len += pq->queuelist[prio].qlen;
++
++ if(len > 0)
++ return false;
++ else
++ return true;
++}
++
++struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq)
++{
++ struct sk_buff_head *q;
++ struct sk_buff *p;
++ int prio;
++
++ if (pq->qcnt == 0)
++ return NULL;
++
++ while ((prio = pq->hi_prio) > 0 && skb_queue_empty(&pq->queuelist[prio]))
++ pq->hi_prio--;
++
++ q = &pq->queuelist[prio];
++ p = __skb_dequeue(q);
++ if (p == NULL)
++ return NULL;
++
++ pq->qcnt--;
++
++ return p;
++}
++
++static struct sk_buff *aicwf_skb_dequeue_tail(struct frame_queue *pq, int prio)
++{
++ struct sk_buff_head *q = &pq->queuelist[prio];
++ struct sk_buff *p = skb_dequeue_tail(q);
++
++ if (!p)
++ return NULL;
++
++ pq->qcnt--;
++ return p;
++}
++
++bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio)
++{
++ struct sk_buff *p = NULL;
++ int prio_modified = -1;
++
++ if (q->queuelist[prio].qlen < q->qmax && q->qcnt < q->qmax) {
++ aicwf_frame_queue_penq(q, prio, pkt);
++ return true;
++ }
++ if (q->queuelist[prio].qlen >= q->qmax) {
++ prio_modified = prio;
++ } else if (q->qcnt >= q->qmax) {
++ p = aicwf_frame_queue_peek_tail(q, &prio_modified);
++ if (prio_modified > prio)
++ return false;
++ }
++
++ if (prio_modified >= 0) {
++ if (prio_modified == prio)
++ return false;
++
++ p = aicwf_skb_dequeue_tail(q, prio_modified);
++ aicwf_dev_skb_free(p);
++
++ p = aicwf_frame_queue_penq(q, prio_modified, pkt);
++ if (p == NULL)
++ txrx_err("failed\n");
++ }
++
++ return p != NULL;
++}
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aic_txrxif.h
+@@ -0,0 +1,216 @@
++/**
++ * aic_txrxif.h
++ *
++ * bus function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#ifndef _AICWF_TXRXIF_H_
++#define _AICWF_TXRXIF_H_
++
++#include <linux/skbuff.h>
++#include <linux/sched.h>
++//#include "aicsdio.h"
++#include "aicwf_usb.h"
++
++#define CMD_BUF_MAX 1536
++#define DATA_BUF_MAX 2048
++#define TXPKT_BLOCKSIZE 512
++#define MAX_AGGR_TXPKT_LEN (1536*32)
++#define CMD_TX_TIMEOUT 5000
++#define TX_ALIGNMENT 4
++
++#define RX_HWHRD_LEN 60 //58->60, word aligned
++#define CCMP_OR_WEP_INFO 8
++#define MAX_RXQLEN 2000
++#define RX_ALIGNMENT 4
++
++#define DEBUG_ERROR_LEVEL 0
++#define DEBUG_DEBUG_LEVEL 1
++#define DEBUG_INFO_LEVEL 2
++
++#define DBG_LEVEL DEBUG_DEBUG_LEVEL
++
++#define txrx_err(fmt, ...) pr_err("txrx_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
++#define sdio_err(fmt, ...) pr_err("sdio_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
++#define usb_err(fmt, ...) pr_err("usb_err:<%s,%d>: " fmt, __func__, __LINE__, ##__VA_ARGS__)
++#if DBG_LEVEL >= DEBUG_DEBUG_LEVEL
++#define sdio_dbg(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__)
++#define usb_dbg(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__)
++#else
++#define sdio_dbg(fmt, ...)
++#define usb_dbg(fmt, ...)
++#endif
++#if DBG_LEVEL >= DEBUG_INFO_LEVEL
++#define sdio_info(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__)
++#define usb_info(fmt, ...) printk("aicbt: " fmt, ##__VA_ARGS__)
++#else
++#define sdio_info(fmt, ...)
++#define usb_info(fmt, ...)
++#endif
++
++enum aicwf_bus_state {
++ BUS_DOWN_ST,
++ BUS_UP_ST
++};
++
++struct aicwf_bus_ops {
++ int (*start) (struct device * dev);
++ void (*stop) (struct device * dev);
++ int (*txdata) (struct device * dev, struct sk_buff * skb);
++ int (*txmsg) (struct device * dev, u8 * msg, uint len);
++};
++
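++/* Multi-priority frame queue: one sk_buff_head per priority level (higher
++ * index = higher priority), hi_prio caches the highest used priority and
++ * qmax bounds both the per-priority and the total depth. */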
++struct frame_queue {
++ u16 num_prio;
++ u16 hi_prio;
++ u16 qmax; /* max number of queued frames */
++ u16 qcnt;
++ struct sk_buff_head queuelist[8];
++};
++
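++/* Bus abstraction shared by the SDIO and USB back-ends: ops provides
++ * start/stop/txdata/txmsg, cmd_buf holds the serialized command, and the
++ * dedicated tx/rx threads are kicked through the bustx_trgg/busrx_trgg
++ * completions. */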
++struct aicwf_bus {
++ union {
++ struct aic_sdio_dev *sdio;
++ struct aic_usb_dev *usb;
++ } bus_priv;
++ struct device *dev;
++ struct aicwf_bus_ops *ops;
++ enum aicwf_bus_state state;
++ u8 *cmd_buf;
++ struct completion bustx_trgg;
++ struct completion busrx_trgg;
++ struct task_struct *bustx_thread;
++ struct task_struct *busrx_thread;
++};
++
++struct aicwf_tx_priv {
++#ifdef AICWF_SDIO_SUPPORT
++ struct aic_sdio_dev *sdiodev;
++ int fw_avail_bufcnt;
++ //for cmd tx
++ u8 *cmd_buf;
++ uint cmd_len;
++ bool cmd_txstate;
++ bool cmd_tx_succ;
++ struct semaphore cmd_txsema;
++ wait_queue_head_t cmd_txdone_wait;
++ //for data tx
++ atomic_t tx_pktcnt;
++
++ struct frame_queue txq;
++ spinlock_t txqlock;
++ struct semaphore txctl_sema;
++#endif
++#ifdef AICWF_USB_SUPPORT
++ struct aic_usb_dev *usbdev;
++#endif
++ struct sk_buff *aggr_buf;
++ atomic_t aggr_count;
++ u8 *head;
++ u8 *tail;
++};
++
++
++#define MAX_REORD_RXFRAME 250
++#define REORDER_UPDATE_TIME 50
++#define AICWF_REORDER_WINSIZE 64
++#define SN_LESS(a, b) (((a-b)&0x800)!=0)
++#define SN_EQUAL(a, b) (a == b)
++
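++/* Per-TID rx reorder control: ind_sn is the next sequence number to
++ * indicate, out-of-order frames are kept on reord_list and flushed from
++ * reord_timer/reord_timer_work. */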
++struct reord_ctrl {
++ struct aicwf_rx_priv *rx_priv;
++ u8 enable;
++ u16 ind_sn;
++ u8 wsize_b;
++ spinlock_t reord_list_lock;
++ struct list_head reord_list;
++ struct timer_list reord_timer;
++ struct work_struct reord_timer_work;
++};
++
++struct reord_ctrl_info {
++ u8 mac_addr[6];
++ struct reord_ctrl preorder_ctrl[8];
++ struct list_head list;
++};
++
++struct recv_msdu {
++ struct sk_buff *pkt;
++ u8 tid;
++ u16 seq_num;
++ uint len;
++ u8 *rx_data;
++ //for pending rx reorder list
++ struct list_head reord_pending_list;
++ //node for the total frame list: dequeued when an rx frame arrives from the bus interface, enqueued again when the frame is submitted to the network stack
++ struct list_head rxframe_list;
++ struct reord_ctrl *preorder_ctrl;
++};
++
++struct aicwf_rx_priv {
++ struct aic_usb_dev *usbdev;
++ void *rwnx_vif;
++ atomic_t rx_cnt;
++ u32 data_len;
++ spinlock_t rxqlock;
++ struct frame_queue rxq;
++
++ spinlock_t freeq_lock;
++ struct list_head rxframes_freequeue;
++ struct list_head stas_reord_list;
++ spinlock_t stas_reord_lock;
++ struct recv_msdu *recv_frames;
++};
++
++static inline int aicwf_bus_start(struct aicwf_bus *bus)
++{
++ return bus->ops->start(bus->dev);
++}
++
++static inline void aicwf_bus_stop(struct aicwf_bus *bus)
++{
++ bus->ops->stop(bus->dev);
++}
++
++static inline int aicwf_bus_txdata(struct aicwf_bus *bus, struct sk_buff *skb)
++{
++ return bus->ops->txdata(bus->dev, skb);
++}
++
++static inline int aicwf_bus_txmsg(struct aicwf_bus *bus, u8 *msg, uint len)
++{
++ return bus->ops->txmsg(bus->dev, msg, len);
++}
++
++static inline void aicwf_sched_timeout(u32 millisec)
++{
++ ulong timeout = 0, expires = 0;
++ expires = jiffies + msecs_to_jiffies(millisec);
++ timeout = msecs_to_jiffies(millisec); /* schedule_timeout() expects jiffies */
++
++ while (timeout) {
++ timeout = schedule_timeout(timeout);
++ if (time_after(jiffies, expires))
++ break;
++ }
++}
++
++int aicwf_bus_init(uint bus_hdrlen, struct device *dev);
++void aicwf_bus_deinit(struct device *dev);
++void aicwf_tx_deinit(struct aicwf_tx_priv* tx_priv);
++void aicwf_rx_deinit(struct aicwf_rx_priv* rx_priv);
++struct aicwf_tx_priv* aicwf_tx_init(void *arg);
++struct aicwf_rx_priv* aicwf_rx_init(void *arg);
++void aicwf_frame_queue_init(struct frame_queue *pq, int num_prio, int max_len);
++void aicwf_frame_queue_flush(struct frame_queue *pq);
++bool aicwf_frame_enq(struct device *dev, struct frame_queue *q, struct sk_buff *pkt, int prio);
++bool aicwf_rxframe_enqueue(struct device *dev, struct frame_queue *q, struct sk_buff *pkt);
++bool aicwf_is_framequeue_empty(struct frame_queue *pq);
++void aicwf_frame_tx(void *dev, struct sk_buff *skb);
++void aicwf_dev_skb_free(struct sk_buff *skb);
++struct sk_buff *aicwf_frame_dequeue(struct frame_queue *pq);
++struct sk_buff *aicwf_frame_queue_peek_tail(struct frame_queue *pq, int *prio_out);
++
++#endif /* _AICWF_TXRXIF_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth.c
+@@ -0,0 +1,1125 @@
++#include <linux/version.h>
++#include <linux/vmalloc.h>
++#include "aicbluetooth_cmds.h"
++#include "aicwf_usb.h"
++#include "aic_txrxif.h"
++#include "md5.h"
++#ifdef CONFIG_USE_FW_REQUEST
++#include <linux/firmware.h>
++#endif
++
++//Parser state
++#define INIT 0
++#define CMD 1
++#define PRINT 2
++#define GET_VALUE 3
++
++typedef struct
++{
++ int8_t enable;
++ int8_t dsss;
++ int8_t ofdmlowrate_2g4;
++ int8_t ofdm64qam_2g4;
++ int8_t ofdm256qam_2g4;
++ int8_t ofdm1024qam_2g4;
++ int8_t ofdmlowrate_5g;
++ int8_t ofdm64qam_5g;
++ int8_t ofdm256qam_5g;
++ int8_t ofdm1024qam_5g;
++} txpwr_idx_conf_t;
++
++
++txpwr_idx_conf_t userconfig_txpwr_idx = {
++ .enable = 1,
++ .dsss = 9,
++ .ofdmlowrate_2g4 = 8,
++ .ofdm64qam_2g4 = 8,
++ .ofdm256qam_2g4 = 8,
++ .ofdm1024qam_2g4 = 8,
++ .ofdmlowrate_5g = 11,
++ .ofdm64qam_5g = 10,
++ .ofdm256qam_5g = 9,
++ .ofdm1024qam_5g = 9
++
++};
++
++typedef struct
++{
++ int8_t enable;
++ int8_t chan_1_4;
++ int8_t chan_5_9;
++ int8_t chan_10_13;
++ int8_t chan_36_64;
++ int8_t chan_100_120;
++ int8_t chan_122_140;
++ int8_t chan_142_165;
++} txpwr_ofst_conf_t;
++
++txpwr_ofst_conf_t userconfig_txpwr_ofst = {
++ .enable = 1,
++ .chan_1_4 = 0,
++ .chan_5_9 = 0,
++ .chan_10_13 = 0,
++ .chan_36_64 = 0,
++ .chan_100_120 = 0,
++ .chan_122_140 = 0,
++ .chan_142_165 = 0
++};
++
++typedef struct
++{
++ int8_t enable;
++ int8_t xtal_cap;
++ int8_t xtal_cap_fine;
++} xtal_cap_conf_t;
++
++
++xtal_cap_conf_t userconfig_xtal_cap = {
++ .enable = 0,
++ .xtal_cap = 24,
++ .xtal_cap_fine = 31,
++};
++
++
++struct aicbt_patch_table {
++ char *name;
++ uint32_t type;
++ uint32_t *data;
++ uint32_t len;
++ struct aicbt_patch_table *next;
++};
++
++struct aicbt_info_t {
++ uint32_t btmode;
++ uint32_t btport;
++ uint32_t uart_baud;
++ uint32_t uart_flowctrl;
++ uint32_t lpm_enable;
++ uint32_t txpwr_lvl;
++};
++
++struct aicbsp_info_t {
++ int hwinfo;
++ uint32_t cpmode;
++};
++
++#define AICBT_PT_TAG "AICBT_PT_TAG"
++#define AICBT_PT_TRAP 0x01
++#define AICBT_PT_B4 0x02
++#define AICBT_PT_BTMODE 0x03
++#define AICBT_PT_PWRON 0x04
++#define AICBT_PT_AF 0x05
++
++enum aicbt_btport_type {
++ AICBT_BTPORT_NULL,
++ AICBT_BTPORT_MB,
++ AICBT_BTPORT_UART,
++};
++
++/* btmode
++ * used to force the bt mode; if not AICBSP_MODE_NULL,
++ * efuse valid and vendor_info will be invalid, even if they have been set valid
++ */
++enum aicbt_btmode_type {
++ AICBT_BTMODE_BT_ONLY_SW = 0x0, // bt only mode with switch
++ AICBT_BTMODE_BT_WIFI_COMBO, // wifi/bt combo mode
++ AICBT_BTMODE_BT_ONLY, // bt only mode without switch
++ AICBT_BTMODE_BT_ONLY_TEST, // bt only test mode
++ AICBT_BTMODE_BT_WIFI_COMBO_TEST, // wifi/bt combo test mode
++ AICBT_BTMODE_BT_ONLY_COANT, // bt only mode with no external switch
++ AICBT_MODE_NULL = 0xFF, // invalid value
++};
++
++/* uart_baud
++ * used to configure the uart baud rate when btport is set to uart,
++ * otherwise meaningless
++ */
++enum aicbt_uart_baud_type {
++ AICBT_UART_BAUD_115200 = 115200,
++ AICBT_UART_BAUD_921600 = 921600,
++ AICBT_UART_BAUD_1_5M = 1500000,
++ AICBT_UART_BAUD_3_25M = 3250000,
++};
++
++enum aicbt_uart_flowctrl_type {
++ AICBT_UART_FLOWCTRL_DISABLE = 0x0, // uart without flow ctrl
++ AICBT_UART_FLOWCTRL_ENABLE, // uart with flow ctrl
++};
++
++enum aicbsp_cpmode_type {
++ AICBSP_CPMODE_WORK,
++ AICBSP_CPMODE_TEST,
++};
++#define AIC_M2D_OTA_INFO_ADDR 0x88000020
++#define AIC_M2D_OTA_DATA_ADDR 0x88000040
++#define AIC_M2D_OTA_FLASH_ADDR 0x08004000
++#define AIC_M2D_OTA_CODE_START_ADDR 0x08004188
++#define AIC_M2D_OTA_VER_ADDR 0x0800418c
++/// aic bt tx pwr lvl, lsb->msb: first byte min pwr lvl, second byte max pwr lvl;
++/// pwr lvl: 20 (min), 30, 40, 50, 60 (max)
++#define AICBT_TXPWR_LVL 0x00006020
++#define AICBT_TXPWR_LVL_D80 0x00006F2F
++
++#define AICBSP_MODE_BT_HCI_MODE_NULL 0
++#define AICBSP_MODE_BT_HCI_MODE_MB 1
++#define AICBSP_MODE_BT_HCI_MODE_UART 2
++
++#define AICBSP_HWINFO_DEFAULT (-1)
++#define AICBSP_CPMODE_DEFAULT AICBSP_CPMODE_WORK
++
++#define AICBT_BTMODE_DEFAULT AICBT_BTMODE_BT_ONLY
++#define AICBT_BTPORT_DEFAULT AICBT_BTPORT_MB
++#define AICBT_UART_BAUD_DEFAULT AICBT_UART_BAUD_1_5M
++#define AICBT_UART_FC_DEFAULT AICBT_UART_FLOWCTRL_ENABLE
++#define AICBT_LPM_ENABLE_DEFAULT 0
++#define AICBT_TXPWR_LVL_DEFAULT AICBT_TXPWR_LVL
++
++#define AIC_HW_INFO 0x21
++
++#define FW_PATH_MAX 200
++#if defined(CONFIG_PLATFORM_UBUNTU)
++static const char* aic_default_fw_path = "/lib/firmware";
++#else
++static const char* aic_default_fw_path = "/lib/firmware/aic8800";
++#endif
++char aic_fw_path[FW_PATH_MAX];
++module_param_string(aic_fw_path, aic_fw_path, FW_PATH_MAX, 0660);
++#ifdef CONFIG_M2D_OTA_AUTO_SUPPORT
++char saved_sdk_ver[64];
++module_param_string(saved_sdk_ver, saved_sdk_ver,64, 0660);
++#endif
++
++
++int aic_bt_platform_init(struct aic_usb_dev *usbdev)
++{
++ rwnx_cmd_mgr_init(&usbdev->cmd_mgr);
++ usbdev->cmd_mgr.usbdev = (void *)usbdev;
++ return 0;
++
++}
++
++void aic_bt_platform_deinit(struct aic_usb_dev *usbdev)
++{
++
++}
++
++#define MD5(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15]
++#define MD5PINRT "file md5:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\r\n"
++
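++/* Read a firmware image into a vmalloc'd buffer, either via the kernel
++ * firmware loader (CONFIG_USE_FW_REQUEST) or by opening the file under
++ * aic_fw_path / the default firmware directory, and log its MD5; returns
++ * the image size in bytes or -1 on error. */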
++static int aic_load_firmware(u32 ** fw_buf, const char *name, struct device *device)
++{
++
++#ifdef CONFIG_USE_FW_REQUEST
++ const struct firmware *fw = NULL;
++ u32 *dst = NULL;
++ void *buffer=NULL;
++ MD5_CTX md5;
++ unsigned char decrypt[16];
++ int size = 0;
++ int ret = 0;
++
++ printk("%s: request firmware = %s \n", __func__ ,name);
++
++
++ ret = request_firmware(&fw, name, NULL);
++
++ if (ret < 0) {
++ printk("Load %s fail\n", name);
++ release_firmware(fw);
++ return -1;
++ }
++
++ size = fw->size;
++ dst = (u32 *)fw->data;
++
++ if (size <= 0) {
++ printk("wrong size of firmware file\n");
++ release_firmware(fw);
++ return -1;
++ }
++
++
++ buffer = vmalloc(size);
++ if (!buffer) {
++ release_firmware(fw);
++ return -1;
++ }
++ memcpy(buffer, dst, size);
++
++ *fw_buf = buffer;
++
++ MD5Init(&md5);
++ MD5Update(&md5, (unsigned char *)buffer, size);
++ MD5Final(&md5, decrypt);
++ printk(MD5PINRT, MD5(decrypt));
++
++ release_firmware(fw);
++
++ return size;
++#else
++ void *buffer=NULL;
++ char *path=NULL;
++ struct file *fp=NULL;
++ int size = 0, len=0, i=0;
++ ssize_t rdlen=0;
++ u32 *src=NULL, *dst = NULL;
++ MD5_CTX md5;
++ unsigned char decrypt[16];
++#if defined(CONFIG_PLATFORM_UBUNTU)
++ struct aicwf_bus *bus_if = dev_get_drvdata(device);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++#endif
++
++ /* get the firmware path */
++ path = __getname();
++ if (!path){
++ *fw_buf=NULL;
++ return -1;
++ }
++
++ if (strlen(aic_fw_path) > 0) {
++ printk("%s: use customer define fw_path\n", __func__);
++ len = snprintf(path, FW_PATH_MAX, "%s/%s", aic_fw_path, name);
++ } else {
++ #if defined(CONFIG_PLATFORM_UBUNTU)
++ if (usb_dev->chipid == PRODUCT_ID_AIC8800) {
++ len = snprintf(path, FW_PATH_MAX, "%s/%s/%s",aic_default_fw_path, "aic8800", name);
++ } else if (usb_dev->chipid == PRODUCT_ID_AIC8800D80) {
++ len = snprintf(path, FW_PATH_MAX, "%s/%s/%s",aic_default_fw_path, "aic8800D80", name);
++ } else {
++ printk("%s unknown chipid %d\n", __func__, usb_dev->chipid);
++ }
++ #else
++ len = snprintf(path, FW_PATH_MAX, "%s/%s",aic_default_fw_path, name);
++ #endif
++ }
++
++ if (len >= FW_PATH_MAX) {
++ printk("%s: %s file's path too long\n", __func__, name);
++ *fw_buf=NULL;
++ __putname(path);
++ return -1;
++ }
++
++ printk("%s :firmware path = %s \n", __func__ ,path);
++
++
++ /* open the firmware file */
++ fp=filp_open(path, O_RDONLY, 0);
++ if(IS_ERR(fp) || (!fp)){
++ printk("%s: %s file failed to open\n", __func__, name);
++ if(IS_ERR(fp))
++ printk("is_Err\n");
++ if((!fp))
++ printk("null\n");
++ *fw_buf=NULL;
++ __putname(path);
++ fp=NULL;
++ return -1;
++ }
++
++ size = i_size_read(file_inode(fp));
++ if(size<=0){
++ printk("%s: %s file size invalid %d\n", __func__, name, size);
++ *fw_buf=NULL;
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ return -1;
++}
++
++ /* start to read from firmware file */
++ buffer = vmalloc(size);
++ if (!buffer) {
++ *fw_buf = NULL;
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ return -1;
++ }
++ memset(buffer, 0, size);
++
++
++ #if LINUX_VERSION_CODE > KERNEL_VERSION(4, 13, 16)
++ rdlen = kernel_read(fp, buffer, size, &fp->f_pos);
++ #else
++ rdlen = kernel_read(fp, fp->f_pos, buffer, size);
++ #endif
++
++ if(size != rdlen){
++ printk("%s: %s file rdlen invalid %d %d\n", __func__, name, (int)rdlen, size);
++ *fw_buf=NULL;
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ vfree(buffer);
++ buffer=NULL;
++ return -1;
++ }
++ if(rdlen > 0){
++ fp->f_pos += rdlen;
++ //printk("f_pos=%d\n", (int)fp->f_pos);
++ }
++
++
++ /*start to transform the data format*/
++ src = (u32*)buffer;
++ //printk("malloc dst\n");
++ dst = (u32 *)vmalloc(size);
++ if (!dst) {
++ *fw_buf = NULL;
++ __putname(path);
++ filp_close(fp, NULL);
++ fp = NULL;
++ vfree(buffer);
++ buffer = NULL;
++ return -1;
++ }
++ memset(dst, 0, size);
++
++ for(i=0;i<(size/4);i++){
++ dst[i] = src[i];
++ }
++
++ __putname(path);
++ filp_close(fp,NULL);
++ fp=NULL;
++ vfree(buffer);
++ buffer=NULL;
++ *fw_buf = dst;
++
++ MD5Init(&md5);
++ MD5Update(&md5, (unsigned char *)dst, size);
++ MD5Final(&md5, decrypt);
++
++ printk(MD5PINRT, MD5(decrypt));
++
++ return size;
++#endif
++
++}
++
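++/* Load a firmware image from the filesystem and copy it to the device at
++ * fw_addr, pushing 1 KB DBG_MEM_BLOCK_WRITE requests plus a final partial
++ * block. */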
++int rwnx_plat_bin_fw_upload_android(struct aic_usb_dev *usbdev, u32 fw_addr,
++ char *filename)
++{
++ struct device *dev = usbdev->dev;
++ unsigned int i=0;
++ int size;
++ u32 *dst=NULL;
++ int err=0;
++
++ /* load aic firmware */
++ size = aic_load_firmware(&dst, filename, dev);
++ if(size<=0){
++ printk("wrong size of firmware file\n");
++ vfree(dst);
++ dst = NULL;
++ return -1;
++ }
++
++ /* Copy the file on the Embedded side */
++ printk("### Upload %s firmware, @ = %x size=%d\n", filename, fw_addr, size);
++
++ if (size > 1024) {// > 1KB data
++ for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, fw_addr + i, 1024, dst + i / 4);
++ if (err) {
++ printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err);
++ break;
++ }
++ }
++ }
++
++ if (!err && (i < size)) {// <1KB data
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, fw_addr + i, size - i, dst + i / 4);
++ if (err) {
++ printk("bin upload fail: %x, err:%d\r\n", fw_addr + i, err);
++ }
++ }
++
++ if (dst) {
++ vfree(dst);
++ dst = NULL;
++ }
++
++ printk("fw download complete\n\n");
++
++ return err;
++}
++
++extern int testmode;
++#ifdef CONFIG_M2D_OTA_AUTO_SUPPORT
++int rwnx_plat_m2d_flash_ota_android(struct aic_usb_dev *usbdev, char *filename)
++{
++ struct device *dev = usbdev->dev;
++ unsigned int i=0;
++ int size;
++ u32 *dst=NULL;
++ int err=0;
++ int ret;
++ u8 bond_id;
++ const u32 mem_addr = 0x40500000;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++
++ ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ printk("m2d %x rd fail: %d\n", mem_addr, ret);
++ return ret;
++ }
++ bond_id = (u8)(rd_mem_addr_cfm.memdata >> 24);
++ printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ if (bond_id & (1<<1)) {
++ //flash is invalid
++ printk("m2d flash is invalid\n");
++ return -1;
++ }
++
++ /* load aic firmware */
++ size = aic_load_firmware(&dst, filename, dev);
++ if(size<=0){
++ printk("wrong size of m2d file\n");
++ vfree(dst);
++ dst = NULL;
++ return -1;
++ }
++
++ /* Copy the file on the Embedded side */
++ printk("### Upload m2d %s flash, size=%d\n", filename, size);
++
++ /*send info first*/
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_INFO_ADDR, 4, (u32 *)&size);
++
++ /*send data first*/
++ if (size > 1024) {// > 1KB data
++ for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, 1024, dst + i / 4);
++ if (err) {
++ printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err);
++ break;
++ }
++ }
++ }
++
++ if (!err && (i < size)) {// <1KB data
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, size - i, dst + i / 4);
++ if (err) {
++ printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err);
++ }
++ }
++
++ if (dst) {
++ vfree(dst);
++ dst = NULL;
++ }
++ testmode = FW_NORMAL_MODE;
++
++ printk("m2d flash update complete\n\n");
++
++ return err;
++}
++
++int rwnx_plat_m2d_flash_ota_check(struct aic_usb_dev *usbdev, char *filename)
++{
++ struct device *dev = usbdev->dev;
++ unsigned int i=0,j=0;
++ int size;
++ u32 *dst=NULL;
++ int err=0;
++ int ret=0;
++ u8 bond_id;
++ const u32 mem_addr = 0x40500000;
++ const u32 mem_addr_code_start = AIC_M2D_OTA_CODE_START_ADDR;
++ const u32 mem_addr_sdk_ver = AIC_M2D_OTA_VER_ADDR;
++ const u32 driver_code_start_idx = (AIC_M2D_OTA_CODE_START_ADDR-AIC_M2D_OTA_FLASH_ADDR)/4;
++ const u32 driver_sdk_ver_idx = (AIC_M2D_OTA_VER_ADDR-AIC_M2D_OTA_FLASH_ADDR)/4;
++ u32 driver_sdk_ver_addr_idx = 0;
++ u32 code_start_addr = 0xffffffff;
++ u32 sdk_ver_addr = 0xffffffff;
++ u32 drv_code_start_addr = 0xffffffff;
++ u32 drv_sdk_ver_addr = 0xffffffff;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++ char m2d_sdk_ver[64];
++ char flash_sdk_ver[64];
++ u32 flash_ver[16];
++ u32 ota_ver[16];
++
++ ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ printk("m2d %x rd fail: %d\n", mem_addr, ret);
++ return ret;
++ }
++ bond_id = (u8)(rd_mem_addr_cfm.memdata >> 24);
++ printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ if (bond_id & (1<<1)) {
++ //flash is invalid
++ printk("m2d flash is invalid\n");
++ return -1;
++ }
++ ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr_code_start, &rd_mem_addr_cfm);
++ if (ret){
++ printk("mem_addr_code_start %x rd fail: %d\n", mem_addr_code_start, ret);
++ return ret;
++ }
++ code_start_addr = rd_mem_addr_cfm.memdata;
++
++ ret = rwnx_send_dbg_mem_read_req(usbdev, mem_addr_sdk_ver, &rd_mem_addr_cfm);
++ if (ret){
++ printk("mem_addr_sdk_ver %x rd fail: %d\n", mem_addr_code_start, ret);
++ return ret;
++ }
++ sdk_ver_addr = rd_mem_addr_cfm.memdata;
++ printk("code_start_addr: 0x%x, sdk_ver_addr: 0x%x\n", code_start_addr,sdk_ver_addr);
++
++ /* load aic firmware */
++ size = aic_load_firmware(&dst, filename, dev);
++ if(size<=0){
++ printk("wrong size of m2d file\n");
++ vfree(dst);
++ dst = NULL;
++ return -1;
++ }
++ if(code_start_addr == 0xffffffff && sdk_ver_addr == 0xffffffff) {
++ printk("########m2d flash old version , must be upgrade\n");
++ drv_code_start_addr = dst[driver_code_start_idx];
++ drv_sdk_ver_addr = dst[driver_sdk_ver_idx];
++
++ printk("drv_code_start_addr: 0x%x, drv_sdk_ver_addr: 0x%x\n", drv_code_start_addr,drv_sdk_ver_addr);
++
++ if(drv_sdk_ver_addr == 0xffffffff){
++ printk("########driver m2d_ota.bin is old ,not need upgrade\n");
++ return -1;
++ }
++
++ } else {
++ for(i=0;i<16;i++){
++ ret = rwnx_send_dbg_mem_read_req(usbdev, (sdk_ver_addr+i*4), &rd_mem_addr_cfm);
++ if (ret){
++ printk("mem_addr_sdk_ver %x rd fail: %d\n", mem_addr_code_start, ret);
++ return ret;
++ }
++ flash_ver[i] = rd_mem_addr_cfm.memdata;
++ }
++ memcpy((u8 *)flash_sdk_ver,(u8 *)flash_ver,64);
++ memcpy((u8 *)saved_sdk_ver,(u8 *)flash_sdk_ver,64);
++ printk("flash SDK Version: %s\r\n\r\n", flash_sdk_ver);
++
++ drv_code_start_addr = dst[driver_code_start_idx];
++ drv_sdk_ver_addr = dst[driver_sdk_ver_idx];
++
++ printk("drv_code_start_addr: 0x%x, drv_sdk_ver_addr: 0x%x\n", drv_code_start_addr,drv_sdk_ver_addr);
++
++ if(drv_sdk_ver_addr == 0xffffffff){
++ printk("########driver m2d_ota.bin is old ,not need upgrade\n");
++ return -1;
++ }
++
++ driver_sdk_ver_addr_idx = (drv_sdk_ver_addr-drv_code_start_addr)/4;
++ printk("driver_sdk_ver_addr_idx %d\n",driver_sdk_ver_addr_idx);
++
++ if (driver_sdk_ver_addr_idx){
++ for(j = 0; j < 16; j++){
++ ota_ver[j] = dst[driver_sdk_ver_addr_idx+j];
++ }
++ memcpy((u8 *)m2d_sdk_ver,(u8 *)ota_ver,64);
++ printk("m2d_ota SDK Version: %s\r\n\r\n", m2d_sdk_ver);
++ } else {
++ return -1;
++ }
++
++ if(!strcmp(m2d_sdk_ver,flash_sdk_ver)){
++ printk("######## m2d %s flash is not need upgrade\r\n", filename);
++ return -1;
++ }
++ }
++
++ /* Copy the file on the Embedded side */
++ printk("### Upload m2d %s flash, size=%d\n", filename, size);
++
++ /*send info first*/
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_INFO_ADDR, 4, (u32 *)&size);
++
++ /*send data first*/
++ if (size > 1024) {// > 1KB data
++ for (i = 0; i < (size - 1024); i += 1024) {//each time write 1KB
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, 1024, dst + i / 4);
++ if (err) {
++ printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err);
++ break;
++ }
++ }
++ }
++
++ if (!err && (i < size)) {// <1KB data
++ err = rwnx_send_dbg_mem_block_write_req(usbdev, AIC_M2D_OTA_DATA_ADDR, size - i, dst + i / 4);
++ if (err) {
++ printk("m2d upload fail: %x, err:%d\r\n", AIC_M2D_OTA_DATA_ADDR, err);
++ }
++ }
++
++ if (dst) {
++ vfree(dst);
++ dst = NULL;
++ }
++ testmode = FW_NORMAL_MODE;
++
++ printk("m2d flash update complete\n\n");
++
++ return err;
++}
++#endif//CONFIG_M2D_OTA_AUTO_SUPPORT
++
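++/* Parse a string of plain hex digits (no "0x" prefix) into a u32. */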
++uint32_t rwnx_atoli(char *value){
++ int len = 0;
++ int temp_len = 0;
++ int i = 0;
++ uint32_t result = 0;
++
++ temp_len = strlen(value);
++
++ for(i = 0;i < temp_len; i++){
++ if((value[i] >= 48 && value[i] <= 57) ||
++ (value[i] >= 65 && value[i] <= 70) ||
++ (value[i] >= 97 && value[i] <= 102)){
++ len++;
++ }
++ }
++
++ //printk("%s len:%d \r\n", __func__, len);
++
++ for(i = 0; i < len; i++){
++ result = result * 16;
++ if(value[i] >= 48 && value[i] <= 57){
++ result += value[i] - 48;
++ }else if(value[i] >= 65 && value[i] <= 70){
++ result += (value[i] - 65) + 10;
++ }else if(value[i] >= 97 && value[i] <= 102){
++ result += (value[i] - 97) + 10;
++ }
++ }
++
++ return result;
++}
++
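++/* Parse a decimal string into a signed 8-bit value; a leading '-' negates
++ * the result and any other non-digit character resets it to 0 and stops
++ * parsing. */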
++int8_t rwnx_atoi(char *value){
++ int len = 0;
++ int i = 0;
++ int8_t result = 0;
++ int8_t signal = 1;
++
++ len = strlen(value);
++ //printk("%s len:%d \r\n", __func__, len);
++
++ for(i = 0;i < len ;i++){
++ if(i == 0 && value[0] == '-'){
++ signal = -1;
++ continue;
++ }
++
++ result = result * 10;
++ if(value[i] >= 48 && value[i] <= 57){
++ result += value[i] - 48;
++ }else{
++ result = 0;
++ break;
++ }
++ }
++
++ result = result * signal;
++ //printk("%s result:%d \r\n", __func__, result);
++
++ return result;
++}
++
++void get_fw_path(char* fw_path){
++ if (strlen(aic_fw_path) > 0) {
++ memcpy(fw_path, aic_fw_path, strlen(aic_fw_path));
++ }else{
++ memcpy(fw_path, aic_default_fw_path, strlen(aic_default_fw_path));
++ }
++}
++
++void set_testmode(int val){
++ testmode = val;
++}
++
++int get_testmode(void){
++ return testmode;
++}
++
++int get_hardware_info(void){
++ return AIC_HW_INFO;
++}
++
++extern int adap_test;
++int get_adap_test(void){
++ return adap_test;
++}
++
++EXPORT_SYMBOL(get_fw_path);
++
++EXPORT_SYMBOL(get_testmode);
++
++EXPORT_SYMBOL(set_testmode);
++
++EXPORT_SYMBOL(get_hardware_info);
++
++EXPORT_SYMBOL(get_adap_test);
++
++
++void get_userconfig_xtal_cap(xtal_cap_conf_t *xtal_cap)
++{
++ xtal_cap->enable = userconfig_xtal_cap.enable;
++ xtal_cap->xtal_cap = userconfig_xtal_cap.xtal_cap;
++ xtal_cap->xtal_cap_fine = userconfig_xtal_cap.xtal_cap_fine;
++
++ printk("%s:enable :%d\r\n", __func__, xtal_cap->enable);
++ printk("%s:xtal_cap :%d\r\n", __func__, xtal_cap->xtal_cap);
++ printk("%s:xtal_cap_fine:%d\r\n", __func__, xtal_cap->xtal_cap_fine);
++}
++
++EXPORT_SYMBOL(get_userconfig_xtal_cap);
++
++void get_userconfig_txpwr_idx(txpwr_idx_conf_t *txpwr_idx){
++ txpwr_idx->enable = userconfig_txpwr_idx.enable;
++ txpwr_idx->dsss = userconfig_txpwr_idx.dsss;
++ txpwr_idx->ofdmlowrate_2g4 = userconfig_txpwr_idx.ofdmlowrate_2g4;
++ txpwr_idx->ofdm64qam_2g4 = userconfig_txpwr_idx.ofdm64qam_2g4;
++ txpwr_idx->ofdm256qam_2g4 = userconfig_txpwr_idx.ofdm256qam_2g4;
++ txpwr_idx->ofdm1024qam_2g4 = userconfig_txpwr_idx.ofdm1024qam_2g4;
++ txpwr_idx->ofdmlowrate_5g = userconfig_txpwr_idx.ofdmlowrate_5g;
++ txpwr_idx->ofdm64qam_5g = userconfig_txpwr_idx.ofdm64qam_5g;
++ txpwr_idx->ofdm256qam_5g = userconfig_txpwr_idx.ofdm256qam_5g;
++ txpwr_idx->ofdm1024qam_5g = userconfig_txpwr_idx.ofdm1024qam_5g;
++
++ printk("%s:enable:%d\r\n", __func__, txpwr_idx->enable);
++ printk("%s:dsss:%d\r\n", __func__, txpwr_idx->dsss);
++ printk("%s:ofdmlowrate_2g4:%d\r\n", __func__, txpwr_idx->ofdmlowrate_2g4);
++ printk("%s:ofdm64qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm64qam_2g4);
++ printk("%s:ofdm256qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm256qam_2g4);
++ printk("%s:ofdm1024qam_2g4:%d\r\n", __func__, txpwr_idx->ofdm1024qam_2g4);
++ printk("%s:ofdmlowrate_5g:%d\r\n", __func__, txpwr_idx->ofdmlowrate_5g);
++ printk("%s:ofdm64qam_5g:%d\r\n", __func__, txpwr_idx->ofdm64qam_5g);
++ printk("%s:ofdm256qam_5g:%d\r\n", __func__, txpwr_idx->ofdm256qam_5g);
++ printk("%s:ofdm1024qam_5g:%d\r\n", __func__, txpwr_idx->ofdm1024qam_5g);
++
++}
++
++EXPORT_SYMBOL(get_userconfig_txpwr_idx);
++
++void get_userconfig_txpwr_ofst(txpwr_ofst_conf_t *txpwr_ofst){
++ txpwr_ofst->enable = userconfig_txpwr_ofst.enable;
++ txpwr_ofst->chan_1_4 = userconfig_txpwr_ofst.chan_1_4;
++ txpwr_ofst->chan_5_9 = userconfig_txpwr_ofst.chan_5_9;
++ txpwr_ofst->chan_10_13 = userconfig_txpwr_ofst.chan_10_13;
++ txpwr_ofst->chan_36_64 = userconfig_txpwr_ofst.chan_36_64;
++ txpwr_ofst->chan_100_120 = userconfig_txpwr_ofst.chan_100_120;
++ txpwr_ofst->chan_122_140 = userconfig_txpwr_ofst.chan_122_140;
++ txpwr_ofst->chan_142_165 = userconfig_txpwr_ofst.chan_142_165;
++
++ printk("%s:ofst_enable:%d\r\n", __func__, txpwr_ofst->enable);
++ printk("%s:ofst_chan_1_4:%d\r\n", __func__, txpwr_ofst->chan_1_4);
++ printk("%s:ofst_chan_5_9:%d\r\n", __func__, txpwr_ofst->chan_5_9);
++ printk("%s:ofst_chan_10_13:%d\r\n", __func__, txpwr_ofst->chan_10_13);
++ printk("%s:ofst_chan_36_64:%d\r\n", __func__, txpwr_ofst->chan_36_64);
++ printk("%s:ofst_chan_100_120:%d\r\n", __func__, txpwr_ofst->chan_100_120);
++ printk("%s:ofst_chan_122_140:%d\r\n", __func__, txpwr_ofst->chan_122_140);
++ printk("%s:ofst_chan_142_165:%d\r\n", __func__, txpwr_ofst->chan_142_165);
++
++}
++
++EXPORT_SYMBOL(get_userconfig_txpwr_ofst);
++
++void rwnx_plat_userconfig_set_value(char *command, char *value){
++ //TODO send command
++ printk("%s:command=%s value=%s \r\n", __func__, command, value);
++ if(!strcmp(command, "enable")){
++ userconfig_txpwr_idx.enable = rwnx_atoi(value);
++ }else if(!strcmp(command, "dsss")){
++ userconfig_txpwr_idx.dsss = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdmlowrate_2g4")){
++ userconfig_txpwr_idx.ofdmlowrate_2g4 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdm64qam_2g4")){
++ userconfig_txpwr_idx.ofdm64qam_2g4 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdm256qam_2g4")){
++ userconfig_txpwr_idx.ofdm256qam_2g4 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdm1024qam_2g4")){
++ userconfig_txpwr_idx.ofdm1024qam_2g4 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdmlowrate_5g")){
++ userconfig_txpwr_idx.ofdmlowrate_5g = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdm64qam_5g")){
++ userconfig_txpwr_idx.ofdm64qam_5g = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdm256qam_5g")){
++ userconfig_txpwr_idx.ofdm256qam_5g = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofdm1024qam_5g")){
++ userconfig_txpwr_idx.ofdm1024qam_5g = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_enable")){
++ userconfig_txpwr_ofst.enable = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_1_4")){
++ userconfig_txpwr_ofst.chan_1_4 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_5_9")){
++ userconfig_txpwr_ofst.chan_5_9 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_10_13")){
++ userconfig_txpwr_ofst.chan_10_13 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_36_64")){
++ userconfig_txpwr_ofst.chan_36_64 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_100_120")){
++ userconfig_txpwr_ofst.chan_100_120 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_122_140")){
++ userconfig_txpwr_ofst.chan_122_140 = rwnx_atoi(value);
++ }else if(!strcmp(command, "ofst_chan_142_165")){
++ userconfig_txpwr_ofst.chan_142_165 = rwnx_atoi(value);
++ }else if(!strcmp(command, "xtal_enable")){
++ userconfig_xtal_cap.enable = rwnx_atoi(value);
++ }else if(!strcmp(command, "xtal_cap")){
++ userconfig_xtal_cap.xtal_cap = rwnx_atoi(value);
++ }else if(!strcmp(command, "xtal_cap_fine")){
++ userconfig_xtal_cap.xtal_cap_fine = rwnx_atoi(value);
++ }
++}
++
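++/* Line-oriented parser for the userconfig file: lines starting with '#' are
++ * echoed to the kernel log, "command=value" lines are applied through
++ * rwnx_plat_userconfig_set_value(), and '\r'/'\n' terminate an entry. */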
++void rwnx_plat_userconfig_parsing(char *buffer, int size){
++ int i = 0;
++ int parse_state = 0;
++ char command[30];
++ char value[100];
++ int char_counter = 0;
++
++ memset(command, 0, 30);
++ memset(value, 0, 100);
++
++ for(i = 0; i < size; i++){
++
++ //Send command or print nvram log when char is \r or \n
++ if(buffer[i] == 0x0a || buffer[i] == 0x0d){
++ if(command[0] != 0 && value[0] != 0){
++ if(parse_state == PRINT){
++ printk("%s:%s\r\n", __func__, value);
++ }else if(parse_state == GET_VALUE){
++ rwnx_plat_userconfig_set_value(command, value);
++ }
++ }
++ //Reset command value and char_counter
++ memset(command, 0, 30);
++ memset(value, 0, 100);
++ char_counter = 0;
++ parse_state = INIT;
++ continue;
++ }
++
++ //Switch parser state
++ if(parse_state == INIT){
++ if(buffer[i] == '#'){
++ parse_state = PRINT;
++ continue;
++ }else if(buffer[i] == 0x0a || buffer[i] == 0x0d){
++ parse_state = INIT;
++ continue;
++ }else{
++ parse_state = CMD;
++ }
++ }
++
++ //Fill data to command and value
++ if(parse_state == PRINT){
++ command[0] = 0x01;
++ value[char_counter] = buffer[i];
++ char_counter++;
++ }else if(parse_state == CMD){
++ if(command[0] != 0 && buffer[i] == '='){
++ parse_state = GET_VALUE;
++ char_counter = 0;
++ continue;
++ }
++ command[char_counter] = buffer[i];
++ char_counter++;
++ }else if(parse_state == GET_VALUE){
++ value[char_counter] = buffer[i];
++ char_counter++;
++ }
++ }
++
++
++}
++
++int rwnx_plat_userconfig_upload_android(struct aic_usb_dev *usbdev, char *filename){
++ int size;
++ u32 *dst=NULL;
++ struct device *dev = usbdev->dev;
++
++ printk("userconfig file path:%s \r\n", filename);
++
++ /* load aic firmware */
++ size = aic_load_firmware(&dst, filename, dev);
++ if(size <= 0){
++ printk("wrong size of firmware file\n");
++ vfree(dst);
++ dst = NULL;
++ return 0;
++ }
++
++ /* Copy the file on the Embedded side */
++ printk("### Upload %s userconfig, size=%d\n", filename, size);
++
++ rwnx_plat_userconfig_parsing((char *)dst, size);
++
++ if (dst) {
++ vfree(dst);
++ dst = NULL;
++ }
++
++ printk("userconfig download complete\n\n");
++ return 0;
++}
++
++
++
++int aicbt_patch_table_free(struct aicbt_patch_table **head)
++{
++ struct aicbt_patch_table *p = *head, *n = NULL;
++ while (p) {
++ n = p->next;
++ vfree(p->name);
++ vfree(p->data);
++ vfree(p);
++ p = n;
++ }
++ *head = NULL;
++ return 0;
++}
++
++struct aicbsp_info_t aicbsp_info = {
++ .hwinfo = AICBSP_HWINFO_DEFAULT,
++ .cpmode = AICBSP_CPMODE_DEFAULT,
++};
++
++static struct aicbt_info_t aicbt_info = {
++ .btmode = AICBT_BTMODE_DEFAULT,
++ .btport = AICBT_BTPORT_DEFAULT,
++ .uart_baud = AICBT_UART_BAUD_DEFAULT,
++ .uart_flowctrl = AICBT_UART_FC_DEFAULT,
++ .lpm_enable = AICBT_LPM_ENABLE_DEFAULT,
++ .txpwr_lvl = AICBT_TXPWR_LVL_DEFAULT,
++};
++
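++/* Walk the parsed patch table and program it into the chip: the BTMODE
++ * record is patched with the current aicbsp/aicbt settings before being
++ * sent, type 0x06 records only carry a version string, each record is sent
++ * as addr/value pairs via DBG_MEM_WRITE requests, and a short delay follows
++ * the power-on record. */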
++int aicbt_patch_table_load(struct aic_usb_dev *usbdev, struct aicbt_patch_table *_head)
++{
++ struct aicbt_patch_table *head, *p;
++ int ret = 0, i;
++ uint32_t *data = NULL;
++
++ head = _head;
++ if (usbdev->chipid == PRODUCT_ID_AIC8800D80) {
++ //aicbt_info.btmode = AICBT_BTMODE_BT_ONLY_COANT;
++ aicbt_info.txpwr_lvl = AICBT_TXPWR_LVL_D80;
++ }
++ for (p = head; p != NULL; p = p->next) {
++ data = p->data;
++ if(AICBT_PT_BTMODE == p->type){
++ *(data + 1) = aicbsp_info.hwinfo < 0;
++ *(data + 3) = aicbsp_info.hwinfo;
++ *(data + 5) = aicbsp_info.cpmode;
++
++ *(data + 7) = aicbt_info.btmode;
++ *(data + 9) = aicbt_info.btport;
++ *(data + 11) = aicbt_info.uart_baud;
++ *(data + 13) = aicbt_info.uart_flowctrl;
++ *(data + 15) = aicbt_info.lpm_enable;
++ *(data + 17) = aicbt_info.txpwr_lvl;
++
++ }
++ if (p->type == 0x06) {
++ char *data_s = (char *)p->data;
++ printk("patch version %s\n", data_s);
++ continue;
++ }
++ for (i = 0; i < p->len; i++) {
++ ret = rwnx_send_dbg_mem_write_req(usbdev, *data, *(data + 1));
++ if (ret != 0)
++ return ret;
++ data += 2;
++ }
++ if (p->type == AICBT_PT_PWRON)
++ udelay(500);
++ }
++ aicbt_patch_table_free(&head);
++ return 0;
++}
++
++
++int rwnx_plat_bin_fw_patch_table_upload_android(struct aic_usb_dev *usbdev, char *filename){
++ struct device *dev = usbdev->dev;
++ struct aicbt_patch_table *head = NULL;
++ struct aicbt_patch_table *new = NULL;
++ struct aicbt_patch_table *cur = NULL;
++ int size;
++ int ret = 0;
++ uint8_t *rawdata=NULL;
++ uint8_t *p = NULL;
++
++ /* load aic firmware */
++ size = aic_load_firmware((u32 **)&rawdata, filename, dev);
++
++ /* Copy the file on the Embedded side */
++ printk("### Upload %s fw_patch_table, size=%d\n", filename, size);
++
++ if (size <= 0) {
++ printk("wrong size of firmware file\n");
++ ret = -1;
++ goto err;
++ }
++
++ p = rawdata;
++
++ if (memcmp(p, AICBT_PT_TAG, sizeof(AICBT_PT_TAG) < 16 ? sizeof(AICBT_PT_TAG) : 16)) {
++ printk("TAG err\n");
++ ret = -1;
++ goto err;
++ }
++ p += 16;
++
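++ /* Patch table layout: a 16-byte AICBT_PT_TAG header followed by records of
++ * [16-byte name][u32 type][u32 len][len * 8 bytes of addr/value pairs]. */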
++ while (p - rawdata < size) {
++ //printk("size = %d p - rawdata = %d \r\n", size, p - rawdata);
++ new = (struct aicbt_patch_table *)vmalloc(sizeof(struct aicbt_patch_table));
++ memset(new, 0, sizeof(struct aicbt_patch_table));
++ if (head == NULL) {
++ head = new;
++ cur = new;
++ } else {
++ cur->next = new;
++ cur = cur->next;
++ }
++
++ cur->name = (char *)vmalloc(sizeof(char) * 16);
++ memset(cur->name, 0, sizeof(char) * 16);
++ memcpy(cur->name, p, 16);
++ p += 16;
++
++ cur->type = *(uint32_t *)p;
++ p += 4;
++
++ cur->len = *(uint32_t *)p;
++ p += 4;
++
++ if((cur->type ) >= 1000 || cur->len == 0) {//Temp Workaround
++ cur->len = 0;
++ }else{
++ cur->data = (uint32_t *)vmalloc(sizeof(uint8_t) * cur->len * 8);
++ memset(cur->data, 0, sizeof(uint8_t) * cur->len * 8);
++ memcpy(cur->data, p, cur->len * 8);
++ p += cur->len * 8;
++ }
++ }
++
++ vfree(rawdata);
++ aicbt_patch_table_load(usbdev, head);
++ printk("fw_patch_table download complete\n\n");
++
++ return ret;
++err:
++ //aicbt_patch_table_free(&head);
++
++ if (rawdata){
++ vfree(rawdata);
++ }
++ return ret;
++}
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth.h
+@@ -0,0 +1,22 @@
++#ifndef _AICBLUETOOTH_H
++#define _AICBLUETOOTH_H
++
++int aic_bt_platform_init(struct aic_usb_dev *usbdev);
++
++void aic_bt_platform_deinit(struct aic_usb_dev *usbdev);
++
++int rwnx_plat_bin_fw_upload_android(struct aic_usb_dev *usbdev, u32 fw_addr,
++ char *filename);
++
++int rwnx_plat_m2d_flash_ota_android(struct aic_usb_dev *usbdev, char *filename);
++
++int rwnx_plat_m2d_flash_ota_check(struct aic_usb_dev *usbdev, char *filename);
++
++int rwnx_plat_bin_fw_patch_table_upload_android(struct aic_usb_dev *usbdev, char *filename);
++
++int rwnx_plat_userconfig_upload_android(struct aic_usb_dev *usbdev, char *filename);
++
++int8_t rwnx_atoi(char *value);
++uint32_t rwnx_atoli(char *value);
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth_cmds.c
+@@ -0,0 +1,472 @@
++/**
++ ******************************************************************************
++ *
++ * rwnx_cmds.c
++ *
++ * Handles queueing (push to IPC, ack/cfm from IPC) of commands issued to
++ * LMAC FW
++ *
++ * Copyright (C) RivieraWaves 2014-2019
++ *
++ ******************************************************************************
++ */
++
++#include <linux/list.h>
++#if 0
++#include <linux/stddef.h>
++#endif
++#include <linux/version.h>
++#include "aicbluetooth_cmds.h"
++#include "aic_txrxif.h"
++#include "aicwf_usb.h"
++
++//extern int aicwf_sdio_writeb(struct aic_sdio_dev *sdiodev, uint regaddr, u8 val);
++
++static void cmd_dump(const struct rwnx_cmd *cmd)
++{
++ printk(KERN_CRIT "tkn[%d] flags:%04x result:%3d cmd:%4d - reqcfm(%4d)\n",
++ cmd->tkn, cmd->flags, cmd->result, cmd->id, cmd->reqid);
++}
++
++static void cmd_complete(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd)
++{
++ //printk("cmdcmp\n");
++ lockdep_assert_held(&cmd_mgr->lock);
++
++ list_del(&cmd->list);
++ cmd_mgr->queue_sz--;
++
++ cmd->flags |= RWNX_CMD_FLAG_DONE;
++ if (cmd->flags & RWNX_CMD_FLAG_NONBLOCK) {
++ kfree(cmd);
++ } else {
++ if (RWNX_CMD_WAIT_COMPLETE(cmd->flags)) {
++ cmd->result = 0;
++ complete(&cmd->complete);
++ }
++ }
++}
++
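++/* Queue a command: append it to the manager's list, push it to the bus
++ * immediately (deferred push is never expected here), and for blocking
++ * commands wait for the E2A confirmation with a timeout scaled by the queue
++ * depth; a timeout marks the manager as crashed. */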
++static int cmd_mgr_queue(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd)
++{
++ bool defer_push = false;
++
++ spin_lock_bh(&cmd_mgr->lock);
++
++ if (cmd_mgr->state == RWNX_CMD_MGR_STATE_CRASHED) {
++ printk(KERN_CRIT"cmd queue crashed\n");
++ cmd->result = -EPIPE;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -EPIPE;
++ }
++
++ if (!list_empty(&cmd_mgr->cmds)) {
++ struct rwnx_cmd *last;
++
++ if (cmd_mgr->queue_sz == cmd_mgr->max_queue_sz) {
++ printk(KERN_CRIT"Too many cmds (%d) already queued\n",
++ cmd_mgr->max_queue_sz);
++ cmd->result = -ENOMEM;
++ spin_unlock_bh(&cmd_mgr->lock);
++ return -ENOMEM;
++ }
++ last = list_entry(cmd_mgr->cmds.prev, struct rwnx_cmd, list);
++ if (last->flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_PUSH)) {
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_PUSH;
++ defer_push = true;
++ }
++ }
++
++ if (cmd->flags & RWNX_CMD_FLAG_REQ_CFM)
++ cmd->flags |= RWNX_CMD_FLAG_WAIT_CFM;
++
++ cmd->tkn = cmd_mgr->next_tkn++;
++ cmd->result = -EINTR;
++
++ if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK))
++ init_completion(&cmd->complete);
++
++ list_add_tail(&cmd->list, &cmd_mgr->cmds);
++ cmd_mgr->queue_sz++;
++ spin_unlock_bh(&cmd_mgr->lock);
++
++ if (!defer_push) {
++ //printk("queue:id=%x, param_len=%u\n",cmd->a2e_msg->id, cmd->a2e_msg->param_len);
++ aicwf_set_cmd_tx((void *)(cmd_mgr->usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
++ kfree(cmd->a2e_msg);
++ } else {
++ printk("ERR: never defer push!!!!");
++ return 0;
++ }
++
++ if (!(cmd->flags & RWNX_CMD_FLAG_NONBLOCK)) {
++ unsigned long tout = msecs_to_jiffies(RWNX_80211_CMD_TIMEOUT_MS * cmd_mgr->queue_sz);
++ if (!wait_for_completion_killable_timeout(&cmd->complete, tout)) {
++ printk(KERN_CRIT"cmd timed-out\n");
++
++ cmd_dump(cmd);
++ spin_lock_bh(&cmd_mgr->lock);
++ cmd_mgr->state = RWNX_CMD_MGR_STATE_CRASHED;
++ if (!(cmd->flags & RWNX_CMD_FLAG_DONE)) {
++ cmd->result = -ETIMEDOUT;
++ cmd_complete(cmd_mgr, cmd);
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++ }
++ else{
++ kfree(cmd);
++ }
++ } else {
++ cmd->result = 0;
++ }
++
++ return 0;
++}
++
++static int cmd_mgr_run_callback(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd,
++ struct rwnx_cmd_e2amsg *msg, msg_cb_fct cb)
++{
++ int res;
++
++ if (! cb){
++ return 0;
++ }
++ spin_lock(&cmd_mgr->cb_lock);
++ res = cb(cmd, msg);
++ spin_unlock(&cmd_mgr->cb_lock);
++
++ return res;
++}
++
++static int cmd_mgr_msgind(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd_e2amsg *msg,
++ msg_cb_fct cb)
++{
++ struct rwnx_cmd *cmd;
++ bool found = false;
++
++ //printk("cmd->id=%x\n", msg->id);
++ spin_lock(&cmd_mgr->lock);
++ list_for_each_entry(cmd, &cmd_mgr->cmds, list) {
++ if (cmd->reqid == msg->id &&
++ (cmd->flags & RWNX_CMD_FLAG_WAIT_CFM)) {
++
++ if (!cmd_mgr_run_callback(cmd_mgr, cmd, msg, cb)) {
++ found = true;
++ cmd->flags &= ~RWNX_CMD_FLAG_WAIT_CFM;
++
++ if (WARN((msg->param_len > RWNX_CMD_E2AMSG_LEN_MAX),
++ "Unexpect E2A msg len %d > %d\n", msg->param_len,
++ RWNX_CMD_E2AMSG_LEN_MAX)) {
++ msg->param_len = RWNX_CMD_E2AMSG_LEN_MAX;
++ }
++
++ if (cmd->e2a_msg && msg->param_len)
++ memcpy(cmd->e2a_msg, &msg->param, msg->param_len);
++
++ if (RWNX_CMD_WAIT_COMPLETE(cmd->flags))
++ cmd_complete(cmd_mgr, cmd);
++
++ break;
++ }
++ }
++ }
++ spin_unlock(&cmd_mgr->lock);
++
++ if (!found)
++ cmd_mgr_run_callback(cmd_mgr, NULL, msg, cb);
++
++ return 0;
++}
++
++static void cmd_mgr_print(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ struct rwnx_cmd *cur;
++
++ spin_lock_bh(&cmd_mgr->lock);
++ printk("q_sz/max: %2d / %2d - next tkn: %d\n",
++ cmd_mgr->queue_sz, cmd_mgr->max_queue_sz,
++ cmd_mgr->next_tkn);
++ list_for_each_entry(cur, &cmd_mgr->cmds, list) {
++ cmd_dump(cur);
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++}
++
++static void cmd_mgr_drain(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ struct rwnx_cmd *cur, *nxt;
++
++ spin_lock_bh(&cmd_mgr->lock);
++ list_for_each_entry_safe(cur, nxt, &cmd_mgr->cmds, list) {
++ list_del(&cur->list);
++ cmd_mgr->queue_sz--;
++ if (!(cur->flags & RWNX_CMD_FLAG_NONBLOCK))
++ complete(&cur->complete);
++ }
++ spin_unlock_bh(&cmd_mgr->lock);
++}
++
++void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ cmd_mgr->max_queue_sz = RWNX_CMD_MAX_QUEUED;
++ INIT_LIST_HEAD(&cmd_mgr->cmds);
++ cmd_mgr->state = RWNX_CMD_MGR_STATE_INITED;
++ spin_lock_init(&cmd_mgr->lock);
++ spin_lock_init(&cmd_mgr->cb_lock);
++ cmd_mgr->queue = &cmd_mgr_queue;
++ cmd_mgr->print = &cmd_mgr_print;
++ cmd_mgr->drain = &cmd_mgr_drain;
++ cmd_mgr->llind = NULL;//&cmd_mgr_llind;
++ cmd_mgr->msgind = &cmd_mgr_msgind;
++
++ #if 0
++ INIT_WORK(&cmd_mgr->cmdWork, cmd_mgr_task_process);
++ cmd_mgr->cmd_wq = create_singlethread_workqueue("cmd_wq");
++ if (!cmd_mgr->cmd_wq) {
++ txrx_err("insufficient memory to create cmd workqueue.\n");
++ return;
++ }
++ #endif
++}
++
++void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr)
++{
++ cmd_mgr->print(cmd_mgr);
++ cmd_mgr->drain(cmd_mgr);
++ cmd_mgr->print(cmd_mgr);
++ memset(cmd_mgr, 0, sizeof(*cmd_mgr));
++}
++
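++/* Serialize an LMAC message into the bus command buffer: 4-byte bus header
++ * (length + 4, type 0x11), a 4-byte dummy word, then the little-endian
++ * message header (id, dest, src, param_len) followed by the parameters. */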
++void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len)
++{
++ struct aic_usb_dev *usbdev = (struct aic_usb_dev *)dev;
++ struct aicwf_bus *bus = usbdev->bus_if;
++ u8 *buffer = bus->cmd_buf;
++ u16 index = 0;
++
++ memset(buffer, 0, CMD_BUF_MAX);
++ buffer[0] = (len+4) & 0x00ff;
++ buffer[1] = ((len+4) >> 8) &0x0f;
++ buffer[2] = 0x11;
++ buffer[3] = 0x0;
++ index += 4;
++ //there is a dummy word
++ index += 4;
++
++ //make sure little endian
++ put_u16(&buffer[index], msg->id);
++ index += 2;
++ put_u16(&buffer[index], msg->dest_id);
++ index += 2;
++ put_u16(&buffer[index], msg->src_id);
++ index += 2;
++ put_u16(&buffer[index], msg->param_len);
++ index += 2;
++ memcpy(&buffer[index], (u8 *)msg->param, msg->param_len);
++
++ aicwf_bus_txmsg(bus, buffer, len + 8);
++}
++
++static inline void *rwnx_msg_zalloc(lmac_msg_id_t const id,
++ lmac_task_id_t const dest_id,
++ lmac_task_id_t const src_id,
++ uint16_t const param_len)
++{
++ struct lmac_msg *msg;
++ gfp_t flags;
++
++ if (in_softirq())
++ flags = GFP_ATOMIC;
++ else
++ flags = GFP_KERNEL;
++
++ msg = (struct lmac_msg *)kzalloc(sizeof(struct lmac_msg) + param_len,
++ flags);
++ if (msg == NULL) {
++ printk(KERN_CRIT "%s: msg allocation failed\n", __func__);
++ return NULL;
++ }
++ msg->id = id;
++ msg->dest_id = dest_id;
++ msg->src_id = src_id;
++ msg->param_len = param_len;
++
++ return msg->param;
++}
++
++static void rwnx_msg_free(struct lmac_msg *msg, const void *msg_params)
++{
++ kfree(msg);
++}
++
++
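++/* Wrap msg_params in a rwnx_cmd and either hand it to the command manager
++ * (when a confirmation is requested) or push it directly to the bus and
++ * free it. */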
++static int rwnx_send_msg(struct aic_usb_dev *usbdev, const void *msg_params,
++ int reqcfm, lmac_msg_id_t reqid, void *cfm)
++{
++ struct lmac_msg *msg;
++ struct rwnx_cmd *cmd;
++ bool nonblock;
++ int ret = 0;
++
++ msg = container_of((void *)msg_params, struct lmac_msg, param);
++ if(usbdev->bus_if->state == BUS_DOWN_ST) {
++ rwnx_msg_free(msg, msg_params);
++ printk("bus is down\n");
++ return 0;
++ }
++
++ nonblock = 0;
++ cmd = kzalloc(sizeof(struct rwnx_cmd), nonblock ? GFP_ATOMIC : GFP_KERNEL);
++ if (!cmd) {
++ rwnx_msg_free(msg, msg_params);
++ return -ENOMEM;
++ }
++ cmd->result = -EINTR;
++ cmd->id = msg->id;
++ cmd->reqid = reqid;
++ cmd->a2e_msg = msg;
++ cmd->e2a_msg = cfm;
++ if (nonblock)
++ cmd->flags = RWNX_CMD_FLAG_NONBLOCK;
++ if (reqcfm)
++ cmd->flags |= RWNX_CMD_FLAG_REQ_CFM;
++
++ if(reqcfm) {
++ cmd->flags &= ~RWNX_CMD_FLAG_WAIT_ACK; // we don't need ack any more
++ ret = usbdev->cmd_mgr.queue(&usbdev->cmd_mgr,cmd);
++ } else {
++ aicwf_set_cmd_tx((void *)(usbdev), cmd->a2e_msg, sizeof(struct lmac_msg) + cmd->a2e_msg->param_len);
++ }
++
++ if(!reqcfm)
++ kfree(cmd);
++
++ return ret;
++}
++
++int rwnx_send_dbg_mem_mask_write_req(struct aic_usb_dev *usbdev, u32 mem_addr,
++ u32 mem_mask, u32 mem_data)
++{
++ struct dbg_mem_mask_write_req *mem_mask_write_req;
++
++ /* Build the DBG_MEM_MASK_WRITE_REQ message */
++ mem_mask_write_req = rwnx_msg_zalloc(DBG_MEM_MASK_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_mask_write_req));
++ if (!mem_mask_write_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_MASK_WRITE_REQ message */
++ mem_mask_write_req->memaddr = mem_addr;
++ mem_mask_write_req->memmask = mem_mask;
++ mem_mask_write_req->memdata = mem_data;
++
++ /* Send the DBG_MEM_MASK_WRITE_REQ message to LMAC FW */
++ return rwnx_send_msg(usbdev, mem_mask_write_req, 1, DBG_MEM_MASK_WRITE_CFM, NULL);
++}
++
++
++
++int rwnx_send_dbg_mem_block_write_req(struct aic_usb_dev *usbdev, u32 mem_addr,
++ u32 mem_size, u32 *mem_data)
++{
++ struct dbg_mem_block_write_req *mem_blk_write_req;
++
++ /* Build the DBG_MEM_BLOCK_WRITE_REQ message */
++ mem_blk_write_req = rwnx_msg_zalloc(DBG_MEM_BLOCK_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_block_write_req));
++ if (!mem_blk_write_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_BLOCK_WRITE_REQ message */
++ mem_blk_write_req->memaddr = mem_addr;
++ mem_blk_write_req->memsize = mem_size;
++ memcpy(mem_blk_write_req->memdata, mem_data, mem_size);
++
++ /* Send the DBG_MEM_BLOCK_WRITE_REQ message to LMAC FW */
++ return rwnx_send_msg(usbdev, mem_blk_write_req, 1, DBG_MEM_BLOCK_WRITE_CFM, NULL);
++}
++
++
++int rwnx_send_dbg_mem_read_req(struct aic_usb_dev *usbdev, u32 mem_addr,
++ struct dbg_mem_read_cfm *cfm)
++{
++ struct dbg_mem_read_req *mem_read_req;
++
++
++ /* Build the DBG_MEM_READ_REQ message */
++ mem_read_req = rwnx_msg_zalloc(DBG_MEM_READ_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_read_req));
++ if (!mem_read_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_READ_REQ message */
++ mem_read_req->memaddr = mem_addr;
++
++ /* Send the DBG_MEM_READ_REQ message to LMAC FW */
++ return rwnx_send_msg(usbdev, mem_read_req, 1, DBG_MEM_READ_CFM, cfm);
++}
++
++
++int rwnx_send_dbg_mem_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, u32 mem_data)
++{
++ struct dbg_mem_write_req *mem_write_req;
++
++ //printk("%s mem_addr:%x mem_data:%x\r\n", __func__, mem_addr, mem_data);
++
++ /* Build the DBG_MEM_WRITE_REQ message */
++ mem_write_req = rwnx_msg_zalloc(DBG_MEM_WRITE_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_mem_write_req));
++ if (!mem_write_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_MEM_WRITE_REQ message */
++ mem_write_req->memaddr = mem_addr;
++ mem_write_req->memdata = mem_data;
++
++ /* Send the DBG_MEM_WRITE_REQ message to LMAC FW */
++ return rwnx_send_msg(usbdev, mem_write_req, 1, DBG_MEM_WRITE_CFM, NULL);
++}
++
++int rwnx_send_dbg_start_app_req(struct aic_usb_dev *usbdev, u32 boot_addr,
++ u32 boot_type)
++{
++ struct dbg_start_app_req *start_app_req;
++
++
++ /* Build the DBG_START_APP_REQ message */
++ start_app_req = rwnx_msg_zalloc(DBG_START_APP_REQ, TASK_DBG, DRV_TASK_ID,
++ sizeof(struct dbg_start_app_req));
++ if (!start_app_req)
++ return -ENOMEM;
++
++ /* Set parameters for the DBG_START_APP_REQ message */
++ start_app_req->bootaddr = boot_addr;
++ start_app_req->boottype = boot_type;
++
++ /* Send the DBG_START_APP_REQ message to LMAC FW */
++ return rwnx_send_msg(usbdev, start_app_req, 0, 0, NULL);
++}
++
++static msg_cb_fct dbg_hdlrs[MSG_I(DBG_MAX)] = {
++};
++
++static msg_cb_fct *msg_hdlrs[] = {
++ [TASK_DBG] = dbg_hdlrs,
++};
++
++void rwnx_rx_handle_msg(struct aic_usb_dev *usbdev, struct ipc_e2a_msg *msg)
++{
++ usbdev->cmd_mgr.msgind(&usbdev->cmd_mgr, msg,
++ msg_hdlrs[MSG_T(msg->id)][MSG_I(msg->id)]);
++}
++
++
++int rwnx_send_reboot(struct aic_usb_dev *usbdev)
++{
++ int ret = 0;
++ u32 delay = 2 * 1000; //delay before reboot
++
++ printk("%s enter \r\n", __func__);
++
++ ret = rwnx_send_dbg_start_app_req(usbdev, delay, HOST_START_APP_REBOOT);
++ return ret;
++}
++
++
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicbluetooth_cmds.h
+@@ -0,0 +1,298 @@
++/**
++ ******************************************************************************
++ *
++ * rwnx_cmds.h
++ *
++ * Copyright (C) RivieraWaves 2014-2019
++ *
++ ******************************************************************************
++ */
++
++#ifndef _AICBLUETOOTH_CMDS_H_
++#define _AICBLUETOOTH_CMDS_H_
++
++#include <linux/spinlock.h>
++#include <linux/completion.h>
++#include <linux/module.h>
++
++#define RWNX_80211_CMD_TIMEOUT_MS 2000//500//300
++
++#define RWNX_CMD_FLAG_NONBLOCK BIT(0)
++#define RWNX_CMD_FLAG_REQ_CFM BIT(1)
++#define RWNX_CMD_FLAG_WAIT_PUSH BIT(2)
++#define RWNX_CMD_FLAG_WAIT_ACK BIT(3)
++#define RWNX_CMD_FLAG_WAIT_CFM BIT(4)
++#define RWNX_CMD_FLAG_DONE BIT(5)
++/* ATM IPC design makes it possible to get the CFM before the ACK,
++ * otherwise this could have simply been a state enum */
++#define RWNX_CMD_WAIT_COMPLETE(flags) \
++ (!(flags & (RWNX_CMD_FLAG_WAIT_ACK | RWNX_CMD_FLAG_WAIT_CFM)))
++
++#define RWNX_CMD_MAX_QUEUED 8
++
++//#include "ipc_shared.h"
++
++#define IPC_E2A_MSG_PARAM_SIZE 256
++
++/// Message structure for MSGs from Emb to App
++struct ipc_e2a_msg
++{
++ u16 id; ///< Message id.
++ u16 dummy_dest_id;
++ u16 dummy_src_id;
++ u16 param_len; ///< Parameter embedded struct length.
++ u32 pattern; ///< Used to stamp a valid MSG buffer
++ u32 param[IPC_E2A_MSG_PARAM_SIZE]; ///< Parameter embedded struct. Must be word-aligned.
++};
++
++typedef u16 lmac_msg_id_t;
++typedef u16 lmac_task_id_t;
++
++struct lmac_msg
++{
++ lmac_msg_id_t id; ///< Message id.
++ lmac_task_id_t dest_id; ///< Destination kernel identifier.
++ lmac_task_id_t src_id; ///< Source kernel identifier.
++ u16 param_len; ///< Parameter embedded struct length.
++ u32 param[]; ///< Parameter embedded struct. Must be word-aligned.
++};
++
++#define rwnx_cmd_e2amsg ipc_e2a_msg
++#define rwnx_cmd_a2emsg lmac_msg
++#define RWNX_CMD_A2EMSG_LEN(m) (sizeof(struct lmac_msg) + m->param_len)
++#define RWNX_CMD_E2AMSG_LEN_MAX (IPC_E2A_MSG_PARAM_SIZE * 4)
++
++static inline void put_u16(u8 *buf, u16 data)
++{
++ buf[0] = (u8)(data&0x00ff);
++ buf[1] = (u8)((data >> 8)&0x00ff);
++}
++
++enum rwnx_cmd_mgr_state {
++ RWNX_CMD_MGR_STATE_DEINIT,
++ RWNX_CMD_MGR_STATE_INITED,
++ RWNX_CMD_MGR_STATE_CRASHED,
++};
++
++struct rwnx_cmd {
++ struct list_head list;
++ lmac_msg_id_t id;
++ lmac_msg_id_t reqid;
++ struct rwnx_cmd_a2emsg *a2e_msg;
++ char *e2a_msg;
++ u32 tkn;
++ u16 flags;
++ struct completion complete;
++ u32 result;
++};
++
++struct aic_sdio_dev;
++struct aic_usb_dev;
++struct rwnx_cmd;
++typedef int (*msg_cb_fct)(struct rwnx_cmd *cmd, struct rwnx_cmd_e2amsg *msg);
++
++struct rwnx_cmd_mgr {
++ enum rwnx_cmd_mgr_state state;
++ spinlock_t lock;
++ u32 next_tkn;
++ u32 queue_sz;
++ u32 max_queue_sz;
++ spinlock_t cb_lock;
++ #if 0
++ void *sdiodev;
++ #else
++ void *usbdev;
++ #endif
++ struct list_head cmds;
++
++ int (*queue)(struct rwnx_cmd_mgr *, struct rwnx_cmd *);
++ int (*llind)(struct rwnx_cmd_mgr *, struct rwnx_cmd *);
++ int (*msgind)(struct rwnx_cmd_mgr *, struct rwnx_cmd_e2amsg *, msg_cb_fct);
++ void (*print)(struct rwnx_cmd_mgr *);
++ void (*drain)(struct rwnx_cmd_mgr *);
++
++ struct work_struct cmdWork;
++ struct workqueue_struct *cmd_wq;
++};
++
++
++#if 0
++#define WAKE_CMD_WORK(cmd_mgr) \
++ do { \
++ queue_work((cmd_mgr)->cmd_wq, &cmd_mgr->cmdWork); \
++ } while (0)
++#endif
++void rwnx_cmd_mgr_init(struct rwnx_cmd_mgr *cmd_mgr);
++void rwnx_cmd_mgr_deinit(struct rwnx_cmd_mgr *cmd_mgr);
++int cmd_mgr_queue_force_defer(struct rwnx_cmd_mgr *cmd_mgr, struct rwnx_cmd *cmd);
++void aicwf_set_cmd_tx(void *dev, struct lmac_msg *msg, uint len);
++
++enum
++{
++ TASK_NONE = (u8) -1,
++
++ // MAC Management task.
++ TASK_MM = 0,
++ // DEBUG task
++ TASK_DBG,
++ /// SCAN task
++ TASK_SCAN,
++ /// TDLS task
++ TASK_TDLS,
++ /// SCANU task
++ TASK_SCANU,
++ /// ME task
++ TASK_ME,
++ /// SM task
++ TASK_SM,
++ /// APM task
++ TASK_APM,
++ /// BAM task
++ TASK_BAM,
++ /// MESH task
++ TASK_MESH,
++ /// RXU task
++ TASK_RXU,
++ // This is used to define the last task that is running on the EMB processor
++ TASK_LAST_EMB = TASK_RXU,
++
++ // nX API task
++ TASK_API,
++ TASK_MAX,
++};
++
++#define LMAC_FIRST_MSG(task) ((lmac_msg_id_t)((task) << 10))
++#define DRV_TASK_ID 100
++#define MSG_I(msg) ((msg) & ((1<<10)-1))
++#define MSG_T(msg) ((lmac_task_id_t)((msg) >> 10))
++
++
++enum dbg_msg_tag
++{
++ /// Memory read request
++ DBG_MEM_READ_REQ = LMAC_FIRST_MSG(TASK_DBG),
++ /// Memory read confirm
++ DBG_MEM_READ_CFM,
++ /// Memory write request
++ DBG_MEM_WRITE_REQ,
++ /// Memory write confirm
++ DBG_MEM_WRITE_CFM,
++ /// Module filter request
++ DBG_SET_MOD_FILTER_REQ,
++ /// Module filter confirm
++ DBG_SET_MOD_FILTER_CFM,
++ /// Severity filter request
++ DBG_SET_SEV_FILTER_REQ,
++ /// Severity filter confirm
++ DBG_SET_SEV_FILTER_CFM,
++ /// LMAC/MAC HW fatal error indication
++ DBG_ERROR_IND,
++ /// Request to get system statistics
++ DBG_GET_SYS_STAT_REQ,
++ /// Confirmation of system statistics
++ DBG_GET_SYS_STAT_CFM,
++ /// Memory block write request
++ DBG_MEM_BLOCK_WRITE_REQ,
++ /// Memory block write confirm
++ DBG_MEM_BLOCK_WRITE_CFM,
++ /// Start app request
++ DBG_START_APP_REQ,
++ /// Start app confirm
++ DBG_START_APP_CFM,
++ /// Start npc request
++ DBG_START_NPC_REQ,
++ /// Start npc confirm
++ DBG_START_NPC_CFM,
++ /// Memory mask write request
++ DBG_MEM_MASK_WRITE_REQ,
++ /// Memory mask write confirm
++ DBG_MEM_MASK_WRITE_CFM,
++ /// Max number of Debug messages
++ DBG_MAX,
++};
++
++struct dbg_mem_block_write_req
++{
++ u32 memaddr;
++ u32 memsize;
++ u32 memdata[1024 / sizeof(u32)];
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_MASK_WRITE_REQ message.
++struct dbg_mem_mask_write_req
++{
++ u32 memaddr;
++ u32 memmask;
++ u32 memdata;
++};
++
++
++/// Structure containing the parameters of the @ref DBG_MEM_BLOCK_WRITE_CFM message.
++struct dbg_mem_block_write_cfm
++{
++ u32 wstatus;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_READ_REQ message.
++struct dbg_mem_read_req
++{
++ u32 memaddr;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_READ_CFM message.
++struct dbg_mem_read_cfm
++{
++ u32 memaddr;
++ u32 memdata;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_WRITE_REQ message.
++struct dbg_mem_write_req
++{
++ u32 memaddr;
++ u32 memdata;
++};
++
++/// Structure containing the parameters of the @ref DBG_MEM_WRITE_CFM message.
++struct dbg_mem_write_cfm
++{
++ u32 memaddr;
++ u32 memdata;
++};
++
++struct dbg_start_app_req
++{
++ u32 bootaddr;
++ u32 boottype;
++};
++
++/// Structure containing the parameters of the @ref DBG_START_APP_CFM message.
++struct dbg_start_app_cfm
++{
++ u32 bootstatus;
++};
++
++enum {
++ HOST_START_APP_AUTO = 1,
++ HOST_START_APP_CUSTOM,
++ HOST_START_APP_REBOOT,
++};
++
++int rwnx_send_dbg_mem_mask_write_req(struct aic_usb_dev *usbdev, u32 mem_addr,
++ u32 mem_mask, u32 mem_data);
++
++
++int rwnx_send_dbg_mem_block_write_req(struct aic_usb_dev *usbdev, u32 mem_addr,
++ u32 mem_size, u32 *mem_data);
++
++int rwnx_send_dbg_mem_write_req(struct aic_usb_dev *usbdev, u32 mem_addr, u32 mem_data);
++int rwnx_send_dbg_mem_read_req(struct aic_usb_dev *usbdev, u32 mem_addr, struct dbg_mem_read_cfm *cfm);
++
++void rwnx_rx_handle_msg(struct aic_usb_dev *usbdev, struct ipc_e2a_msg *msg);
++
++int rwnx_send_dbg_start_app_req(struct aic_usb_dev *usbdev, u32 boot_addr,
++ u32 boot_type);
++
++int rwnx_send_reboot(struct aic_usb_dev *usbdev);
++
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_debug.h
+@@ -0,0 +1,52 @@
++#ifndef _AICWF_DEBUG_H_
++#define _AICWF_DEBUG_H_
++#define RWNX_FN_ENTRY_STR ">>> %s()\n", __func__
++
++
++
++/* message levels */
++#define LOGERROR 0x0001
++#define LOGINFO 0x0002
++#define LOGTRACE 0x0004
++#define LOGDEBUG 0x0008
++#define LOGDATA 0x0010
++
++extern int aicwf_dbg_level;
++void rwnx_data_dump(char* tag, void* data, unsigned long len);
++
++#define AICWF_LOG "AICWFDBG("
++
++#define AICWFDBG(level, args, arg...) \
++do { \
++ if (aicwf_dbg_level & level) { \
++ printk(AICWF_LOG#level")\t" args, ##arg); \
++ } \
++} while (0)
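++/*
++ * Usage example: AICWFDBG(LOGINFO, "rx %d bytes\n", len) expands to
++ * printk("AICWFDBG(" "LOGINFO" ")\t" "rx %d bytes\n", len), i.e. each line is
++ * prefixed with AICWFDBG(<level>) and only printed when that level bit is set
++ * in aicwf_dbg_level.
++ */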
++
++#define RWNX_DBG(fmt, ...) \
++do { \
++ if (aicwf_dbg_level & LOGTRACE) { \
++ printk(AICWF_LOG"LOGTRACE)\t"fmt , ##__VA_ARGS__); \
++ } \
++} while (0)
++
++
++
++#if 0
++#define RWNX_DBG(fmt, ...) \
++ do { \
++ if (aicwf_dbg_level & LOGTRACE) { \
++ printk(AICWF_LOG"LOGTRACE"")\t" fmt, ##__VA_ARGS__); \
++ } \
++ } while (0)
++#define AICWFDBG(args, level) \
++do { \
++ if (aicwf_dbg_level & level) { \
++ printk(AICWF_LOG"(%s)\t" ,#level); \
++ printf args; \
++ } \
++} while (0)
++#endif
++
++
++#endif /* _AICWF_DEBUG_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_rx_prealloc.c
+@@ -0,0 +1,120 @@
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include "aicwf_rx_prealloc.h"
++#include "aicwf_debug.h"
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++struct aicwf_rx_buff_list aic_rx_buff_list;
++
++int aic_rxbuff_num_max = 1000;
++
++int aic_rxbuff_size = (4 * 512);
++
++int rx_buff_list_ava = 0;
++
++module_param(rx_buff_list_ava, int, 0660);
++
++int aicwf_rxbuff_size_get(void)
++{
++ return aic_rxbuff_size;
++}
++
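++/*
++ * Pop one buffer from the global preallocated pool. The caller supplies the
++ * spinlock protecting the list; the returned buffer is zeroed and its cursors
++ * reset before being handed out. A warning is logged once fewer than 10
++ * buffers remain so pool exhaustion shows up in the log.
++ */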
++struct rx_buff *aicwf_prealloc_rxbuff_alloc(spinlock_t *lock)
++{
++ unsigned long flags;
++ struct rx_buff *rxbuff = NULL;
++
++ spin_lock_irqsave(lock, flags);
++ rx_buff_list_ava = atomic_read(&aic_rx_buff_list.rxbuff_list_len);
++ if(rx_buff_list_ava < 10){
++ AICWFDBG(LOGERROR, "%s WARNING rxbuff is running out %d\r\n", __func__,
++ rx_buff_list_ava);
++ }
++
++ if (list_empty(&aic_rx_buff_list.rxbuff_list)) {
++ spin_unlock_irqrestore(lock, flags);
++ AICWFDBG(LOGERROR, "%s %d, rxbuff list is empty\r\n", __func__, __LINE__);
++ return NULL;
++ } else {
++ rxbuff = list_first_entry(&aic_rx_buff_list.rxbuff_list,
++ struct rx_buff, queue);
++ list_del_init(&rxbuff->queue);
++ atomic_dec(&aic_rx_buff_list.rxbuff_list_len);
++ }
++ spin_unlock_irqrestore(lock, flags);
++ //printk("len:%d\n", aic_rx_buff_list.rxbuff_list_len);
++ memset(rxbuff->data, 0, aic_rxbuff_size);
++ rxbuff->len = 0;
++ rxbuff->start = NULL;
++ rxbuff->read = NULL;
++ rxbuff->end = NULL;
++
++ return rxbuff;
++}
++
++void aicwf_prealloc_rxbuff_free(struct rx_buff *rxbuff, spinlock_t *lock)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(lock, flags);
++ list_add_tail(&rxbuff->queue, &aic_rx_buff_list.rxbuff_list);
++ atomic_inc(&aic_rx_buff_list.rxbuff_list_len);
++ rx_buff_list_ava = atomic_read(&aic_rx_buff_list.rxbuff_list_len);
++ spin_unlock_irqrestore(lock, flags);
++}
++
++int aicwf_prealloc_init(void)
++{
++ struct rx_buff *rxbuff;
++ int i = 0;
++
++ AICWFDBG(LOGINFO, "%s enter\n", __func__);
++ INIT_LIST_HEAD(&aic_rx_buff_list.rxbuff_list);
++
++ for (i = 0 ; i < aic_rxbuff_num_max ; i++) {
++ rxbuff = kzalloc(sizeof(struct rx_buff), GFP_KERNEL);
++ if (rxbuff) {
++ rxbuff->data = kzalloc(aic_rxbuff_size, GFP_KERNEL);
++ if (rxbuff->data == NULL) {
++ AICWFDBG(LOGERROR, "failed to alloc rxbuff data\n");
++ kfree(rxbuff);
++ continue;
++ }
++ rxbuff->len = 0;
++ rxbuff->start = NULL;
++ rxbuff->read = NULL;
++ rxbuff->end = NULL;
++ list_add_tail(&rxbuff->queue, &aic_rx_buff_list.rxbuff_list);
++ atomic_inc(&aic_rx_buff_list.rxbuff_list_len);
++ rx_buff_list_ava = atomic_read(&aic_rx_buff_list.rxbuff_list_len);
++ }
++ }
++
++ AICWFDBG(LOGINFO, "pre alloc rxbuff list len: %d\n", (int)atomic_read(&aic_rx_buff_list.rxbuff_list_len));
++ return 0;
++}
++
++void aicwf_prealloc_exit(void)
++{
++ struct rx_buff *rxbuff;
++ struct rx_buff *pos;
++
++ AICWFDBG(LOGINFO, "%s enter\n", __func__);
++
++ AICWFDBG(LOGINFO, "free pre alloc rxbuff list %d\n", (int)atomic_read(&aic_rx_buff_list.rxbuff_list_len));
++ list_for_each_entry_safe(rxbuff, pos, &aic_rx_buff_list.rxbuff_list, queue) {
++ list_del_init(&rxbuff->queue);
++ kfree(rxbuff->data);
++ kfree(rxbuff);
++ }
++}
++
++EXPORT_SYMBOL(aicwf_rxbuff_size_get);
++EXPORT_SYMBOL(aicwf_prealloc_rxbuff_alloc);
++EXPORT_SYMBOL(aicwf_prealloc_rxbuff_free);
++
++#endif
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_rx_prealloc.h
+@@ -0,0 +1,26 @@
++#ifndef _AICWF_RX_PREALLOC_H_
++#define _AICWF_RX_PREALLOC_H_
++
++#ifdef CONFIG_PREALLOC_RX_SKB
++
++struct rx_buff {
++ struct list_head queue;
++ unsigned char *data;
++ u32 len;
++ uint8_t *start;
++ uint8_t *end;
++ uint8_t *read;
++};
++
++struct aicwf_rx_buff_list {
++ struct list_head rxbuff_list;
++ atomic_t rxbuff_list_len;
++};
++
++struct rx_buff *aicwf_prealloc_rxbuff_alloc(spinlock_t *lock);
++void aicwf_prealloc_rxbuff_free(struct rx_buff *rxbuff, spinlock_t *lock);
++int aicwf_prealloc_init(void);
++void aicwf_prealloc_exit(void);
++int aicwf_rxbuff_size_get(void);
++#endif
++#endif /* _AICWF_RX_PREALLOC_H_ */
+\ No newline at end of file
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_txq_prealloc.c
+@@ -0,0 +1,61 @@
++#include <linux/slab.h>
++#include "aicwf_debug.h"
++
++struct prealloc_txq{
++ int prealloced;
++ void *txq;
++ size_t size;
++};
++
++struct prealloc_txq prealloc_txq;
++#define MAX_TXQ_SIZE (30 * 1024)
++
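++/*
++ * Single-slot txq cache: the buffer allocated on the first call is kept and
++ * handed back on later calls (presumably so it survives driver reload), and
++ * is only reallocated when the requested size differs from the cached size.
++ */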
++void *aicwf_prealloc_txq_alloc(size_t size)
++{
++
++ BUG_ON(size > MAX_TXQ_SIZE);
++
++ //check prealloc_txq.size
++ if((int)prealloc_txq.size != (int)size)
++ {
++ AICWFDBG(LOGINFO, "%s size changed, need to kzalloc again\r\n", __func__);
++
++ if(prealloc_txq.txq != NULL)
++ {
++ AICWFDBG(LOGINFO, "%s txq to kfree \r\n", __func__);
++ kfree(prealloc_txq.txq);
++ prealloc_txq.txq = NULL;
++ }
++
++ prealloc_txq.size = size;
++ prealloc_txq.prealloced = 0;
++ }
++
++ //check prealloc or not
++ if(!prealloc_txq.prealloced)
++ {
++ prealloc_txq.txq = kzalloc(size, GFP_KERNEL);
++ if(!prealloc_txq.txq){
++ AICWFDBG(LOGERROR, "%s txq kzalloc fail \r\n", __func__);
++ }else{
++ AICWFDBG(LOGINFO, "%s txq kzalloc successful \r\n", __func__);
++ prealloc_txq.prealloced = 1;
++ }
++ }else{
++ AICWFDBG(LOGINFO, "%s txq already allocated, no need to kzalloc\r\n", __func__);
++ }
++
++ return prealloc_txq.txq;
++}
++void aicwf_prealloc_txq_free(void)
++{
++ if(prealloc_txq.txq != NULL)
++ {
++ AICWFDBG(LOGINFO, "%s txq to kfree \r\n", __func__);
++ kfree(prealloc_txq.txq);
++ prealloc_txq.txq = NULL;
++ }
++}
++
++EXPORT_SYMBOL(aicwf_prealloc_txq_alloc);
++
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_txq_prealloc.h
+@@ -0,0 +1,4 @@
++#ifndef _AICWF_TXQ_PREALLOC_H_
++#define _AICWF_TXQ_PREALLOC_H_
++#include <linux/types.h>
++
++void *aicwf_prealloc_txq_alloc(size_t size);
++void aicwf_prealloc_txq_free(void);
++
++#endif /* _AICWF_TXQ_PREALLOC_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_usb.c
+@@ -0,0 +1,1716 @@
++/**
++ * aicwf_usb.c
++ *
++ * USB function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++#include <linux/kthread.h>
++#include <linux/netdevice.h>
++#include <linux/printk.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/completion.h>
++#include <linux/semaphore.h>
++#include <linux/debugfs.h>
++#include <linux/atomic.h>
++#include <linux/vmalloc.h>
++#include <linux/signal.h>
++
++#include <linux/usb.h>
++#include <linux/kthread.h>
++#include <linux/version.h>
++#include "aic_txrxif.h"
++#include "aicwf_usb.h"
++#include "aicbluetooth.h"
++#include "aicwf_debug.h"
++#include "aic_compat_8800d80.h"
++
++#define JUMP_TABLE_BASE 0x161928
++#define JUMP_TABLE_OFFSET(i) ((u32)(JUMP_TABLE_BASE+(i)*4))
++extern int adap_test;
++extern int testmode;
++extern unsigned char paringid[100];
++extern int ble_scan_wakeup_reboot_time;
++extern uint32_t ad_data_filter_mask;
++u8 chip_id = 0;
++u8 chip_sub_id = 0;
++int fw_loaded = 0;
++
++void aicwf_usb_tx_flowctrl(struct aic_usb_dev *usb_dev, bool state)
++{
++}
++
++static struct aicwf_usb_buf *aicwf_usb_tx_dequeue(struct aic_usb_dev *usb_dev,
++ struct list_head *q, int *counter, spinlock_t *qlock)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf;
++
++ spin_lock_irqsave(qlock, flags);
++ if (list_empty(q)) {
++ usb_buf = NULL;
++ } else {
++ usb_buf = list_first_entry(q, struct aicwf_usb_buf, list);
++ list_del_init(&usb_buf->list);
++ if (counter)
++ (*counter)--;
++ }
++ spin_unlock_irqrestore(qlock, flags);
++ return usb_buf;
++}
++
++static void aicwf_usb_tx_queue(struct aic_usb_dev *usb_dev,
++ struct list_head *q, struct aicwf_usb_buf *usb_buf, int *counter,
++ spinlock_t *qlock)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(qlock, flags);
++ list_add_tail(&usb_buf->list, q);
++ (*counter)++;
++ spin_unlock_irqrestore(qlock, flags);
++}
++
++static struct aicwf_usb_buf *aicwf_usb_rx_buf_get(struct aic_usb_dev *usb_dev)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf;
++
++ spin_lock_irqsave(&usb_dev->rx_free_lock, flags);
++ if (list_empty(&usb_dev->rx_free_list)) {
++ usb_buf = NULL;
++ } else {
++ usb_buf = list_first_entry(&usb_dev->rx_free_list, struct aicwf_usb_buf, list);
++ list_del_init(&usb_buf->list);
++ }
++ spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags);
++ return usb_buf;
++}
++
++static void aicwf_usb_rx_buf_put(struct aic_usb_dev *usb_dev, struct aicwf_usb_buf *usb_buf)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&usb_dev->rx_free_lock, flags);
++ list_add_tail(&usb_buf->list, &usb_dev->rx_free_list);
++ spin_unlock_irqrestore(&usb_dev->rx_free_lock, flags);
++}
++
++static void aicwf_usb_tx_complete(struct urb *urb)
++{
++ unsigned long flags;
++ struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context;
++ struct aic_usb_dev *usb_dev = usb_buf->usbdev;
++ struct sk_buff *skb;
++
++ if (usb_buf->cfm == false) {
++ skb = usb_buf->skb;
++ dev_kfree_skb_any(skb);
++ }
++ #if !defined CONFIG_USB_NO_TRANS_DMA_MAP
++ else {
++ u8 *buf;
++ buf = (u8 *)usb_buf->skb;
++ kfree(buf);
++ }
++ #endif
++ usb_buf->skb = NULL;
++
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ if (usb_dev->tx_free_count > AICWF_USB_TX_HIGH_WATER) {
++ if (usb_dev->tbusy) {
++ usb_dev->tbusy = false;
++ aicwf_usb_tx_flowctrl(usb_dev, false);
++ }
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++ }
++
++static void aicwf_usb_rx_complete(struct urb *urb)
++{
++ struct aicwf_usb_buf *usb_buf = (struct aicwf_usb_buf *) urb->context;
++ struct aic_usb_dev *usb_dev = usb_buf->usbdev;
++ struct aicwf_rx_priv* rx_priv = usb_dev->rx_priv;
++ struct sk_buff *skb = NULL;
++ unsigned long flags = 0;
++
++ skb = usb_buf->skb;
++ usb_buf->skb = NULL;
++
++ if (urb->actual_length > urb->transfer_buffer_length) {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ schedule_work(&usb_dev->rx_urb_work);
++ return;
++ }
++
++ if (urb->status != 0 || !urb->actual_length) {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ schedule_work(&usb_dev->rx_urb_work);
++ return;
++ }
++
++ if (usb_dev->state == USB_UP_ST) {
++ skb_put(skb, urb->actual_length);
++
++ spin_lock_irqsave(&rx_priv->rxqlock, flags);
++ if(!aicwf_rxframe_enqueue(usb_dev->dev, &rx_priv->rxq, skb)){
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ usb_err("rx_priv->rxq is over flow!!!\n");
++ aicwf_dev_skb_free(skb);
++ return;
++ }
++ spin_unlock_irqrestore(&rx_priv->rxqlock, flags);
++ atomic_inc(&rx_priv->rx_cnt);
++ complete(&rx_priv->usbdev->bus_if->busrx_trgg);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++
++ schedule_work(&usb_dev->rx_urb_work);
++ } else {
++ aicwf_dev_skb_free(skb);
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ }
++}
++
++static int aicwf_usb_submit_rx_urb(struct aic_usb_dev *usb_dev,
++ struct aicwf_usb_buf *usb_buf)
++{
++ struct sk_buff *skb;
++ int ret;
++
++ if (!usb_buf || !usb_dev)
++ return -1;
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\n");
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++
++ skb = __dev_alloc_skb(AICWF_USB_MAX_PKT_SIZE, GFP_KERNEL);
++ if (!skb) {
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ return -1;
++ }
++
++ usb_buf->skb = skb;
++
++ usb_fill_bulk_urb(usb_buf->urb,
++ usb_dev->udev,
++ usb_dev->bulk_in_pipe,
++ skb->data, skb_tailroom(skb), aicwf_usb_rx_complete, usb_buf);
++
++ usb_buf->usbdev = usb_dev;
++
++ usb_anchor_urb(usb_buf->urb, &usb_dev->rx_submitted);
++
++ ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC);
++ if (ret) {
++ usb_err("usb submit rx urb fail:%d\n", ret);
++ usb_unanchor_urb(usb_buf->urb);
++ aicwf_dev_skb_free(usb_buf->skb);
++ usb_buf->skb = NULL;
++ aicwf_usb_rx_buf_put(usb_dev, usb_buf);
++ msleep(100);
++ return ret;
++ }
++ return 0;
++}
++
++static void aicwf_usb_rx_submit_all_urb(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("bus is not up=%d\n", usb_dev->state);
++ return;
++ }
++ if (usb_dev->app_cmp) {
++ printk("app_cmp\r\n");
++ return;
++}
++
++ while((usb_buf = aicwf_usb_rx_buf_get(usb_dev)) != NULL) {
++ if (aicwf_usb_submit_rx_urb(usb_dev, usb_buf)) {
++ usb_err("usb rx refill fail\n");
++ if (usb_dev->state != USB_UP_ST){
++ return;
++ }
++ break;
++ }
++ }
++}
++
++static void aicwf_usb_rx_prepare(struct aic_usb_dev *usb_dev)
++{
++ aicwf_usb_rx_submit_all_urb(usb_dev);
++}
++
++static void aicwf_usb_tx_prepare(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++
++ while(!list_empty(&usb_dev->tx_post_list)){
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ if(usb_buf->skb) {
++ dev_kfree_skb(usb_buf->skb);
++ usb_buf->skb = NULL;
++ }
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++ }
++}
++static void aicwf_usb_tx_process(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf;
++ int ret = 0;
++ u8* data = NULL;
++
++ while(!list_empty(&usb_dev->tx_post_list)) {
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\n");
++ return;
++ }
++
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_post_list,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ if(!usb_buf) {
++ usb_err("can not get usb_buf from tx_post_list!\n");
++ return;
++ }
++ data = usb_buf->skb->data;
++
++ ret = usb_submit_urb(usb_buf->urb, GFP_ATOMIC);
++ if (ret) {
++ usb_err("aicwf_usb_bus_tx usb_submit_urb FAILED\n");
++ goto fail;
++ }
++
++ continue;
++fail:
++ dev_kfree_skb(usb_buf->skb);
++ usb_buf->skb = NULL;
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_free_list, usb_buf,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++ }
++}
++
++static inline void aic_thread_wait_stop(void)
++{
++#if 1// PLATFORM_LINUX
++ #if 0
++ while (!kthread_should_stop())
++ rtw_msleep_os(10);
++ #else
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ #endif
++#endif
++}
++
++int usb_bustx_thread(void *data)
++{
++ struct aicwf_bus *bus = (struct aicwf_bus *)data;
++ struct aic_usb_dev *usbdev = bus->bus_priv.usb;
++
++ while (1) {
++ /*
++ if(kthread_should_stop()) {
++ usb_err("usb bustx thread stop\n");
++ break;
++ }
++ */
++
++ if (!wait_for_completion_interruptible(&bus->bustx_trgg)) {
++ if (usbdev->app_cmp == false){
++ printk("usb bustx thread will stop\n");
++ //mdelay(100);
++ break;
++ //continue;
++ }
++ if (usbdev->tx_post_count > 0)
++ aicwf_usb_tx_process(usbdev);
++ }
++ }
++
++ aic_thread_wait_stop();
++ printk("usb bustx thread stop\n");
++
++ return 0;
++}
++
++int usb_busrx_thread(void *data)
++{
++ struct aicwf_rx_priv *rx_priv = (struct aicwf_rx_priv *)data;
++ struct aicwf_bus *bus_if = rx_priv->usbdev->bus_if;
++
++ allow_signal(SIGTERM);
++ while (1) {
++ #if 0
++ if(kthread_should_stop()) {
++ usb_err("usb busrx thread stop\n");
++ break;
++ }
++ #endif
++ if (!wait_for_completion_interruptible(&bus_if->busrx_trgg)) {
++ aicwf_process_rxframes(rx_priv);
++ } else {
++ break;
++ }
++ if (bus_if->state == BUS_DOWN_ST) {
++ printk("usb busrx thread will stop\n");
++ break;
++ }
++
++ }
++ aic_thread_wait_stop();
++ printk("usb busrx thread stop\n");
++
++ return 0;
++}
++
++static void aicwf_usb_send_msg_complete(struct urb *urb)
++{
++ struct aic_usb_dev *usb_dev = (struct aic_usb_dev *) urb->context;
++
++ usb_dev->msg_finished = true;
++ if (waitqueue_active(&usb_dev->msg_wait))
++ wake_up(&usb_dev->msg_wait);
++}
++
++static int aicwf_usb_bus_txmsg(struct device *dev, u8 *buf, u32 len)
++{
++ int ret = 0;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++
++ if (usb_dev->state != USB_UP_ST)
++ return -EIO;
++
++ if (buf == NULL || len == 0 || usb_dev->msg_out_urb == NULL)
++ return -EINVAL;
++
++ if (test_and_set_bit(0, &usb_dev->msg_busy)) {
++ usb_err("another control message is in progress, can't tx!\n");
++ return -EIO;
++ }
++
++ usb_dev->msg_finished = false;
++
++#ifdef CONFIG_USB_MSG_EP
++ if (usb_dev->msg_out_pipe && usb_dev->use_msg_ep == 1) {
++ usb_fill_bulk_urb(usb_dev->msg_out_urb,
++ usb_dev->udev,
++ usb_dev->msg_out_pipe,
++ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
++ } else {
++ usb_fill_bulk_urb(usb_dev->msg_out_urb,
++ usb_dev->udev,
++ usb_dev->bulk_out_pipe,
++ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
++ }
++#else
++ usb_fill_bulk_urb(usb_dev->msg_out_urb,
++ usb_dev->udev,
++ usb_dev->bulk_out_pipe,
++ buf, len, (usb_complete_t) aicwf_usb_send_msg_complete, usb_dev);
++#endif
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ usb_dev->msg_out_urb->transfer_dma = usb_dev->cmd_dma_trans_addr;
++ usb_dev->msg_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++ #endif
++ usb_dev->msg_out_urb->transfer_flags |= URB_ZERO_PACKET;
++
++ ret = usb_submit_urb(usb_dev->msg_out_urb, GFP_ATOMIC);
++ if (ret) {
++ usb_err("usb_submit_urb failed %d\n", ret);
++ goto exit;
++ }
++
++ ret = wait_event_timeout(usb_dev->msg_wait,
++ usb_dev->msg_finished, msecs_to_jiffies(CMD_TX_TIMEOUT));
++ if (!ret) {
++ if (usb_dev->msg_out_urb)
++ usb_kill_urb(usb_dev->msg_out_urb);
++ usb_err("Txmsg wait timed out\n");
++ ret = -EIO;
++ goto exit;
++ }
++
++ if (usb_dev->msg_finished == false) {
++ usb_err("Txmsg timed out\n");
++ ret = -ETIMEDOUT;
++ goto exit;
++ }
++exit:
++ clear_bit(0, &usb_dev->msg_busy);
++ return ret;
++}
++
++
++static void aicwf_usb_free_urb(struct list_head *q, spinlock_t *qlock)
++{
++ struct aicwf_usb_buf *usb_buf, *tmp;
++ unsigned long flags;
++
++ spin_lock_irqsave(qlock, flags);
++ list_for_each_entry_safe(usb_buf, tmp, q, list) {
++ spin_unlock_irqrestore(qlock, flags);
++ if (!usb_buf->urb) {
++ usb_err("bad usb_buf\n");
++ spin_lock_irqsave(qlock, flags);
++ break;
++ }
++ usb_free_urb(usb_buf->urb);
++ list_del_init(&usb_buf->list);
++ spin_lock_irqsave(qlock, flags);
++ }
++ spin_unlock_irqrestore(qlock, flags);
++}
++
++static int aicwf_usb_alloc_rx_urb(struct aic_usb_dev *usb_dev)
++{
++ int i;
++
++ for (i = 0; i < AICWF_USB_RX_URBS; i++) {
++ struct aicwf_usb_buf *usb_buf = &usb_dev->usb_rx_buf[i];
++
++ usb_buf->usbdev = usb_dev;
++ usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!usb_buf->urb) {
++ usb_err("could not allocate rx data urb\n");
++ goto err;
++ }
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ // dma buf unused
++ usb_buf->data_buf = NULL;
++ usb_buf->data_dma_trans_addr = 0x0;
++ #endif
++ list_add_tail(&usb_buf->list, &usb_dev->rx_free_list);
++ }
++ return 0;
++
++err:
++ aicwf_usb_free_urb(&usb_dev->rx_free_list, &usb_dev->rx_free_lock);
++ return -ENOMEM;
++}
++
++static int aicwf_usb_alloc_tx_urb(struct aic_usb_dev *usb_dev)
++{
++ int i;
++
++ for (i = 0; i < AICWF_USB_TX_URBS; i++) {
++ struct aicwf_usb_buf *usb_buf = &usb_dev->usb_tx_buf[i];
++
++ usb_buf->usbdev = usb_dev;
++ usb_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
++ if (!usb_buf->urb) {
++ usb_err("could not allocate tx data urb\n");
++ goto err;
++ }
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ // dma buf unused
++ usb_buf->data_buf = NULL;
++ usb_buf->data_dma_trans_addr = 0x0;
++ #endif
++ list_add_tail(&usb_buf->list, &usb_dev->tx_free_list);
++ (usb_dev->tx_free_count)++;
++ }
++ return 0;
++
++err:
++ aicwf_usb_free_urb(&usb_dev->tx_free_list, &usb_dev->tx_free_lock);
++ return -ENOMEM;
++}
++
++
++static void aicwf_usb_state_change(struct aic_usb_dev *usb_dev, int state)
++{
++ int old_state;
++
++ if (usb_dev->state == state)
++ return;
++
++ old_state = usb_dev->state;
++ usb_dev->state = state;
++
++ if (state == USB_DOWN_ST) {
++ usb_dev->bus_if->state = BUS_DOWN_ST;
++ }
++ if (state == USB_UP_ST) {
++ usb_dev->bus_if->state = BUS_UP_ST;
++ }
++}
++
++static int aicwf_usb_bus_txdata(struct device *dev, struct sk_buff *skb)
++{
++ u8 *buf = NULL;
++ u16 buf_len = 0;
++ struct aicwf_usb_buf *usb_buf;
++ int ret = 0;
++ unsigned long flags;
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++ bool need_cfm = false;
++
++ if (usb_dev->state != USB_UP_ST) {
++ usb_err("usb state is not up!\n");
++ dev_kfree_skb_any(skb);
++ return -EIO;
++ }
++
++ usb_buf = aicwf_usb_tx_dequeue(usb_dev, &usb_dev->tx_free_list,
++ &usb_dev->tx_free_count, &usb_dev->tx_free_lock);
++ if (!usb_buf) {
++ usb_err("free:%d, post:%d\n", usb_dev->tx_free_count, usb_dev->tx_post_count);
++ dev_kfree_skb_any(skb);
++ ret = -ENOMEM;
++ goto flow_ctrl;
++ }
++
++ usb_buf->usbdev = usb_dev;
++ if (need_cfm)
++ usb_buf->cfm = true;
++ else
++ usb_buf->cfm = false;
++ usb_fill_bulk_urb(usb_buf->urb, usb_dev->udev, usb_dev->bulk_out_pipe,
++ buf, buf_len, aicwf_usb_tx_complete, usb_buf);
++ #if defined CONFIG_USB_NO_TRANS_DMA_MAP
++ usb_buf->urb->transfer_dma = usb_buf->data_dma_trans_addr;
++ usb_buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++ #endif
++ usb_buf->urb->transfer_flags |= URB_ZERO_PACKET;
++
++ aicwf_usb_tx_queue(usb_dev, &usb_dev->tx_post_list, usb_buf,
++ &usb_dev->tx_post_count, &usb_dev->tx_post_lock);
++ complete(&bus_if->bustx_trgg);
++ ret = 0;
++
++ flow_ctrl:
++ spin_lock_irqsave(&usb_dev->tx_flow_lock, flags);
++ if (usb_dev->tx_free_count < AICWF_USB_TX_LOW_WATER) {
++ usb_dev->tbusy = true;
++ aicwf_usb_tx_flowctrl(usb_dev, true);
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_flow_lock, flags);
++
++ return ret;
++}
++
++static int aicwf_usb_bus_start(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++
++ if (usb_dev->state == USB_UP_ST)
++ return 0;
++
++ aicwf_usb_state_change(usb_dev, USB_UP_ST);
++ aicwf_usb_rx_prepare(usb_dev);
++ aicwf_usb_tx_prepare(usb_dev);
++ return 0;
++}
++
++static void aicwf_usb_cancel_all_urbs(struct aic_usb_dev *usb_dev)
++{
++ struct aicwf_usb_buf *usb_buf, *tmp;
++ unsigned long flags;
++
++ if (usb_dev->msg_out_urb)
++ usb_kill_urb(usb_dev->msg_out_urb);
++
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ list_for_each_entry_safe(usb_buf, tmp, &usb_dev->tx_post_list, list) {
++ spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags);
++ if (!usb_buf->urb) {
++ usb_err("bad usb_buf\n");
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ break;
++ }
++ usb_kill_urb(usb_buf->urb);
++ spin_lock_irqsave(&usb_dev->tx_post_lock, flags);
++ }
++ spin_unlock_irqrestore(&usb_dev->tx_post_lock, flags);
++
++ usb_kill_anchored_urbs(&usb_dev->rx_submitted);
++}
++
++static void aicwf_usb_bus_stop(struct device *dev)
++{
++ struct aicwf_bus *bus_if = dev_get_drvdata(dev);
++ struct aic_usb_dev *usb_dev = bus_if->bus_priv.usb;
++
++ if (usb_dev == NULL)
++ return;
++
++ if (usb_dev->state == USB_DOWN_ST)
++ return;
++
++ aicwf_usb_state_change(usb_dev, USB_DOWN_ST);
++ aicwf_usb_cancel_all_urbs(usb_dev);
++}
++
++static void aicwf_usb_deinit(struct aic_usb_dev *usbdev)
++{
++ cancel_work_sync(&usbdev->rx_urb_work);
++ aicwf_usb_free_urb(&usbdev->rx_free_list, &usbdev->rx_free_lock);
++ aicwf_usb_free_urb(&usbdev->tx_free_list, &usbdev->tx_free_lock);
++ usb_free_urb(usbdev->msg_out_urb);
++}
++
++static void aicwf_usb_rx_urb_work(struct work_struct *work)
++{
++ struct aic_usb_dev *usb_dev = container_of(work, struct aic_usb_dev, rx_urb_work);
++
++ aicwf_usb_rx_submit_all_urb(usb_dev);
++}
++
++static int aicwf_usb_init(struct aic_usb_dev *usb_dev)
++{
++ int ret = 0;
++
++ usb_dev->tbusy = false;
++ usb_dev->app_cmp = false;
++ usb_dev->state = USB_DOWN_ST;
++
++ init_waitqueue_head(&usb_dev->msg_wait);
++ init_usb_anchor(&usb_dev->rx_submitted);
++
++ spin_lock_init(&usb_dev->tx_free_lock);
++ spin_lock_init(&usb_dev->tx_post_lock);
++ spin_lock_init(&usb_dev->rx_free_lock);
++ spin_lock_init(&usb_dev->tx_flow_lock);
++
++ INIT_LIST_HEAD(&usb_dev->rx_free_list);
++ INIT_LIST_HEAD(&usb_dev->tx_free_list);
++ INIT_LIST_HEAD(&usb_dev->tx_post_list);
++
++ usb_dev->tx_free_count = 0;
++ usb_dev->tx_post_count = 0;
++
++ ret = aicwf_usb_alloc_rx_urb(usb_dev);
++ if (ret) {
++ goto error;
++ }
++ ret = aicwf_usb_alloc_tx_urb(usb_dev);
++ if (ret) {
++ goto error;
++ }
++
++
++ usb_dev->msg_out_urb = usb_alloc_urb(0, GFP_ATOMIC);
++ if (!usb_dev->msg_out_urb) {
++ usb_err("usb_alloc_urb (msg out) failed\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ INIT_WORK(&usb_dev->rx_urb_work, aicwf_usb_rx_urb_work);
++
++ return ret;
++ error:
++ usb_err("failed!\n");
++ aicwf_usb_deinit(usb_dev);
++ return ret;
++}
++
++static int aicwf_parse_usb(struct aic_usb_dev *usb_dev, struct usb_interface *interface, int pid)
++{
++ struct usb_interface_descriptor *interface_desc;
++ struct usb_host_interface *host_interface;
++ struct usb_endpoint_descriptor *endpoint;
++ struct usb_device *usb = usb_dev->udev;
++ int i, endpoints;
++ u8 endpoint_num;
++ int ret = 0;
++
++ usb_dev->bulk_in_pipe = 0;
++ usb_dev->bulk_out_pipe = 0;
++#ifdef CONFIG_USB_MSG_EP
++ usb_dev->msg_out_pipe = 0;
++#endif
++
++
++ host_interface = &interface->altsetting[0];
++ interface_desc = &host_interface->desc;
++ endpoints = interface_desc->bNumEndpoints;
++
++ /* Check device configuration */
++ if (usb->descriptor.bNumConfigurations != 1) {
++ usb_err("Number of configurations: %d not supported\n",
++ usb->descriptor.bNumConfigurations);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ /* Check deviceclass */
++ if (usb->descriptor.bDeviceClass != 0x00 && usb->descriptor.bDeviceClass != 239) {
++ usb_err("DeviceClass %d not supported\n",
++ usb->descriptor.bDeviceClass);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ /* Check interface number */
++ if (usb->actconfig->desc.bNumInterfaces != 1 && usb->actconfig->desc.bNumInterfaces != 3) {
++ usb_err("Number of interfaces: %d not supported\n",
++ usb->actconfig->desc.bNumInterfaces);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ if ((interface_desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) ||
++ (interface_desc->bInterfaceSubClass != 0xff) ||
++ (interface_desc->bInterfaceProtocol != 0xff)) {
++ usb_err("non WLAN interface %d: 0x%x:0x%x:0x%x\n",
++ interface_desc->bInterfaceNumber, interface_desc->bInterfaceClass,
++ interface_desc->bInterfaceSubClass, interface_desc->bInterfaceProtocol);
++ ret = -ENODEV;
++ goto exit;
++ }
++
++ for (i = 0; i < endpoints; i++) {
++ endpoint = &host_interface->endpoint[i].desc;
++ endpoint_num = usb_endpoint_num(endpoint);
++
++ if (usb_endpoint_dir_in(endpoint) &&
++ usb_endpoint_xfer_bulk(endpoint)) {
++ if (!usb_dev->bulk_in_pipe) {
++ usb_dev->bulk_in_pipe = usb_rcvbulkpipe(usb, endpoint_num);
++ }
++ }
++
++ if (usb_endpoint_dir_out(endpoint) &&
++ usb_endpoint_xfer_bulk(endpoint)) {
++ if (!usb_dev->bulk_out_pipe)
++ {
++ usb_dev->bulk_out_pipe = usb_sndbulkpipe(usb, endpoint_num);
++ }
++#ifdef CONFIG_USB_MSG_EP
++ else if (!usb_dev->msg_out_pipe) {
++ usb_dev->msg_out_pipe = usb_sndbulkpipe(usb, endpoint_num);
++ }
++#endif
++ }
++ }
++
++ if (usb_dev->bulk_in_pipe == 0) {
++ usb_err("No RX (in) Bulk EP found\n");
++ ret = -ENODEV;
++ goto exit;
++ }
++ if (usb_dev->bulk_out_pipe == 0) {
++ usb_err("No TX (out) Bulk EP found\n");
++ ret = -ENODEV;
++ goto exit;
++ }
++
++#ifdef CONFIG_USB_MSG_EP
++ if ( usb_dev->msg_out_pipe != 0 &&
++ (usb_dev->chipid == PRODUCT_ID_AIC8801 ||
++ usb_dev->chipid == PRODUCT_ID_AIC8800D81)){
++ printk("TX Msg Bulk EP found\n");
++ usb_dev->use_msg_ep = 1;
++ }else{
++ usb_dev->use_msg_ep = 0;
++ }
++
++#if 0
++ if (usb_dev->msg_out_pipe == 0) {
++ usb_err("No TX Msg (out) Bulk EP found\n");
++ usb_dev->use_msg_ep = 0;
++ }else{
++ usb_dev->use_msg_ep = 1;
++ }
++#endif
++#endif
++
++
++ if (usb->speed == USB_SPEED_HIGH) {
++ printk("Aic high speed USB device detected\n");
++ } else {
++ printk("Aic full speed USB device detected\n");
++ }
++ exit:
++ return ret;
++}
++
++
++
++static struct aicwf_bus_ops aicwf_usb_bus_ops = {
++ .start = aicwf_usb_bus_start,
++ .stop = aicwf_usb_bus_stop,
++ .txdata = aicwf_usb_bus_txdata,
++ .txmsg = aicwf_usb_bus_txmsg,
++};
++
++#if 0
++u32 patch_tbl[][2] =
++{
++#if defined(CONFIG_RFTEST)
++ {JUMP_TABLE_OFFSET(28), 0x16b4c5}, // 161998
++ {JUMP_TABLE_OFFSET(130), 0x16b379}, // 161B30
++ {JUMP_TABLE_OFFSET(126), 0x16b155}, // 161B20
++ {JUMP_TABLE_OFFSET(132), 0x16b38d}, // 161b38
++ {JUMP_TABLE_OFFSET(34), 0x16b515}, // 1619b0
++#elif defined(CONFIG_PLATFORM_UBUNTU)
++ {JUMP_TABLE_OFFSET(28), 0x16b5a5}, // 161998
++ {JUMP_TABLE_OFFSET(130), 0x16b459}, // 161B30
++ {JUMP_TABLE_OFFSET(126), 0x16b235}, // 161B20
++ {JUMP_TABLE_OFFSET(132), 0x16b46d}, // 161b38
++ {JUMP_TABLE_OFFSET(34), 0x16b5f5}, // 1619b0
++#else
++ {JUMP_TABLE_OFFSET(28), 0x16b231}, // 161998
++ {JUMP_TABLE_OFFSET(130), 0x16b0e5}, // 161B30
++ {JUMP_TABLE_OFFSET(126), 0x16aec1}, // 161B20
++ {JUMP_TABLE_OFFSET(132), 0x16b0f9}, // 161b38
++ {JUMP_TABLE_OFFSET(34), 0x16b281}, // 1619b0
++#endif
++};
++#endif
++
++#if 0
++u32 patch_tbl_rf[][2] =
++{
++ {JUMP_TABLE_OFFSET(28), 0x16b4c5}, // 161998
++ {JUMP_TABLE_OFFSET(130), 0x16b379}, // 161B30
++ {JUMP_TABLE_OFFSET(126), 0x16b155}, // 161B20
++ {JUMP_TABLE_OFFSET(132), 0x16b38d}, // 161b38
++ {JUMP_TABLE_OFFSET(34), 0x16b515}, // 1619b0
++};
++#endif
++#if 0
++u32 patch_tbl[18][2] =
++{
++ {0x0044, 0x00000002}, //hosttype
++ {0x0048, 0x00000060},
++#ifdef CONFIG_USB_BT
++ {0x004c, 0x00000040},
++#else
++ {0x004c, 0x00000046},
++#endif
++ {0x0050, 0x00000000}, //ipc base
++ {0x0054, 0x001a0000}, //buf base
++ {0x0058, 0x001a0140}, //desc base
++ {0x005c, 0x00000ee0}, //desc size
++ {0x0060, 0x001a1020}, //pkt base
++#ifdef CONFIG_USB_BT
++ {0x0064, 0x0001EFE0}, //pkt size
++#else
++ {0x0064, 0x000207e0}, //pkt size
++#endif
++ {0x0068, 0x00000008},
++ {0x006c, 0x00000040},
++ {0x0070, 0x00000040},
++ {0x0074, 0x00000020},
++ {0x0078, 0x00000000},
++ {0x007c, 0x00000040},
++ {0x0080, 0x00190000},
++ {0x0084, 0x0000fc00},//63kB
++ {0x0088, 0x0019fc00}
++};
++#endif
++
++u32 adaptivity_patch_tbl[][2] = {
++ {0x0004, 0x0000320A}, //linkloss_thd
++ {0x0094, 0x00000000}, //ac_param_conf
++ {0x00F8, 0x00010138}, //tx_adaptivity_en
++};
++
++u32 patch_tbl[][2] ={
++{0x0044, 0x00000002}, //hosttype
++{0x0048, 0x00000060},
++#if 1//def CONFIG_USB_BT
++#ifdef CONFIG_USB_MSG_EP
++{0x004c, 0x00040050},
++#else
++{0x004c, 0x00000052},
++#endif
++#else
++{0x004c, 0x00000046},
++#endif
++{0x0050, 0x00000000}, //ipc base
++{0x0054, 0x00190000}, //buf base
++{0x0058, 0x00190140}, //desc base
++{0x005c, 0x00000ee0}, //desc size
++{0x0060, 0x00191020}, //pkt base
++#if 1//def CONFIG_USB_BT
++{0x0064, 0x0002EFE0}, //pkt size
++#else
++{0x0064, 0x000207e0}, //pkt size
++#endif
++{0x0068, 0x00000008}, //tx BK A-MPDU 8
++{0x006c, 0x00000040}, //tx BE A-MPDU 40
++{0x0070, 0x00000040}, //tx VI A-MPDU 40
++{0x0074, 0x00000020}, //tx VO A-MPDU 20
++{0x0078, 0x00000000}, //bcn queue
++{0x007c, 0x00000020}, //rx A-MPDU 20
++{0x0080, 0x001d0000},
++{0x0084, 0x0000fc00},//63kB
++{0x0088, 0x001dfc00},
++{0x00A8, 0x8D080103},//dm 8D08
++{0x00D0, 0x00010103},//aon sram
++#ifdef CONFIG_USB_MSG_EP
++{0x00D4, 0x0404087C},
++#else
++{0x00D4, 0x0000087C},
++#endif
++{0x00D8, 0x001C0000},//bt base
++{0x00DC, 0x00008000},//bt size
++{0x00E0, 0x04020A08},
++{0x00E4, 0x00000001},
++#ifdef CONFIG_USB_MSG_EP
++{0x00FC, 0x00000302},//rx msg fc pkt cnt
++#endif
++{0x0100, 0x0000000F}, //usb_reboot_additional_delay
++#if !defined(CONFIG_LINK_DET_5G)
++{0x0104, 0x00000000}, //link_det_5g
++#endif
++};
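++/*
++ * patch_config() below consumes patch_tbl (plus adaptivity_patch_tbl when
++ * adap_test is set) as {offset, value} pairs: the offset is added to the
++ * config base read back from RAM_FW_ADDR + 0x180 and the pairs are written
++ * sequentially to firmware RAM starting at 0x1e6000, with the table address
++ * and its length in 32-bit words stored at 0x1e5318 / 0x1e531c.
++ */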
++
++
++#if 0
++u32 patch_tbl[][2] =
++{
++#ifdef CONFIG_PLATFORM_UBUNTU
++ {JUMP_TABLE_OFFSET(28), 0x16b5a5}, // 161998
++ {JUMP_TABLE_OFFSET(130), 0x16b459}, // 161B30
++ {JUMP_TABLE_OFFSET(126), 0x16b235}, // 161B20
++ {JUMP_TABLE_OFFSET(132), 0x16b46d}, // 161b38
++ {JUMP_TABLE_OFFSET(34), 0x16b5f5}, // 1619b0
++#else
++ {JUMP_TABLE_OFFSET(28), 0x16b231}, // 161998
++ {JUMP_TABLE_OFFSET(130), 0x16b0e5}, // 161B30
++ {JUMP_TABLE_OFFSET(126), 0x16aec1}, // 161B20
++ {JUMP_TABLE_OFFSET(132), 0x16b0f9}, // 161b38
++ {JUMP_TABLE_OFFSET(34), 0x16b281}, // 1619b0
++#endif
++};
++#endif
++
++u32 syscfg_tbl_pmic_u02[][2] = {
++ {0x40040000, 0x00001AC8}, // 1) fix panic
++ {0x40040084, 0x00011580},
++ {0x40040080, 0x00000001},
++ {0x40100058, 0x00000000},
++};
++
++u32 syscfg_tbl_u04[][2] = {
++ {0x40040000, 0x0000042C}, // protect usb replenish rxq / flush rxq, skip flush rxq before start_app
++ {0x40040004, 0x0000DD44},
++ {0x40040008, 0x00000448},
++ {0x4004000C, 0x0000044C},
++ {0x0019B800, 0xB9F0F19B},
++ {0x0019B804, 0x0019B81D},
++ {0x0019B808, 0xBF00FA79},
++ {0x0019B80C, 0xF007BF00},
++ {0x0019B810, 0x4605B672}, // code
++ {0x0019B814, 0x21E0F04F},
++ {0x0019B818, 0xBE0BF664},
++ {0x0019B81C, 0xF665B510},
++ {0x0019B820, 0x4804FC9D},
++ {0x0019B824, 0xFA9EF66C},
++ {0x0019B828, 0xFCA8F665},
++ {0x0019B82C, 0x4010E8BD},
++ {0x0019B830, 0xBAC6F66C},
++ {0x0019B834, 0x0019A0C4},
++ {0x40040084, 0x0019B800}, // out base
++ {0x40040080, 0x0000000F},
++ {0x40100058, 0x00000000},
++};
++
++u32 syscfg_tbl[][2] = {
++ {0x40500014, 0x00000101}, // 1)
++ {0x40500018, 0x0000010d}, // 2)//bt only:10d ,bt combo and bt only sw:109
++ {0x40500004, 0x00000010}, // 3) the order should not be changed
++ #if 1//CONFIG_PMIC_SETTING
++ {0x50000000, 0x03220204}, // 2) pmic interface init
++ {0x50019150, 0x00000002},
++ {0x50017008, 0x00000000}, // 4) stop wdg
++ #endif /* CONFIG_PMIC_SETTING */
++};
++
++u32 sys_reboot_tbl[][2] = {
++ {0x50017000, 0x0001ffff},
++ {0x50017008, 0x00000002},
++};
++
++#if 0
++u32 bt_config_tbl[][2] =
++{
++ {0x0016f000, 0xe0250793},
++ {0x40080000, 0x0008bd64},
++ #if 0
++ {0x0016f004, 0xe0052b00},
++ {0x40080004, 0x000b5570},
++ {0x0016f008, 0xe7b12b00},
++ {0x40080008, 0x000b5748},
++ {0x0016f00c, 0x001ad00e},
++ {0x4008000c, 0x000ac8f8},
++ #endif
++ {0x40080084, 0x0016f000}, // out
++ {0x40080080, 0x00000001}, // en
++ {0x40100058, 0x00000000}, // bypass
++};
++#endif
++
++u32 rf_tbl_masked[][3] = {
++ {0x40344058, 0x00800000, 0x00000000},// pll trx
++};
++
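++/*
++ * system_config_8800() reads the chip revision from 0x40500000 and the sub
++ * revision from address 0x00000004, applies syscfg_tbl_pmic_u02 on U02 parts
++ * and syscfg_tbl_u04 on U03 parts with sub revision U04, then writes
++ * syscfg_tbl unconditionally.
++ */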
++static int system_config_8800(struct aic_usb_dev *usb_dev){
++ int syscfg_num;
++ int ret, cnt;
++ const u32 mem_addr = 0x40500000;
++ struct dbg_mem_read_cfm rd_mem_addr_cfm;
++ ret = rwnx_send_dbg_mem_read_req(usb_dev, mem_addr, &rd_mem_addr_cfm);
++ if (ret) {
++ printk("%x rd fail: %d\n", mem_addr, ret);
++ return ret;
++ }
++ chip_id = (u8)(rd_mem_addr_cfm.memdata >> 16);
++ //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ ret = rwnx_send_dbg_mem_read_req(usb_dev, 0x00000004, &rd_mem_addr_cfm);
++ if (ret) {
++ printk("[0x00000004] rd fail: %d\n", ret);
++ return ret;
++ }
++ chip_sub_id = (u8)(rd_mem_addr_cfm.memdata >> 4);
++ //printk("%x=%x\n", rd_mem_addr_cfm.memaddr, rd_mem_addr_cfm.memdata);
++ printk("chip_id=%x, chip_sub_id=%x\n", chip_id, chip_sub_id);
++ if (chip_id == CHIP_REV_U02) {
++ syscfg_num = sizeof(syscfg_tbl_pmic_u02) / sizeof(u32) / 2;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl_pmic_u02[cnt][0], syscfg_tbl_pmic_u02[cnt][1]);
++ if (ret) {
++ printk("%x write fail: %d\n", syscfg_tbl_pmic_u02[cnt][0], ret);
++ return ret;
++ }
++ }
++ }
++ if ((chip_id == CHIP_REV_U03) && (chip_sub_id == CHIP_SUB_REV_U04)) {
++ syscfg_num = sizeof(syscfg_tbl_u04) / sizeof(u32) / 2;
++ printk("cfg u04\n");
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl_u04[cnt][0], syscfg_tbl_u04[cnt][1]);
++ if (ret) {
++ printk("%x write fail: %d\n", syscfg_tbl_u04[cnt][0], ret);
++ return ret;
++ }
++ }
++ }
++ syscfg_num = sizeof(syscfg_tbl) / sizeof(u32) / 2;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, syscfg_tbl[cnt][0], syscfg_tbl[cnt][1]);
++ if (ret) {
++ printk("%x write fail: %d\n", syscfg_tbl[cnt][0], ret);
++ return ret;
++ }
++ }
++ return 0;
++
++}
++
++static int system_config(struct aic_usb_dev *usb_dev)
++{
++ if(usb_dev->chipid == PRODUCT_ID_AIC8800){
++ return system_config_8800(usb_dev);
++ }else if(usb_dev->chipid == PRODUCT_ID_AIC8800D80){
++ return system_config_8800d80(usb_dev);
++ }else{
++ return -1;
++ }
++}
++
++
++static int system_reboot(struct aic_usb_dev *usb_dev)
++{
++ int syscfg_num;
++ int ret, cnt;
++
++ syscfg_num = sizeof(sys_reboot_tbl) / sizeof(u32) / 2;
++ for (cnt = 0; cnt < syscfg_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, sys_reboot_tbl[cnt][0], sys_reboot_tbl[cnt][1]);
++ if (ret) {
++ printk("%x write fail: %d\n", sys_reboot_tbl[cnt][0], ret);
++ return ret;
++ }
++ }
++ return 0;
++}
++
++static int rf_config(struct aic_usb_dev *usb_dev)
++{
++ int ret;
++ ret = rwnx_send_dbg_mem_mask_write_req(usb_dev,
++ rf_tbl_masked[0][0], rf_tbl_masked[0][1], rf_tbl_masked[0][2]);
++ if (ret) {
++ printk("rf config %x write fail: %d\n", rf_tbl_masked[0][0], ret);
++ }
++
++ return ret;
++}
++
++static int patch_config(struct aic_usb_dev *usb_dev)
++{
++
++ int ret = 0;
++ int cnt = 0;
++ int patch_num = 0;
++ uint32_t start_addr = 0x1e6000;
++ u32 patch_addr = start_addr;
++ u32 config_base;
++ struct dbg_mem_read_cfm rd_patch_addr_cfm;
++ const u32 rd_patch_addr = RAM_FW_ADDR + 0x0180;
++ int tmp_cnt = 0;
++ int adap_patch_num = 0;
++
++ printk("%s enter \r\n", __func__);
++
++ //if (testmode) {
++ patch_num = sizeof(patch_tbl)/4;
++
++ printk("Read FW mem: %08x\n", rd_patch_addr);
++ if ((ret = rwnx_send_dbg_mem_read_req(usb_dev, rd_patch_addr, &rd_patch_addr_cfm))) {
++ printk("patch rd fail\n");
++ }
++
++ printk("%x=%x\n", rd_patch_addr_cfm.memaddr, rd_patch_addr_cfm.memdata);
++ config_base = rd_patch_addr_cfm.memdata;
++
++ //if (testmode == FW_NORMAL_MODE) {
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e5318, patch_addr))) {
++ printk("%x write fail\n", 0x1e5318);
++ }
++
++ if(adap_test){
++ printk("%s for adaptivity test \r\n", __func__);
++ adap_patch_num = sizeof(adaptivity_patch_tbl)/4;
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e531c, patch_num + adap_patch_num))) {
++ printk("%x write fail\n", 0x1e531c);
++ }
++ }else{
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e531c, patch_num))) {
++ printk("%x write fail\n", 0x1e531c);
++ }
++ }
++ //}else if(testmode == FW_TEST_MODE){//for old rf fw
++ // if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e4d78, patch_addr))) {
++ // printk("%x write fail\n", 0x1e4d80);
++ // }
++ // if((ret = rwnx_send_dbg_mem_write_req(usb_dev, 0x1e4d7C, patch_num))) {
++ // printk("%x write fail\n", 0x1e4d84);
++ // }
++ //}
++
++ for(cnt = 0; cnt < patch_num/2; cnt+=1)
++ {
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt, patch_tbl[cnt][0]+config_base))) {
++ printk("%x write fail\n", start_addr+8*cnt);
++ }
++
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*cnt+4, patch_tbl[cnt][1]))) {
++ printk("%x write fail\n", start_addr+8*cnt+4);
++ }
++
++ }
++
++ tmp_cnt = cnt;
++
++ if(adap_test){
++ for(cnt = 0; cnt < adap_patch_num/2; cnt+=1)
++ {
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*(cnt+tmp_cnt), adaptivity_patch_tbl[cnt][0]+config_base))) {
++ printk("%x write fail\n", start_addr+8*cnt);
++ }
++
++ if((ret = rwnx_send_dbg_mem_write_req(usb_dev, start_addr+8*(cnt+tmp_cnt)+4, adaptivity_patch_tbl[cnt][1]))) {
++ printk("%x write fail\n", start_addr+8*cnt+4);
++ }
++ }
++ }
++
++ return ret;
++#if 0
++ } else {
++ patch_num = sizeof(patch_tbl) / sizeof(u32) / 2;
++
++ for(cnt = 0; cnt < patch_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, patch_tbl[cnt][0], patch_tbl[cnt][1]);
++ if(ret) {
++ printk("%x write fail: %d\n", patch_tbl[cnt][0], ret);
++ break;
++ }
++ }
++ return ret;
++ }
++#endif
++}
++
++#if 0
++static int bt_config(struct aic_usb_dev *usb_dev)
++{
++ int trap_num = sizeof(bt_config_tbl) / sizeof(u32) / 2;
++ int ret, cnt;
++
++ printk("%s enter \r\n", __func__);
++ for(cnt = 0; cnt < trap_num; cnt++) {
++ ret = rwnx_send_dbg_mem_write_req(usb_dev, bt_config_tbl[cnt][0], bt_config_tbl[cnt][1]);
++ if(ret) {
++ printk("%x write fail: %d\n", bt_config_tbl[cnt][0], ret);
++ break;
++ }
++ }
++ return ret;
++}
++#endif
++
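++/*
++ * The 'paringid' string (provided elsewhere in the driver) is expected to be
++ * a comma separated list of 4-character ids, e.g. "1234,5678"; each token is
++ * converted with rwnx_atoli() and later written to the BLE scan wakeup
++ * firmware at 0x15FF08 + 4*i.
++ */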
++static int get_paring_ids(char* c_paringids, int* i_paringids){
++ int i = 0;
++ int len = strlen(c_paringids);
++ int paring_id_num = 0;
++ char temp_paring_id[5];
++
++//get paring_id_num
++ for(i = 4; i < len; i += 5){
++ if(c_paringids[i] == ','){
++ paring_id_num++;
++ }else if(c_paringids[i] == 0x00){
++ break;
++ }else{
++ paring_id_num = 0;
++ break;
++ }
++ }
++
++ if(paring_id_num == 0 && len == 0){
++ printk("%s paring_id_num:%d \r\n", __func__, paring_id_num);
++ return 0;
++ }else{
++ paring_id_num++;
++ }
++
++//parsing paring_id
++ for(i = 0; i < paring_id_num; i++){
++ memset(temp_paring_id, 0, 5);
++ temp_paring_id[0] = c_paringids[(i * 5)];
++ temp_paring_id[1] = c_paringids[(i * 5) + 1];
++ temp_paring_id[2] = c_paringids[(i * 5) + 2];
++ temp_paring_id[3] = c_paringids[(i * 5) + 3];
++ i_paringids[i] = rwnx_atoli(temp_paring_id);
++ }
++
++ return paring_id_num;
++}
++
++static int aicloadfw_chipmatch(struct aic_usb_dev *usb_dev, u16 vid, u16 pid){
++
++ if(pid == USB_DEVICE_ID_AIC){
++ usb_dev->chipid = PRODUCT_ID_AIC8800;
++ AICWFDBG(LOGINFO, "%s USE AIC8800\r\n", __func__);
++ return 0;
++ }else if(pid == USB_DEVICE_ID_AIC_8801){
++ usb_dev->chipid = PRODUCT_ID_AIC8801;
++ AICWFDBG(LOGINFO, "%s USE AIC8801\r\n", __func__);
++ return 0;
++ }else if(pid == USB_DEVICE_ID_AIC_8800D80){
++ usb_dev->chipid = PRODUCT_ID_AIC8800D80;
++ AICWFDBG(LOGINFO, "%s USE AIC8800D80\r\n", __func__);
++ return 0;
++ }else if(pid == USB_DEVICE_ID_AIC_8800D81){
++ usb_dev->chipid = PRODUCT_ID_AIC8800D81;
++ AICWFDBG(LOGINFO, "%s USE AIC8800D81\r\n", __func__);
++ return 0;
++ }else{
++ return -1;
++ }
++}
++
++int aicfw_download_fw_8800(struct aic_usb_dev *usb_dev){
++ //uint32_t paring_id = 0;
++ uint32_t* paring_ids;
++ int paring_id_num = 0;
++ int i = 0;
++ const u32 fw_addr = RAM_FW_ADDR;
++
++#ifdef CONFIG_M2D_OTA_AUTO_SUPPORT
++ if(testmode == FW_M2D_OTA_MODE){
++ rwnx_plat_m2d_flash_ota_android(usb_dev,FW_M2D_OTA_NAME);
++ }else if(testmode == FW_NORMAL_MODE) {
++ rwnx_plat_m2d_flash_ota_check(usb_dev,FW_M2D_OTA_NAME);
++ }
++#endif
++ if(testmode == FW_TEST_MODE){
++ if (rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_ADDR, FW_RF_BASE_NAME)) {
++ return -1;
++ }
++
++ if (chip_id == CHIP_REV_U03) {
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME_U03)) {
++ return -1;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_U03, FW_PATCH_BASE_NAME_U03)) {
++ return -1;
++ }
++ } else {
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME)) {
++ return -1;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR, FW_PATCH_BASE_NAME)) {
++ return -1;
++ }
++ }
++
++#if 0
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_RF_ADID_BASE_NAME)) {
++ goto out_free_bus;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR, FW_RF_PATCH_BASE_NAME)) {
++ goto out_free_bus;
++ }
++#endif
++ } else if(testmode == FW_BLE_SCAN_WAKEUP_MODE){
++#if 0
++ paring_id = rwnx_atoli(paringid);
++ rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, FW_BLE_SCAN_WAKEUP_NAME);
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF00, 0x53454C42);//magic_num
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF04, ble_scan_wakeup_reboot_time);//reboot time
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF08, paring_id);
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF0c, paring_id);
++ rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, HOST_START_APP_AUTO);
++#endif
++#if 1
++ paring_ids = (uint32_t*)kmalloc(sizeof(uint32_t) * 8, GFP_KERNEL);
++ rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, FW_BLE_SCAN_WAKEUP_NAME);
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF00, 0x53454C42);//magic_num
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF04, ble_scan_wakeup_reboot_time);//reboot time
++ paring_id_num = get_paring_ids(paringid, paring_ids);
++ for(i = 0; i < paring_id_num; i++){
++ printk("paring_ids[%d]:0x%X \r\n", i, paring_ids[i]);
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF08 + (4 * i), paring_ids[i]);
++ }
++ rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, HOST_START_APP_AUTO);
++ kfree(paring_ids);
++#endif
++ return -1;
++
++ } else if(testmode == FW_BLE_SCAN_AD_FILTER_MODE){
++
++/*
++ How data and ad_data_filter_mask work together: each bit of the 32-bit mask
++ (MSB first) selects the corresponding byte of the advertisement data; bytes
++ whose mask bit is 0 are treated as don't-care.
++ ex.
++ data[18] = {0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42};
++ mask = 1100 0000 0111 1111 1100 0000 0000 0000 = 0xc07fc000
++
++ data & mask = "0x46 0x00", don't-care for bytes 2-8, then
++ "0x30 0xff 0xff 0xff 0x43 0x52 0x45 0x4c 0x42" (bytes 9-17)
++
++ The firmware uses this data & mask match condition to wake the host_wake_bt gpio.
++*/
++ const uint8_t data[18] = {0x46,0x00,0x00,0xff,0xff,0xff,0xff,0xff,0xff,0x30,0xff,0xff,0xff,0x43,0x52,0x45,0x4c,0x42};
++ struct ad_data_filter* ad_data = (struct ad_data_filter*)kmalloc(sizeof(struct ad_data_filter), GFP_KERNEL);
++ uint32_t *write_blocks = (uint32_t *)ad_data;
++
++ memset(ad_data, 0, sizeof(struct ad_data_filter));
++ rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, FW_BLE_SCAN_AD_FILTER_NAME);
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF00, 0x53454C42);//magic_num
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF04, ble_scan_wakeup_reboot_time);//reboot time
++ ad_data->ad_len = 0x13;
++ ad_data->ad_type = 0xff;
++ memcpy(ad_data->ad_data, data,ad_data->ad_len-1);// 1100 0000 0111 1111 1100 0000 0000 0000 //0xc07fc000
++ ad_data_filter_mask = 0xc07fc000;
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF08, ad_data_filter_mask);//ad data filter mask
++ for(i = 0; i < 9; i++){
++ printk("write_blocks[%d]:0x%08X \r\n", i, write_blocks[i]);
++ rwnx_send_dbg_mem_write_req(usb_dev, 0x15FF0c + (4 * i), write_blocks[i]);
++ }
++ rwnx_send_dbg_start_app_req(usb_dev, RAM_FW_BLE_SCAN_WAKEUP_ADDR, HOST_START_APP_AUTO);
++ kfree(ad_data);
++ return -1;
++ } else {
++ if (rwnx_plat_bin_fw_upload_android(usb_dev, RAM_FW_ADDR, FW_BASE_NAME)) {
++ return -1;
++ }
++ if (chip_id == CHIP_REV_U03) {
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME_U03)) {
++ return -1;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR_U03, FW_PATCH_BASE_NAME_U03)) {
++ return -1;
++ }
++ } else {
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_ADID_BASE_ADDR, FW_ADID_BASE_NAME)) {
++ return -1;
++ }
++
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_RAM_PATCH_BASE_ADDR, FW_PATCH_BASE_NAME)) {
++ return -1;
++ }
++ }
++ }
++ if (chip_id == CHIP_REV_U03) {
++ if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME_U03)) {
++ return -1;
++ }
++ } else {
++ if (rwnx_plat_bin_fw_patch_table_upload_android(usb_dev, FW_PATCH_TABLE_NAME)) {
++ return -1;
++ }
++ }
++
++#if 0
++ if(testmode == FW_TEST_MODE){
++ if(rwnx_plat_bin_fw_upload_android(usb_dev, FW_PATCH_TEST_BASE_ADDR, FW_PATCH_TEST_BASE_NAME)) {
++ goto out_free_bus;
++ }
++ }
++#endif
++
++ if (rwnx_plat_userconfig_upload_android(usb_dev, FW_USERCONFIG_NAME)){
++ return -1;
++ }
++
++ if (patch_config(usb_dev)) {
++ return -1;
++ }
++
++ if (rf_config(usb_dev)){
++ return -1;
++ }
++ if (rwnx_send_dbg_start_app_req(usb_dev, fw_addr, HOST_START_APP_AUTO)) {
++ return -1;
++ }
++
++ return 0;
++}
++
++
++int aicfw_download_fw(struct aic_usb_dev *usb_dev)
++{
++ if(usb_dev->chipid == PRODUCT_ID_AIC8800){
++ return aicfw_download_fw_8800(usb_dev);
++ }else if(usb_dev->chipid == PRODUCT_ID_AIC8800D80){
++ return aicfw_download_fw_8800d80(usb_dev);
++ }else{
++ return -1;
++ }
++}
++
++
++static int aicwf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
++{
++ int ret = 0;
++ struct usb_device *usb = interface_to_usbdev(intf);
++ struct aicwf_bus *bus_if ;
++ struct device *dev = NULL;
++ struct aicwf_rx_priv* rx_priv = NULL;
++ struct aic_usb_dev *usb_dev = NULL;
++
++
++ AICWFDBG(LOGINFO, "%s vid:0x%X pid:0x%X icl:0x%X isc:0x%X ipr:0x%X \r\n", __func__,
++ id->idVendor,
++ id->idProduct,
++ id->bInterfaceClass,
++ id->bInterfaceSubClass,
++ id->bInterfaceProtocol);
++
++ if(fw_loaded == 1 &&
++ (id->idProduct == USB_DEVICE_ID_AIC_8801 ||
++ id->idProduct == USB_DEVICE_ID_AIC_8800D81)){
++ return -1;
++ }
++
++ usb_dev = kzalloc(sizeof(struct aic_usb_dev), GFP_ATOMIC);
++ if (!usb_dev) {
++ return -ENOMEM;
++ }
++
++ usb_dev->udev = usb;
++ usb_dev->dev = &usb->dev;
++ ret = aicloadfw_chipmatch(usb_dev, id->idVendor, id->idProduct);
++ if (ret) {
++ AICWFDBG(LOGERROR, "%s unsupported chip.\r\n", __func__);
++ goto out_free;
++ }
++
++ usb_set_intfdata(intf, usb_dev);
++
++ ret = aicwf_parse_usb(usb_dev, intf, id->idProduct);
++ if (ret) {
++ usb_err("aicwf_parse_usb err %d\n", ret);
++ goto out_free;
++ }
++
++ ret = aicwf_usb_init(usb_dev);
++ if (ret) {
++ usb_err("aicwf_usb_init err %d\n", ret);
++ goto out_free;
++ }
++
++ bus_if = kzalloc(sizeof(struct aicwf_bus), GFP_ATOMIC);
++ if (!bus_if) {
++ ret = -ENOMEM;
++ goto out_free_usb;
++ }
++
++ dev = usb_dev->dev;
++ bus_if->dev = dev;
++ usb_dev->bus_if = bus_if;
++ bus_if->bus_priv.usb = usb_dev;
++ dev_set_drvdata(dev, bus_if);
++
++ bus_if->ops = &aicwf_usb_bus_ops;
++
++ rx_priv = aicwf_rx_init(usb_dev);
++ if(!rx_priv) {
++ txrx_err("rx init failed\n");
++ ret = -1;
++ goto out_free_bus;
++ }
++ usb_dev->rx_priv = rx_priv;
++
++ ret = aicwf_bus_init(0, dev);
++ if (ret < 0) {
++ usb_err("aicwf_bus_init err %d\n", ret);
++ goto out_free_bus;
++ }
++
++ ret = aicwf_bus_start(bus_if);
++ if (ret < 0) {
++ usb_err("aicwf_bus_start err %d\n", ret);
++ goto out_free_bus;
++ }
++
++ aic_bt_platform_init(usb_dev);
++
++ if (usb->speed != USB_SPEED_HIGH) {
++ printk("Aic full speed USB device detected\n");
++ system_reboot(usb_dev);
++ goto out_free_bus;
++ }
++
++ if(fw_loaded == 0 &&
++ (usb_dev->chipid == PRODUCT_ID_AIC8801 ||
++ usb_dev->chipid == PRODUCT_ID_AIC8800D81)){
++ rwnx_send_reboot(usb_dev);
++ goto out_free_bus;
++ }
++
++ if (system_config(usb_dev)) {
++ goto out_free_bus;
++ }
++
++ if (aicfw_download_fw(usb_dev)){
++ goto out_free_bus;
++ }
++
++ usb_dev->app_cmp = true;
++ fw_loaded = 1;
++
++ return 0;
++
++out_free_bus:
++ aicwf_bus_deinit(dev);
++ kfree(bus_if);
++out_free_usb:
++ aicwf_usb_deinit(usb_dev);
++out_free:
++ usb_err("failed with errno %d\n", ret);
++ kfree(usb_dev);
++ usb_set_intfdata(intf, NULL);
++ return ret;
++}
++
++static void aicwf_usb_disconnect(struct usb_interface *intf)
++{
++ struct aic_usb_dev *usb_dev =
++ (struct aic_usb_dev *) usb_get_intfdata(intf);
++
++ printk("%s enter \r\n", __func__);
++
++ if (!usb_dev)
++ return;
++
++ aicwf_bus_deinit(usb_dev->dev);
++ aicwf_usb_deinit(usb_dev);
++ if (usb_dev->rx_priv)
++ aicwf_rx_deinit(usb_dev->rx_priv);
++ kfree(usb_dev->bus_if);
++ kfree(usb_dev);
++}
++#if 0
++static int aicwf_usb_suspend(struct usb_interface *intf, pm_message_t state)
++{
++ struct aic_usb_dev *usb_dev =
++ (struct aic_usb_dev *) usb_get_intfdata(intf);
++
++ aicwf_usb_state_change(usb_dev, USB_SLEEP_ST);
++ aicwf_bus_stop(usb_dev->bus_if);
++ return 0;
++}
++
++static int aicwf_usb_resume(struct usb_interface *intf)
++{
++ struct aic_usb_dev *usb_dev =
++ (struct aic_usb_dev *) usb_get_intfdata(intf);
++
++ if (usb_dev->state == USB_UP_ST)
++ return 0;
++
++ aicwf_bus_start(usb_dev->bus_if);
++ return 0;
++}
++
++static int aicwf_usb_reset_resume(struct usb_interface *intf)
++{
++ return aicwf_usb_resume(intf);
++}
++#endif
++static struct usb_device_id aicwf_usb_id_table[] = {
++ {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC)},
++ {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8801)},
++ {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8800D80)},
++ {USB_DEVICE(USB_VENDOR_ID_AIC, USB_DEVICE_ID_AIC_8800D81)},
++ {}
++};
++
++MODULE_DEVICE_TABLE(usb, aicwf_usb_id_table);
++
++static struct usb_driver aicwf_usbdrvr = {
++ .name = KBUILD_MODNAME,
++ .probe = aicwf_usb_probe,
++ .disconnect = aicwf_usb_disconnect,
++ .id_table = aicwf_usb_id_table,
++ //.suspend = aicwf_usb_suspend,
++ //.resume = aicwf_usb_resume,
++ //.reset_resume = aicwf_usb_reset_resume,
++ .supports_autosuspend = 0,
++ .disable_hub_initiated_lpm = 1,
++};
++
++void aicwf_usb_register(void)
++{
++ if (usb_register(&aicwf_usbdrvr) < 0) {
++ usb_err("usb_register failed\n");
++ }
++}
++
++void aicwf_usb_exit(void)
++{
++ usb_deregister(&aicwf_usbdrvr);
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/aicwf_usb.h
+@@ -0,0 +1,187 @@
++/**
++ * aicwf_usb.h
++ *
++ * USB function declarations
++ *
++ * Copyright (C) AICSemi 2018-2020
++ */
++
++#ifndef _AICWF_USB_H_
++#define _AICWF_USB_H_
++
++#include <linux/usb.h>
++#include <linux/skbuff.h>
++#include <linux/if_ether.h>
++#include <linux/ieee80211.h>
++#include <linux/semaphore.h>
++
++#include "aicbluetooth_cmds.h"
++
++#ifdef AICWF_USB_SUPPORT
++
++/* USB Device ID */
++#define USB_VENDOR_ID_AIC 0xA69C
++#define USB_DEVICE_ID_AIC 0x8800
++#define USB_DEVICE_ID_AIC_8801 0x8801
++
++#define CHIP_REV_U01 0x1
++#define CHIP_REV_U02 0x3
++#define CHIP_REV_U03 0x7
++#define CHIP_SUB_REV_U04 0x20
++
++enum AICWF_IC{
++ PRODUCT_ID_AIC8800 = 0,
++ PRODUCT_ID_AIC8801,
++ PRODUCT_ID_AIC8800DC,
++ PRODUCT_ID_AIC8800DW,
++ PRODUCT_ID_AIC8800D80,
++ PRODUCT_ID_AIC8800D81,
++};
++
++
++#define AICWF_USB_RX_URBS (20)
++#define AICWF_USB_TX_URBS (100)
++#define AICWF_USB_TX_LOW_WATER (AICWF_USB_TX_URBS/4)
++#define AICWF_USB_TX_HIGH_WATER (AICWF_USB_TX_LOW_WATER*3)
++#define AICWF_USB_MAX_PKT_SIZE (2048)
++
++#ifdef CONFIG_RFTEST
++#define FW_RF_PATCH_BASE_NAME "fw_patch.bin"
++#define FW_RF_ADID_BASE_NAME "fw_adid.bin"
++#define FW_RF_BASE_NAME "fmacfw_rf.bin"
++#define FW_PATCH_BASE_NAME "fw_patch.bin"
++#define FW_ADID_BASE_NAME "fw_adid.bin"
++#define FW_BASE_NAME "fmacfw.bin"
++#define FW_BLE_SCAN_WAKEUP_NAME "fw_ble_scan.bin"
++#define FW_BLE_SCAN_AD_FILTER_NAME "fw_ble_scan_ad_filter.bin"
++
++#define FW_PATCH_BASE_NAME_PC "fw_patch.bin"
++#define FW_ADID_BASE_NAME_PC "fw_adid.bin"
++#define FW_BASE_NAME_PC "fmacfw.bin"
++#define FW_RF_PATCH_BASE_NAME_PC "fw_patch.bin"
++#define FW_RF_ADID_BASE_NAME_PC "fw_adid.bin"
++#define FW_RF_BASE_NAME_PC "fmacfw_rf.bin"
++#define FW_PATCH_TEST_BASE_NAME "fw_patch_test.bin"
++#define FW_PATCH_BASE_NAME_U03 "fw_patch_u03.bin"
++#define FW_ADID_BASE_NAME_U03 "fw_adid_u03.bin"
++
++#else
++#define FW_PATCH_BASE_NAME "fw_patch.bin"
++#define FW_ADID_BASE_NAME "fw_adid.bin"
++#define FW_BASE_NAME "fmacfw.bin"
++#endif
++
++#define FW_PATCH_TABLE_NAME "fw_patch_table.bin"
++#define FW_PATCH_TABLE_NAME_U03 "fw_patch_table_u03.bin"
++#define FW_USERCONFIG_NAME "aic_userconfig.txt"
++#define FW_M2D_OTA_NAME "m2d_ota.bin"
++
++#define RAM_FW_BLE_SCAN_WAKEUP_ADDR 0x00100000
++#define RAM_FW_ADDR 0x00110000
++#define FW_RAM_ADID_BASE_ADDR 0x00161928
++#define FW_RAM_PATCH_BASE_ADDR 0x00100000
++#define FW_RAM_PATCH_BASE_ADDR_U03 0x00100000
++#define FW_PATCH_TEST_BASE_ADDR 0x00100000
++
++enum {
++ FW_NORMAL_MODE,
++ FW_TEST_MODE,
++ FW_BLE_SCAN_WAKEUP_MODE,
++ FW_M2D_OTA_MODE,
++ FW_DPDCALIB_MODE,
++ FW_BLE_SCAN_AD_FILTER_MODE,
++};
++
++
++#if 0
++#define FW_NAME2 FW_BASE_NAME".bin"
++#define FW_PATCH_BIN_NAME FW_PATCH_BASE_NAME".bin"
++#define FW_ADID_BIN_NAME FW_ADID_BASE_NAME".bin"
++#endif
++
++typedef enum {
++ USB_TYPE_DATA = 0X00,
++ USB_TYPE_CFG = 0X10,
++ USB_TYPE_CFG_CMD_RSP = 0X11,
++ USB_TYPE_CFG_DATA_CFM = 0X12
++} usb_type;
++
++enum aicwf_usb_state {
++ USB_DOWN_ST,
++ USB_UP_ST,
++ USB_SLEEP_ST
++};
++
++struct ad_data_filter {
++ uint8_t ad_len;
++ uint8_t ad_type;
++ uint8_t ad_data[31];
++};
++
++struct aicwf_usb_buf {
++ struct list_head list;
++ struct aic_usb_dev *usbdev;
++ struct urb *urb;
++ struct sk_buff *skb;
++ #ifdef CONFIG_USB_NO_TRANS_DMA_MAP
++ u8 *data_buf;
++ dma_addr_t data_dma_trans_addr;
++ #endif
++ bool cfm;
++};
++
++struct aic_usb_dev {
++ struct rwnx_cmd_mgr cmd_mgr;
++ struct aicwf_bus *bus_if;
++ struct usb_device *udev;
++ struct device *dev;
++ struct aicwf_rx_priv* rx_priv;
++ enum aicwf_usb_state state;
++
++ struct usb_anchor rx_submitted;
++ struct work_struct rx_urb_work;
++
++ spinlock_t rx_free_lock;
++ spinlock_t tx_free_lock;
++ spinlock_t tx_post_lock;
++ spinlock_t tx_flow_lock;
++
++ struct list_head rx_free_list;
++ struct list_head tx_free_list;
++ struct list_head tx_post_list;
++
++ uint bulk_in_pipe;
++ uint bulk_out_pipe;
++#ifdef CONFIG_USB_MSG_EP
++ uint msg_out_pipe;
++ uint use_msg_ep;
++#endif
++
++ int tx_free_count;
++ int tx_post_count;
++
++ struct aicwf_usb_buf usb_tx_buf[AICWF_USB_TX_URBS];
++ struct aicwf_usb_buf usb_rx_buf[AICWF_USB_RX_URBS];
++
++ int msg_finished;
++ wait_queue_head_t msg_wait;
++ ulong msg_busy;
++ struct urb *msg_out_urb;
++ #ifdef CONFIG_USB_NO_TRANS_DMA_MAP
++ dma_addr_t cmd_dma_trans_addr;
++ #endif
++
++ u16 chipid;
++ bool tbusy;
++ bool app_cmp;
++};
++
++extern void aicwf_usb_exit(void);
++extern void aicwf_usb_register(void);
++extern void aicwf_usb_tx_flowctrl(struct aic_usb_dev *usb_dev, bool state);
++int usb_bustx_thread(void *data);
++int usb_busrx_thread(void *data);
++int aicwf_process_rxframes(struct aicwf_rx_priv *rx_priv);
++
++#endif /* AICWF_USB_SUPPORT */
++#endif /* _AICWF_USB_H_ */
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/md5.c
+@@ -0,0 +1,161 @@
++#include <linux/memory.h>
++#include "md5.h"
++
++unsigned char PADDING[]={0x80,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
++ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
++ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
++ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++
++void MD5Init(MD5_CTX *context)
++{
++ context->count[0] = 0;
++ context->count[1] = 0;
++ context->state[0] = 0x67452301;
++ context->state[1] = 0xEFCDAB89;
++ context->state[2] = 0x98BADCFE;
++ context->state[3] = 0x10325476;
++}
++void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen)
++{
++ unsigned int i = 0,index = 0,partlen = 0;
++ index = (context->count[0] >> 3) & 0x3F;
++ partlen = 64 - index;
++ context->count[0] += inputlen << 3;
++ if(context->count[0] < (inputlen << 3))
++ context->count[1]++;
++ context->count[1] += inputlen >> 29;
++
++ if(inputlen >= partlen)
++ {
++ memcpy(&context->buffer[index],input,partlen);
++ MD5Transform(context->state,context->buffer);
++ for(i = partlen;i+64 <= inputlen;i+=64)
++ MD5Transform(context->state,&input[i]);
++ index = 0;
++ }
++ else
++ {
++ i = 0;
++ }
++ memcpy(&context->buffer[index],&input[i],inputlen-i);
++}
++void MD5Final(MD5_CTX *context,unsigned char digest[16])
++{
++ unsigned int index = 0,padlen = 0;
++ unsigned char bits[8];
++ index = (context->count[0] >> 3) & 0x3F;
++ padlen = (index < 56)?(56-index):(120-index);
++ MD5Encode(bits,context->count,8);
++ MD5Update(context,PADDING,padlen);
++ MD5Update(context,bits,8);
++ MD5Encode(digest,context->state,16);
++}
++void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len)
++{
++ unsigned int i = 0,j = 0;
++ while(j < len)
++ {
++ output[j] = input[i] & 0xFF;
++ output[j+1] = (input[i] >> 8) & 0xFF;
++ output[j+2] = (input[i] >> 16) & 0xFF;
++ output[j+3] = (input[i] >> 24) & 0xFF;
++ i++;
++ j+=4;
++ }
++}
++void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len)
++{
++ unsigned int i = 0,j = 0;
++ while(j < len)
++ {
++ output[i] = (input[j]) |
++ (input[j+1] << 8) |
++ (input[j+2] << 16) |
++ (input[j+3] << 24);
++ i++;
++ j+=4;
++ }
++}
++void MD5Transform(unsigned int state[4],unsigned char block[64])
++{
++ unsigned int a = state[0];
++ unsigned int b = state[1];
++ unsigned int c = state[2];
++ unsigned int d = state[3];
++ unsigned int x[64];
++ MD5Decode(x,block,64);
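++ /* Round 1 */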
++ FF(a, b, c, d, x[ 0], 7, 0xd76aa478); /* 1 */
++ FF(d, a, b, c, x[ 1], 12, 0xe8c7b756); /* 2 */
++ FF(c, d, a, b, x[ 2], 17, 0x242070db); /* 3 */
++ FF(b, c, d, a, x[ 3], 22, 0xc1bdceee); /* 4 */
++ FF(a, b, c, d, x[ 4], 7, 0xf57c0faf); /* 5 */
++ FF(d, a, b, c, x[ 5], 12, 0x4787c62a); /* 6 */
++ FF(c, d, a, b, x[ 6], 17, 0xa8304613); /* 7 */
++ FF(b, c, d, a, x[ 7], 22, 0xfd469501); /* 8 */
++ FF(a, b, c, d, x[ 8], 7, 0x698098d8); /* 9 */
++ FF(d, a, b, c, x[ 9], 12, 0x8b44f7af); /* 10 */
++ FF(c, d, a, b, x[10], 17, 0xffff5bb1); /* 11 */
++ FF(b, c, d, a, x[11], 22, 0x895cd7be); /* 12 */
++ FF(a, b, c, d, x[12], 7, 0x6b901122); /* 13 */
++ FF(d, a, b, c, x[13], 12, 0xfd987193); /* 14 */
++ FF(c, d, a, b, x[14], 17, 0xa679438e); /* 15 */
++ FF(b, c, d, a, x[15], 22, 0x49b40821); /* 16 */
++
++ /* Round 2 */
++ GG(a, b, c, d, x[ 1], 5, 0xf61e2562); /* 17 */
++ GG(d, a, b, c, x[ 6], 9, 0xc040b340); /* 18 */
++ GG(c, d, a, b, x[11], 14, 0x265e5a51); /* 19 */
++ GG(b, c, d, a, x[ 0], 20, 0xe9b6c7aa); /* 20 */
++ GG(a, b, c, d, x[ 5], 5, 0xd62f105d); /* 21 */
++ GG(d, a, b, c, x[10], 9, 0x2441453); /* 22 */
++ GG(c, d, a, b, x[15], 14, 0xd8a1e681); /* 23 */
++ GG(b, c, d, a, x[ 4], 20, 0xe7d3fbc8); /* 24 */
++ GG(a, b, c, d, x[ 9], 5, 0x21e1cde6); /* 25 */
++ GG(d, a, b, c, x[14], 9, 0xc33707d6); /* 26 */
++ GG(c, d, a, b, x[ 3], 14, 0xf4d50d87); /* 27 */
++ GG(b, c, d, a, x[ 8], 20, 0x455a14ed); /* 28 */
++ GG(a, b, c, d, x[13], 5, 0xa9e3e905); /* 29 */
++ GG(d, a, b, c, x[ 2], 9, 0xfcefa3f8); /* 30 */
++ GG(c, d, a, b, x[ 7], 14, 0x676f02d9); /* 31 */
++ GG(b, c, d, a, x[12], 20, 0x8d2a4c8a); /* 32 */
++
++ /* Round 3 */
++ HH(a, b, c, d, x[ 5], 4, 0xfffa3942); /* 33 */
++ HH(d, a, b, c, x[ 8], 11, 0x8771f681); /* 34 */
++ HH(c, d, a, b, x[11], 16, 0x6d9d6122); /* 35 */
++ HH(b, c, d, a, x[14], 23, 0xfde5380c); /* 36 */
++ HH(a, b, c, d, x[ 1], 4, 0xa4beea44); /* 37 */
++ HH(d, a, b, c, x[ 4], 11, 0x4bdecfa9); /* 38 */
++ HH(c, d, a, b, x[ 7], 16, 0xf6bb4b60); /* 39 */
++ HH(b, c, d, a, x[10], 23, 0xbebfbc70); /* 40 */
++ HH(a, b, c, d, x[13], 4, 0x289b7ec6); /* 41 */
++ HH(d, a, b, c, x[ 0], 11, 0xeaa127fa); /* 42 */
++ HH(c, d, a, b, x[ 3], 16, 0xd4ef3085); /* 43 */
++ HH(b, c, d, a, x[ 6], 23, 0x4881d05); /* 44 */
++ HH(a, b, c, d, x[ 9], 4, 0xd9d4d039); /* 45 */
++ HH(d, a, b, c, x[12], 11, 0xe6db99e5); /* 46 */
++ HH(c, d, a, b, x[15], 16, 0x1fa27cf8); /* 47 */
++ HH(b, c, d, a, x[ 2], 23, 0xc4ac5665); /* 48 */
++
++ /* Round 4 */
++ II(a, b, c, d, x[ 0], 6, 0xf4292244); /* 49 */
++ II(d, a, b, c, x[ 7], 10, 0x432aff97); /* 50 */
++ II(c, d, a, b, x[14], 15, 0xab9423a7); /* 51 */
++ II(b, c, d, a, x[ 5], 21, 0xfc93a039); /* 52 */
++ II(a, b, c, d, x[12], 6, 0x655b59c3); /* 53 */
++ II(d, a, b, c, x[ 3], 10, 0x8f0ccc92); /* 54 */
++ II(c, d, a, b, x[10], 15, 0xffeff47d); /* 55 */
++ II(b, c, d, a, x[ 1], 21, 0x85845dd1); /* 56 */
++ II(a, b, c, d, x[ 8], 6, 0x6fa87e4f); /* 57 */
++ II(d, a, b, c, x[15], 10, 0xfe2ce6e0); /* 58 */
++ II(c, d, a, b, x[ 6], 15, 0xa3014314); /* 59 */
++ II(b, c, d, a, x[13], 21, 0x4e0811a1); /* 60 */
++ II(a, b, c, d, x[ 4], 6, 0xf7537e82); /* 61 */
++ II(d, a, b, c, x[11], 10, 0xbd3af235); /* 62 */
++ II(c, d, a, b, x[ 2], 15, 0x2ad7d2bb); /* 63 */
++ II(b, c, d, a, x[ 9], 21, 0xeb86d391); /* 64 */
++ state[0] += a;
++ state[1] += b;
++ state[2] += c;
++ state[3] += d;
++}
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/md5.h
+@@ -0,0 +1,48 @@
++#ifndef MD5_H
++#define MD5_H
++
++typedef struct
++{
++ unsigned int count[2];
++ unsigned int state[4];
++ unsigned char buffer[64];
++}MD5_CTX;
++
++
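++/*
++ * F/G/H/I are the four auxiliary functions of MD5 (RFC 1321); FF/GG/HH/II are
++ * the per-step round operations, each computing
++ *   a = ROTATE_LEFT(a + Fn(b,c,d) + x + ac, s) + b
++ * where Fn is the auxiliary function of the current round.
++ */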
++#define F(x,y,z) ((x & y) | (~x & z))
++#define G(x,y,z) ((x & z) | (y & ~z))
++#define H(x,y,z) (x^y^z)
++#define I(x,y,z) (y ^ (x | ~z))
++#define ROTATE_LEFT(x,n) ((x << n) | (x >> (32-n)))
++#define FF(a,b,c,d,x,s,ac) \
++ { \
++ a += F(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++#define GG(a,b,c,d,x,s,ac) \
++ { \
++ a += G(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++#define HH(a,b,c,d,x,s,ac) \
++ { \
++ a += H(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++#define II(a,b,c,d,x,s,ac) \
++ { \
++ a += I(b,c,d) + x + ac; \
++ a = ROTATE_LEFT(a,s); \
++ a += b; \
++ }
++void MD5Init(MD5_CTX *context);
++void MD5Update(MD5_CTX *context,unsigned char *input,unsigned int inputlen);
++void MD5Final(MD5_CTX *context,unsigned char digest[16]);
++void MD5Transform(unsigned int state[4],unsigned char block[64]);
++void MD5Encode(unsigned char *output,unsigned int *input,unsigned int len);
++void MD5Decode(unsigned int *output,unsigned char *input,unsigned int len);
++
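++/*
++ * Typical usage (sketch; "buf" and "len" are placeholders):
++ *
++ *   MD5_CTX ctx;
++ *   unsigned char digest[16];
++ *
++ *   MD5Init(&ctx);
++ *   MD5Update(&ctx, buf, len);
++ *   MD5Final(&ctx, digest);
++ */
++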
++#endif
+--- /dev/null
++++ b/drivers/net/wireless/aic8800/aic_load_fw/rwnx_version_gen.h
+@@ -0,0 +1,4 @@
++#define RWNX_VERS_REV "1a4b0054d2M (master)"
++#define RWNX_VERS_MOD "6.4.3.0"
++#define RWNX_VERS_BANNER "rwnx v6.4.3.0 - 1a4b0054d2M (master)"
++#define RELEASE_DATE "2023_0707_1001"
+++ /dev/null
-From 908b10ebc95eb29caae8c4737b23a29af5c6298f Mon Sep 17 00:00:00 2001
-From: Changhuang Liang <changhuang.liang@starfivetech.com>
-Date: Mon, 5 Jun 2023 13:54:16 +0800
-Subject: [PATCH 085/116] media: starfive: Add vin driver support
-
-Add vin driver support.
-
-Signed-off-by: Changhuang Liang <changhuang.liang@starfivetech.com>
----
- drivers/media/platform/Kconfig | 1 +
- drivers/media/platform/Makefile | 1 +
- drivers/media/platform/starfive/Kconfig | 56 +
- drivers/media/platform/starfive/Makefile | 24 +
- .../platform/starfive/v4l2_driver/Readme.txt | 11 +
- .../starfive/v4l2_driver/imx219_mipi.c | 1583 ++++++++
- .../starfive/v4l2_driver/ov13850_mipi.c | 1921 ++++++++++
- .../starfive/v4l2_driver/ov4689_mipi.c | 2975 +++++++++++++++
- .../platform/starfive/v4l2_driver/ov5640.c | 3227 +++++++++++++++++
- .../platform/starfive/v4l2_driver/sc2235.c | 1914 ++++++++++
- .../starfive/v4l2_driver/stf_common.h | 185 +
- .../platform/starfive/v4l2_driver/stf_csi.c | 465 +++
- .../platform/starfive/v4l2_driver/stf_csi.h | 61 +
- .../starfive/v4l2_driver/stf_csi_hw_ops.c | 310 ++
- .../starfive/v4l2_driver/stf_csiphy.c | 357 ++
- .../starfive/v4l2_driver/stf_csiphy.h | 188 +
- .../starfive/v4l2_driver/stf_csiphy_hw_ops.c | 335 ++
- .../starfive/v4l2_driver/stf_dmabuf.c | 123 +
- .../starfive/v4l2_driver/stf_dmabuf.h | 12 +
- .../platform/starfive/v4l2_driver/stf_dvp.c | 385 ++
- .../platform/starfive/v4l2_driver/stf_dvp.h | 67 +
- .../starfive/v4l2_driver/stf_dvp_hw_ops.c | 187 +
- .../platform/starfive/v4l2_driver/stf_event.c | 36 +
- .../platform/starfive/v4l2_driver/stf_isp.c | 1521 ++++++++
- .../platform/starfive/v4l2_driver/stf_isp.h | 222 ++
- .../starfive/v4l2_driver/stf_isp_hw_ops.c | 1550 ++++++++
- .../starfive/v4l2_driver/stf_isp_ioctl.h | 133 +
- .../platform/starfive/v4l2_driver/stf_video.c | 1552 ++++++++
- .../platform/starfive/v4l2_driver/stf_video.h | 83 +
- .../platform/starfive/v4l2_driver/stf_vin.c | 1515 ++++++++
- .../platform/starfive/v4l2_driver/stf_vin.h | 182 +
- .../starfive/v4l2_driver/stf_vin_hw_ops.c | 433 +++
- .../platform/starfive/v4l2_driver/stfcamss.c | 1369 +++++++
- .../platform/starfive/v4l2_driver/stfcamss.h | 117 +
- include/uapi/linux/jh7110-isp.h | 253 ++
- include/uapi/linux/v4l2-controls.h | 6 +
- include/video/stf-vin.h | 443 +++
- 37 files changed, 23803 insertions(+)
- create mode 100644 drivers/media/platform/starfive/Kconfig
- create mode 100644 drivers/media/platform/starfive/Makefile
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/Readme.txt
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/imx219_mipi.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/ov13850_mipi.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/ov4689_mipi.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/ov5640.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/sc2235.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_common.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_csi.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_csi.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_csi_hw_ops.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_csiphy.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_csiphy.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_csiphy_hw_ops.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_dmabuf.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_dmabuf.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_dvp.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_dvp.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_dvp_hw_ops.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_event.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_isp.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_isp.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_isp_hw_ops.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_isp_ioctl.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_video.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_video.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_vin.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_vin.h
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stf_vin_hw_ops.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stfcamss.c
- create mode 100644 drivers/media/platform/starfive/v4l2_driver/stfcamss.h
- create mode 100644 include/uapi/linux/jh7110-isp.h
- create mode 100644 include/video/stf-vin.h
-
---- a/drivers/media/platform/Kconfig
-+++ b/drivers/media/platform/Kconfig
-@@ -80,6 +80,7 @@ source "drivers/media/platform/renesas/K
- source "drivers/media/platform/rockchip/Kconfig"
- source "drivers/media/platform/samsung/Kconfig"
- source "drivers/media/platform/st/Kconfig"
-+source "drivers/media/platform/starfive/Kconfig"
- source "drivers/media/platform/sunxi/Kconfig"
- source "drivers/media/platform/ti/Kconfig"
- source "drivers/media/platform/verisilicon/Kconfig"
---- a/drivers/media/platform/Makefile
-+++ b/drivers/media/platform/Makefile
-@@ -23,6 +23,7 @@ obj-y += renesas/
- obj-y += rockchip/
- obj-y += samsung/
- obj-y += st/
-+obj-y += starfive/
- obj-y += sunxi/
- obj-y += ti/
- obj-y += verisilicon/
---- /dev/null
-+++ b/drivers/media/platform/starfive/Kconfig
-@@ -0,0 +1,56 @@
-+# SPDX-License-Identifier: GPL-2.0-only
-+
-+comment "Starfive media platform drivers"
-+
-+config VIN_SENSOR_OV5640
-+ tristate "VIN SENSOR support OV5640"
-+ depends on VIDEO_STF_VIN
-+ select V4L2_FWNODE
-+ default n
-+ help
-+ Say Y here if you want to have support for VIN sensor OV5640
-+
-+config VIN_SENSOR_SC2235
-+ tristate "VIN SENSOR support SC2235"
-+ depends on VIDEO_STF_VIN
-+ select V4L2_FWNODE
-+ default n
-+ help
-+ Say Y here if you want to have support for VIN sensor SC2235
-+
-+config VIN_SENSOR_OV4689
-+ tristate "VIN SENSOR support OV4689"
-+ depends on VIDEO_STF_VIN
-+ select V4L2_FWNODE
-+ default n
-+
-+ help
-+ Say Y here if you want to have support for VIN sensor OV4689
-+
-+config VIN_SENSOR_OV13850
-+ bool "VIN SENSOR support OV13850"
-+ depends on VIDEO_STF_VIN
-+ select V4L2_FWNODE
-+ default n
-+ help
-+ Say Y here if you want to have support for VIN sensor OV13850
-+
-+config VIN_SENSOR_IMX219
-+ tristate "VIN SENSOR support IMX219"
-+ depends on VIDEO_STF_VIN
-+ select V4L2_FWNODE
-+ default n
-+ help
-+ Say Y here if you want to have support for VIN sensor IMX219
-+
-+config VIDEO_STF_VIN
-+ tristate "starfive VIC video in support"
-+ depends on V4L_PLATFORM_DRIVERS
-+ depends on VIDEO_DEV
-+ select MEDIA_CONTROLLER
-+ select VIDEOBUF2_DMA_CONTIG
-+ select VIDEO_V4L2_SUBDEV_API
-+ select V4L2_FWNODE
-+ help
-+ To compile this driver as a module, choose M here: the module
-+ will be called stf-vin.
---- /dev/null
-+++ b/drivers/media/platform/starfive/Makefile
-@@ -0,0 +1,24 @@
-+# SPDX-License-Identifier: GPL-2.0
-+
-+obj-$(CONFIG_VIN_SENSOR_OV5640) += v4l2_driver/ov5640.o
-+obj-$(CONFIG_VIN_SENSOR_SC2235) += v4l2_driver/sc2235.o
-+obj-$(CONFIG_VIN_SENSOR_OV4689) += v4l2_driver/ov4689_mipi.o
-+obj-$(CONFIG_VIN_SENSOR_OV13850) += v4l2_driver/ov13850_mipi.o
-+obj-$(CONFIG_VIN_SENSOR_IMX219) += v4l2_driver/imx219_mipi.o
-+
-+starfivecamss-objs += v4l2_driver/stfcamss.o \
-+ v4l2_driver/stf_event.o \
-+ v4l2_driver/stf_dvp.o \
-+ v4l2_driver/stf_csi.o \
-+ v4l2_driver/stf_csiphy.o \
-+ v4l2_driver/stf_isp.o \
-+ v4l2_driver/stf_video.o \
-+ v4l2_driver/stf_vin.o \
-+ v4l2_driver/stf_vin_hw_ops.o \
-+ v4l2_driver/stf_csi_hw_ops.o \
-+ v4l2_driver/stf_csiphy_hw_ops.o \
-+ v4l2_driver/stf_isp_hw_ops.o \
-+ v4l2_driver/stf_dvp_hw_ops.o \
-+ v4l2_driver/stf_dmabuf.o
-+
-+obj-$(CONFIG_VIDEO_STF_VIN) += starfivecamss.o \
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/Readme.txt
-@@ -0,0 +1,11 @@
-+
-+/dev/video0: Outputs the camera data directly.
-+/dev/video1: Outputs the camera data after conversion by the ISP.
-+
-+Ensure the following options are set in linux/arch/riscv/configs/starfive_jh7110_defconfig:
-+CONFIG_VIDEO_STF_VIN=y
-+CONFIG_VIN_SENSOR_SC2235=y
-+CONFIG_VIN_SENSOR_OV4689=y
-+
-+Only lane0/lane5 of the DPHY can be used as the clock lane; lane1/lane2/lane3/lane4
-+serve as data lanes.
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/imx219_mipi.c
-@@ -0,0 +1,1583 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * A V4L2 driver for Sony IMX219 cameras.
-+ * Copyright (C) 2019, Raspberry Pi (Trading) Ltd
-+ *
-+ * Based on Sony imx258 camera driver
-+ * Copyright (C) 2018 Intel Corporation
-+ *
-+ * DT / fwnode changes, and regulator / GPIO control taken from imx214 driver
-+ * Copyright 2018 Qtechnology A/S
-+ *
-+ * Flip handling taken from the Sony IMX319 driver.
-+ * Copyright (C) 2018 Intel Corporation
-+ *
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/gpio/consumer.h>
-+#include <linux/i2c.h>
-+#include <linux/module.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/regulator/consumer.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-mediabus.h>
-+#include <asm/unaligned.h>
-+
-+#define IMX219_REG_VALUE_08BIT 1
-+#define IMX219_REG_VALUE_16BIT 2
-+
-+#define IMX219_REG_MODE_SELECT 0x0100
-+#define IMX219_MODE_STANDBY 0x00
-+#define IMX219_MODE_STREAMING 0x01
-+
-+/* Chip ID */
-+#define IMX219_REG_CHIP_ID 0x0000
-+#define IMX219_CHIP_ID 0x0219
-+
-+/* External clock frequency is 24.0M */
-+#define IMX219_XCLK_FREQ 24000000
-+
-+/* Pixel rate is fixed at 182.4M for all the modes */
-+#define IMX219_PIXEL_RATE 182400000
-+
-+#define IMX219_DEFAULT_LINK_FREQ 456000000
-+
-+/* V_TIMING internal */
-+#define IMX219_REG_VTS 0x0160
-+#define IMX219_VTS_15FPS 0x0dc6
-+#define IMX219_VTS_30FPS_1080P 0x06e3
-+#define IMX219_VTS_30FPS_BINNED 0x06e3
-+#define IMX219_VTS_30FPS_1280x720 0x06e3
-+#define IMX219_VTS_30FPS_640x480 0x06e3
-+#define IMX219_VTS_MAX 0xffff
-+
-+#define IMX219_VBLANK_MIN 4
-+
-+/*Frame Length Line*/
-+#define IMX219_FLL_MIN 0x08a6
-+#define IMX219_FLL_MAX 0xffff
-+#define IMX219_FLL_STEP 1
-+#define IMX219_FLL_DEFAULT 0x0c98
-+
-+/* HBLANK control - read only */
-+#define IMX219_PPL_DEFAULT 3448
-+
-+/* Exposure control */
-+#define IMX219_REG_EXPOSURE 0x015a
-+#define IMX219_EXPOSURE_MIN 4
-+#define IMX219_EXPOSURE_STEP 1
-+#define IMX219_EXPOSURE_DEFAULT 0x640
-+#define IMX219_EXPOSURE_MAX 65535
-+
-+/* Analog gain control */
-+#define IMX219_REG_ANALOG_GAIN 0x0157
-+#define IMX219_ANA_GAIN_MIN 0
-+#define IMX219_ANA_GAIN_MAX 232
-+#define IMX219_ANA_GAIN_STEP 1
-+#define IMX219_ANA_GAIN_DEFAULT 0xd0
-+
-+/* Digital gain control */
-+#define IMX219_REG_DIGITAL_GAIN 0x0158
-+#define IMX219_DGTL_GAIN_MIN 0x0100
-+#define IMX219_DGTL_GAIN_MAX 0x0fff
-+#define IMX219_DGTL_GAIN_DEFAULT 0x0100
-+#define IMX219_DGTL_GAIN_STEP 1
-+
-+#define IMX219_REG_ORIENTATION 0x0172
-+
-+/* Test Pattern Control */
-+#define IMX219_REG_TEST_PATTERN 0x0600
-+#define IMX219_TEST_PATTERN_DISABLE 0
-+#define IMX219_TEST_PATTERN_SOLID_COLOR 1
-+#define IMX219_TEST_PATTERN_COLOR_BARS 2
-+#define IMX219_TEST_PATTERN_GREY_COLOR 3
-+#define IMX219_TEST_PATTERN_PN9 4
-+
-+/* Test pattern colour components */
-+#define IMX219_REG_TESTP_RED 0x0602
-+#define IMX219_REG_TESTP_GREENR 0x0604
-+#define IMX219_REG_TESTP_BLUE 0x0606
-+#define IMX219_REG_TESTP_GREENB 0x0608
-+#define IMX219_TESTP_COLOUR_MIN 0
-+#define IMX219_TESTP_COLOUR_MAX 0x03ff
-+#define IMX219_TESTP_COLOUR_STEP 1
-+#define IMX219_TESTP_RED_DEFAULT IMX219_TESTP_COLOUR_MAX
-+#define IMX219_TESTP_GREENR_DEFAULT 0
-+#define IMX219_TESTP_BLUE_DEFAULT 0
-+#define IMX219_TESTP_GREENB_DEFAULT 0
-+
-+/* IMX219 native and active pixel array size. */
-+#define IMX219_NATIVE_WIDTH 3296U
-+#define IMX219_NATIVE_HEIGHT 2480U
-+#define IMX219_PIXEL_ARRAY_LEFT 8U
-+#define IMX219_PIXEL_ARRAY_TOP 8U
-+#define IMX219_PIXEL_ARRAY_WIDTH 3280U
-+#define IMX219_PIXEL_ARRAY_HEIGHT 2464U
-+
-+struct imx219_reg {
-+ u16 address;
-+ u8 val;
-+};
-+
-+struct imx219_reg_list {
-+ unsigned int num_of_regs;
-+ const struct imx219_reg *regs;
-+};
-+
-+/* Mode : resolution and related config&values */
-+struct imx219_mode {
-+ /* Frame width */
-+ unsigned int width;
-+ /* Frame height */
-+ unsigned int height;
-+
-+ unsigned int fps;
-+
-+ /* Analog crop rectangle. */
-+ struct v4l2_rect crop;
-+
-+ /* V-timing */
-+ unsigned int vts_def;
-+
-+ /* Default register values */
-+ struct imx219_reg_list reg_list;
-+};
-+
-+/*
-+ * Register sets lifted off the I2C interface from the Raspberry Pi firmware
-+ * driver.
-+ * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
-+ */
-+
-+static const struct imx219_reg mode_1920_1080_regs[] = {
-+ {0x0100, 0x00},
-+ {0x30eb, 0x05},
-+ {0x30eb, 0x0c},
-+ {0x300a, 0xff},
-+ {0x300b, 0xff},
-+ {0x30eb, 0x05},
-+ {0x30eb, 0x09},
-+ {0x0114, 0x01},
-+ {0x0128, 0x00},
-+ {0x012a, 0x18},
-+ {0x012b, 0x00},
-+ {0x0162, 0x0d},
-+ {0x0163, 0x78},
-+ {0x0164, 0x02},
-+ {0x0165, 0xa8},
-+ {0x0166, 0x0a},
-+ {0x0167, 0x27},
-+ {0x0168, 0x02},
-+ {0x0169, 0xb4},
-+ {0x016a, 0x06},
-+ {0x016b, 0xeb},
-+ {0x016c, 0x07},
-+ {0x016d, 0x80},
-+ {0x016e, 0x04},
-+ {0x016f, 0x38},
-+ {0x0170, 0x01},
-+ {0x0171, 0x01},
-+ {0x0174, 0x00},
-+ {0x0175, 0x00},
-+ {0x0301, 0x05},
-+ {0x0303, 0x01},
-+ {0x0304, 0x03},
-+ {0x0305, 0x03},
-+ {0x0306, 0x00},
-+ {0x0307, 0x39},
-+ {0x030b, 0x01},
-+ {0x030c, 0x00},
-+ {0x030d, 0x72},
-+ {0x0624, 0x07},
-+ {0x0625, 0x80},
-+ {0x0626, 0x04},
-+ {0x0627, 0x38},
-+ {0x455e, 0x00},
-+ {0x471e, 0x4b},
-+ {0x4767, 0x0f},
-+ {0x4750, 0x14},
-+ {0x4540, 0x00},
-+ {0x47b4, 0x14},
-+ {0x4713, 0x30},
-+ {0x478b, 0x10},
-+ {0x478f, 0x10},
-+ {0x4793, 0x10},
-+ {0x4797, 0x0e},
-+ {0x479b, 0x0e},
-+};
-+
-+static const struct imx219_reg mode_1280_720_regs[] = {
-+ {0x0100, 0x00},
-+ {0x30eb, 0x05},
-+ {0x30eb, 0x0c},
-+ {0x300a, 0xff},
-+ {0x300b, 0xff},
-+ {0x30eb, 0x05},
-+ {0x30eb, 0x09},
-+ {0x0114, 0x01},
-+ {0x0128, 0x00},
-+ {0x012a, 0x18},
-+ {0x012b, 0x00},
-+ {0x0162, 0x0d},
-+ {0x0163, 0x78},
-+ {0x0164, 0x01},
-+ {0x0165, 0x68},
-+ {0x0166, 0x0b},
-+ {0x0167, 0x67},
-+ {0x0168, 0x02},
-+ {0x0169, 0x00},
-+ {0x016a, 0x07},
-+ {0x016b, 0x9f},
-+ {0x016c, 0x05},
-+ {0x016d, 0x00},
-+ {0x016e, 0x02},
-+ {0x016f, 0xd0},
-+ {0x0170, 0x01},
-+ {0x0171, 0x01},
-+ {0x0174, 0x01},
-+ {0x0175, 0x01},
-+ {0x0301, 0x05},
-+ {0x0303, 0x01},
-+ {0x0304, 0x03},
-+ {0x0305, 0x03},
-+ {0x0306, 0x00},
-+ {0x0307, 0x39},
-+ {0x030b, 0x01},
-+ {0x030c, 0x00},
-+ {0x030d, 0x72},
-+ {0x0624, 0x06},
-+ {0x0625, 0x68},
-+ {0x0626, 0x04},
-+ {0x0627, 0xd0},
-+ {0x455e, 0x00},
-+ {0x471e, 0x4b},
-+ {0x4767, 0x0f},
-+ {0x4750, 0x14},
-+ {0x4540, 0x00},
-+ {0x47b4, 0x14},
-+ {0x4713, 0x30},
-+ {0x478b, 0x10},
-+ {0x478f, 0x10},
-+ {0x4793, 0x10},
-+ {0x4797, 0x0e},
-+ {0x479b, 0x0e},
-+};
-+
-+static const struct imx219_reg mode_640_480_regs[] = {
-+ {0x0100, 0x00},
-+ {0x30eb, 0x05},
-+ {0x30eb, 0x0c},
-+ {0x300a, 0xff},
-+ {0x300b, 0xff},
-+ {0x30eb, 0x05},
-+ {0x30eb, 0x09},
-+ {0x0114, 0x01},
-+ {0x0128, 0x00},
-+ {0x012a, 0x18},
-+ {0x012b, 0x00},
-+ {0x0162, 0x0d},
-+ {0x0163, 0x78},
-+ {0x0164, 0x03},
-+ {0x0165, 0xe8},
-+ {0x0166, 0x08},
-+ {0x0167, 0xe7},
-+ {0x0168, 0x02},
-+ {0x0169, 0xf0},
-+ {0x016a, 0x06},
-+ {0x016b, 0xaf},
-+ {0x016c, 0x02},
-+ {0x016d, 0x80},
-+ {0x016e, 0x01},
-+ {0x016f, 0xe0},
-+ {0x0170, 0x01},
-+ {0x0171, 0x01},
-+ {0x0174, 0x03},
-+ {0x0175, 0x03},
-+ {0x0301, 0x05},
-+ {0x0303, 0x01},
-+ {0x0304, 0x03},
-+ {0x0305, 0x03},
-+ {0x0306, 0x00},
-+ {0x0307, 0x39},
-+ {0x030b, 0x01},
-+ {0x030c, 0x00},
-+ {0x030d, 0x72},
-+ {0x0624, 0x06},
-+ {0x0625, 0x68},
-+ {0x0626, 0x04},
-+ {0x0627, 0xd0},
-+ {0x455e, 0x00},
-+ {0x471e, 0x4b},
-+ {0x4767, 0x0f},
-+ {0x4750, 0x14},
-+ {0x4540, 0x00},
-+ {0x47b4, 0x14},
-+ {0x4713, 0x30},
-+ {0x478b, 0x10},
-+ {0x478f, 0x10},
-+ {0x4793, 0x10},
-+ {0x4797, 0x0e},
-+ {0x479b, 0x0e},
-+};
-+
-+static const struct imx219_reg raw8_framefmt_regs[] = {
-+ {0x018c, 0x08},
-+ {0x018d, 0x08},
-+ {0x0309, 0x08},
-+};
-+
-+static const struct imx219_reg raw10_framefmt_regs[] = {
-+ {0x018c, 0x0a},
-+ {0x018d, 0x0a},
-+ {0x0309, 0x0a},
-+};
-+
-+static const s64 imx219_link_freq_menu[] = {
-+ IMX219_DEFAULT_LINK_FREQ,
-+};
-+
-+static const char * const imx219_test_pattern_menu[] = {
-+ "Disabled",
-+ "Color Bars",
-+ "Solid Color",
-+ "Grey Color Bars",
-+ "PN9"
-+};
-+
-+static const int imx219_test_pattern_val[] = {
-+ IMX219_TEST_PATTERN_DISABLE,
-+ IMX219_TEST_PATTERN_COLOR_BARS,
-+ IMX219_TEST_PATTERN_SOLID_COLOR,
-+ IMX219_TEST_PATTERN_GREY_COLOR,
-+ IMX219_TEST_PATTERN_PN9,
-+};
-+
-+/* regulator supplies */
-+static const char * const imx219_supply_name[] = {
-+ /* Supplies can be enabled in any order */
-+ "VANA", /* Analog (2.8V) supply */
-+ "VDIG", /* Digital Core (1.8V) supply */
-+ "VDDL", /* IF (1.2V) supply */
-+};
-+
-+#define IMX219_NUM_SUPPLIES ARRAY_SIZE(imx219_supply_name)
-+
-+/*
-+ * The supported formats.
-+ * This table MUST contain 4 entries per format, to cover the various flip
-+ * combinations in the order
-+ * - no flip
-+ * - h flip
-+ * - v flip
-+ * - h&v flips
-+ */
-+static const u32 codes[] = {
-+ MEDIA_BUS_FMT_SRGGB10_1X10,
-+ MEDIA_BUS_FMT_SGRBG10_1X10,
-+ MEDIA_BUS_FMT_SGBRG10_1X10,
-+ MEDIA_BUS_FMT_SBGGR10_1X10,
-+
-+ MEDIA_BUS_FMT_SRGGB8_1X8,
-+ MEDIA_BUS_FMT_SGRBG8_1X8,
-+ MEDIA_BUS_FMT_SGBRG8_1X8,
-+ MEDIA_BUS_FMT_SBGGR8_1X8,
-+};
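-+
-+/*
-+ * Example of the indexing scheme: starting from MEDIA_BUS_FMT_SRGGB10_1X10
-+ * (index 0), H-flip alone selects index 1 (SGRBG10), V-flip alone index 2
-+ * (SGBRG10), and both flips index 3 (SBGGR10); see imx219_get_format_code().
-+ */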
-+
-+/*
-+ * Initialisation delay between XCLR low->high and the moment when the sensor
-+ * can start capture (i.e. can leave software standby) must be not less than:
-+ * t4 + max(t5, t6 + <time to initialize the sensor register over I2C>)
-+ * where
-+ * t4 is fixed, and is max 200uS,
-+ * t5 is fixed, and is 6000uS,
-+ * t6 depends on the sensor external clock, and is max 32000 clock periods.
-+ * As per sensor datasheet, the external clock must be from 6MHz to 27MHz.
-+ * So for any acceptable external clock t6 is always within the range of
-+ * 1185 to 5333 uS, and is always less than t5.
-+ * For this reason it is always safe to wait (t4 + t5) = 6200 uS, then
-+ * initialize the sensor over I2C, and then exit the software standby.
-+ *
-+ * This start-up time can be optimized a bit more, if we start the writes
-+ * over I2C after (t4+t6), but before (t4+t5) expires. But then sensor
-+ * initialization over I2C may complete before (t4+t5) expires, and we must
-+ * ensure that capture is not started before (t4+t5).
-+ *
-+ * This delay doesn't account for the power supply startup time. If needed,
-+ * this should be taken care of via the regulator framework. E.g. in the
-+ * case of DT for regulator-fixed one should define the startup-delay-us
-+ * property.
-+ */
-+#define IMX219_XCLR_MIN_DELAY_US 6200
-+#define IMX219_XCLR_DELAY_RANGE_US 1000
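-+
-+/*
-+ * Worked numbers for the bound above: t6 is at most 32000 / 6 MHz ~= 5333 us
-+ * and at least 32000 / 27 MHz ~= 1185 us, both below t5 = 6000 us, so the
-+ * delay actually used is t4 + t5 = 200 us + 6000 us = 6200 us.
-+ */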
-+
-+/* Mode configs */
-+static const struct imx219_mode supported_modes[] = {
-+ {
-+ /* 1080P 30fps cropped */
-+ .width = 1920,
-+ .height = 1080,
-+ .fps = 30,
-+ .crop = {
-+ .left = 688,
-+ .top = 700,
-+ .width = 1920,
-+ .height = 1080
-+ },
-+ .vts_def = IMX219_VTS_30FPS_1080P,
-+ .reg_list = {
-+ .num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
-+ .regs = mode_1920_1080_regs,
-+ },
-+ },
-+ {
-+ /* 1280x720 30fps mode */
-+ .width = 1280,
-+ .height = 720,
-+ .fps = 30,
-+ .crop = {
-+ .left = 360,
-+ .top = 512,
-+ .width = 2560,
-+ .height = 1440
-+ },
-+ .vts_def = IMX219_VTS_30FPS_1280x720,
-+ .reg_list = {
-+ .num_of_regs = ARRAY_SIZE(mode_1280_720_regs),
-+ .regs = mode_1280_720_regs,
-+ },
-+ },
-+ {
-+ /* 640x480 30fps mode */
-+ .width = 640,
-+ .height = 480,
-+ .fps = 30,
-+ .crop = {
-+ .left = 1008,
-+ .top = 760,
-+ .width = 1280,
-+ .height = 960
-+ },
-+ .vts_def = IMX219_VTS_30FPS_640x480,
-+ .reg_list = {
-+ .num_of_regs = ARRAY_SIZE(mode_640_480_regs),
-+ .regs = mode_640_480_regs,
-+ },
-+ },
-+};
-+
-+struct imx219 {
-+ struct v4l2_subdev sd;
-+ struct media_pad pad;
-+ //struct v4l2_fwnode_endpoint ep; /* the parsed DT endpoint info */
-+
-+ struct v4l2_mbus_framefmt fmt;
-+
-+ struct clk *xclk; /* system clock to IMX219 */
-+ u32 xclk_freq;
-+
-+ struct gpio_desc *reset_gpio;
-+ struct regulator_bulk_data supplies[IMX219_NUM_SUPPLIES];
-+
-+ struct v4l2_ctrl_handler ctrl_handler;
-+ /* V4L2 Controls */
-+ struct v4l2_ctrl *pixel_rate;
-+ struct v4l2_ctrl *link_freq;
-+ struct v4l2_ctrl *exposure;
-+ struct v4l2_ctrl *vflip;
-+ struct v4l2_ctrl *hflip;
-+ struct v4l2_ctrl *vblank;
-+ struct v4l2_ctrl *hblank;
-+
-+ /* Current mode */
-+ const struct imx219_mode *mode;
-+ struct v4l2_fract frame_interval;
-+
-+ /*
-+ * Mutex for serialized access:
-+ * Protects set_pad_format() and start/stop streaming against concurrent access.
-+ */
-+ struct mutex mutex;
-+
-+ /* Streaming on/off */
-+ int streaming;
-+};
-+
-+static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
-+{
-+ return container_of(_sd, struct imx219, sd);
-+}
-+
-+/* Read registers up to 2 at a time */
-+static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ struct i2c_msg msgs[2];
-+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
-+ u8 data_buf[4] = { 0, };
-+ int ret;
-+
-+ if (len > 4)
-+ return -EINVAL;
-+
-+ /* Write register address */
-+ msgs[0].addr = client->addr;
-+ msgs[0].flags = 0;
-+ msgs[0].len = ARRAY_SIZE(addr_buf);
-+ msgs[0].buf = addr_buf;
-+
-+ /* Read data from register */
-+ msgs[1].addr = client->addr;
-+ msgs[1].flags = I2C_M_RD;
-+ msgs[1].len = len;
-+ msgs[1].buf = &data_buf[4 - len];
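-+ /*
-+ * The data bytes land in the tail of the zero-initialised 4-byte buffer,
-+ * so the big-endian conversion below yields the register value for any
-+ * supported length.
-+ */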
-+
-+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-+ if (ret != ARRAY_SIZE(msgs))
-+ return -EIO;
-+
-+ *val = get_unaligned_be32(data_buf);
-+
-+ return 0;
-+}
-+
-+/* Write registers up to 2 at a time */
-+static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ u8 buf[6];
-+
-+ if (len > 4)
-+ return -EINVAL;
-+
-+ put_unaligned_be16(reg, buf);
-+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
-+ if (i2c_master_send(client, buf, len + 2) != len + 2)
-+ return -EIO;
-+
-+ return 0;
-+}
-+
-+/* Write a list of registers */
-+static int imx219_write_regs(struct imx219 *imx219,
-+ const struct imx219_reg *regs, u32 len)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ unsigned int i;
-+ int ret;
-+
-+ for (i = 0; i < len; i++) {
-+ ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
-+ if (ret) {
-+ dev_err_ratelimited(&client->dev,
-+ "Failed to write reg 0x%4.4x. error = %d\n",
-+ regs[i].address, ret);
-+
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+/* Get bayer order based on flip setting. */
-+static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
-+{
-+ unsigned int i;
-+
-+ lockdep_assert_held(&imx219->mutex);
-+
-+ for (i = 0; i < ARRAY_SIZE(codes); i++)
-+ if (codes[i] == code)
-+ break;
-+
-+ if (i >= ARRAY_SIZE(codes))
-+ i = 0;
-+
-+ i = (i & ~3) | (imx219->vflip->val ? 2 : 0) |
-+ (imx219->hflip->val ? 1 : 0);
-+
-+ return codes[i];
-+}
-+
-+static void imx219_set_default_format(struct imx219 *imx219)
-+{
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ fmt = &imx219->fmt;
-+ fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
-+ fmt->colorspace, fmt->ycbcr_enc);
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+ fmt->width = supported_modes[0].width;
-+ fmt->height = supported_modes[0].height;
-+ fmt->field = V4L2_FIELD_NONE;
-+}
-+
-+static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ struct v4l2_mbus_framefmt *try_fmt =
-+ v4l2_subdev_get_try_format(sd, fh->state, 0);
-+ struct v4l2_rect *try_crop;
-+
-+ mutex_lock(&imx219->mutex);
-+
-+ /* Initialize try_fmt */
-+ try_fmt->width = supported_modes[0].width;
-+ try_fmt->height = supported_modes[0].height;
-+ try_fmt->code = imx219_get_format_code(imx219, MEDIA_BUS_FMT_SRGGB10_1X10);
-+ try_fmt->field = V4L2_FIELD_NONE;
-+
-+ /* Initialize try_crop rectangle. */
-+ try_crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
-+ try_crop->top = IMX219_PIXEL_ARRAY_TOP;
-+ try_crop->left = IMX219_PIXEL_ARRAY_LEFT;
-+ try_crop->width = IMX219_PIXEL_ARRAY_WIDTH;
-+ try_crop->height = IMX219_PIXEL_ARRAY_HEIGHT;
-+
-+ mutex_unlock(&imx219->mutex);
-+
-+ return 0;
-+}
-+
-+static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct imx219 *imx219 =
-+ container_of(ctrl->handler, struct imx219, ctrl_handler);
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ int ret;
-+
-+ if (ctrl->id == V4L2_CID_VBLANK) {
-+ int exposure_max, exposure_def;
-+
-+ /* Update max exposure while meeting expected vblanking */
-+ exposure_max = imx219->mode->height + ctrl->val - 4;
-+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
-+ exposure_max : IMX219_EXPOSURE_DEFAULT;
-+ __v4l2_ctrl_modify_range(imx219->exposure, imx219->exposure->minimum,
-+ exposure_max, imx219->exposure->step, exposure_def);
-+ }
-+
-+ /*
-+ * Applying V4L2 control value only happens
-+ * when power is up for streaming
-+ */
-+ if (pm_runtime_get_if_in_use(&client->dev) == 0)
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_ANALOGUE_GAIN:
-+ ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
-+ IMX219_REG_VALUE_08BIT, ctrl->val);
-+ break;
-+ case V4L2_CID_EXPOSURE:
-+ ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
-+ IMX219_REG_VALUE_16BIT, ctrl->val);
-+ break;
-+ case V4L2_CID_DIGITAL_GAIN:
-+ ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
-+ IMX219_REG_VALUE_16BIT, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN:
-+ ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
-+ IMX219_REG_VALUE_16BIT, imx219_test_pattern_val[ctrl->val]);
-+ break;
-+ case V4L2_CID_HFLIP:
-+ case V4L2_CID_VFLIP:
-+ ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
-+ imx219->hflip->val | imx219->vflip->val << 1);
-+ break;
-+ case V4L2_CID_VBLANK:
-+ ret = imx219_write_reg(imx219, IMX219_REG_VTS, IMX219_REG_VALUE_16BIT,
-+ imx219->mode->height + ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN_RED:
-+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
-+ IMX219_REG_VALUE_16BIT, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN_GREENR:
-+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
-+ IMX219_REG_VALUE_16BIT, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN_BLUE:
-+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
-+ IMX219_REG_VALUE_16BIT, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN_GREENB:
-+ ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
-+ IMX219_REG_VALUE_16BIT, ctrl->val);
-+ break;
-+ default:
-+ dev_info(&client->dev,
-+ "ctrl(id:0x%x,val:0x%x) is not handled\n", ctrl->id, ctrl->val);
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ pm_runtime_put(&client->dev);
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
-+ .s_ctrl = imx219_set_ctrl,
-+};
-+
-+static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+
-+ if (code->index >= (ARRAY_SIZE(codes) / 4))
-+ return -EINVAL;
-+
-+ mutex_lock(&imx219->mutex);
-+ code->code = imx219_get_format_code(imx219, codes[code->index * 4]);
-+ mutex_unlock(&imx219->mutex);
-+
-+ return 0;
-+}
-+
-+static int imx219_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ u32 code;
-+
-+ if (fse->index >= ARRAY_SIZE(supported_modes))
-+ return -EINVAL;
-+
-+ mutex_lock(&imx219->mutex);
-+ code = imx219_get_format_code(imx219, fse->code);
-+ mutex_unlock(&imx219->mutex);
-+ if (fse->code != code)
-+ return -EINVAL;
-+
-+ fse->min_width = supported_modes[fse->index].width;
-+ fse->max_width = fse->min_width;
-+ fse->min_height = supported_modes[fse->index].height;
-+ fse->max_height = fse->min_height;
-+
-+ return 0;
-+}
-+
-+static int imx219_try_frame_interval(struct imx219 *imx219,
-+ struct v4l2_fract *fi,
-+ u32 w, u32 h)
-+{
-+ const struct imx219_mode *mode;
-+
-+ mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes),
-+ width, height, w, h);
-+ if (!mode || (mode->width != w || mode->height != h))
-+ return -EINVAL;
-+
-+ fi->numerator = 1;
-+ fi->denominator = mode->fps;
-+
-+ return mode->fps;
-+}
-+
-+static int imx219_enum_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_interval_enum *fie)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ struct v4l2_fract tpf;
-+ u32 code;
-+ int ret;
-+
-+ if (fie->index >= ARRAY_SIZE(supported_modes))
-+ return -EINVAL;
-+
-+ mutex_lock(&imx219->mutex);
-+ code = imx219_get_format_code(imx219, fie->code);
-+ if (fie->code != code) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ ret = imx219_try_frame_interval(imx219, &tpf,
-+ fie->width, fie->height);
-+ if (ret < 0) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ mutex_unlock(&imx219->mutex);
-+ fie->interval = tpf;
-+
-+ return 0;
-+
-+out:
-+ mutex_unlock(&imx219->mutex);
-+ return ret;
-+}
-+
-+static int imx219_g_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+
-+ mutex_lock(&imx219->mutex);
-+ fi->interval = imx219->frame_interval;
-+ mutex_unlock(&imx219->mutex);
-+
-+ return 0;
-+}
-+
-+static int imx219_s_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ const struct imx219_mode *mode = imx219->mode;
-+ int frame_rate, ret = 0;
-+
-+ if (fi->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&imx219->mutex);
-+
-+ if (imx219->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ frame_rate = imx219_try_frame_interval(imx219, &fi->interval,
-+ mode->width, mode->height);
-+ if (frame_rate < 0) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ imx219->frame_interval = fi->interval;
-+
-+out:
-+ mutex_unlock(&imx219->mutex);
-+ return ret;
-+}
-+
-+static void imx219_reset_colorspace(struct v4l2_mbus_framefmt *fmt)
-+{
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
-+ fmt->colorspace, fmt->ycbcr_enc);
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+}
-+
-+static void imx219_update_pad_format(struct imx219 *imx219,
-+ const struct imx219_mode *mode,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ fmt->format.width = mode->width;
-+ fmt->format.height = mode->height;
-+ fmt->format.field = V4L2_FIELD_NONE;
-+ imx219_reset_colorspace(&fmt->format);
-+}
-+
-+static int __imx219_get_pad_format(struct imx219 *imx219,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-+ struct v4l2_mbus_framefmt *try_fmt =
-+ v4l2_subdev_get_try_format(&imx219->sd, state, fmt->pad);
-+ /* update the code which could change due to vflip or hflip: */
-+ try_fmt->code = imx219_get_format_code(imx219, try_fmt->code);
-+ fmt->format = *try_fmt;
-+ } else {
-+ imx219_update_pad_format(imx219, imx219->mode, fmt);
-+ fmt->format.code = imx219_get_format_code(imx219, imx219->fmt.code);
-+ }
-+
-+ return 0;
-+}
-+
-+static int imx219_get_pad_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ int ret;
-+
-+ mutex_lock(&imx219->mutex);
-+ ret = __imx219_get_pad_format(imx219, state, fmt);
-+ mutex_unlock(&imx219->mutex);
-+
-+ return ret;
-+}
-+
-+static int imx219_set_pad_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ const struct imx219_mode *mode;
-+ struct v4l2_mbus_framefmt *framefmt;
-+ int exposure_max, exposure_def, hblank;
-+ unsigned int i;
-+
-+ mutex_lock(&imx219->mutex);
-+
-+ for (i = 0; i < ARRAY_SIZE(codes); i++)
-+ if (codes[i] == fmt->format.code)
-+ break;
-+ if (i >= ARRAY_SIZE(codes))
-+ i = 0;
-+
-+ /* Bayer order varies with flips */
-+ fmt->format.code = imx219_get_format_code(imx219, codes[i]);
-+
-+ mode = v4l2_find_nearest_size(supported_modes, ARRAY_SIZE(supported_modes),
-+ width, height, fmt->format.width, fmt->format.height);
-+ imx219_update_pad_format(imx219, mode, fmt);
-+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-+ framefmt = v4l2_subdev_get_try_format(sd, state, fmt->pad);
-+ *framefmt = fmt->format;
-+ } else if (imx219->mode != mode ||
-+ imx219->fmt.code != fmt->format.code) {
-+ imx219->fmt = fmt->format;
-+ imx219->mode = mode;
-+ /* Update limits and set FPS to default */
-+ __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
-+ IMX219_VTS_MAX - mode->height, 1,
-+ mode->vts_def - mode->height);
-+ __v4l2_ctrl_s_ctrl(imx219->vblank, mode->vts_def - mode->height);
-+ /* Update max exposure while meeting expected vblanking */
-+ exposure_max = mode->vts_def - 4;
-+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
-+ exposure_max : IMX219_EXPOSURE_DEFAULT;
-+ __v4l2_ctrl_modify_range(imx219->exposure, imx219->exposure->minimum,
-+ exposure_max, imx219->exposure->step, exposure_def);
-+ /*
-+ * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
-+ * depends on mode->width only, and is not changeable in any
-+ * way other than changing the mode.
-+ */
-+ hblank = IMX219_PPL_DEFAULT - mode->width;
-+ __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1, hblank);
-+ }
-+
-+ mutex_unlock(&imx219->mutex);
-+
-+ return 0;
-+}
-+
-+static int imx219_set_framefmt(struct imx219 *imx219)
-+{
-+ switch (imx219->fmt.code) {
-+ case MEDIA_BUS_FMT_SRGGB8_1X8:
-+ case MEDIA_BUS_FMT_SGRBG8_1X8:
-+ case MEDIA_BUS_FMT_SGBRG8_1X8:
-+ case MEDIA_BUS_FMT_SBGGR8_1X8:
-+ return imx219_write_regs(imx219, raw8_framefmt_regs,
-+ ARRAY_SIZE(raw8_framefmt_regs));
-+
-+ case MEDIA_BUS_FMT_SRGGB10_1X10:
-+ case MEDIA_BUS_FMT_SGRBG10_1X10:
-+ case MEDIA_BUS_FMT_SGBRG10_1X10:
-+ case MEDIA_BUS_FMT_SBGGR10_1X10:
-+ return imx219_write_regs(imx219, raw10_framefmt_regs,
-+ ARRAY_SIZE(raw10_framefmt_regs));
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+static const struct v4l2_rect *
-+__imx219_get_pad_crop(struct imx219 *imx219, struct v4l2_subdev_state *state,
-+ unsigned int pad, enum v4l2_subdev_format_whence which)
-+{
-+ switch (which) {
-+ case V4L2_SUBDEV_FORMAT_TRY:
-+ return v4l2_subdev_get_try_crop(&imx219->sd, state, pad);
-+ case V4L2_SUBDEV_FORMAT_ACTIVE:
-+ return &imx219->mode->crop;
-+ }
-+
-+ return NULL;
-+}
-+
-+static int imx219_get_selection(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_selection *sel)
-+{
-+ switch (sel->target) {
-+ case V4L2_SEL_TGT_CROP: {
-+ struct imx219 *imx219 = to_imx219(sd);
-+
-+ mutex_lock(&imx219->mutex);
-+ sel->r = *__imx219_get_pad_crop(imx219, state, sel->pad, sel->which);
-+ mutex_unlock(&imx219->mutex);
-+ return 0;
-+ }
-+
-+ case V4L2_SEL_TGT_NATIVE_SIZE:
-+ sel->r.top = 0;
-+ sel->r.left = 0;
-+ sel->r.width = IMX219_NATIVE_WIDTH;
-+ sel->r.height = IMX219_NATIVE_HEIGHT;
-+ return 0;
-+
-+ case V4L2_SEL_TGT_CROP_DEFAULT:
-+ case V4L2_SEL_TGT_CROP_BOUNDS:
-+ sel->r.top = IMX219_PIXEL_ARRAY_TOP;
-+ sel->r.left = IMX219_PIXEL_ARRAY_LEFT;
-+ sel->r.width = IMX219_PIXEL_ARRAY_WIDTH;
-+ sel->r.height = IMX219_PIXEL_ARRAY_HEIGHT;
-+ return 0;
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+static int imx219_start_streaming(struct imx219 *imx219)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ const struct imx219_reg_list *reg_list;
-+ int ret;
-+
-+ /* Apply default values of current mode */
-+ reg_list = &imx219->mode->reg_list;
-+ ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
-+ if (ret) {
-+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
-+ goto err;
-+ }
-+
-+ ret = imx219_set_framefmt(imx219);
-+ if (ret) {
-+ dev_err(&client->dev, "%s failed to set frame format: %d\n",
-+ __func__, ret);
-+ goto err;
-+ }
-+
-+ /* Apply customized values from user */
-+ ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
-+ if (ret)
-+ goto err;
-+
-+ /* set stream on register */
-+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
-+ if (ret)
-+ goto err;
-+
-+ /* vflip and hflip cannot change during streaming */
-+ __v4l2_ctrl_grab(imx219->vflip, true);
-+ __v4l2_ctrl_grab(imx219->hflip, true);
-+
-+ return 0;
-+
-+err:
-+ return ret;
-+}
-+
-+static void imx219_stop_streaming(struct imx219 *imx219)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ int ret;
-+
-+ /* set stream off register */
-+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
-+ if (ret)
-+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
-+
-+ __v4l2_ctrl_grab(imx219->vflip, false);
-+ __v4l2_ctrl_grab(imx219->hflip, false);
-+}
-+
-+static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct imx219 *imx219 = to_imx219(sd);
-+ struct i2c_client *client = v4l2_get_subdevdata(sd);
-+ int ret = 0;
-+
-+ mutex_lock(&imx219->mutex);
-+
-+ if (enable) {
-+ ret = pm_runtime_get_sync(&client->dev);
-+ if (ret < 0) {
-+ pm_runtime_put_noidle(&client->dev);
-+ mutex_unlock(&imx219->mutex);
-+ return ret;
-+ }
-+
-+ if (imx219->streaming)
-+ goto unlock;
-+
-+ /*
-+ * Apply default & customized values
-+ * and then start streaming.
-+ */
-+ ret = imx219_start_streaming(imx219);
-+ if (ret)
-+ goto err_unlock;
-+ } else {
-+ imx219_stop_streaming(imx219);
-+ pm_runtime_put(&client->dev);
-+ }
-+
-+unlock:
-+ imx219->streaming += enable ? 1 : -1;
-+ WARN_ON(imx219->streaming < 0);
-+
-+ mutex_unlock(&imx219->mutex);
-+
-+ return ret;
-+
-+err_unlock:
-+ pm_runtime_put(&client->dev);
-+ mutex_unlock(&imx219->mutex);
-+
-+ return ret;
-+}
-+
-+/* Power/clock management functions */
-+static int imx219_power_on(struct device *dev)
-+{
-+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
-+ struct imx219 *imx219 = to_imx219(sd);
-+ int ret;
-+
-+ ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES, imx219->supplies);
-+ if (ret) {
-+ dev_err(dev, "%s: failed to enable regulators\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ ret = clk_prepare_enable(imx219->xclk);
-+ if (ret) {
-+ dev_err(dev, "%s: failed to enable clock\n", __func__);
-+ goto reg_off;
-+ }
-+
-+ gpiod_set_value_cansleep(imx219->reset_gpio, 1);
-+ usleep_range(IMX219_XCLR_MIN_DELAY_US,
-+ IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
-+
-+ return 0;
-+
-+reg_off:
-+ regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
-+
-+ return ret;
-+}
-+
-+static int imx219_power_off(struct device *dev)
-+{
-+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
-+ struct imx219 *imx219 = to_imx219(sd);
-+
-+ gpiod_set_value_cansleep(imx219->reset_gpio, 0);
-+ regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
-+ clk_disable_unprepare(imx219->xclk);
-+
-+ return 0;
-+}
-+
-+static int imx219_get_regulators(struct imx219 *imx219)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ unsigned int i;
-+
-+ for (i = 0; i < IMX219_NUM_SUPPLIES; i++)
-+ imx219->supplies[i].supply = imx219_supply_name[i];
-+
-+ return devm_regulator_bulk_get(&client->dev,
-+ IMX219_NUM_SUPPLIES, imx219->supplies);
-+}
-+
-+/* Verify chip ID */
-+static int imx219_identify_module(struct imx219 *imx219)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ int ret;
-+ u32 val;
-+
-+ ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
-+ IMX219_REG_VALUE_16BIT, &val);
-+ if (ret) {
-+ dev_err(&client->dev, "failed to read chip id %x\n",
-+ IMX219_CHIP_ID);
-+ return ret;
-+ }
-+
-+ if (val != IMX219_CHIP_ID) {
-+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
-+ IMX219_CHIP_ID, val);
-+ return -EIO;
-+ }
-+
-+ dev_err(&client->dev, "%s: chip identifier, got 0x%x\n",
-+ __func__, IMX219_CHIP_ID);
-+
-+ return 0;
-+}
-+
-+static const struct v4l2_subdev_core_ops imx219_core_ops = {
-+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops imx219_video_ops = {
-+ .g_frame_interval = imx219_g_frame_interval,
-+ .s_frame_interval = imx219_s_frame_interval,
-+ .s_stream = imx219_set_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
-+ .enum_mbus_code = imx219_enum_mbus_code,
-+ .get_fmt = imx219_get_pad_format,
-+ .set_fmt = imx219_set_pad_format,
-+ .get_selection = imx219_get_selection,
-+ .enum_frame_size = imx219_enum_frame_size,
-+ .enum_frame_interval = imx219_enum_frame_interval,
-+};
-+
-+static const struct v4l2_subdev_ops imx219_subdev_ops = {
-+ .core = &imx219_core_ops,
-+ .video = &imx219_video_ops,
-+ .pad = &imx219_pad_ops,
-+};
-+
-+static const struct v4l2_subdev_internal_ops imx219_internal_ops = {
-+ .open = imx219_open,
-+};
-+
-+/* Initialize control handlers */
-+static int imx219_init_controls(struct imx219 *imx219)
-+{
-+ struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-+ struct v4l2_ctrl_handler *ctrl_hdlr;
-+ unsigned int height = imx219->mode->height;
-+ struct v4l2_fwnode_device_properties props;
-+ int exposure_max, exposure_def, hblank;
-+ int i, ret;
-+
-+ ctrl_hdlr = &imx219->ctrl_handler;
-+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
-+ if (ret)
-+ return ret;
-+
-+ mutex_init(&imx219->mutex);
-+ ctrl_hdlr->lock = &imx219->mutex;
-+
-+ /* By default, PIXEL_RATE is read only */
-+ imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_PIXEL_RATE, IMX219_PIXEL_RATE,
-+ IMX219_PIXEL_RATE, 1, IMX219_PIXEL_RATE);
-+
-+ imx219->link_freq =
-+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_LINK_FREQ,
-+ ARRAY_SIZE(imx219_link_freq_menu) - 1, 0, imx219_link_freq_menu);
-+ if (imx219->link_freq)
-+ imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+
-+ /* Initial vblank/hblank/exposure parameters based on current mode */
-+ imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
-+ IMX219_VTS_MAX - height, 1,
-+ imx219->mode->vts_def - height);
-+ hblank = IMX219_PPL_DEFAULT - imx219->mode->width;
-+ imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_HBLANK, hblank, hblank, 1, hblank);
-+ if (imx219->hblank)
-+ imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ exposure_max = imx219->mode->vts_def - 4;
-+ exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
-+ exposure_max : IMX219_EXPOSURE_DEFAULT;
-+ imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_EXPOSURE, IMX219_EXPOSURE_MIN, exposure_max,
-+ IMX219_EXPOSURE_STEP, exposure_def);
-+
-+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
-+ IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX,
-+ IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT);
-+
-+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
-+ IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX,
-+ IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT);
-+
-+ imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_HFLIP, 0, 1, 1, 0);
-+ if (imx219->hflip)
-+ imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
-+
-+ imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_VFLIP, 0, 1, 1, 0);
-+ if (imx219->vflip)
-+ imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
-+
-+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_TEST_PATTERN,
-+ ARRAY_SIZE(imx219_test_pattern_menu) - 1,
-+ 0, 0, imx219_test_pattern_menu);
-+ for (i = 0; i < 4; i++) {
-+ /*
-+ * The assumption is that
-+ * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
-+ * V4L2_CID_TEST_PATTERN_BLUE == V4L2_CID_TEST_PATTERN_RED + 2
-+ * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
-+ */
-+ v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-+ V4L2_CID_TEST_PATTERN_RED + i,
-+ IMX219_TESTP_COLOUR_MIN,
-+ IMX219_TESTP_COLOUR_MAX,
-+ IMX219_TESTP_COLOUR_STEP,
-+ IMX219_TESTP_COLOUR_MAX);
-+ /* The "Solid color" pattern is white by default */
-+ }
-+
-+ if (ctrl_hdlr->error) {
-+ ret = ctrl_hdlr->error;
-+ dev_err(&client->dev, "%s control init failed (%d)\n",
-+ __func__, ret);
-+ goto error;
-+ }
-+
-+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
-+ if (ret)
-+ goto error;
-+
-+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx219_ctrl_ops, &props);
-+ if (ret)
-+ goto error;
-+
-+ imx219->sd.ctrl_handler = ctrl_hdlr;
-+
-+ return 0;
-+
-+error:
-+ v4l2_ctrl_handler_free(ctrl_hdlr);
-+ mutex_destroy(&imx219->mutex);
-+
-+ return ret;
-+}
-+
-+static void imx219_free_controls(struct imx219 *imx219)
-+{
-+ v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
-+ mutex_destroy(&imx219->mutex);
-+}
-+
-+static int imx219_check_hwcfg(struct device *dev)
-+{
-+ struct fwnode_handle *endpoint;
-+ struct v4l2_fwnode_endpoint ep_cfg = {
-+ .bus_type = V4L2_MBUS_CSI2_DPHY
-+ };
-+ int ret = -EINVAL;
-+
-+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
-+ if (!endpoint) {
-+ dev_err(dev, "endpoint node not found\n");
-+ return -EINVAL;
-+ }
-+
-+ if (v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep_cfg)) {
-+ dev_err(dev, "could not parse endpoint\n");
-+ goto error_out;
-+ }
-+
-+ /* Check the number of MIPI CSI2 data lanes */
-+ if (ep_cfg.bus.mipi_csi2.num_data_lanes != 2) {
-+ dev_err(dev, "only 2 data lanes are currently supported\n");
-+ goto error_out;
-+ }
-+
-+ /* Check the link frequency set in device tree */
-+ if (!ep_cfg.nr_of_link_frequencies) {
-+ dev_err(dev, "link-frequency property not found in DT\n");
-+ goto error_out;
-+ }
-+
-+ if (ep_cfg.nr_of_link_frequencies != 1 ||
-+ ep_cfg.link_frequencies[0] != IMX219_DEFAULT_LINK_FREQ) {
-+ dev_err(dev, "Link frequency not supported: %lld\n",
-+ ep_cfg.link_frequencies[0]);
-+ goto error_out;
-+ }
-+
-+ ret = 0;
-+
-+error_out:
-+ v4l2_fwnode_endpoint_free(&ep_cfg);
-+ fwnode_handle_put(endpoint);
-+
-+ return ret;
-+}
-+
-+static int imx219_probe(struct i2c_client *client)
-+{
-+ struct device *dev = &client->dev;
-+ struct imx219 *imx219;
-+ int ret;
-+
-+ imx219 = devm_kzalloc(&client->dev, sizeof(*imx219), GFP_KERNEL);
-+ if (!imx219)
-+ return -ENOMEM;
-+
-+ v4l2_i2c_subdev_init(&imx219->sd, client, &imx219_subdev_ops);
-+
-+ /* Check the hardware configuration in device tree */
-+ if (imx219_check_hwcfg(dev))
-+ return -EINVAL;
-+
-+ /* Get system clock (xclk) */
-+ imx219->xclk = devm_clk_get(dev, NULL);
-+ if (IS_ERR(imx219->xclk)) {
-+ dev_err(dev, "failed to get xclk\n");
-+ return PTR_ERR(imx219->xclk);
-+ }
-+
-+ imx219->xclk_freq = clk_get_rate(imx219->xclk);
-+ if (imx219->xclk_freq != IMX219_XCLK_FREQ) {
-+ dev_err(dev, "xclk frequency not supported: %d Hz\n",
-+ imx219->xclk_freq);
-+ return -EINVAL;
-+ }
-+
-+ ret = imx219_get_regulators(imx219);
-+ if (ret) {
-+ dev_err(dev, "failed to get regulators\n");
-+ return ret;
-+ }
-+
-+ /* Request optional enable pin */
-+ imx219->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
-+ usleep_range(IMX219_XCLR_MIN_DELAY_US,
-+ IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
-+
-+ /*
-+ * The sensor must be powered for imx219_identify_module()
-+ * to be able to read the CHIP_ID register
-+ */
-+ ret = imx219_power_on(dev);
-+ if (ret)
-+ return ret;
-+
-+ ret = imx219_identify_module(imx219);
-+ if (ret)
-+ goto error_power_off;
-+
-+ /* Set default mode to max resolution */
-+ imx219->mode = &supported_modes[0];
-+ imx219->frame_interval.numerator = 1;
-+ imx219->frame_interval.denominator = supported_modes[0].fps;
-+
-+ /* sensor doesn't enter LP-11 state upon power up until and unless
-+ * streaming is started, so upon power up switch the modes to:
-+ * streaming -> standby
-+ */
-+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
-+ if (ret < 0)
-+ goto error_power_off;
-+ usleep_range(100, 110);
-+
-+ /* put sensor back to standby mode */
-+ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-+ IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
-+ if (ret < 0)
-+ goto error_power_off;
-+ usleep_range(100, 110);
-+
-+ ret = imx219_init_controls(imx219);
-+ if (ret)
-+ goto error_power_off;
-+
-+ /* Initialize subdev */
-+ imx219->sd.internal_ops = &imx219_internal_ops;
-+ imx219->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-+ imx219->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-+
-+ /* Initialize source pad */
-+ imx219->pad.flags = MEDIA_PAD_FL_SOURCE;
-+
-+ /* Initialize default format */
-+ imx219_set_default_format(imx219);
-+
-+ ret = media_entity_pads_init(&imx219->sd.entity, 1, &imx219->pad);
-+ if (ret) {
-+ dev_err(dev, "failed to init entity pads: %d\n", ret);
-+ goto error_handler_free;
-+ }
-+
-+ ret = v4l2_async_register_subdev_sensor(&imx219->sd);
-+ if (ret < 0) {
-+ dev_err(dev, "failed to register sensor sub-device: %d\n", ret);
-+ goto error_media_entity;
-+ }
-+
-+ /* Enable runtime PM and turn off the device */
-+ pm_runtime_set_active(dev);
-+ pm_runtime_enable(dev);
-+ pm_runtime_idle(dev);
-+
-+ return 0;
-+
-+error_media_entity:
-+ media_entity_cleanup(&imx219->sd.entity);
-+
-+error_handler_free:
-+ imx219_free_controls(imx219);
-+
-+error_power_off:
-+ imx219_power_off(dev);
-+
-+ return ret;
-+}
-+
-+static void imx219_remove(struct i2c_client *client)
-+{
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct imx219 *imx219 = to_imx219(sd);
-+
-+ v4l2_async_unregister_subdev(sd);
-+ media_entity_cleanup(&sd->entity);
-+ imx219_free_controls(imx219);
-+
-+ pm_runtime_disable(&client->dev);
-+ if (!pm_runtime_status_suspended(&client->dev))
-+ imx219_power_off(&client->dev);
-+ pm_runtime_set_suspended(&client->dev);
-+}
-+
-+static const struct of_device_id imx219_dt_ids[] = {
-+ { .compatible = "sony,imx219" },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, imx219_dt_ids);
-+
-+static const struct dev_pm_ops imx219_pm_ops = {
-+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
-+ SET_RUNTIME_PM_OPS(imx219_power_off, imx219_power_on, NULL)
-+};
-+
-+static struct i2c_driver imx219_i2c_driver = {
-+ .driver = {
-+ .name = "imx219",
-+ .of_match_table = imx219_dt_ids,
-+ .pm = &imx219_pm_ops,
-+ },
-+ .probe = imx219_probe,
-+ .remove = imx219_remove,
-+};
-+
-+module_i2c_driver(imx219_i2c_driver);
-+
-+MODULE_AUTHOR("Dave Stevenson <dave.stevenson@raspberrypi.com");
-+MODULE_DESCRIPTION("Sony IMX219 sensor driver");
-+MODULE_LICENSE("GPL v2");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/ov13850_mipi.c
-@@ -0,0 +1,1921 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/clk-provider.h>
-+#include <linux/clkdev.h>
-+#include <linux/ctype.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/gpio/consumer.h>
-+#include <linux/i2c.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_device.h>
-+#include <linux/regulator/consumer.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+#include "stfcamss.h"
-+
-+#define OV13850_XCLK_MIN 6000000
-+#define OV13850_XCLK_MAX 54000000
-+
-+#define OV13850_LINK_FREQ_500MHZ 500000000LL
-+
-+/**
-+ *OV13850 PLL
-+ *
-+ *PLL1:
-+ *
-+ *REF_CLK -> /PREDIVP[0] -> /PREDIV[2:0] -> *DIVP[9:8,7:0] -> /DIVM[3:0] -> /DIV_MIPI[1:0] -> PCLK
-+ *(6-64M) 0x030A 0x0300 0x0301,0x302 0x0303 0x0304
-+ * `-> MIPI_PHY_CLK
-+ *
-+ *
-+ *PLL2:
-+ * 000: 1
-+ * 001: 1.5
-+ * 010: 2
-+ * 011: 2.5
-+ * 100: 3
-+ * 101: 4
-+ * 0: /1 110: 6
-+ * 1: /2 111: 8
-+ *REF_CLK -> /PREDIVP[3] -> /PREDIV[2:0] -> /DIVP[9:0] -> /DIVDAC[3:0] -> DAC_CLK =
-+ *(6~64M) 0x3611
-+ * -> /DIVSP[3:0] -> /DIVS[2:0] -> SCLK
-+ *
-+ * -> /(1+DIVSRAM[3:0]) -> SRAM_CLK
-+ */
-+
-+// PREDIVP
-+#define OV13850_REG_PLL1_PREDIVP 0x030a
-+#define OV13850_PREDIVP_1 0
-+#define OV13850_PREDIVP_2 1
-+
-+// PREDIV
-+#define OV13850_REG_PLL1_PREDIV 0x0300
-+#define OV13850_PREDIV_1 0
-+#define OV13850_PREDIV_1_5 1
-+#define OV13850_PREDIV_2 2
-+#define OV13850_PREDIV_2_5 3
-+#define OV13850_PREDIV_3 4
-+#define OV13850_PREDIV_4 5
-+#define OV13850_PREDIV_6 6
-+#define OV13850_PREDIV_8 7
-+
-+// DIVP
-+#define OV13850_REG_PLL1_DIVP_H 0x0301
-+#define OV13850_REG_PLL1_DIVP_L 0x0302
-+#define OV13850_REG_PLL1_DIVP OV13850_REG_PLL1_DIVP_H
-+
-+// DIVM
-+#define OV13850_REG_PLL1_DIVM 0x0303
-+#define OV13850_DIVM(n) ((n)-1) // n=1~16
-+
-+// DIV_MIPI
-+#define OV13850_REG_PLL1_DIV_MIPI 0x0304
-+#define OV13850_DIV_MIPI_4 0
-+#define OV13850_DIV_MIPI_5 1
-+#define OV13850_DIV_MIPI_6 2
-+#define OV13850_DIV_MIPI_8 3
-+
-+// system control
-+#define OV13850_STREAM_CTRL 0x0100
-+#define OV13850_REG_MIPI_SC 0x300f
-+#define OV13850_MIPI_SC_8_BIT 0x0
-+#define OV13850_MIPI_SC_10_BIT 0x1
-+#define OV13850_MIPI_SC_12_BIT 0x2
-+#define OV13850_GET_MIPI_SC_MIPI_BIT(v) ((v) & 0x3)
-+#define OV13850_REG_MIPI_SC_CTRL0 0x3012
-+#define OV13850_GET_MIPI_SC_CTRL0_LANE_NUM(v) ((v)>>4 & 0xf)
-+
-+// timing
-+#define OV13850_REG_H_CROP_START_H 0x3800
-+#define OV13850_REG_H_CROP_START_L 0x3801
-+#define OV13850_REG_H_CROP_START OV13850_REG_H_CROP_START_H
-+#define OV13850_REG_V_CROP_START_H 0x3802
-+#define OV13850_REG_V_CROP_START_L 0x3803
-+#define OV13850_REG_V_CROP_START OV13850_REG_V_CROP_START_H
-+
-+#define OV13850_REG_H_CROP_END_H 0x3804
-+#define OV13850_REG_H_CROP_END_L 0x3805
-+#define OV13850_REG_H_CROP_END OV13850_REG_H_CROP_END_H
-+#define OV13850_REG_V_CROP_END_H 0x3806
-+#define OV13850_REG_V_CROP_END_L 0x3807
-+#define OV13850_REG_V_CROP_END OV13850_REG_V_CROP_END_H
-+
-+#define OV13850_REG_H_OUTPUT_SIZE_H 0x3808
-+#define OV13850_REG_H_OUTPUT_SIZE_L 0x3809
-+#define OV13850_REG_H_OUTPUT_SIZE OV13850_REG_H_OUTPUT_SIZE_H
-+#define OV13850_REG_V_OUTPUT_SIZE_H 0x380a
-+#define OV13850_REG_V_OUTPUT_SIZE_L 0x380b
-+#define OV13850_REG_V_OUTPUT_SIZE OV13850_REG_V_OUTPUT_SIZE_H
-+
-+#define OV13850_REG_TIMING_HTS_H 0x380c
-+#define OV13850_REG_TIMING_HTS_L 0x380d
-+#define OV13850_REG_TIMING_HTS OV13850_REG_TIMING_HTS_H
-+#define OV13850_REG_TIMING_VTS_H 0x380e
-+#define OV13850_REG_TIMING_VTS_L 0x380f
-+#define OV13850_REG_TIMING_VTS OV13850_REG_TIMING_VTS_H
-+
-+
-+#define OV13850_REG_H_WIN_OFF_H 0x3810
-+#define OV13850_REG_H_WIN_OFF_L 0x3811
-+#define OV13850_REG_V_WIN_OFF_H 0x3812
-+#define OV13850_REG_V_WIN_OFF_L 0x3813
-+
-+#define OV13850_REG_H_INC 0x3814
-+#define OV13850_REG_V_INC 0x3815
-+
-+enum ov13850_mode_id {
-+ OV13850_MODE_1080P_1920_1080 = 0,
-+ OV13850_NUM_MODES,
-+};
-+
-+enum ov13850_frame_rate {
-+ OV13850_15_FPS = 0,
-+ OV13850_30_FPS,
-+ OV13850_60_FPS,
-+ OV13850_NUM_FRAMERATES,
-+};
-+
-+static const int ov13850_framerates[] = {
-+ [OV13850_15_FPS] = 15,
-+ [OV13850_30_FPS] = 30,
-+ [OV13850_60_FPS] = 60,
-+};
-+
-+struct ov13850_pixfmt {
-+ u32 code;
-+ u32 colorspace;
-+};
-+
-+static const struct ov13850_pixfmt ov13850_formats[] = {
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_COLORSPACE_SRGB, },
-+};
-+
-+/* regulator supplies */
-+static const char * const ov13850_supply_name[] = {
-+ "DOVDD", /* Digital I/O (1.8V) supply */
-+ "AVDD", /* Analog (2.8V) supply */
-+ "DVDD", /* Digital Core (1.5V) supply */
-+};
-+
-+#define OV13850_NUM_SUPPLIES ARRAY_SIZE(ov13850_supply_name)
-+
-+/*
-+ * Image sizes below 1280 * 960 use SUBSAMPLING
-+ * Image sizes above 1280 * 960 use SCALING
-+ */
-+enum ov13850_downsize_mode {
-+ SUBSAMPLING,
-+ SCALING,
-+};
-+
-+struct reg_value {
-+ u16 reg_addr;
-+ u8 val;
-+ u8 mask;
-+ u32 delay_ms;
-+};
-+
-+struct ov13850_mode_info {
-+ enum ov13850_mode_id id;
-+ enum ov13850_downsize_mode dn_mode;
-+ u32 hact;
-+ u32 htot;
-+ u32 vact;
-+ u32 vtot;
-+ const struct reg_value *reg_data;
-+ u32 reg_data_size;
-+ u32 max_fps;
-+};
-+
-+struct ov13850_ctrls {
-+ struct v4l2_ctrl_handler handler;
-+ struct v4l2_ctrl *pixel_rate;
-+ struct {
-+ struct v4l2_ctrl *exposure;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_wb;
-+ struct v4l2_ctrl *blue_balance;
-+ struct v4l2_ctrl *red_balance;
-+ };
-+ struct {
-+ struct v4l2_ctrl *anal_gain;
-+ };
-+ struct v4l2_ctrl *brightness;
-+ struct v4l2_ctrl *light_freq;
-+ struct v4l2_ctrl *link_freq;
-+ struct v4l2_ctrl *saturation;
-+ struct v4l2_ctrl *contrast;
-+ struct v4l2_ctrl *hue;
-+ struct v4l2_ctrl *test_pattern;
-+ struct v4l2_ctrl *hflip;
-+ struct v4l2_ctrl *vflip;
-+};
-+
-+struct ov13850_dev {
-+ struct i2c_client *i2c_client;
-+ struct v4l2_subdev sd;
-+ struct media_pad pad;
-+ struct v4l2_fwnode_endpoint ep; /* the parsed DT endpoint info */
-+ struct clk *xclk; /* system clock to OV13850 */
-+ u32 xclk_freq;
-+
-+ struct regulator_bulk_data supplies[OV13850_NUM_SUPPLIES];
-+ struct gpio_desc *reset_gpio;
-+ struct gpio_desc *pwdn_gpio;
-+ bool upside_down;
-+
-+ /* lock to protect all members below */
-+ struct mutex lock;
-+
-+ int power_count;
-+
-+ struct v4l2_mbus_framefmt fmt;
-+ bool pending_fmt_change;
-+
-+ const struct ov13850_mode_info *current_mode;
-+ const struct ov13850_mode_info *last_mode;
-+ enum ov13850_frame_rate current_fr;
-+ struct v4l2_fract frame_interval;
-+
-+ struct ov13850_ctrls ctrls;
-+
-+ u32 prev_sysclk, prev_hts;
-+ u32 ae_low, ae_high, ae_target;
-+
-+ bool pending_mode_change;
-+ bool streaming;
-+};
-+
-+static inline struct ov13850_dev *to_ov13850_dev(struct v4l2_subdev *sd)
-+{
-+ return container_of(sd, struct ov13850_dev, sd);
-+}
-+
-+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-+{
-+ return &container_of(ctrl->handler, struct ov13850_dev,
-+ ctrls.handler)->sd;
-+}
-+
-+/* ov13850 initial register */
-+static const struct reg_value ov13850_init_setting_30fps_1080P[] = {
-+
-+};
-+
-+static const struct reg_value ov13850_setting_1080P_1920_1080[] = {
-+//;XVCLK=24MHz, SCLK=4x120MHz, MIPI 640Mbps, DACCLK=240MHz
-+/*
-+ * using quarter size to scale down
-+ */
-+ {0x0103, 0x01, 0, 0}, // ; software reset
-+
-+ {0x0300, 0x01, 0, 0}, //; PLL
-+ {0x0301, 0x00, 0, 0}, //; PLL1_DIVP_hi
-+ {0x0302, 0x28, 0, 0}, //; PLL1_DIVP_lo
-+ {0x0303, 0x00, 0, 0}, // ; PLL
-+ {0x030a, 0x00, 0, 0}, // ; PLL
-+ //{0xffff, 20, 0, 0},
-+ {0x300f, 0x11, 0, 0}, // SFC modified, MIPI_SRC, [1:0] 00-8bit, 01-10bit, 10-12bit
-+ {0x3010, 0x01, 0, 0}, // ; MIPI PHY
-+ {0x3011, 0x76, 0, 0}, // ; MIPI PHY
-+ {0x3012, 0x41, 0, 0}, // ; MIPI 4 lane
-+ {0x3013, 0x12, 0, 0}, // ; MIPI control
-+ {0x3014, 0x11, 0, 0}, // ; MIPI control
-+ {0x301f, 0x03, 0, 0}, //
-+ {0x3106, 0x00, 0, 0}, //
-+ {0x3210, 0x47, 0, 0}, //
-+ {0x3500, 0x00, 0, 0}, // ; exposure HH
-+ {0x3501, 0x67, 0, 0}, // ; exposure H
-+ {0x3502, 0x80, 0, 0}, // ; exposure L
-+ {0x3506, 0x00, 0, 0}, // ; short exposure HH
-+ {0x3507, 0x02, 0, 0}, // ; short exposure H
-+ {0x3508, 0x00, 0, 0}, // ; short exposure L
-+ {0x3509, 0x10, 0, 0},//00},//8},
-+ {0x350a, 0x00, 0, 0}, // ; gain H
-+ {0x350b, 0x10, 0, 0}, // ; gain L
-+ {0x350e, 0x00, 0, 0}, // ; short gain H
-+ {0x350f, 0x10, 0, 0}, // ; short gain L
-+ {0x3600, 0x40, 0, 0}, // ; analog control
-+ {0x3601, 0xfc, 0, 0}, // ; analog control
-+ {0x3602, 0x02, 0, 0}, // ; analog control
-+ {0x3603, 0x48, 0, 0}, // ; analog control
-+ {0x3604, 0xa5, 0, 0}, // ; analog control
-+ {0x3605, 0x9f, 0, 0}, // ; analog control
-+ {0x3607, 0x00, 0, 0}, // ; analog control
-+ {0x360a, 0x40, 0, 0}, // ; analog control
-+ {0x360b, 0x91, 0, 0}, // ; analog control
-+ {0x360c, 0x49, 0, 0}, // ; analog control
-+ {0x360f, 0x8a, 0, 0}, //
-+ {0x3611, 0x10, 0, 0}, // ; PLL2
-+ //{0x3612, 0x23, 0, 0}, // ; PLL2
-+ {0x3612, 0x13, 0, 0}, // ; PLL2
-+ //{0x3613, 0x33, 0, 0}, // ; PLL2
-+ {0x3613, 0x22, 0, 0}, // ; PLL2
-+ //{0xffff, 50, 0, 0},
-+ {0x3614, 0x28, 0, 0}, //[7:0] PLL2_DIVP lo
-+ {0x3615, 0x08, 0, 0}, //[7:6] Debug mode, [5:4] N_pump clock div, [3:2] P_pump clock div, [1:0] PLL2_DIVP hi
-+ {0x3641, 0x02, 0, 0},
-+ {0x3660, 0x82, 0, 0},
-+ {0x3668, 0x54, 0, 0},
-+ {0x3669, 0x40, 0, 0},
-+ {0x3667, 0xa0, 0, 0},
-+ {0x3702, 0x40, 0, 0},
-+ {0x3703, 0x44, 0, 0},
-+ {0x3704, 0x2c, 0, 0},
-+ {0x3705, 0x24, 0, 0},
-+ {0x3706, 0x50, 0, 0},
-+ {0x3707, 0x44, 0, 0},
-+ {0x3708, 0x3c, 0, 0},
-+ {0x3709, 0x1f, 0, 0},
-+ {0x370a, 0x26, 0, 0},
-+ {0x370b, 0x3c, 0, 0},
-+ {0x3720, 0x66, 0, 0},
-+ {0x3722, 0x84, 0, 0},
-+ {0x3728, 0x40, 0, 0},
-+ {0x372a, 0x00, 0, 0},
-+ {0x372f, 0x90, 0, 0},
-+ {0x3710, 0x28, 0, 0},
-+ {0x3716, 0x03, 0, 0},
-+ {0x3718, 0x10, 0, 0},
-+ {0x3719, 0x08, 0, 0},
-+ {0x371c, 0xfc, 0, 0},
-+ {0x3760, 0x13, 0, 0},
-+ {0x3761, 0x34, 0, 0},
-+ {0x3767, 0x24, 0, 0},
-+ {0x3768, 0x06, 0, 0},
-+ {0x3769, 0x45, 0, 0},
-+ {0x376c, 0x23, 0, 0},
-+ {0x3d84, 0x00, 0, 0}, // ; OTP program disable
-+ {0x3d85, 0x17, 0, 0}, // ; OTP power up load data enable, power load setting enable, software load setting
-+ {0x3d8c, 0x73, 0, 0}, // ; OTP start address H
-+ {0x3d8d, 0xbf, 0, 0}, // ; OTP start address L
-+ {0x3800, 0x00, 0, 0}, // ; H crop start H
-+ {0x3801, 0x08, 0, 0}, // ; H crop start L
-+ {0x3802, 0x00, 0, 0}, // ; V crop start H
-+ {0x3803, 0x04, 0, 0}, // ; V crop start L
-+ {0x3804, 0x10, 0, 0}, // ; H crop end H
-+ {0x3805, 0x97, 0, 0}, // ; H crop end L
-+ {0x3806, 0x0c, 0, 0}, // ; V crop end H
-+ {0x3807, 0x4b, 0, 0}, // ; V crop end L
-+ {0x3808, 0x08, 0, 0}, // ; H output size H
-+ {0x3809, 0x40, 0, 0}, // ; H output size L
-+ {0x380a, 0x06, 0, 0}, // ; V output size H
-+ {0x380b, 0x20, 0, 0}, // ; V output size L
-+ {0x380c, 0x25, 0, 0}, // ; HTS H
-+ {0x380d, 0x80, 0, 0}, // ; HTS L
-+ {0x380e, 0x06, 0, 0}, // ; VTS H
-+ {0x380f, 0x80, 0, 0}, // ; VTS L
-+ {0x3810, 0x00, 0, 0}, // ; H win off H
-+ {0x3811, 0x04, 0, 0}, // ; H win off L
-+ {0x3812, 0x00, 0, 0}, // ; V win off H
-+ {0x3813, 0x02, 0, 0}, // ; V win off L
-+ {0x3814, 0x31, 0, 0}, // ; H inc
-+ {0x3815, 0x31, 0, 0}, // ; V inc
-+ {0x3820, 0x02, 0, 0}, // ; V flip off, V bin on
-+ {0x3821, 0x05, 0, 0}, // ; H mirror on, H bin on
-+ {0x3834, 0x00, 0, 0}, //
-+ {0x3835, 0x1c, 0, 0}, // ; cut_en, vts_auto, blk_col_dis
-+ {0x3836, 0x08, 0, 0}, //
-+ {0x3837, 0x02, 0, 0}, //
-+ {0x4000, 0xf1, 0, 0},//c1}, // ; BLC offset trig en, format change trig en, gain trig en, exp trig en, median en
-+ {0x4001, 0x00, 0, 0}, // ; BLC
-+ {0x400b, 0x0c, 0, 0}, // ; BLC
-+ {0x4011, 0x00, 0, 0}, // ; BLC
-+ {0x401a, 0x00, 0, 0}, // ; BLC
-+ {0x401b, 0x00, 0, 0}, // ; BLC
-+ {0x401c, 0x00, 0, 0}, // ; BLC
-+ {0x401d, 0x00, 0, 0}, // ; BLC
-+ {0x4020, 0x00, 0, 0}, // ; BLC
-+ {0x4021, 0xe4, 0, 0}, // ; BLC
-+ {0x4022, 0x07, 0, 0}, // ; BLC
-+ {0x4023, 0x5f, 0, 0}, // ; BLC
-+ {0x4024, 0x08, 0, 0}, // ; BLC
-+ {0x4025, 0x44, 0, 0}, // ; BLC
-+ {0x4026, 0x08, 0, 0}, // ; BLC
-+ {0x4027, 0x47, 0, 0}, // ; BLC
-+ {0x4028, 0x00, 0, 0}, // ; BLC
-+ {0x4029, 0x02, 0, 0}, // ; BLC
-+ {0x402a, 0x04, 0, 0}, // ; BLC
-+ {0x402b, 0x08, 0, 0}, // ; BLC
-+ {0x402c, 0x02, 0, 0}, // ; BLC
-+ {0x402d, 0x02, 0, 0}, // ; BLC
-+ {0x402e, 0x0c, 0, 0}, // ; BLC
-+ {0x402f, 0x08, 0, 0}, // ; BLC
-+ {0x403d, 0x2c, 0, 0}, //
-+ {0x403f, 0x7f, 0, 0}, //
-+ {0x4500, 0x82, 0, 0}, // ; BLC
-+ {0x4501, 0x38, 0, 0}, // ; BLC
-+ {0x4601, 0x04, 0, 0}, //
-+ {0x4602, 0x22, 0, 0}, //
-+ {0x4603, 0x01, 0, 0}, //; VFIFO
-+ {0x4837, 0x19, 0, 0}, //; MIPI global timing
-+ {0x4d00, 0x04, 0, 0}, // ; temperature monitor
-+ {0x4d01, 0x42, 0, 0}, // ; temperature monitor
-+ {0x4d02, 0xd1, 0, 0}, // ; temperature monitor
-+ {0x4d03, 0x90, 0, 0}, // ; temperature monitor
-+ {0x4d04, 0x66, 0, 0}, // ; temperature monitor
-+ {0x4d05, 0x65, 0, 0}, // ; temperature monitor
-+ {0x5000, 0x0e, 0, 0}, // ; windowing enable, BPC on, WPC on, Lenc on
-+ {0x5001, 0x03, 0, 0}, // ; BLC enable, MWB on
-+ {0x5002, 0x07, 0, 0}, //
-+ {0x5013, 0x40, 0, 0},
-+ {0x501c, 0x00, 0, 0},
-+ {0x501d, 0x10, 0, 0},
-+ //{0x5057, 0x56, 0, 0},//add
-+ {0x5056, 0x08, 0, 0},
-+ {0x5058, 0x08, 0, 0},
-+ {0x505a, 0x08, 0, 0},
-+ {0x5242, 0x00, 0, 0},
-+ {0x5243, 0xb8, 0, 0},
-+ {0x5244, 0x00, 0, 0},
-+ {0x5245, 0xf9, 0, 0},
-+ {0x5246, 0x00, 0, 0},
-+ {0x5247, 0xf6, 0, 0},
-+ {0x5248, 0x00, 0, 0},
-+ {0x5249, 0xa6, 0, 0},
-+ {0x5300, 0xfc, 0, 0},
-+ {0x5301, 0xdf, 0, 0},
-+ {0x5302, 0x3f, 0, 0},
-+ {0x5303, 0x08, 0, 0},
-+ {0x5304, 0x0c, 0, 0},
-+ {0x5305, 0x10, 0, 0},
-+ {0x5306, 0x20, 0, 0},
-+ {0x5307, 0x40, 0, 0},
-+ {0x5308, 0x08, 0, 0},
-+ {0x5309, 0x08, 0, 0},
-+ {0x530a, 0x02, 0, 0},
-+ {0x530b, 0x01, 0, 0},
-+ {0x530c, 0x01, 0, 0},
-+ {0x530d, 0x0c, 0, 0},
-+ {0x530e, 0x02, 0, 0},
-+ {0x530f, 0x01, 0, 0},
-+ {0x5310, 0x01, 0, 0},
-+ {0x5400, 0x00, 0, 0},
-+ {0x5401, 0x61, 0, 0},
-+ {0x5402, 0x00, 0, 0},
-+ {0x5403, 0x00, 0, 0},
-+ {0x5404, 0x00, 0, 0},
-+ {0x5405, 0x40, 0, 0},
-+ {0x540c, 0x05, 0, 0},
-+ {0x5b00, 0x00, 0, 0},
-+ {0x5b01, 0x00, 0, 0},
-+ {0x5b02, 0x01, 0, 0},
-+ {0x5b03, 0xff, 0, 0},
-+ {0x5b04, 0x02, 0, 0},
-+ {0x5b05, 0x6c, 0, 0},
-+ {0x5b09, 0x02, 0, 0}, //
-+ //{0x5e00, 0x00, 0, 0}, // ; test pattern disable
-+ //{0x5e00, 0x80, 0, 0}, // ; test pattern enable
-+ {0x5e10, 0x1c, 0, 0}, // ; ISP test disable
-+
-+ //{0x0300, 0x01, 0, 0},// ; PLL
-+ //{0x0302, 0x28, 0, 0},// ; PLL
-+ //{0xffff, 50, 0, 0},
-+ {0x3501, 0x67, 0, 0},// ; Exposure H
-+ {0x370a, 0x26, 0, 0},//
-+ {0x372a, 0x00, 0, 0},
-+ {0x372f, 0x90, 0, 0},
-+ {0x3801, 0x08, 0, 0}, //; H crop start L
-+ {0x3803, 0x04, 0, 0}, //; V crop start L
-+ {0x3805, 0x97, 0, 0}, //; H crop end L
-+ {0x3807, 0x4b, 0, 0}, //; V crop end L
-+ {0x3808, 0x08, 0, 0}, //; H output size H
-+ {0x3809, 0x40, 0, 0}, //; H output size L
-+ {0x380a, 0x06, 0, 0}, //; V output size H
-+ {0x380b, 0x20, 0, 0}, //; V output size L
-+ {0x380c, 0x25, 0, 0}, //; HTS H
-+ {0x380d, 0x80, 0, 0}, //; HTS L
-+ {0x380e, 0x0a, 0, 0},//6}, //; VTS H
-+ {0x380f, 0x80, 0, 0}, //; VTS L
-+ {0x3813, 0x02, 0, 0}, //; V win off
-+ {0x3814, 0x31, 0, 0}, //; H inc
-+ {0x3815, 0x31, 0, 0}, //; V inc
-+ {0x3820, 0x02, 0, 0}, //; V flip off, V bin on
-+ {0x3821, 0x05, 0, 0}, //; H mirror on, H bin on
-+ {0x3836, 0x08, 0, 0}, //
-+ {0x3837, 0x02, 0, 0}, //
-+ {0x4020, 0x00, 0, 0}, //
-+ {0x4021, 0xe4, 0, 0}, //
-+ {0x4022, 0x07, 0, 0}, //
-+ {0x4023, 0x5f, 0, 0}, //
-+ {0x4024, 0x08, 0, 0}, //
-+ {0x4025, 0x44, 0, 0}, //
-+ {0x4026, 0x08, 0, 0}, //
-+ {0x4027, 0x47, 0, 0}, //
-+ {0x4603, 0x01, 0, 0}, //; VFIFO
-+ {0x4837, 0x19, 0, 0}, //; MIPI global timing
-+ {0x4802, 0x42, 0, 0}, //default 0x00
-+ {0x481a, 0x00, 0, 0},
-+ {0x481b, 0x1c, 0, 0}, //default 0x3c prepare
-+ {0x4826, 0x12, 0, 0}, //default 0x32 trail
-+ {0x5401, 0x61, 0, 0}, //
-+ {0x5405, 0x40, 0, 0}, //
-+
-+ //{0xffff, 200, 0, 0},
-+ //{0xffff, 200, 0, 0},
-+ //{0xffff, 200, 0, 0},
-+
-+ //{0x0100, 0x01, 0, 0}, //; wake up, streaming
-+};
-+
-+/* power-on sensor init reg table */
-+static const struct ov13850_mode_info ov13850_mode_init_data = {
-+ OV13850_MODE_1080P_1920_1080, SCALING,
-+ 1920, 0x6e0, 1080, 0x470,
-+ ov13850_init_setting_30fps_1080P,
-+ ARRAY_SIZE(ov13850_init_setting_30fps_1080P),
-+ OV13850_30_FPS,
-+};
-+
-+static const struct ov13850_mode_info
-+ov13850_mode_data[OV13850_NUM_MODES] = {
-+ {OV13850_MODE_1080P_1920_1080, SCALING,
-+ 1920, 0x6e0, 1080, 0x470,
-+ ov13850_setting_1080P_1920_1080,
-+ ARRAY_SIZE(ov13850_setting_1080P_1920_1080),
-+ OV13850_30_FPS},
-+};
-+
-+static int ov13850_write_reg(struct ov13850_dev *sensor, u16 reg, u8 val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg;
-+ u8 buf[3];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+ buf[2] = val;
-+
-+ msg.addr = client->addr;
-+ msg.flags = client->flags;
-+ msg.buf = buf;
-+ msg.len = sizeof(buf);
-+
-+ ret = i2c_transfer(client->adapter, &msg, 1);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x, val=%x\n",
-+ __func__, reg, val);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ov13850_read_reg(struct ov13850_dev *sensor, u16 reg, u8 *val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg[2];
-+ u8 buf[2];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+
-+ msg[0].addr = client->addr;
-+ msg[0].flags = client->flags;
-+ msg[0].buf = buf;
-+ msg[0].len = sizeof(buf);
-+
-+ msg[1].addr = client->addr;
-+ msg[1].flags = client->flags | I2C_M_RD;
-+ msg[1].buf = buf;
-+ msg[1].len = 1;
-+
-+ ret = i2c_transfer(client->adapter, msg, 2);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x\n",
-+ __func__, reg);
-+ return ret;
-+ }
-+
-+ *val = buf[0];
-+ return 0;
-+}
-+
-+static int ov13850_read_reg16(struct ov13850_dev *sensor, u16 reg, u16 *val)
-+{
-+ u8 hi, lo;
-+ int ret;
-+
-+ ret = ov13850_read_reg(sensor, reg, &hi);
-+ if (ret)
-+ return ret;
-+ ret = ov13850_read_reg(sensor, reg + 1, &lo);
-+ if (ret)
-+ return ret;
-+
-+ *val = ((u16)hi << 8) | (u16)lo;
-+ return 0;
-+}
-+
-+static int ov13850_write_reg16(struct ov13850_dev *sensor, u16 reg, u16 val)
-+{
-+ int ret;
-+
-+ ret = ov13850_write_reg(sensor, reg, val >> 8);
-+ if (ret)
-+ return ret;
-+
-+ return ov13850_write_reg(sensor, reg + 1, val & 0xff);
-+}
-+
-+static int ov13850_mod_reg(struct ov13850_dev *sensor, u16 reg,
-+ u8 mask, u8 val)
-+{
-+ u8 readval;
-+ int ret;
-+
-+ ret = ov13850_read_reg(sensor, reg, &readval);
-+ if (ret)
-+ return ret;
-+
-+ readval &= ~mask;
-+ val &= mask;
-+ val |= readval;
-+
-+ return ov13850_write_reg(sensor, reg, val);
-+}
-+
-+static int ov13850_set_timings(struct ov13850_dev *sensor,
-+ const struct ov13850_mode_info *mode)
-+{
-+ int ret;
-+
-+ ret = ov13850_write_reg16(sensor, OV13850_REG_H_OUTPUT_SIZE, mode->hact);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = ov13850_write_reg16(sensor, OV13850_REG_V_OUTPUT_SIZE, mode->vact);
-+ if (ret < 0)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+static int ov13850_load_regs(struct ov13850_dev *sensor,
-+ const struct ov13850_mode_info *mode)
-+{
-+ const struct reg_value *regs = mode->reg_data;
-+ unsigned int i;
-+ u32 delay_ms;
-+ u16 reg_addr;
-+ u8 mask, val;
-+ int ret = 0;
-+
-+ st_info(ST_SENSOR, "%s, mode = 0x%x\n", __func__, mode->id);
-+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
-+ delay_ms = regs->delay_ms;
-+ reg_addr = regs->reg_addr;
-+ val = regs->val;
-+ mask = regs->mask;
-+
-+ if (mask)
-+ ret = ov13850_mod_reg(sensor, reg_addr, mask, val);
-+ else
-+ ret = ov13850_write_reg(sensor, reg_addr, val);
-+ if (ret)
-+ break;
-+
-+ if (delay_ms)
-+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
-+ }
-+
-+ return ov13850_set_timings(sensor, mode);
-+}
-+
-+
-+
-+static int ov13850_get_gain(struct ov13850_dev *sensor)
-+{
-+ u32 gain = 0;
-+ return gain;
-+}
-+
-+static int ov13850_set_gain(struct ov13850_dev *sensor, int gain)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_stream_mipi(struct ov13850_dev *sensor, bool on)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_get_sysclk(struct ov13850_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_night_mode(struct ov13850_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_get_hts(struct ov13850_dev *sensor)
-+{
-+ /* read HTS from register settings */
-+ u16 hts;
-+ int ret;
-+
-+ ret = ov13850_read_reg16(sensor, OV13850_REG_TIMING_HTS, &hts);
-+ if (ret)
-+ return ret;
-+ return hts;
-+}
-+
-+static int ov13850_set_hts(struct ov13850_dev *sensor, int hts)
-+{
-+ return ov13850_write_reg16(sensor, OV13850_REG_TIMING_HTS, hts);
-+}
-+
-+
-+static int ov13850_get_vts(struct ov13850_dev *sensor)
-+{
-+ u16 vts;
-+ int ret;
-+
-+ ret = ov13850_read_reg16(sensor, OV13850_REG_TIMING_VTS, &vts);
-+ if (ret)
-+ return ret;
-+ return vts;
-+}
-+
-+static int ov13850_set_vts(struct ov13850_dev *sensor, int vts)
-+{
-+ return ov13850_write_reg16(sensor, OV13850_REG_TIMING_VTS, vts);
-+}
-+
-+static int ov13850_get_light_freq(struct ov13850_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_bandingfilter(struct ov13850_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_ae_target(struct ov13850_dev *sensor, int target)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_get_binning(struct ov13850_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_binning(struct ov13850_dev *sensor, bool enable)
-+{
-+ return 0;
-+}
-+
-+static const struct ov13850_mode_info *
-+ov13850_find_mode(struct ov13850_dev *sensor, enum ov13850_frame_rate fr,
-+ int width, int height, bool nearest)
-+{
-+ const struct ov13850_mode_info *mode;
-+
-+ mode = v4l2_find_nearest_size(ov13850_mode_data,
-+ ARRAY_SIZE(ov13850_mode_data),
-+ hact, vact,
-+ width, height);
-+
-+ if (!mode ||
-+ (!nearest && (mode->hact != width || mode->vact != height)))
-+ return NULL;
-+
-+ /* Check to see if the current mode exceeds the max frame rate */
-+ if (ov13850_framerates[fr] > ov13850_framerates[mode->max_fps])
-+ return NULL;
-+
-+ return mode;
-+}
-+
-+static u64 ov13850_calc_pixel_rate(struct ov13850_dev *sensor)
-+{
-+ u64 rate;
-+
-+ rate = sensor->current_mode->vact * sensor->current_mode->hact;
-+ rate *= ov13850_framerates[sensor->current_fr];
-+
-+ return rate;
-+}
-+
-+/*
-+ * After trying the various combinations, reading various
-+ * documentation spread around the net, and from the various
-+ * feedback, the clock tree is probably as follows:
-+ *
-+ * +--------------+
-+ * | Ext. Clock |
-+ * +-+------------+
-+ * | +----------+
-+ * +->| PLL1 | - reg 0x030a, bit0 for the pre-dividerp
-+ * +-+--------+ - reg 0x0300, bits 0-2 for the pre-divider
-+ * +-+--------+ - reg 0x0301~0x0302, for the multiplier
-+ * | +--------------+
-+ * +->| MIPI Divider | - reg 0x0303, bits 0-3 for the pre-divider
-+ * | +---------> MIPI PHY CLK
-+ * | +-----+
-+ * | +->| PLL1_DIV_MIPI | - reg 0x0304, bits 0-1 for the divider
-+ * | +----------------> PCLK
-+ * | +-----+
-+ *
-+ * +--------------+
-+ * | Ext. Clock |
-+ * +-+------------+
-+ * | +----------+
-+ * +->| PLL2 | - reg 0x0311, bit0 for the pre-dividerp
-+ * +-+--------+ - reg 0x030b, bits 0-2 for the pre-divider
-+ * +-+--------+ - reg 0x030c~0x030d, for the multiplier
-+ * | +--------------+
-+ * +->| SCLK Divider | - reg 0x030F, bits 0-3 for the pre-divider
-+ * +-+--------+ - reg 0x030E, bits 0-2 for the divider
-+ * | +---------> SCLK
-+ *
-+ * | +-----+
-+ * +->| DAC Divider | - reg 0x0312, bits 0-3 for the divider
-+ * | +----------------> DACCLK
-+ */
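-+
-+/*
-+ * Worked example (illustrative only, not taken from vendor documentation):
-+ * the 1080P register table above programs 0x030a = 0x00 (PREDIVP /1),
-+ * 0x0300 = 0x01 (PREDIV /1.5), 0x0301/0x0302 = 0x0028 (DIVP x40) and
-+ * 0x0303 = 0x00 (DIVM /1), so with a 24 MHz XVCLK the MIPI PHY branch
-+ * runs at
-+ *
-+ *   24 MHz / 1 / 1.5 * 40 / 1 = 640 MHz
-+ *
-+ * which is consistent with the "MIPI 640Mbps" note in that table.
-+ */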
-+
-+/*
-+ * ov13850_set_mipi_pclk() - Calculate the clock tree configuration values
-+ * for the MIPI CSI-2 output.
-+ *
-+ * @rate: The requested bandwidth per lane in bytes per second.
-+ * 'Bandwidth Per Lane' is calculated as:
-+ * bpl = HTOT * VTOT * FPS * bpp / num_lanes;
-+ *
-+ * This function uses the requested bandwidth to calculate:
-+ *
-+ * - mipi_pclk = bpl / 2; ( / 2 is for CSI-2 DDR)
-+ * - mipi_phy_clk = mipi_pclk * PLL1_DIV_MIPI;
-+ *
-+ * with these fixed parameters:
-+ * PLL1_PREDIVP = 1;
-+ * PLL1_PREDIV = 1; (MIPI_BIT_MODE == 8 ? 2 : 2.5);
-+ * PLL1_DIVM = 1;
-+ * PLL1_DIV_MIPI = 4;
-+ *
-+ * FIXME: this has been tested with a 10-bit raw, 2-lane setup only.
-+ * MIPI_DIV is fixed to value 2, but it -might- be changed according to the
-+ * above formula for setups with 1 lane or image formats with different bpp.
-+ *
-+ * FIXME: this deviates from the sensor manual documentation which is quite
-+ * thin on the MIPI clock tree generation part.
-+ */
-+
-+
-+
-+static int ov13850_set_mipi_pclk(struct ov13850_dev *sensor,
-+ unsigned long rate)
-+{
-+
-+ return 0;
-+}
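-+
-+/*
-+ * Illustrative sketch only (not part of the original driver): one possible
-+ * way to derive the per-lane rate described in the comment above from a
-+ * mode. The helper name and the fixed bpp / lane-count values are
-+ * assumptions made for the example, not values from the datasheet.
-+ */
-+#if 0
-+static u64 ov13850_example_bpl(const struct ov13850_mode_info *mode, int fps)
-+{
-+	const unsigned int bpp = 10;		/* 10-bit raw Bayer */
-+	const unsigned int num_lanes = 4;	/* assumed 4-lane CSI-2 link */
-+
-+	/* bpl = HTOT * VTOT * FPS * bpp / num_lanes (see comment above) */
-+	return (u64)mode->htot * mode->vtot * fps * bpp / num_lanes;
-+}
-+#endif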
-+
-+/*
-+ * if sensor changes inside scaling or subsampling
-+ * change mode directly
-+ */
-+static int ov13850_set_mode_direct(struct ov13850_dev *sensor,
-+ const struct ov13850_mode_info *mode)
-+{
-+ if (!mode->reg_data)
-+ return -EINVAL;
-+
-+ /* Write capture setting */
-+ return ov13850_load_regs(sensor, mode);
-+}
-+
-+static int ov13850_set_mode(struct ov13850_dev *sensor)
-+{
-+ const struct ov13850_mode_info *mode = sensor->current_mode;
-+ const struct ov13850_mode_info *orig_mode = sensor->last_mode;
-+ int ret = 0;
-+
-+ ret = ov13850_set_mode_direct(sensor, mode);
-+ if (ret < 0)
-+ return ret;
-+
-+ /*
-+ * only 10-bit raw output over MIPI is supported
-+ */
-+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
-+ ret = ov13850_set_mipi_pclk(sensor, 0);
-+
-+ if (ret < 0)
-+ return 0;
-+
-+ sensor->pending_mode_change = false;
-+ sensor->last_mode = mode;
-+ return 0;
-+}
-+
-+static int ov13850_set_framefmt(struct ov13850_dev *sensor,
-+ struct v4l2_mbus_framefmt *format);
-+
-+/* restore the last set video mode after chip power-on */
-+static int ov13850_restore_mode(struct ov13850_dev *sensor)
-+{
-+ int ret;
-+
-+ /* first load the initial register values */
-+ ret = ov13850_load_regs(sensor, &ov13850_mode_init_data);
-+ if (ret < 0)
-+ return ret;
-+ sensor->last_mode = &ov13850_mode_init_data;
-+
-+ /* now restore the last capture mode */
-+ ret = ov13850_set_mode(sensor);
-+ if (ret < 0)
-+ return ret;
-+
-+ return ov13850_set_framefmt(sensor, &sensor->fmt);
-+}
-+
-+static void ov13850_power(struct ov13850_dev *sensor, bool enable)
-+{
-+ if (!sensor->pwdn_gpio)
-+ return;
-+ if (enable) {
-+ gpiod_set_value_cansleep(sensor->pwdn_gpio, 0);
-+ gpiod_set_value_cansleep(sensor->pwdn_gpio, 1);
-+ } else {
-+ gpiod_set_value_cansleep(sensor->pwdn_gpio, 0);
-+ }
-+
-+ mdelay(100);
-+}
-+
-+static void ov13850_reset(struct ov13850_dev *sensor)
-+{
-+ if (!sensor->reset_gpio)
-+ return;
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
-+ mdelay(100);
-+}
-+
-+static int ov13850_set_power_on(struct ov13850_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ int ret;
-+
-+ ret = clk_prepare_enable(sensor->xclk);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable clock\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ ret = regulator_bulk_enable(OV13850_NUM_SUPPLIES,
-+ sensor->supplies);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable regulators\n",
-+ __func__);
-+ goto xclk_off;
-+ }
-+
-+ ov13850_reset(sensor);
-+ ov13850_power(sensor, true);
-+
-+ return 0;
-+
-+xclk_off:
-+ clk_disable_unprepare(sensor->xclk);
-+ return ret;
-+}
-+
-+static void ov13850_set_power_off(struct ov13850_dev *sensor)
-+{
-+ ov13850_power(sensor, false);
-+ regulator_bulk_disable(OV13850_NUM_SUPPLIES, sensor->supplies);
-+ clk_disable_unprepare(sensor->xclk);
-+}
-+
-+static int ov13850_set_power_mipi(struct ov13850_dev *sensor, bool on)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_power(struct ov13850_dev *sensor, bool on)
-+{
-+ int ret = 0;
-+ u16 chip_id;
-+
-+ if (on) {
-+ ret = ov13850_set_power_on(sensor);
-+ if (ret)
-+ return ret;
-+
-+#ifdef UNUSED_CODE
-+ ret = ov13850_read_reg16(sensor, OV13850_REG_CHIP_ID, &chip_id);
-+ if (ret) {
-+ dev_err(&sensor->i2c_client->dev, "%s: failed to read chip identifier\n",
-+ __func__);
-+ ret = -ENODEV;
-+ goto power_off;
-+ }
-+
-+ if (chip_id != OV13850_CHIP_ID) {
-+ dev_err(&sensor->i2c_client->dev,
-+ "%s: wrong chip identifier, expected 0x%x, got 0x%x\n",
-+ __func__, OV13850_CHIP_ID, chip_id);
-+ ret = -ENXIO;
-+ goto power_off;
-+ }
-+ dev_err(&sensor->i2c_client->dev, "%s: chip identifier, got 0x%x\n",
-+ __func__, chip_id);
-+#endif
-+
-+ ret = ov13850_restore_mode(sensor);
-+ if (ret)
-+ goto power_off;
-+ }
-+
-+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
-+ ret = ov13850_set_power_mipi(sensor, on);
-+ if (ret)
-+ goto power_off;
-+
-+ if (!on)
-+ ov13850_set_power_off(sensor);
-+
-+ return 0;
-+
-+power_off:
-+ ov13850_set_power_off(sensor);
-+ return ret;
-+}
-+
-+static int ov13850_s_power(struct v4l2_subdev *sd, int on)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ int ret = 0;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ /*
-+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
-+ * update the power state.
-+ */
-+ if (sensor->power_count == !on) {
-+ ret = ov13850_set_power(sensor, !!on);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ /* Update the power count. */
-+ sensor->power_count += on ? 1 : -1;
-+ WARN_ON(sensor->power_count < 0);
-+out:
-+ mutex_unlock(&sensor->lock);
-+
-+ if (on && !ret && sensor->power_count == 1) {
-+ /* restore controls */
-+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov13850_try_frame_interval(struct ov13850_dev *sensor,
-+ struct v4l2_fract *fi,
-+ u32 width, u32 height)
-+{
-+ const struct ov13850_mode_info *mode;
-+ enum ov13850_frame_rate rate = OV13850_15_FPS;
-+ int minfps, maxfps, best_fps, fps;
-+ int i;
-+
-+ minfps = ov13850_framerates[OV13850_15_FPS];
-+ maxfps = ov13850_framerates[OV13850_NUM_FRAMERATES - 1];
-+
-+ if (fi->numerator == 0) {
-+ fi->denominator = maxfps;
-+ fi->numerator = 1;
-+ rate = OV13850_60_FPS;
-+ goto find_mode;
-+ }
-+
-+ fps = clamp_val(DIV_ROUND_CLOSEST(fi->denominator, fi->numerator),
-+ minfps, maxfps);
-+
-+ best_fps = minfps;
-+ for (i = 0; i < ARRAY_SIZE(ov13850_framerates); i++) {
-+ int curr_fps = ov13850_framerates[i];
-+
-+ if (abs(curr_fps - fps) < abs(best_fps - fps)) {
-+ best_fps = curr_fps;
-+ rate = i;
-+ }
-+ }
-+ st_info(ST_SENSOR, "best_fps = %d, fps = %d\n", best_fps, fps);
-+
-+ fi->numerator = 1;
-+ fi->denominator = best_fps;
-+
-+find_mode:
-+ mode = ov13850_find_mode(sensor, rate, width, height, false);
-+ return mode ? rate : -EINVAL;
-+}
-+
-+static int ov13850_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ if (code->pad != 0)
-+ return -EINVAL;
-+
-+ if (code->index >= ARRAY_SIZE(ov13850_formats))
-+ return -EINVAL;
-+
-+ code->code = ov13850_formats[code->index].code;
-+ return 0;
-+}
-+
-+static int ov13850_get_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(&sensor->sd, state,
-+ format->pad);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ format->format = *fmt;
-+
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int ov13850_try_fmt_internal(struct v4l2_subdev *sd,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum ov13850_frame_rate fr,
-+ const struct ov13850_mode_info **new_mode)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ const struct ov13850_mode_info *mode;
-+ int i;
-+
-+ mode = ov13850_find_mode(sensor, fr, fmt->width, fmt->height, true);
-+ if (!mode)
-+ return -EINVAL;
-+ fmt->width = mode->hact;
-+ fmt->height = mode->vact;
-+
-+ if (new_mode)
-+ *new_mode = mode;
-+
-+ for (i = 0; i < ARRAY_SIZE(ov13850_formats); i++)
-+ if (ov13850_formats[i].code == fmt->code)
-+ break;
-+ if (i >= ARRAY_SIZE(ov13850_formats))
-+ i = 0;
-+
-+ fmt->code = ov13850_formats[i].code;
-+ fmt->colorspace = ov13850_formats[i].colorspace;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+
-+ return 0;
-+}
-+
-+static int ov13850_set_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ const struct ov13850_mode_info *new_mode;
-+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
-+ struct v4l2_mbus_framefmt *fmt;
-+ int ret;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ ret = ov13850_try_fmt_internal(sd, mbus_fmt, 0, &new_mode);
-+ if (ret)
-+ goto out;
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(sd, state, 0);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ if (mbus_fmt->code != sensor->fmt.code)
-+ sensor->pending_fmt_change = true;
-+
-+ *fmt = *mbus_fmt;
-+
-+ if (new_mode != sensor->current_mode) {
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+
-+ if (new_mode->max_fps < sensor->current_fr) {
-+ sensor->current_fr = new_mode->max_fps;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator =
-+ ov13850_framerates[sensor->current_fr];
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ ov13850_calc_pixel_rate(sensor));
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov13850_set_framefmt(struct ov13850_dev *sensor,
-+ struct v4l2_mbus_framefmt *format)
-+{
-+ u8 fmt;
-+
-+ switch (format->code) {
-+ /* Raw, BGBG... / GRGR... */
-+ case MEDIA_BUS_FMT_SBGGR8_1X8:
-+ case MEDIA_BUS_FMT_SGBRG8_1X8:
-+ case MEDIA_BUS_FMT_SGRBG8_1X8:
-+ case MEDIA_BUS_FMT_SRGGB8_1X8:
-+ fmt = 0x0;
-+ break;
-+ case MEDIA_BUS_FMT_SBGGR10_1X10:
-+ case MEDIA_BUS_FMT_SGBRG10_1X10:
-+ case MEDIA_BUS_FMT_SGRBG10_1X10:
-+ case MEDIA_BUS_FMT_SRGGB10_1X10:
-+ fmt = 0x1;
-+ break;
-+ case MEDIA_BUS_FMT_SBGGR12_1X12:
-+ case MEDIA_BUS_FMT_SGBRG12_1X12:
-+ case MEDIA_BUS_FMT_SGRBG12_1X12:
-+ case MEDIA_BUS_FMT_SRGGB12_1X12:
-+ fmt = 0x2;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ return ov13850_mod_reg(sensor, OV13850_REG_MIPI_SC,
-+ BIT(1) | BIT(0), fmt);
-+}
-+
-+/*
-+ * Sensor Controls.
-+ */
-+
-+static int ov13850_set_ctrl_hue(struct ov13850_dev *sensor, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov13850_set_ctrl_contrast(struct ov13850_dev *sensor, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov13850_set_ctrl_saturation(struct ov13850_dev *sensor, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov13850_set_ctrl_white_balance(struct ov13850_dev *sensor, int awb)
-+{
-+ struct ov13850_ctrls *ctrls = &sensor->ctrls;
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov13850_set_ctrl_exposure(struct ov13850_dev *sensor,
-+ enum v4l2_exposure_auto_type auto_exposure)
-+{
-+ struct ov13850_ctrls *ctrls = &sensor->ctrls;
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static const s64 link_freq_menu_items[] = {
-+ OV13850_LINK_FREQ_500MHZ
-+};
-+
-+static const char * const test_pattern_menu[] = {
-+ "Disabled",
-+ "Color bars",
-+ "Color bars w/ rolling bar",
-+ "Color squares",
-+ "Color squares w/ rolling bar",
-+};
-+
-+static int ov13850_set_ctrl_test_pattern(struct ov13850_dev *sensor, int value)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_ctrl_light_freq(struct ov13850_dev *sensor, int value)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_ctrl_hflip(struct ov13850_dev *sensor, int value)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_set_ctrl_vflip(struct ov13850_dev *sensor, int value)
-+{
-+ return 0;
-+}
-+
-+static int ov13850_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ int val;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_ANALOGUE_GAIN:
-+ val = ov13850_get_gain(sensor);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ov13850_s_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ int ret;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ /*
-+ * If the device is not powered up by the host driver do
-+ * not apply any controls to H/W at this time. Instead
-+ * the controls will be restored right after power-up.
-+ */
-+ if (sensor->power_count == 0)
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_ANALOGUE_GAIN:
-+ ret = ov13850_set_gain(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_EXPOSURE:
-+ ret = ov13850_set_ctrl_exposure(sensor, V4L2_EXPOSURE_MANUAL);
-+ break;
-+ case V4L2_CID_AUTO_WHITE_BALANCE:
-+ ret = ov13850_set_ctrl_white_balance(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HUE:
-+ ret = ov13850_set_ctrl_hue(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_CONTRAST:
-+ ret = ov13850_set_ctrl_contrast(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_SATURATION:
-+ ret = ov13850_set_ctrl_saturation(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN:
-+ ret = ov13850_set_ctrl_test_pattern(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_POWER_LINE_FREQUENCY:
-+ ret = ov13850_set_ctrl_light_freq(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HFLIP:
-+ ret = ov13850_set_ctrl_hflip(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_VFLIP:
-+ ret = ov13850_set_ctrl_vflip(sensor, ctrl->val);
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ctrl_ops ov13850_ctrl_ops = {
-+ .g_volatile_ctrl = ov13850_g_volatile_ctrl,
-+ .s_ctrl = ov13850_s_ctrl,
-+};
-+
-+static int ov13850_init_controls(struct ov13850_dev *sensor)
-+{
-+ const struct v4l2_ctrl_ops *ops = &ov13850_ctrl_ops;
-+ struct ov13850_ctrls *ctrls = &sensor->ctrls;
-+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
-+ int ret;
-+
-+ v4l2_ctrl_handler_init(hdl, 32);
-+
-+ /* we can use our own mutex for the ctrl lock */
-+ hdl->lock = &sensor->lock;
-+
-+ /* Clock related controls */
-+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
-+ 0, INT_MAX, 1,
-+ ov13850_calc_pixel_rate(sensor));
-+
-+ /* Auto/manual white balance */
-+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
-+ V4L2_CID_AUTO_WHITE_BALANCE,
-+ 0, 1, 1, 0);
-+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
-+ 0, 4095, 1, 1024);
-+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
-+ 0, 4095, 1, 1024);
-+
-+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
-+ 4, 0xfff8, 1, 0x4c00);
-+ ctrls->anal_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
-+ 0x10, 0xfff8, 1, 0x0080);
-+ ctrls->test_pattern =
-+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
-+ ARRAY_SIZE(test_pattern_menu) - 1,
-+ 0, 0, test_pattern_menu);
-+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
-+ 0, 1, 1, 0);
-+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
-+ 0, 1, 1, 0);
-+ ctrls->light_freq =
-+ v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_POWER_LINE_FREQUENCY,
-+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
-+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
-+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
-+ 0, 0, link_freq_menu_items);
-+ if (hdl->error) {
-+ ret = hdl->error;
-+ goto free_ctrls;
-+ }
-+
-+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ // ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+ // ctrls->anal_gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+
-+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
-+
-+ sensor->sd.ctrl_handler = hdl;
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(hdl);
-+ return ret;
-+}
-+
-+static int ov13850_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ if (fse->pad != 0)
-+ return -EINVAL;
-+ if (fse->index >= OV13850_NUM_MODES)
-+ return -EINVAL;
-+
-+ fse->min_width =
-+ ov13850_mode_data[fse->index].hact;
-+ fse->max_width = fse->min_width;
-+ fse->min_height =
-+ ov13850_mode_data[fse->index].vact;
-+ fse->max_height = fse->min_height;
-+
-+ return 0;
-+}
-+
-+static int ov13850_enum_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_interval_enum *fie)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ struct v4l2_fract tpf;
-+ int ret;
-+
-+ if (fie->pad != 0)
-+ return -EINVAL;
-+ if (fie->index >= OV13850_NUM_FRAMERATES)
-+ return -EINVAL;
-+
-+ tpf.numerator = 1;
-+ tpf.denominator = ov13850_framerates[fie->index];
-+
-+/* ret = ov13850_try_frame_interval(sensor, &tpf,
-+ * fie->width, fie->height);
-+ * if (ret < 0)
-+ * return -EINVAL;
-+ */
-+ fie->interval = tpf;
-+
-+ return 0;
-+}
-+
-+static int ov13850_g_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+
-+ mutex_lock(&sensor->lock);
-+ fi->interval = sensor->frame_interval;
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int ov13850_s_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ const struct ov13850_mode_info *mode;
-+ int frame_rate, ret = 0;
-+
-+ if (fi->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ mode = sensor->current_mode;
-+
-+ frame_rate = ov13850_try_frame_interval(sensor, &fi->interval,
-+ mode->hact, mode->vact);
-+ if (frame_rate < 0) {
-+ /* Always return a valid frame interval value */
-+ fi->interval = sensor->frame_interval;
-+ goto out;
-+ }
-+
-+ mode = ov13850_find_mode(sensor, frame_rate, mode->hact,
-+ mode->vact, true);
-+ if (!mode) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (mode != sensor->current_mode ||
-+ frame_rate != sensor->current_fr) {
-+ sensor->current_fr = frame_rate;
-+ sensor->frame_interval = fi->interval;
-+ sensor->current_mode = mode;
-+ sensor->pending_mode_change = true;
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ ov13850_calc_pixel_rate(sensor));
-+ }
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov13850_stream_start(struct ov13850_dev *sensor, int enable)
-+{
-+ int ret;
-+
-+ if (enable) { //stream on
-+ mdelay(1000);
-+ ret = ov13850_write_reg(sensor, OV13850_STREAM_CTRL, enable);
-+ } else { //stream off
-+ ret = ov13850_write_reg(sensor, OV13850_STREAM_CTRL, enable);
-+ mdelay(100);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov13850_s_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+ int ret = 0;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming == !enable) {
-+ if (enable && sensor->pending_mode_change) {
-+ ret = ov13850_set_mode(sensor);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ if (enable && sensor->pending_fmt_change) {
-+ ret = ov13850_set_framefmt(sensor, &sensor->fmt);
-+ if (ret)
-+ goto out;
-+ sensor->pending_fmt_change = false;
-+ }
-+
-+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
-+ ret = ov13850_set_stream_mipi(sensor, enable);
-+
-+ ret = ov13850_stream_start(sensor, enable);
-+
-+ if (!ret)
-+ sensor->streaming = enable;
-+ }
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static const struct v4l2_subdev_core_ops ov13850_core_ops = {
-+ .s_power = ov13850_s_power,
-+ .log_status = v4l2_ctrl_subdev_log_status,
-+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops ov13850_video_ops = {
-+ .g_frame_interval = ov13850_g_frame_interval,
-+ .s_frame_interval = ov13850_s_frame_interval,
-+ .s_stream = ov13850_s_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops ov13850_pad_ops = {
-+ .enum_mbus_code = ov13850_enum_mbus_code,
-+ .get_fmt = ov13850_get_fmt,
-+ .set_fmt = ov13850_set_fmt,
-+ .enum_frame_size = ov13850_enum_frame_size,
-+ .enum_frame_interval = ov13850_enum_frame_interval,
-+};
-+
-+static const struct v4l2_subdev_ops ov13850_subdev_ops = {
-+ .core = &ov13850_core_ops,
-+ .video = &ov13850_video_ops,
-+ .pad = &ov13850_pad_ops,
-+};
-+
-+static int ov13850_get_regulators(struct ov13850_dev *sensor)
-+{
-+ int i;
-+
-+ for (i = 0; i < OV13850_NUM_SUPPLIES; i++)
-+ sensor->supplies[i].supply = ov13850_supply_name[i];
-+
-+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
-+ OV13850_NUM_SUPPLIES,
-+ sensor->supplies);
-+}
-+
-+static int ov13850_check_chip_id(struct ov13850_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ int ret = 0;
-+ u16 chip_id;
-+
-+ ret = ov13850_set_power_on(sensor);
-+ if (ret)
-+ return ret;
-+
-+#ifdef UNUSED_CODE
-+ ret = ov13850_read_reg16(sensor, OV13850_REG_CHIP_ID, &chip_id);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to read chip identifier\n",
-+ __func__);
-+ goto power_off;
-+ }
-+
-+ if (chip_id != OV13850_CHIP_ID) {
-+ dev_err(&client->dev, "%s: wrong chip identifier, expected 0x%x, got 0x%x\n",
-+ __func__, OV13850_CHIP_ID, chip_id);
-+ ret = -ENXIO;
-+ }
-+ dev_err(&client->dev, "%s: chip identifier, got 0x%x\n",
-+ __func__, chip_id);
-+#endif
-+
-+power_off:
-+ ov13850_set_power_off(sensor);
-+ return ret;
-+}
-+
-+static int ov13850_probe(struct i2c_client *client)
-+{
-+ struct device *dev = &client->dev;
-+ struct fwnode_handle *endpoint;
-+ struct ov13850_dev *sensor;
-+ struct v4l2_mbus_framefmt *fmt;
-+ u32 rotation;
-+ int ret;
-+ u8 chip_id_high, chip_id_low;
-+
-+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
-+ if (!sensor)
-+ return -ENOMEM;
-+
-+ sensor->i2c_client = client;
-+
-+ fmt = &sensor->fmt;
-+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+ fmt->width = 1920;
-+ fmt->height = 1080;
-+ fmt->field = V4L2_FIELD_NONE;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator = ov13850_framerates[OV13850_30_FPS];
-+ sensor->current_fr = OV13850_30_FPS;
-+ sensor->current_mode =
-+ &ov13850_mode_data[OV13850_MODE_1080P_1920_1080];
-+ sensor->last_mode = sensor->current_mode;
-+
-+ sensor->ae_target = 52;
-+
-+ /* optional indication of physical rotation of sensor */
-+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
-+ &rotation);
-+ if (!ret) {
-+ switch (rotation) {
-+ case 180:
-+ sensor->upside_down = true;
-+ fallthrough;
-+ case 0:
-+ break;
-+ default:
-+ dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
-+ rotation);
-+ }
-+ }
-+
-+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
-+ NULL);
-+ if (!endpoint) {
-+ dev_err(dev, "endpoint node not found\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = v4l2_fwnode_endpoint_parse(endpoint, &sensor->ep);
-+ fwnode_handle_put(endpoint);
-+ if (ret) {
-+ dev_err(dev, "Could not parse endpoint\n");
-+ return ret;
-+ }
-+
-+ if (sensor->ep.bus_type != V4L2_MBUS_PARALLEL &&
-+ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY &&
-+ sensor->ep.bus_type != V4L2_MBUS_BT656) {
-+ dev_err(dev, "Unsupported bus type %d\n", sensor->ep.bus_type);
-+ return -EINVAL;
-+ }
-+
-+ /* get system clock (xclk) */
-+ sensor->xclk = devm_clk_get(dev, "xclk");
-+ if (IS_ERR(sensor->xclk)) {
-+ dev_err(dev, "failed to get xclk\n");
-+ return PTR_ERR(sensor->xclk);
-+ }
-+
-+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
-+ if (sensor->xclk_freq < OV13850_XCLK_MIN ||
-+ sensor->xclk_freq > OV13850_XCLK_MAX) {
-+ dev_err(dev, "xclk frequency out of range: %d Hz\n",
-+ sensor->xclk_freq);
-+ return -EINVAL;
-+ }
-+
-+ /* request optional power down pin */
-+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->pwdn_gpio))
-+ return PTR_ERR(sensor->pwdn_gpio);
-+
-+ /* request optional reset pin */
-+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->reset_gpio))
-+ return PTR_ERR(sensor->reset_gpio);
-+
-+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov13850_subdev_ops);
-+
-+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
-+ V4L2_SUBDEV_FL_HAS_EVENTS;
-+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
-+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov13850_get_regulators(sensor);
-+ if (ret)
-+ return ret;
-+
-+ mutex_init(&sensor->lock);
-+
-+ ret = ov13850_check_chip_id(sensor);
-+ if (ret)
-+ goto entity_cleanup;
-+
-+ ret = ov13850_init_controls(sensor);
-+ if (ret)
-+ goto entity_cleanup;
-+
-+ ret = v4l2_async_register_subdev_sensor(&sensor->sd);
-+ if (ret)
-+ goto free_ctrls;
-+
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+entity_cleanup:
-+ media_entity_cleanup(&sensor->sd.entity);
-+ mutex_destroy(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov13850_remove(struct i2c_client *client)
-+{
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct ov13850_dev *sensor = to_ov13850_dev(sd);
-+
-+ v4l2_async_unregister_subdev(&sensor->sd);
-+ media_entity_cleanup(&sensor->sd.entity);
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+ mutex_destroy(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static const struct i2c_device_id ov13850_id[] = {
-+ {"ov13850", 0},
-+ {},
-+};
-+MODULE_DEVICE_TABLE(i2c, ov13850_id);
-+
-+static const struct of_device_id ov13850_dt_ids[] = {
-+ { .compatible = "ovti,ov13850" },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, ov13850_dt_ids);
-+
-+static struct i2c_driver ov13850_i2c_driver = {
-+ .driver = {
-+ .name = "ov13850",
-+ .of_match_table = ov13850_dt_ids,
-+ },
-+ .id_table = ov13850_id,
-+ .probe_new = ov13850_probe,
-+ .remove = ov13850_remove,
-+};
-+
-+module_i2c_driver(ov13850_i2c_driver);
-+
-+MODULE_DESCRIPTION("OV13850 MIPI Camera Subdev Driver");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/ov4689_mipi.c
-@@ -0,0 +1,2975 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/clk-provider.h>
-+#include <linux/clkdev.h>
-+#include <linux/ctype.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/gpio/consumer.h>
-+#include <linux/i2c.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/of_device.h>
-+#include <linux/regulator/consumer.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+#include "stfcamss.h"
-+
-+
-+#define OV4689_LANES 4
-+
-+#define OV4689_LINK_FREQ_500MHZ 500000000LL
-+
-+/* min/typical/max system clock (xclk) frequencies */
-+#define OV4689_XCLK_MIN 6000000
-+#define OV4689_XCLK_MAX 64000000
-+
-+#define OV4689_CHIP_ID (0x4688)
-+
-+#define OV4689_CHIP_ID_HIGH_BYTE 0x300a // should read 0x46
-+#define OV4689_CHIP_ID_LOW_BYTE 0x300b // should read 0x88
-+#define OV4689_REG_CHIP_ID 0x300a
-+
-+#define OV4689_REG_H_OUTPUT_SIZE 0x3808
-+#define OV4689_REG_V_OUTPUT_SIZE 0x380a
-+#define OV4689_REG_TIMING_HTS 0x380c
-+#define OV4689_REG_TIMING_VTS 0x380e
-+
-+#define OV4689_REG_EXPOSURE_HI 0x3500
-+#define OV4689_REG_EXPOSURE_MED 0x3501
-+#define OV4689_REG_EXPOSURE_LO 0x3502
-+#define OV4689_REG_GAIN_H 0x3507
-+#define OV4689_REG_GAIN_M 0x3508
-+#define OV4689_REG_GAIN_L 0x3509
-+#define OV4689_REG_TEST_PATTERN 0x5040
-+#define OV4689_REG_TIMING_TC_REG20 0x3820
-+#define OV4689_REG_TIMING_TC_REG21 0x3821
-+
-+#define OV4689_REG_AWB_R_GAIN 0x500C
-+#define OV4689_REG_AWB_B_GAIN 0x5010
-+#define OV4689_REG_STREAM_ON 0x0100
-+
-+#define OV4689_REG_MIPI_SC_CTRL_HI 0x3018
-+#define OV4689_REG_MIPI_SC_CTRL_LOW 0x3019
-+
-+enum ov4689_mode_id {
-+ //OV4689_MODE_720P_1280_720 = 0,
-+ OV4689_MODE_1080P_1920_1080 = 0,
-+ //OV4689_MODE_4M_2688_1520,
-+ OV4689_NUM_MODES,
-+};
-+
-+enum ov4689_frame_rate {
-+ OV4689_15_FPS = 0,
-+ OV4689_30_FPS,
-+ OV4689_45_FPS,
-+ OV4689_60_FPS,
-+ OV4689_90_FPS,
-+ OV4689_120_FPS,
-+ OV4689_150_FPS,
-+ OV4689_180_FPS,
-+ OV4689_330_FPS,
-+ OV4689_NUM_FRAMERATES,
-+};
-+
-+enum ov4689_format_mux {
-+ OV4689_FMT_MUX_RAW,
-+};
-+
-+static const int ov4689_framerates[] = {
-+ [OV4689_15_FPS] = 15,
-+ [OV4689_30_FPS] = 30,
-+ [OV4689_45_FPS] = 45,
-+ [OV4689_60_FPS] = 60,
-+ [OV4689_90_FPS] = 90,
-+ [OV4689_120_FPS] = 120,
-+ [OV4689_150_FPS] = 150,
-+ [OV4689_180_FPS] = 180,
-+ [OV4689_330_FPS] = 330,
-+};
-+
-+/* regulator supplies */
-+static const char * const ov4689_supply_name[] = {
-+ "DOVDD", /* Digital I/O (1.8V) supply */
-+ "AVDD", /* Analog (2.8V) supply */
-+ "DVDD", /* Digital Core (1.5V) supply */
-+};
-+
-+#define OV4689_NUM_SUPPLIES ARRAY_SIZE(ov4689_supply_name)
-+
-+/*
-+ * Image sizes below 1280 * 960 use SUBSAMPLING
-+ * Image sizes above 1280 * 960 use SCALING
-+ */
-+enum ov4689_downsize_mode {
-+ SUBSAMPLING,
-+ SCALING,
-+};
-+
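-+/*
-+ * One entry of a register init/mode table, consumed by ov4689_load_regs():
-+ * when @mask is non-zero only the masked bits are updated (read-modify-write
-+ * via ov4689_mod_reg()), and a non-zero @delay_ms sleeps after the write.
-+ */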
-+struct reg_value {
-+ u16 reg_addr;
-+ u8 val;
-+ u8 mask;
-+ u32 delay_ms;
-+};
-+
-+struct ov4689_mode_info {
-+ enum ov4689_mode_id id;
-+ enum ov4689_downsize_mode dn_mode;
-+ u32 hact;
-+ u32 htot;
-+ u32 vact;
-+ u32 vtot;
-+ const struct reg_value *reg_data;
-+ u32 reg_data_size;
-+ u32 max_fps;
-+};
-+
-+struct ov4689_ctrls {
-+ struct v4l2_ctrl_handler handler;
-+ struct v4l2_ctrl *pixel_rate;
-+ struct {
-+ struct v4l2_ctrl *exposure;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_wb;
-+ struct v4l2_ctrl *blue_balance;
-+ struct v4l2_ctrl *red_balance;
-+ };
-+ struct {
-+ struct v4l2_ctrl *anal_gain;
-+ };
-+ struct v4l2_ctrl *brightness;
-+ struct v4l2_ctrl *light_freq;
-+ struct v4l2_ctrl *link_freq;
-+ struct v4l2_ctrl *saturation;
-+ struct v4l2_ctrl *contrast;
-+ struct v4l2_ctrl *hue;
-+ struct v4l2_ctrl *test_pattern;
-+ struct v4l2_ctrl *hflip;
-+ struct v4l2_ctrl *vflip;
-+};
-+
-+struct ov4689_dev {
-+ struct i2c_client *i2c_client;
-+ struct v4l2_subdev sd;
-+ struct media_pad pad;
-+ struct v4l2_fwnode_endpoint ep; /* the parsed DT endpoint info */
-+ struct clk *xclk; /* system clock to OV4689 */
-+ u32 xclk_freq;
-+
-+ struct regulator_bulk_data supplies[OV4689_NUM_SUPPLIES];
-+ struct gpio_desc *reset_gpio;
-+ struct gpio_desc *pwdn_gpio;
-+ bool upside_down;
-+
-+ /* lock to protect all members below */
-+ struct mutex lock;
-+
-+ struct v4l2_mbus_framefmt fmt;
-+
-+ const struct ov4689_mode_info *current_mode;
-+ const struct ov4689_mode_info *last_mode;
-+ enum ov4689_frame_rate current_fr;
-+ struct v4l2_fract frame_interval;
-+
-+ struct ov4689_ctrls ctrls;
-+
-+ bool pending_mode_change;
-+ int streaming;
-+};
-+
-+static inline struct ov4689_dev *to_ov4689_dev(struct v4l2_subdev *sd)
-+{
-+ return container_of(sd, struct ov4689_dev, sd);
-+}
-+
-+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-+{
-+ return &container_of(ctrl->handler, struct ov4689_dev,
-+ ctrls.handler)->sd;
-+}
-+
-+/* ov4689 initial register */
-+static const struct reg_value ov4689_init_setting_30fps_1080P[] = {
-+/* ov4689_1080p_30fps_4d */
-+ {0x0103, 0x01, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+ {0x0300, 0x02, 0, 0},
-+ {0x0302, 0x32, 0, 0},
-+ {0x0303, 0x00, 0, 0},
-+ {0x0304, 0x03, 0, 0},
-+ {0x030b, 0x00, 0, 0},
-+ {0x030d, 0x1e, 0, 0},
-+ {0x030e, 0x04, 0, 0},
-+ {0x030f, 0x01, 0, 0},
-+ {0x0312, 0x01, 0, 0},
-+ {0x031e, 0x00, 0, 0},
-+ {0x3000, 0x20, 0, 0},
-+ {0x3002, 0x00, 0, 0},
-+ {0x3020, 0x93, 0, 0},
-+ {0x3021, 0x03, 0, 0},
-+ {0x3022, 0x01, 0, 0},
-+ {0x3031, 0x0a, 0, 0},
-+ {0x3305, 0xf1, 0, 0},
-+ {0x3307, 0x04, 0, 0},
-+ {0x3309, 0x29, 0, 0},
-+ {0x3500, 0x00, 0, 0},
-+ {0x3501, 0x4c, 0, 0},
-+ {0x3502, 0x00, 0, 0},
-+ {0x3503, 0x04, 0, 0},
-+ {0x3504, 0x00, 0, 0},
-+ {0x3505, 0x00, 0, 0},
-+ {0x3506, 0x00, 0, 0},
-+ {0x3507, 0x00, 0, 0},
-+ {0x3508, 0x00, 0, 0},
-+ {0x3509, 0x80, 0, 0},
-+ {0x350a, 0x00, 0, 0},
-+ {0x350b, 0x00, 0, 0},
-+ {0x350c, 0x00, 0, 0},
-+ {0x350d, 0x00, 0, 0},
-+ {0x350e, 0x00, 0, 0},
-+ {0x350f, 0x80, 0, 0},
-+ {0x3510, 0x00, 0, 0},
-+ {0x3511, 0x00, 0, 0},
-+ {0x3512, 0x00, 0, 0},
-+ {0x3513, 0x00, 0, 0},
-+ {0x3514, 0x00, 0, 0},
-+ {0x3515, 0x80, 0, 0},
-+ {0x3516, 0x00, 0, 0},
-+ {0x3517, 0x00, 0, 0},
-+ {0x3518, 0x00, 0, 0},
-+ {0x3519, 0x00, 0, 0},
-+ {0x351a, 0x00, 0, 0},
-+ {0x351b, 0x80, 0, 0},
-+ {0x351c, 0x00, 0, 0},
-+ {0x351d, 0x00, 0, 0},
-+ {0x351e, 0x00, 0, 0},
-+ {0x351f, 0x00, 0, 0},
-+ {0x3520, 0x00, 0, 0},
-+ {0x3521, 0x80, 0, 0},
-+ {0x3522, 0x08, 0, 0},
-+ {0x3524, 0x08, 0, 0},
-+ {0x3526, 0x08, 0, 0},
-+ {0x3528, 0x08, 0, 0},
-+ {0x352a, 0x08, 0, 0},
-+ {0x3602, 0x00, 0, 0},
-+ {0x3603, 0x40, 0, 0},
-+ {0x3604, 0x02, 0, 0},
-+ {0x3605, 0x00, 0, 0},
-+ {0x3606, 0x00, 0, 0},
-+ {0x3607, 0x00, 0, 0},
-+ {0x3609, 0x12, 0, 0},
-+ {0x360a, 0x40, 0, 0},
-+ {0x360c, 0x08, 0, 0},
-+ {0x360f, 0xe5, 0, 0},
-+ {0x3608, 0x8f, 0, 0},
-+ {0x3611, 0x00, 0, 0},
-+ {0x3613, 0xf7, 0, 0},
-+ {0x3616, 0x58, 0, 0},
-+ {0x3619, 0x99, 0, 0},
-+ {0x361b, 0x60, 0, 0},
-+ {0x361c, 0x7a, 0, 0},
-+ {0x361e, 0x79, 0, 0},
-+ {0x361f, 0x02, 0, 0},
-+ {0x3632, 0x00, 0, 0},
-+ {0x3633, 0x10, 0, 0},
-+ {0x3634, 0x10, 0, 0},
-+ {0x3635, 0x10, 0, 0},
-+ {0x3636, 0x15, 0, 0},
-+ {0x3646, 0x86, 0, 0},
-+ {0x364a, 0x0b, 0, 0},
-+ {0x3700, 0x17, 0, 0},
-+ {0x3701, 0x22, 0, 0},
-+ {0x3703, 0x10, 0, 0},
-+ {0x370a, 0x37, 0, 0},
-+ {0x3705, 0x00, 0, 0},
-+ {0x3706, 0x63, 0, 0},
-+ {0x3709, 0x3c, 0, 0},
-+ {0x370b, 0x01, 0, 0},
-+ {0x370c, 0x30, 0, 0},
-+ {0x3710, 0x24, 0, 0},
-+ {0x3711, 0x0c, 0, 0},
-+ {0x3716, 0x00, 0, 0},
-+ {0x3720, 0x28, 0, 0},
-+ {0x3729, 0x7b, 0, 0},
-+ {0x372a, 0x84, 0, 0},
-+ {0x372b, 0xbd, 0, 0},
-+ {0x372c, 0xbc, 0, 0},
-+ {0x372e, 0x52, 0, 0},
-+ {0x373c, 0x0e, 0, 0},
-+ {0x373e, 0x33, 0, 0},
-+ {0x3743, 0x10, 0, 0},
-+ {0x3744, 0x88, 0, 0},
-+ {0x3745, 0xc0, 0, 0},
-+ {0x374a, 0x43, 0, 0},
-+ {0x374c, 0x00, 0, 0},
-+ {0x374e, 0x23, 0, 0},
-+ {0x3751, 0x7b, 0, 0},
-+ {0x3752, 0x84, 0, 0},
-+ {0x3753, 0xbd, 0, 0},
-+ {0x3754, 0xbc, 0, 0},
-+ {0x3756, 0x52, 0, 0},
-+ {0x375c, 0x00, 0, 0},
-+ {0x3760, 0x00, 0, 0},
-+ {0x3761, 0x00, 0, 0},
-+ {0x3762, 0x00, 0, 0},
-+ {0x3763, 0x00, 0, 0},
-+ {0x3764, 0x00, 0, 0},
-+ {0x3767, 0x04, 0, 0},
-+ {0x3768, 0x04, 0, 0},
-+ {0x3769, 0x08, 0, 0},
-+ {0x376a, 0x08, 0, 0},
-+ {0x376b, 0x20, 0, 0},
-+ {0x376c, 0x00, 0, 0},
-+ {0x376d, 0x00, 0, 0},
-+ {0x376e, 0x00, 0, 0},
-+ {0x3773, 0x00, 0, 0},
-+ {0x3774, 0x51, 0, 0},
-+ {0x3776, 0xbd, 0, 0},
-+ {0x3777, 0xbd, 0, 0},
-+ {0x3781, 0x18, 0, 0},
-+ {0x3783, 0x25, 0, 0},
-+ {0x3798, 0x1b, 0, 0},
-+ {0x3800, 0x01, 0, 0},
-+ {0x3801, 0x88, 0, 0},
-+ {0x3802, 0x00, 0, 0},
-+ {0x3803, 0xe0, 0, 0},
-+ {0x3804, 0x09, 0, 0},
-+ {0x3805, 0x17, 0, 0},
-+ {0x3806, 0x05, 0, 0},
-+ {0x3807, 0x1f, 0, 0},
-+ {0x3808, 0x07, 0, 0},
-+ {0x3809, 0x80, 0, 0},
-+ {0x380a, 0x04, 0, 0},
-+ {0x380b, 0x38, 0, 0},
-+ {0x380c, 0x0d, 0, 0},
-+ {0x380d, 0x70, 0, 0},
-+ {0x380e, 0x04, 0, 0},
-+ {0x380f, 0x8A, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x08, 0, 0},
-+ {0x3812, 0x00, 0, 0},
-+ {0x3813, 0x04, 0, 0},
-+ {0x3814, 0x01, 0, 0},
-+ {0x3815, 0x01, 0, 0},
-+ {0x3819, 0x01, 0, 0},
-+ {0x3820, 0x06, 0, 0},
-+ {0x3821, 0x00, 0, 0},
-+ {0x3829, 0x00, 0, 0},
-+ {0x382a, 0x01, 0, 0},
-+ {0x382b, 0x01, 0, 0},
-+ {0x382d, 0x7f, 0, 0},
-+ {0x3830, 0x04, 0, 0},
-+ {0x3836, 0x01, 0, 0},
-+ {0x3837, 0x00, 0, 0},
-+ {0x3841, 0x02, 0, 0},
-+ {0x3846, 0x08, 0, 0},
-+ {0x3847, 0x07, 0, 0},
-+ {0x3d85, 0x36, 0, 0},
-+ {0x3d8c, 0x71, 0, 0},
-+ {0x3d8d, 0xcb, 0, 0},
-+ {0x3f0a, 0x00, 0, 0},
-+ {0x4000, 0xf1, 0, 0},
-+ {0x4001, 0x40, 0, 0},
-+ {0x4002, 0x04, 0, 0},
-+ {0x4003, 0x14, 0, 0},
-+ {0x400e, 0x00, 0, 0},
-+ {0x4011, 0x00, 0, 0},
-+ {0x401a, 0x00, 0, 0},
-+ {0x401b, 0x00, 0, 0},
-+ {0x401c, 0x00, 0, 0},
-+ {0x401d, 0x00, 0, 0},
-+ {0x401f, 0x00, 0, 0},
-+ {0x4020, 0x00, 0, 0},
-+ {0x4021, 0x10, 0, 0},
-+ {0x4022, 0x06, 0, 0},
-+ {0x4023, 0x13, 0, 0},
-+ {0x4024, 0x07, 0, 0},
-+ {0x4025, 0x40, 0, 0},
-+ {0x4026, 0x07, 0, 0},
-+ {0x4027, 0x50, 0, 0},
-+ {0x4028, 0x00, 0, 0},
-+ {0x4029, 0x02, 0, 0},
-+ {0x402a, 0x06, 0, 0},
-+ {0x402b, 0x04, 0, 0},
-+ {0x402c, 0x02, 0, 0},
-+ {0x402d, 0x02, 0, 0},
-+ {0x402e, 0x0e, 0, 0},
-+ {0x402f, 0x04, 0, 0},
-+ {0x4302, 0xff, 0, 0},
-+ {0x4303, 0xff, 0, 0},
-+ {0x4304, 0x00, 0, 0},
-+ {0x4305, 0x00, 0, 0},
-+ {0x4306, 0x00, 0, 0},
-+ {0x4308, 0x02, 0, 0},
-+ {0x4500, 0x6c, 0, 0},
-+ {0x4501, 0xc4, 0, 0},
-+ {0x4502, 0x40, 0, 0},
-+ {0x4503, 0x01, 0, 0},
-+ {0x4601, 0x77, 0, 0},
-+ {0x4800, 0x04, 0, 0},
-+ {0x4813, 0x08, 0, 0},
-+ {0x481f, 0x40, 0, 0},
-+ {0x4829, 0x78, 0, 0},
-+ {0x4837, 0x10, 0, 0},
-+ {0x4b00, 0x2a, 0, 0},
-+ {0x4b0d, 0x00, 0, 0},
-+ {0x4d00, 0x04, 0, 0},
-+ {0x4d01, 0x42, 0, 0},
-+ {0x4d02, 0xd1, 0, 0},
-+ {0x4d03, 0x93, 0, 0},
-+ {0x4d04, 0xf5, 0, 0},
-+ {0x4d05, 0xc1, 0, 0},
-+ {0x5000, 0xf3, 0, 0},
-+ {0x5001, 0x11, 0, 0},
-+ {0x5004, 0x00, 0, 0},
-+ {0x500a, 0x00, 0, 0},
-+ {0x500b, 0x00, 0, 0},
-+ {0x5032, 0x00, 0, 0},
-+ {0x5040, 0x00, 0, 0},
-+ {0x5050, 0x0c, 0, 0},
-+ {0x5500, 0x00, 0, 0},
-+ {0x5501, 0x10, 0, 0},
-+ {0x5502, 0x01, 0, 0},
-+ {0x5503, 0x0f, 0, 0},
-+ {0x8000, 0x00, 0, 0},
-+ {0x8001, 0x00, 0, 0},
-+ {0x8002, 0x00, 0, 0},
-+ {0x8003, 0x00, 0, 0},
-+ {0x8004, 0x00, 0, 0},
-+ {0x8005, 0x00, 0, 0},
-+ {0x8006, 0x00, 0, 0},
-+ {0x8007, 0x00, 0, 0},
-+ {0x8008, 0x00, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+};
-+
-+static const struct reg_value ov4689_setting_VGA_640_480[] = {
-+ //@@ RES_640x480_2x_Bin_330fps_816Mbps
-+ //OV4689_AM01B_640x480_24M_2lane_816Mbps_330fps_20140210.txt
-+ {0x0103, 0x01, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+ {0x0300, 0x00, 0, 0}, // 00
-+ {0x0302, 0x22, 0, 0}, // 816Mbps 5a ; 64 ; 5a ; 78 ; 78 ; 2a
-+ {0x0303, 0x00, 0, 0}, // 03 ; 01 ; 02 ;
-+ {0x0304, 0x03, 0, 0},
-+ {0x030b, 0x00, 0, 0},
-+ {0x030d, 0x1e, 0, 0},
-+ {0x030e, 0x04, 0, 0},
-+ {0x030f, 0x01, 0, 0},
-+ {0x0312, 0x01, 0, 0},
-+ {0x031e, 0x00, 0, 0},
-+ {0x3000, 0x20, 0, 0},
-+ {0x3002, 0x00, 0, 0},
-+ {0x3020, 0x93, 0, 0},
-+ {0x3021, 0x03, 0, 0},
-+ {0x3022, 0x01, 0, 0},
-+ {0x3031, 0x0a, 0, 0},
-+ {0x303f, 0x0c, 0, 0},
-+ {0x3305, 0xf1, 0, 0},
-+ {0x3307, 0x04, 0, 0},
-+ {0x3309, 0x29, 0, 0},
-+ {0x3500, 0x00, 0, 0},
-+ {0x3501, 0x4c, 0, 0},
-+ {0x3502, 0x00, 0, 0},
-+ {0x3503, 0x04, 0, 0},
-+ {0x3504, 0x00, 0, 0},
-+ {0x3505, 0x00, 0, 0},
-+ {0x3506, 0x00, 0, 0},
-+ {0x3507, 0x00, 0, 0},
-+ {0x3508, 0x00, 0, 0},
-+ {0x3509, 0x80, 0, 0}, // 8X
-+ {0x350a, 0x00, 0, 0},
-+ {0x350b, 0x00, 0, 0},
-+ {0x350c, 0x00, 0, 0},
-+ {0x350d, 0x00, 0, 0},
-+ {0x350e, 0x00, 0, 0},
-+ {0x350f, 0x80, 0, 0},
-+ {0x3510, 0x00, 0, 0},
-+ {0x3511, 0x00, 0, 0},
-+ {0x3512, 0x00, 0, 0},
-+ {0x3513, 0x00, 0, 0},
-+ {0x3514, 0x00, 0, 0},
-+ {0x3515, 0x80, 0, 0},
-+ {0x3516, 0x00, 0, 0},
-+ {0x3517, 0x00, 0, 0},
-+ {0x3518, 0x00, 0, 0},
-+ {0x3519, 0x00, 0, 0},
-+ {0x351a, 0x00, 0, 0},
-+ {0x351b, 0x80, 0, 0},
-+ {0x351c, 0x00, 0, 0},
-+ {0x351d, 0x00, 0, 0},
-+ {0x351e, 0x00, 0, 0},
-+ {0x351f, 0x00, 0, 0},
-+ {0x3520, 0x00, 0, 0},
-+ {0x3521, 0x80, 0, 0},
-+ {0x3522, 0x08, 0, 0},
-+ {0x3524, 0x08, 0, 0},
-+ {0x3526, 0x08, 0, 0},
-+ {0x3528, 0x08, 0, 0},
-+ {0x352a, 0x08, 0, 0},
-+ {0x3602, 0x00, 0, 0},
-+ {0x3603, 0x40, 0, 0},
-+ {0x3604, 0x02, 0, 0},
-+ {0x3605, 0x00, 0, 0},
-+ {0x3606, 0x00, 0, 0},
-+ {0x3607, 0x00, 0, 0},
-+ {0x3609, 0x12, 0, 0},
-+ {0x360a, 0x40, 0, 0},
-+ {0x360c, 0x08, 0, 0},
-+ {0x360f, 0xe5, 0, 0},
-+ {0x3608, 0x8f, 0, 0},
-+ {0x3611, 0x00, 0, 0},
-+ {0x3613, 0xf7, 0, 0},
-+ {0x3616, 0x58, 0, 0},
-+ {0x3619, 0x99, 0, 0},
-+ {0x361b, 0x60, 0, 0},
-+ {0x361c, 0x7a, 0, 0},
-+ {0x361e, 0x79, 0, 0},
-+ {0x361f, 0x02, 0, 0},
-+ {0x3632, 0x05, 0, 0},
-+ {0x3633, 0x10, 0, 0},
-+ {0x3634, 0x10, 0, 0},
-+ {0x3635, 0x10, 0, 0},
-+ {0x3636, 0x15, 0, 0},
-+ {0x3646, 0x86, 0, 0},
-+ {0x364a, 0x0b, 0, 0},
-+ {0x3700, 0x17, 0, 0},
-+ {0x3701, 0x22, 0, 0},
-+ {0x3703, 0x10, 0, 0},
-+ {0x370a, 0x37, 0, 0},
-+ {0x3705, 0x00, 0, 0},
-+ {0x3706, 0x63, 0, 0},
-+ {0x3709, 0x3c, 0, 0},
-+ {0x370b, 0x01, 0, 0},
-+ {0x370c, 0x30, 0, 0},
-+ {0x3710, 0x24, 0, 0},
-+ {0x3711, 0x0c, 0, 0},
-+ {0x3716, 0x00, 0, 0},
-+ {0x3720, 0x28, 0, 0},
-+ {0x3729, 0x7b, 0, 0},
-+ {0x372a, 0x84, 0, 0},
-+ {0x372b, 0xbd, 0, 0},
-+ {0x372c, 0xbc, 0, 0},
-+ {0x372e, 0x52, 0, 0},
-+ {0x373c, 0x0e, 0, 0},
-+ {0x373e, 0x33, 0, 0},
-+ {0x3743, 0x10, 0, 0},
-+ {0x3744, 0x88, 0, 0},
-+ {0x3745, 0xc0, 0, 0},
-+ {0x374a, 0x43, 0, 0},
-+ {0x374c, 0x00, 0, 0},
-+ {0x374e, 0x23, 0, 0},
-+ {0x3751, 0x7b, 0, 0},
-+ {0x3752, 0x84, 0, 0},
-+ {0x3753, 0xbd, 0, 0},
-+ {0x3754, 0xbc, 0, 0},
-+ {0x3756, 0x52, 0, 0},
-+ {0x375c, 0x00, 0, 0},
-+ {0x3760, 0x00, 0, 0},
-+ {0x3761, 0x00, 0, 0},
-+ {0x3762, 0x00, 0, 0},
-+ {0x3763, 0x00, 0, 0},
-+ {0x3764, 0x00, 0, 0},
-+ {0x3767, 0x04, 0, 0},
-+ {0x3768, 0x04, 0, 0},
-+ {0x3769, 0x08, 0, 0},
-+ {0x376a, 0x08, 0, 0},
-+ {0x376b, 0x40, 0, 0},
-+ {0x376c, 0x00, 0, 0},
-+ {0x376d, 0x00, 0, 0},
-+ {0x376e, 0x00, 0, 0},
-+ {0x3773, 0x00, 0, 0},
-+ {0x3774, 0x51, 0, 0},
-+ {0x3776, 0xbd, 0, 0},
-+ {0x3777, 0xbd, 0, 0},
-+ {0x3781, 0x18, 0, 0},
-+ {0x3783, 0x25, 0, 0},
-+ {0x3798, 0x1b, 0, 0},
-+ {0x3800, 0x00, 0, 0},
-+ {0x3801, 0x48, 0, 0},
-+ {0x3802, 0x00, 0, 0},
-+ {0x3803, 0x2C, 0, 0},
-+ {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x57, 0, 0},
-+ {0x3806, 0x05, 0, 0},
-+ {0x3807, 0xD3, 0, 0},
-+ {0x3808, 0x02, 0, 0},
-+ {0x3809, 0x80, 0, 0},
-+ {0x380a, 0x01, 0, 0},
-+ {0x380b, 0xe0, 0, 0},
-+
-+ {0x380c, 0x02, 0, 0}, // 0a ; 03
-+ {0x380d, 0x04, 0, 0}, // 1c ; 5C
-+
-+ {0x380e, 0x03, 0, 0},
-+ {0x380f, 0x05, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x04, 0, 0},
-+ {0x3812, 0x00, 0, 0},
-+ {0x3813, 0x02, 0, 0},
-+ {0x3814, 0x03, 0, 0},
-+ {0x3815, 0x01, 0, 0},
-+ {0x3819, 0x01, 0, 0},
-+ {0x3820, 0x06, 0, 0},
-+ {0x3821, 0x00, 0, 0},
-+ {0x3829, 0x00, 0, 0},
-+ {0x382a, 0x03, 0, 0},
-+ {0x382b, 0x01, 0, 0},
-+ {0x382d, 0x7f, 0, 0},
-+ {0x3830, 0x08, 0, 0},
-+ {0x3836, 0x02, 0, 0},
-+ {0x3837, 0x00, 0, 0},
-+ {0x3841, 0x02, 0, 0},
-+ {0x3846, 0x08, 0, 0},
-+ {0x3847, 0x07, 0, 0},
-+ {0x3d85, 0x36, 0, 0},
-+ {0x3d8c, 0x71, 0, 0},
-+ {0x3d8d, 0xcb, 0, 0},
-+ {0x3f0a, 0x00, 0, 0},
-+ {0x4000, 0x71, 0, 0},
-+ {0x4001, 0x50, 0, 0},
-+ {0x4002, 0x04, 0, 0},
-+ {0x4003, 0x14, 0, 0},
-+ {0x400e, 0x00, 0, 0},
-+ {0x4011, 0x00, 0, 0},
-+ {0x401a, 0x00, 0, 0},
-+ {0x401b, 0x00, 0, 0},
-+ {0x401c, 0x00, 0, 0},
-+ {0x401d, 0x00, 0, 0},
-+ {0x401f, 0x00, 0, 0},
-+ {0x4020, 0x00, 0, 0},
-+ {0x4021, 0x10, 0, 0},
-+ {0x4022, 0x03, 0, 0},
-+ {0x4023, 0x93, 0, 0},
-+ {0x4024, 0x04, 0, 0},
-+ {0x4025, 0xC0, 0, 0},
-+ {0x4026, 0x04, 0, 0},
-+ {0x4027, 0xD0, 0, 0},
-+ {0x4028, 0x00, 0, 0},
-+ {0x4029, 0x02, 0, 0},
-+ {0x402a, 0x06, 0, 0},
-+ {0x402b, 0x04, 0, 0},
-+ {0x402c, 0x02, 0, 0},
-+ {0x402d, 0x02, 0, 0},
-+ {0x402e, 0x0e, 0, 0},
-+ {0x402f, 0x04, 0, 0},
-+ {0x4302, 0xff, 0, 0},
-+ {0x4303, 0xff, 0, 0},
-+ {0x4304, 0x00, 0, 0},
-+ {0x4305, 0x00, 0, 0},
-+ {0x4306, 0x00, 0, 0},
-+ {0x4308, 0x02, 0, 0},
-+ {0x4500, 0x6c, 0, 0},
-+ {0x4501, 0xc4, 0, 0},
-+ {0x4502, 0x44, 0, 0},
-+ {0x4503, 0x01, 0, 0},
-+ {0x4600, 0x00, 0, 0},
-+ {0x4601, 0x4F, 0, 0},
-+ {0x4800, 0x04, 0, 0},
-+ {0x4813, 0x08, 0, 0},
-+ {0x481f, 0x40, 0, 0},
-+ {0x4829, 0x78, 0, 0},
-+ {0x4837, 0x10, 0, 0}, // 20 ; 10
-+ {0x4b00, 0x2a, 0, 0},
-+ {0x4b0d, 0x00, 0, 0},
-+ {0x4d00, 0x04, 0, 0},
-+ {0x4d01, 0x42, 0, 0},
-+ {0x4d02, 0xd1, 0, 0},
-+ {0x4d03, 0x93, 0, 0},
-+ {0x4d04, 0xf5, 0, 0},
-+ {0x4d05, 0xc1, 0, 0},
-+ {0x5000, 0xf3, 0, 0},
-+ {0x5001, 0x11, 0, 0},
-+ {0x5004, 0x00, 0, 0},
-+ {0x500a, 0x00, 0, 0},
-+ {0x500b, 0x00, 0, 0},
-+ {0x5032, 0x00, 0, 0},
-+ {0x5040, 0x00, 0, 0},
-+ {0x5050, 0x3c, 0, 0},
-+ {0x5500, 0x00, 0, 0},
-+ {0x5501, 0x10, 0, 0},
-+ {0x5502, 0x01, 0, 0},
-+ {0x5503, 0x0f, 0, 0},
-+ {0x8000, 0x00, 0, 0},
-+ {0x8001, 0x00, 0, 0},
-+ {0x8002, 0x00, 0, 0},
-+ {0x8003, 0x00, 0, 0},
-+ {0x8004, 0x00, 0, 0},
-+ {0x8005, 0x00, 0, 0},
-+ {0x8006, 0x00, 0, 0},
-+ {0x8007, 0x00, 0, 0},
-+ {0x8008, 0x00, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+};
-+
-+static const struct reg_value ov4689_setting_720P_1280_720[] = {
-+ //@@ RES_1280x720_2x_Bin_150fps_816Mbps
-+ //OV4689_AM01B_1280x720_24M_2lane_816Mbps_150fps_20140210.txt
-+ {0x0103, 0x01, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+ {0x0300, 0x00, 0, 0}, // 00
-+ {0x0302, 0x22, 0, 0}, // 816Mbps 5a ; 64 ; 5a ; 78 ; 78 ; 2a
-+ {0x0303, 0x00, 0, 0}, // 03 ; 01 ; 02 ;
-+ {0x0304, 0x03, 0, 0},
-+ {0x030b, 0x00, 0, 0},
-+ {0x030d, 0x1e, 0, 0},
-+ {0x030e, 0x04, 0, 0},
-+ {0x030f, 0x01, 0, 0},
-+ {0x0312, 0x01, 0, 0},
-+ {0x031e, 0x00, 0, 0},
-+ {0x3000, 0x20, 0, 0},
-+ {0x3002, 0x00, 0, 0},
-+ {0x3020, 0x93, 0, 0},
-+ {0x3021, 0x03, 0, 0},
-+ {0x3022, 0x01, 0, 0},
-+ {0x3031, 0x0a, 0, 0},
-+ {0x303f, 0x0c, 0, 0},
-+ {0x3305, 0xf1, 0, 0},
-+ {0x3307, 0x04, 0, 0},
-+ {0x3309, 0x29, 0, 0},
-+ {0x3500, 0x00, 0, 0},
-+ {0x3501, 0x30, 0, 0},
-+ {0x3502, 0x00, 0, 0},
-+ {0x3503, 0x04, 0, 0},
-+ {0x3504, 0x00, 0, 0},
-+ {0x3505, 0x00, 0, 0},
-+ {0x3506, 0x00, 0, 0},
-+ {0x3507, 0x00, 0, 0},
-+ {0x3508, 0x07, 0, 0},
-+ {0x3509, 0x78, 0, 0}, // 8X
-+ {0x350a, 0x00, 0, 0},
-+ {0x350b, 0x00, 0, 0},
-+ {0x350c, 0x00, 0, 0},
-+ {0x350d, 0x00, 0, 0},
-+ {0x350e, 0x00, 0, 0},
-+ {0x350f, 0x80, 0, 0},
-+ {0x3510, 0x00, 0, 0},
-+ {0x3511, 0x00, 0, 0},
-+ {0x3512, 0x00, 0, 0},
-+ {0x3513, 0x00, 0, 0},
-+ {0x3514, 0x00, 0, 0},
-+ {0x3515, 0x80, 0, 0},
-+ {0x3516, 0x00, 0, 0},
-+ {0x3517, 0x00, 0, 0},
-+ {0x3518, 0x00, 0, 0},
-+ {0x3519, 0x00, 0, 0},
-+ {0x351a, 0x00, 0, 0},
-+ {0x351b, 0x80, 0, 0},
-+ {0x351c, 0x00, 0, 0},
-+ {0x351d, 0x00, 0, 0},
-+ {0x351e, 0x00, 0, 0},
-+ {0x351f, 0x00, 0, 0},
-+ {0x3520, 0x00, 0, 0},
-+ {0x3521, 0x80, 0, 0},
-+ {0x3522, 0x08, 0, 0},
-+ {0x3524, 0x08, 0, 0},
-+ {0x3526, 0x08, 0, 0},
-+ {0x3528, 0x08, 0, 0},
-+ {0x352a, 0x08, 0, 0},
-+ {0x3602, 0x00, 0, 0},
-+ {0x3603, 0x40, 0, 0},
-+ {0x3604, 0x02, 0, 0},
-+ {0x3605, 0x00, 0, 0},
-+ {0x3606, 0x00, 0, 0},
-+ {0x3607, 0x00, 0, 0},
-+ {0x3609, 0x12, 0, 0},
-+ {0x360a, 0x40, 0, 0},
-+ {0x360c, 0x08, 0, 0},
-+ {0x360f, 0xe5, 0, 0},
-+ {0x3608, 0x8f, 0, 0},
-+ {0x3611, 0x00, 0, 0},
-+ {0x3613, 0xf7, 0, 0},
-+ {0x3616, 0x58, 0, 0},
-+ {0x3619, 0x99, 0, 0},
-+ {0x361b, 0x60, 0, 0},
-+ {0x361c, 0x7a, 0, 0},
-+ {0x361e, 0x79, 0, 0},
-+ {0x361f, 0x02, 0, 0},
-+ {0x3632, 0x05, 0, 0},
-+ {0x3633, 0x10, 0, 0},
-+ {0x3634, 0x10, 0, 0},
-+ {0x3635, 0x10, 0, 0},
-+ {0x3636, 0x15, 0, 0},
-+ {0x3646, 0x86, 0, 0},
-+ {0x364a, 0x0b, 0, 0},
-+ {0x3700, 0x17, 0, 0},
-+ {0x3701, 0x22, 0, 0},
-+ {0x3703, 0x10, 0, 0},
-+ {0x370a, 0x37, 0, 0},
-+ {0x3705, 0x00, 0, 0},
-+ {0x3706, 0x63, 0, 0},
-+ {0x3709, 0x3c, 0, 0},
-+ {0x370b, 0x01, 0, 0},
-+ {0x370c, 0x30, 0, 0},
-+ {0x3710, 0x24, 0, 0},
-+ {0x3711, 0x0c, 0, 0},
-+ {0x3716, 0x00, 0, 0},
-+ {0x3720, 0x28, 0, 0},
-+ {0x3729, 0x7b, 0, 0},
-+ {0x372a, 0x84, 0, 0},
-+ {0x372b, 0xbd, 0, 0},
-+ {0x372c, 0xbc, 0, 0},
-+ {0x372e, 0x52, 0, 0},
-+ {0x373c, 0x0e, 0, 0},
-+ {0x373e, 0x33, 0, 0},
-+ {0x3743, 0x10, 0, 0},
-+ {0x3744, 0x88, 0, 0},
-+ {0x3745, 0xc0, 0, 0},
-+ {0x374a, 0x43, 0, 0},
-+ {0x374c, 0x00, 0, 0},
-+ {0x374e, 0x23, 0, 0},
-+ {0x3751, 0x7b, 0, 0},
-+ {0x3752, 0x84, 0, 0},
-+ {0x3753, 0xbd, 0, 0},
-+ {0x3754, 0xbc, 0, 0},
-+ {0x3756, 0x52, 0, 0},
-+ {0x375c, 0x00, 0, 0},
-+ {0x3760, 0x00, 0, 0},
-+ {0x3761, 0x00, 0, 0},
-+ {0x3762, 0x00, 0, 0},
-+ {0x3763, 0x00, 0, 0},
-+ {0x3764, 0x00, 0, 0},
-+ {0x3767, 0x04, 0, 0},
-+ {0x3768, 0x04, 0, 0},
-+ {0x3769, 0x08, 0, 0},
-+ {0x376a, 0x08, 0, 0},
-+ {0x376b, 0x40, 0, 0},
-+ {0x376c, 0x00, 0, 0},
-+ {0x376d, 0x00, 0, 0},
-+ {0x376e, 0x00, 0, 0},
-+ {0x3773, 0x00, 0, 0},
-+ {0x3774, 0x51, 0, 0},
-+ {0x3776, 0xbd, 0, 0},
-+ {0x3777, 0xbd, 0, 0},
-+ {0x3781, 0x18, 0, 0},
-+ {0x3783, 0x25, 0, 0},
-+ {0x3798, 0x1b, 0, 0},
-+ {0x3800, 0x00, 0, 0},
-+ {0x3801, 0x48, 0, 0},
-+ {0x3802, 0x00, 0, 0},
-+ {0x3803, 0x2C, 0, 0},
-+ {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x57, 0, 0},
-+ {0x3806, 0x05, 0, 0},
-+ {0x3807, 0xD3, 0, 0},
-+ {0x3808, 0x05, 0, 0},
-+ {0x3809, 0x00, 0, 0},
-+ {0x380a, 0x02, 0, 0},
-+ {0x380b, 0xD0, 0, 0},
-+#ifndef UNUSED_CODE
-+ {0x380c, 0x04, 0, 0}, // 0a ; 03
-+ {0x380d, 0x08, 0, 0}, // 1c ; 5C
-+#else
-+ {0x380c, 0x05, 0, 0}, // 120fps
-+ {0x380d, 0x0A, 0, 0},
-+#endif
-+ {0x380e, 0x03, 0, 0},
-+ {0x380f, 0x05, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x04, 0, 0},
-+ {0x3812, 0x00, 0, 0},
-+ {0x3813, 0x02, 0, 0},
-+ {0x3814, 0x03, 0, 0},
-+ {0x3815, 0x01, 0, 0},
-+ {0x3819, 0x01, 0, 0},
-+ {0x3820, 0x06, 0, 0},
-+ {0x3821, 0x00, 0, 0},
-+ {0x3829, 0x00, 0, 0},
-+ {0x382a, 0x03, 0, 0},
-+ {0x382b, 0x01, 0, 0},
-+ {0x382d, 0x7f, 0, 0},
-+ {0x3830, 0x08, 0, 0},
-+ {0x3836, 0x02, 0, 0},
-+ {0x3837, 0x00, 0, 0},
-+ {0x3841, 0x02, 0, 0},
-+ {0x3846, 0x08, 0, 0},
-+ {0x3847, 0x07, 0, 0},
-+ {0x3d85, 0x36, 0, 0},
-+ {0x3d8c, 0x71, 0, 0},
-+ {0x3d8d, 0xcb, 0, 0},
-+ {0x3f0a, 0x00, 0, 0},
-+ {0x4000, 0x71, 0, 0},
-+ {0x4001, 0x50, 0, 0},
-+ {0x4002, 0x04, 0, 0},
-+ {0x4003, 0x14, 0, 0},
-+ {0x400e, 0x00, 0, 0},
-+ {0x4011, 0x00, 0, 0},
-+ {0x401a, 0x00, 0, 0},
-+ {0x401b, 0x00, 0, 0},
-+ {0x401c, 0x00, 0, 0},
-+ {0x401d, 0x00, 0, 0},
-+ {0x401f, 0x00, 0, 0},
-+ {0x4020, 0x00, 0, 0},
-+ {0x4021, 0x10, 0, 0},
-+ {0x4022, 0x03, 0, 0},
-+ {0x4023, 0x93, 0, 0},
-+ {0x4024, 0x04, 0, 0},
-+ {0x4025, 0xC0, 0, 0},
-+ {0x4026, 0x04, 0, 0},
-+ {0x4027, 0xD0, 0, 0},
-+ {0x4028, 0x00, 0, 0},
-+ {0x4029, 0x02, 0, 0},
-+ {0x402a, 0x06, 0, 0},
-+ {0x402b, 0x04, 0, 0},
-+ {0x402c, 0x02, 0, 0},
-+ {0x402d, 0x02, 0, 0},
-+ {0x402e, 0x0e, 0, 0},
-+ {0x402f, 0x04, 0, 0},
-+ {0x4302, 0xff, 0, 0},
-+ {0x4303, 0xff, 0, 0},
-+ {0x4304, 0x00, 0, 0},
-+ {0x4305, 0x00, 0, 0},
-+ {0x4306, 0x00, 0, 0},
-+ {0x4308, 0x02, 0, 0},
-+ {0x4500, 0x6c, 0, 0},
-+ {0x4501, 0xc4, 0, 0},
-+ {0x4502, 0x44, 0, 0},
-+ {0x4503, 0x01, 0, 0},
-+ {0x4600, 0x00, 0, 0},
-+ {0x4601, 0x4F, 0, 0},
-+ {0x4800, 0x04, 0, 0},
-+ {0x4813, 0x08, 0, 0},
-+ {0x481f, 0x40, 0, 0},
-+ {0x4829, 0x78, 0, 0},
-+ {0x4837, 0x10, 0, 0}, // 20 ; 10
-+ {0x4b00, 0x2a, 0, 0},
-+ {0x4b0d, 0x00, 0, 0},
-+ {0x4d00, 0x04, 0, 0},
-+ {0x4d01, 0x42, 0, 0},
-+ {0x4d02, 0xd1, 0, 0},
-+ {0x4d03, 0x93, 0, 0},
-+ {0x4d04, 0xf5, 0, 0},
-+ {0x4d05, 0xc1, 0, 0},
-+ {0x5000, 0xf3, 0, 0},
-+ {0x5001, 0x11, 0, 0},
-+ {0x5004, 0x00, 0, 0},
-+ {0x500a, 0x00, 0, 0},
-+ {0x500b, 0x00, 0, 0},
-+ {0x5032, 0x00, 0, 0},
-+ {0x5040, 0x00, 0, 0},
-+ {0x5050, 0x3c, 0, 0},
-+ {0x5500, 0x00, 0, 0},
-+ {0x5501, 0x10, 0, 0},
-+ {0x5502, 0x01, 0, 0},
-+ {0x5503, 0x0f, 0, 0},
-+ {0x8000, 0x00, 0, 0},
-+ {0x8001, 0x00, 0, 0},
-+ {0x8002, 0x00, 0, 0},
-+ {0x8003, 0x00, 0, 0},
-+ {0x8004, 0x00, 0, 0},
-+ {0x8005, 0x00, 0, 0},
-+ {0x8006, 0x00, 0, 0},
-+ {0x8007, 0x00, 0, 0},
-+ {0x8008, 0x00, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+};
-+
-+static const struct reg_value ov4689_setting_1080P_1920_1080[] = {
-+ //@@ RES_1920x1080_60fps_816Mbps 2lanes
-+ {0x0103, 0x01, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+ {0x0300, 0x00, 0, 0}, // clk
-+ {0x0302, 0x22, 0, 0},
-+ {0x0303, 0x00, 0, 0},
-+ {0x0304, 0x03, 0, 0},
-+ {0x030b, 0x00, 0, 0},
-+ {0x030d, 0x1e, 0, 0},
-+ {0x030e, 0x04, 0, 0},
-+ {0x030f, 0x01, 0, 0},
-+ {0x0312, 0x01, 0, 0},
-+ {0x031e, 0x00, 0, 0},
-+ {0x3000, 0x20, 0, 0},
-+ {0x3002, 0x00, 0, 0},
-+ {0x3020, 0x93, 0, 0},
-+ {0x3021, 0x03, 0, 0},
-+ {0x3022, 0x01, 0, 0},
-+ {0x3031, 0x0a, 0, 0},
-+ {0x303f, 0x0c, 0, 0},
-+ {0x3305, 0xf1, 0, 0},
-+ {0x3307, 0x04, 0, 0},
-+ {0x3309, 0x29, 0, 0},
-+ {0x3500, 0x00, 0, 0}, // AEC
-+ {0x3501, 0x4c, 0, 0},
-+ {0x3502, 0x00, 0, 0},
-+ {0x3503, 0x04, 0, 0},
-+ {0x3504, 0x00, 0, 0},
-+ {0x3505, 0x00, 0, 0},
-+ {0x3506, 0x00, 0, 0},
-+ {0x3507, 0x00, 0, 0},
-+ {0x3508, 0x00, 0, 0},
-+ {0x3509, 0x80, 0, 0},
-+ {0x350a, 0x00, 0, 0},
-+ {0x350b, 0x00, 0, 0},
-+ {0x350c, 0x00, 0, 0},
-+ {0x350d, 0x00, 0, 0},
-+ {0x350e, 0x00, 0, 0},
-+ {0x350f, 0x80, 0, 0},
-+ {0x3510, 0x00, 0, 0},
-+ {0x3511, 0x00, 0, 0},
-+ {0x3512, 0x00, 0, 0},
-+ {0x3513, 0x00, 0, 0},
-+ {0x3514, 0x00, 0, 0},
-+ {0x3515, 0x80, 0, 0},
-+ {0x3516, 0x00, 0, 0},
-+ {0x3517, 0x00, 0, 0},
-+ {0x3518, 0x00, 0, 0},
-+ {0x3519, 0x00, 0, 0},
-+ {0x351a, 0x00, 0, 0},
-+ {0x351b, 0x80, 0, 0},
-+ {0x351c, 0x00, 0, 0},
-+ {0x351d, 0x00, 0, 0},
-+ {0x351e, 0x00, 0, 0},
-+ {0x351f, 0x00, 0, 0},
-+ {0x3520, 0x00, 0, 0},
-+ {0x3521, 0x80, 0, 0},
-+ {0x3522, 0x08, 0, 0},
-+ {0x3524, 0x08, 0, 0},
-+ {0x3526, 0x08, 0, 0},
-+ {0x3528, 0x08, 0, 0},
-+ {0x352a, 0x08, 0, 0},
-+ {0x3602, 0x00, 0, 0},
-+ {0x3603, 0x40, 0, 0},
-+ {0x3604, 0x02, 0, 0},
-+ {0x3605, 0x00, 0, 0},
-+ {0x3606, 0x00, 0, 0},
-+ {0x3607, 0x00, 0, 0},
-+ {0x3609, 0x12, 0, 0},
-+ {0x360a, 0x40, 0, 0},
-+ {0x360c, 0x08, 0, 0},
-+ {0x360f, 0xe5, 0, 0},
-+ {0x3608, 0x8f, 0, 0},
-+ {0x3611, 0x00, 0, 0},
-+ {0x3613, 0xf7, 0, 0},
-+ {0x3616, 0x58, 0, 0},
-+ {0x3619, 0x99, 0, 0},
-+ {0x361b, 0x60, 0, 0},
-+ {0x361c, 0x7a, 0, 0},
-+ {0x361e, 0x79, 0, 0},
-+ {0x361f, 0x02, 0, 0},
-+ {0x3632, 0x00, 0, 0},
-+ {0x3633, 0x10, 0, 0},
-+ {0x3634, 0x10, 0, 0},
-+ {0x3635, 0x10, 0, 0},
-+ {0x3636, 0x15, 0, 0},
-+ {0x3646, 0x86, 0, 0},
-+ {0x364a, 0x0b, 0, 0},
-+ {0x3700, 0x17, 0, 0},
-+ {0x3701, 0x22, 0, 0},
-+ {0x3703, 0x10, 0, 0},
-+ {0x370a, 0x37, 0, 0},
-+ {0x3705, 0x00, 0, 0},
-+ {0x3706, 0x63, 0, 0},
-+ {0x3709, 0x3c, 0, 0},
-+ {0x370b, 0x01, 0, 0},
-+ {0x370c, 0x30, 0, 0},
-+ {0x3710, 0x24, 0, 0},
-+ {0x3711, 0x0c, 0, 0},
-+ {0x3716, 0x00, 0, 0},
-+ {0x3720, 0x28, 0, 0},
-+ {0x3729, 0x7b, 0, 0},
-+ {0x372a, 0x84, 0, 0},
-+ {0x372b, 0xbd, 0, 0},
-+ {0x372c, 0xbc, 0, 0},
-+ {0x372e, 0x52, 0, 0},
-+ {0x373c, 0x0e, 0, 0},
-+ {0x373e, 0x33, 0, 0},
-+ {0x3743, 0x10, 0, 0},
-+ {0x3744, 0x88, 0, 0},
-+ {0x3745, 0xc0, 0, 0},
-+ {0x374a, 0x43, 0, 0},
-+ {0x374c, 0x00, 0, 0},
-+ {0x374e, 0x23, 0, 0},
-+ {0x3751, 0x7b, 0, 0},
-+ {0x3752, 0x84, 0, 0},
-+ {0x3753, 0xbd, 0, 0},
-+ {0x3754, 0xbc, 0, 0},
-+ {0x3756, 0x52, 0, 0},
-+ {0x375c, 0x00, 0, 0},
-+ {0x3760, 0x00, 0, 0},
-+ {0x3761, 0x00, 0, 0},
-+ {0x3762, 0x00, 0, 0},
-+ {0x3763, 0x00, 0, 0},
-+ {0x3764, 0x00, 0, 0},
-+ {0x3767, 0x04, 0, 0},
-+ {0x3768, 0x04, 0, 0},
-+ {0x3769, 0x08, 0, 0},
-+ {0x376a, 0x08, 0, 0},
-+ {0x376b, 0x20, 0, 0},
-+ {0x376c, 0x00, 0, 0},
-+ {0x376d, 0x00, 0, 0},
-+ {0x376e, 0x00, 0, 0},
-+ {0x3773, 0x00, 0, 0},
-+ {0x3774, 0x51, 0, 0},
-+ {0x3776, 0xbd, 0, 0},
-+ {0x3777, 0xbd, 0, 0},
-+ {0x3781, 0x18, 0, 0},
-+ {0x3783, 0x25, 0, 0},
-+ {0x3798, 0x1b, 0, 0},
-+ {0x3800, 0x01, 0, 0}, // timings
-+ {0x3801, 0x88, 0, 0},
-+ {0x3802, 0x00, 0, 0},
-+ {0x3803, 0xe0, 0, 0},
-+ {0x3804, 0x09, 0, 0},
-+ {0x3805, 0x17, 0, 0},
-+ {0x3806, 0x05, 0, 0},
-+ {0x3807, 0x1f, 0, 0},
-+ {0x3808, 0x07, 0, 0},
-+ {0x3809, 0x80, 0, 0},
-+ {0x380a, 0x04, 0, 0},
-+ {0x380b, 0x38, 0, 0},
-+ {0x380c, 0x06, 0, 0},
-+ {0x380d, 0xe0, 0, 0},
-+ {0x380e, 0x04, 0, 0},
-+ {0x380f, 0x70, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x08, 0, 0},
-+ {0x3812, 0x00, 0, 0},
-+ {0x3813, 0x04, 0, 0},
-+ {0x3814, 0x01, 0, 0},
-+ {0x3815, 0x01, 0, 0},
-+ {0x3819, 0x01, 0, 0},
-+ {0x3820, 0x06, 0, 0},
-+ {0x3821, 0x00, 0, 0},
-+ {0x3829, 0x00, 0, 0},
-+ {0x382a, 0x01, 0, 0},
-+ {0x382b, 0x01, 0, 0},
-+ {0x382d, 0x7f, 0, 0},
-+ {0x3830, 0x04, 0, 0},
-+ {0x3836, 0x01, 0, 0},
-+ {0x3837, 0x00, 0, 0},
-+ {0x3841, 0x02, 0, 0},
-+ {0x3846, 0x08, 0, 0},
-+ {0x3847, 0x07, 0, 0},
-+ {0x3d85, 0x36, 0, 0},
-+ {0x3d8c, 0x71, 0, 0},
-+ {0x3d8d, 0xcb, 0, 0},
-+ {0x3f0a, 0x00, 0, 0},
-+ {0x4000, 0xf1, 0, 0},
-+ {0x4001, 0x40, 0, 0},
-+ {0x4002, 0x04, 0, 0},
-+ {0x4003, 0x14, 0, 0},
-+ {0x400e, 0x00, 0, 0},
-+ {0x4011, 0x00, 0, 0},
-+ {0x401a, 0x00, 0, 0},
-+ {0x401b, 0x00, 0, 0},
-+ {0x401c, 0x00, 0, 0},
-+ {0x401d, 0x00, 0, 0},
-+ {0x401f, 0x00, 0, 0},
-+ {0x4020, 0x00, 0, 0},
-+ {0x4021, 0x10, 0, 0},
-+ {0x4022, 0x06, 0, 0},
-+ {0x4023, 0x13, 0, 0},
-+ {0x4024, 0x07, 0, 0},
-+ {0x4025, 0x40, 0, 0},
-+ {0x4026, 0x07, 0, 0},
-+ {0x4027, 0x50, 0, 0},
-+ {0x4028, 0x00, 0, 0},
-+ {0x4029, 0x02, 0, 0},
-+ {0x402a, 0x06, 0, 0},
-+ {0x402b, 0x04, 0, 0},
-+ {0x402c, 0x02, 0, 0},
-+ {0x402d, 0x02, 0, 0},
-+ {0x402e, 0x0e, 0, 0},
-+ {0x402f, 0x04, 0, 0},
-+ {0x4302, 0xff, 0, 0},
-+ {0x4303, 0xff, 0, 0},
-+ {0x4304, 0x00, 0, 0},
-+ {0x4305, 0x00, 0, 0},
-+ {0x4306, 0x00, 0, 0},
-+ {0x4308, 0x02, 0, 0},
-+ {0x4500, 0x6c, 0, 0},
-+ {0x4501, 0xc4, 0, 0},
-+ {0x4502, 0x40, 0, 0},
-+ {0x4503, 0x01, 0, 0},
-+ {0x4601, 0x77, 0, 0},
-+ {0x4800, 0x04, 0, 0},
-+ {0x4813, 0x08, 0, 0},
-+ {0x481f, 0x40, 0, 0},
-+ {0x4829, 0x78, 0, 0},
-+ {0x4837, 0x10, 0, 0},
-+ {0x4b00, 0x2a, 0, 0},
-+ {0x4b0d, 0x00, 0, 0},
-+ {0x4d00, 0x04, 0, 0},
-+ {0x4d01, 0x42, 0, 0},
-+ {0x4d02, 0xd1, 0, 0},
-+ {0x4d03, 0x93, 0, 0},
-+ {0x4d04, 0xf5, 0, 0},
-+ {0x4d05, 0xc1, 0, 0},
-+ {0x5000, 0xf3, 0, 0},
-+ {0x5001, 0x11, 0, 0},
-+ {0x5004, 0x00, 0, 0},
-+ {0x500a, 0x00, 0, 0},
-+ {0x500b, 0x00, 0, 0},
-+ {0x5032, 0x00, 0, 0},
-+ {0x5040, 0x00, 0, 0},
-+ {0x5050, 0x0c, 0, 0},
-+ {0x5500, 0x00, 0, 0},
-+ {0x5501, 0x10, 0, 0},
-+ {0x5502, 0x01, 0, 0},
-+ {0x5503, 0x0f, 0, 0},
-+ {0x8000, 0x00, 0, 0},
-+ {0x8001, 0x00, 0, 0},
-+ {0x8002, 0x00, 0, 0},
-+ {0x8003, 0x00, 0, 0},
-+ {0x8004, 0x00, 0, 0},
-+ {0x8005, 0x00, 0, 0},
-+ {0x8006, 0x00, 0, 0},
-+ {0x8007, 0x00, 0, 0},
-+ {0x8008, 0x00, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+};
-+
-+static const struct reg_value ov4689_setting_4M_2688_1520[] = {
-+ //@@ 0 10 RES_2688x1520_default(60fps)
-+ //102 2630 960
-+ {0x0103, 0x01, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+ {0x0300, 0x00, 0, 0},
-+ {0x0302, 0x22, 0, 0}, // 2a ;1008Mbps,23 ;; 840Mbps
-+ {0x0304, 0x03, 0, 0},
-+ {0x030b, 0x00, 0, 0},
-+ {0x030d, 0x1e, 0, 0},
-+ {0x030e, 0x04, 0, 0},
-+ {0x030f, 0x01, 0, 0},
-+ {0x0312, 0x01, 0, 0},
-+ {0x031e, 0x00, 0, 0},
-+ {0x3000, 0x20, 0, 0},
-+ {0x3002, 0x00, 0, 0},
-+ {0x3020, 0x93, 0, 0},
-+ {0x3021, 0x03, 0, 0},
-+ {0x3022, 0x01, 0, 0},
-+ {0x3031, 0x0a, 0, 0},
-+ {0x303f, 0x0c, 0, 0},
-+ {0x3305, 0xf1, 0, 0},
-+ {0x3307, 0x04, 0, 0},
-+ {0x3309, 0x29, 0, 0},
-+ {0x3500, 0x00, 0, 0},
-+ {0x3501, 0x60, 0, 0},
-+ {0x3502, 0x00, 0, 0},
-+ {0x3503, 0x04, 0, 0},
-+ {0x3504, 0x00, 0, 0},
-+ {0x3505, 0x00, 0, 0},
-+ {0x3506, 0x00, 0, 0},
-+ {0x3507, 0x00, 0, 0},
-+ {0x3508, 0x00, 0, 0},
-+ {0x3509, 0x80, 0, 0},
-+ {0x350a, 0x00, 0, 0},
-+ {0x350b, 0x00, 0, 0},
-+ {0x350c, 0x00, 0, 0},
-+ {0x350d, 0x00, 0, 0},
-+ {0x350e, 0x00, 0, 0},
-+ {0x350f, 0x80, 0, 0},
-+ {0x3510, 0x00, 0, 0},
-+ {0x3511, 0x00, 0, 0},
-+ {0x3512, 0x00, 0, 0},
-+ {0x3513, 0x00, 0, 0},
-+ {0x3514, 0x00, 0, 0},
-+ {0x3515, 0x80, 0, 0},
-+ {0x3516, 0x00, 0, 0},
-+ {0x3517, 0x00, 0, 0},
-+ {0x3518, 0x00, 0, 0},
-+ {0x3519, 0x00, 0, 0},
-+ {0x351a, 0x00, 0, 0},
-+ {0x351b, 0x80, 0, 0},
-+ {0x351c, 0x00, 0, 0},
-+ {0x351d, 0x00, 0, 0},
-+ {0x351e, 0x00, 0, 0},
-+ {0x351f, 0x00, 0, 0},
-+ {0x3520, 0x00, 0, 0},
-+ {0x3521, 0x80, 0, 0},
-+ {0x3522, 0x08, 0, 0},
-+ {0x3524, 0x08, 0, 0},
-+ {0x3526, 0x08, 0, 0},
-+ {0x3528, 0x08, 0, 0},
-+ {0x352a, 0x08, 0, 0},
-+ {0x3602, 0x00, 0, 0},
-+ {0x3603, 0x40, 0, 0},
-+ {0x3604, 0x02, 0, 0},
-+ {0x3605, 0x00, 0, 0},
-+ {0x3606, 0x00, 0, 0},
-+ {0x3607, 0x00, 0, 0},
-+ {0x3609, 0x12, 0, 0},
-+ {0x360a, 0x40, 0, 0},
-+ {0x360c, 0x08, 0, 0},
-+ {0x360f, 0xe5, 0, 0},
-+ {0x3608, 0x8f, 0, 0},
-+ {0x3611, 0x00, 0, 0},
-+ {0x3613, 0xf7, 0, 0},
-+ {0x3616, 0x58, 0, 0},
-+ {0x3619, 0x99, 0, 0},
-+ {0x361b, 0x60, 0, 0},
-+ {0x361c, 0x7a, 0, 0},
-+ {0x361e, 0x79, 0, 0},
-+ {0x361f, 0x02, 0, 0},
-+ {0x3632, 0x00, 0, 0},
-+ {0x3633, 0x10, 0, 0},
-+ {0x3634, 0x10, 0, 0},
-+ {0x3635, 0x10, 0, 0},
-+ {0x3636, 0x15, 0, 0},
-+ {0x3646, 0x86, 0, 0},
-+ {0x364a, 0x0b, 0, 0},
-+ {0x3700, 0x17, 0, 0},
-+ {0x3701, 0x22, 0, 0},
-+ {0x3703, 0x10, 0, 0},
-+ {0x370a, 0x37, 0, 0},
-+ {0x3705, 0x00, 0, 0},
-+ {0x3706, 0x63, 0, 0},
-+ {0x3709, 0x3c, 0, 0},
-+ {0x370b, 0x01, 0, 0},
-+ {0x370c, 0x30, 0, 0},
-+ {0x3710, 0x24, 0, 0},
-+ {0x3711, 0x0c, 0, 0},
-+ {0x3716, 0x00, 0, 0},
-+ {0x3720, 0x28, 0, 0},
-+ {0x3729, 0x7b, 0, 0},
-+ {0x372a, 0x84, 0, 0},
-+ {0x372b, 0xbd, 0, 0},
-+ {0x372c, 0xbc, 0, 0},
-+ {0x372e, 0x52, 0, 0},
-+ {0x373c, 0x0e, 0, 0},
-+ {0x373e, 0x33, 0, 0},
-+ {0x3743, 0x10, 0, 0},
-+ {0x3744, 0x88, 0, 0},
-+ {0x3745, 0xc0, 0, 0},
-+ {0x374a, 0x43, 0, 0},
-+ {0x374c, 0x00, 0, 0},
-+ {0x374e, 0x23, 0, 0},
-+ {0x3751, 0x7b, 0, 0},
-+ {0x3752, 0x84, 0, 0},
-+ {0x3753, 0xbd, 0, 0},
-+ {0x3754, 0xbc, 0, 0},
-+ {0x3756, 0x52, 0, 0},
-+ {0x375c, 0x00, 0, 0},
-+ {0x3760, 0x00, 0, 0},
-+ {0x3761, 0x00, 0, 0},
-+ {0x3762, 0x00, 0, 0},
-+ {0x3763, 0x00, 0, 0},
-+ {0x3764, 0x00, 0, 0},
-+ {0x3767, 0x04, 0, 0},
-+ {0x3768, 0x04, 0, 0},
-+ {0x3769, 0x08, 0, 0},
-+ {0x376a, 0x08, 0, 0},
-+ {0x376b, 0x20, 0, 0},
-+ {0x376c, 0x00, 0, 0},
-+ {0x376d, 0x00, 0, 0},
-+ {0x376e, 0x00, 0, 0},
-+ {0x3773, 0x00, 0, 0},
-+ {0x3774, 0x51, 0, 0},
-+ {0x3776, 0xbd, 0, 0},
-+ {0x3777, 0xbd, 0, 0},
-+ {0x3781, 0x18, 0, 0},
-+ {0x3783, 0x25, 0, 0},
-+ {0x3798, 0x1b, 0, 0},
-+ {0x3800, 0x00, 0, 0},
-+ {0x3801, 0x08, 0, 0},
-+ {0x3802, 0x00, 0, 0},
-+ {0x3803, 0x04, 0, 0},
-+ {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x97, 0, 0},
-+ {0x3806, 0x05, 0, 0},
-+ {0x3807, 0xfb, 0, 0},
-+ {0x3808, 0x0a, 0, 0},
-+ {0x3809, 0x80, 0, 0},
-+ {0x380a, 0x05, 0, 0},
-+ {0x380b, 0xf0, 0, 0},
-+ {0x380c, 0x03, 0, 0},
-+ {0x380d, 0x5c, 0, 0},
-+ {0x380e, 0x06, 0, 0},
-+ {0x380f, 0x12, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x08, 0, 0},
-+ {0x3812, 0x00, 0, 0},
-+ {0x3813, 0x04, 0, 0},
-+ {0x3814, 0x01, 0, 0},
-+ {0x3815, 0x01, 0, 0},
-+ {0x3819, 0x01, 0, 0},
-+ {0x3820, 0x00, 0, 0},
-+ {0x3821, 0x06, 0, 0},
-+ {0x3829, 0x00, 0, 0},
-+ {0x382a, 0x01, 0, 0},
-+ {0x382b, 0x01, 0, 0},
-+ {0x382d, 0x7f, 0, 0},
-+ {0x3830, 0x04, 0, 0},
-+ {0x3836, 0x01, 0, 0},
-+ {0x3837, 0x00, 0, 0},
-+ {0x3841, 0x02, 0, 0},
-+ {0x3846, 0x08, 0, 0},
-+ {0x3847, 0x07, 0, 0},
-+ {0x3d85, 0x36, 0, 0},
-+ {0x3d8c, 0x71, 0, 0},
-+ {0x3d8d, 0xcb, 0, 0},
-+ {0x3f0a, 0x00, 0, 0},
-+ {0x4000, 0x71, 0, 0},
-+ {0x4001, 0x40, 0, 0},
-+ {0x4002, 0x04, 0, 0},
-+ {0x4003, 0x14, 0, 0},
-+ {0x400e, 0x00, 0, 0},
-+ {0x4011, 0x00, 0, 0},
-+ {0x401a, 0x00, 0, 0},
-+ {0x401b, 0x00, 0, 0},
-+ {0x401c, 0x00, 0, 0},
-+ {0x401d, 0x00, 0, 0},
-+ {0x401f, 0x00, 0, 0},
-+ {0x4020, 0x00, 0, 0},
-+ {0x4021, 0x10, 0, 0},
-+ {0x4022, 0x07, 0, 0},
-+ {0x4023, 0xcf, 0, 0},
-+ {0x4024, 0x09, 0, 0},
-+ {0x4025, 0x60, 0, 0},
-+ {0x4026, 0x09, 0, 0},
-+ {0x4027, 0x6f, 0, 0},
-+ {0x4028, 0x00, 0, 0},
-+ {0x4029, 0x02, 0, 0},
-+ {0x402a, 0x06, 0, 0},
-+ {0x402b, 0x04, 0, 0},
-+ {0x402c, 0x02, 0, 0},
-+ {0x402d, 0x02, 0, 0},
-+ {0x402e, 0x0e, 0, 0},
-+ {0x402f, 0x04, 0, 0},
-+ {0x4302, 0xff, 0, 0},
-+ {0x4303, 0xff, 0, 0},
-+ {0x4304, 0x00, 0, 0},
-+ {0x4305, 0x00, 0, 0},
-+ {0x4306, 0x00, 0, 0},
-+ {0x4308, 0x02, 0, 0},
-+ {0x4500, 0x6c, 0, 0},
-+ {0x4501, 0xc4, 0, 0},
-+ {0x4502, 0x40, 0, 0},
-+ {0x4503, 0x01, 0, 0},
-+ {0x4601, 0x04, 0, 0},
-+ {0x4800, 0x04, 0, 0},
-+ {0x4813, 0x08, 0, 0},
-+ {0x481f, 0x40, 0, 0},
-+ {0x4829, 0x78, 0, 0},
-+ {0x4837, 0x14, 0, 0}, // 10
-+ {0x4b00, 0x2a, 0, 0},
-+ {0x4b0d, 0x00, 0, 0},
-+ {0x4d00, 0x04, 0, 0},
-+ {0x4d01, 0x42, 0, 0},
-+ {0x4d02, 0xd1, 0, 0},
-+ {0x4d03, 0x93, 0, 0},
-+ {0x4d04, 0xf5, 0, 0},
-+ {0x4d05, 0xc1, 0, 0},
-+ {0x5000, 0xf3, 0, 0},
-+ {0x5001, 0x11, 0, 0},
-+ {0x5004, 0x00, 0, 0},
-+ {0x500a, 0x00, 0, 0},
-+ {0x500b, 0x00, 0, 0},
-+ {0x5032, 0x00, 0, 0},
-+ {0x5040, 0x00, 0, 0},
-+ {0x5050, 0x0c, 0, 0},
-+ {0x5500, 0x00, 0, 0},
-+ {0x5501, 0x10, 0, 0},
-+ {0x5502, 0x01, 0, 0},
-+ {0x5503, 0x0f, 0, 0},
-+ {0x8000, 0x00, 0, 0},
-+ {0x8001, 0x00, 0, 0},
-+ {0x8002, 0x00, 0, 0},
-+ {0x8003, 0x00, 0, 0},
-+ {0x8004, 0x00, 0, 0},
-+ {0x8005, 0x00, 0, 0},
-+ {0x8006, 0x00, 0, 0},
-+ {0x8007, 0x00, 0, 0},
-+ {0x8008, 0x00, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+// {0x0100, 0x01, 0, 0},
-+
-+// {0x0100, 0x00, 0, 0},
-+ {0x380c, 0x0A, 0, 0}, // 05
-+ {0x380d, 0x0A, 0, 0}, // 10
-+ {0x380e, 0x06, 0, 0},
-+ {0x380f, 0x12, 0, 0},
-+// {0x0100, 0x01, 0, 0},
-+ {0x3105, 0x31, 0, 0},
-+ {0x301a, 0xf9, 0, 0},
-+ {0x3508, 0x07, 0, 0},
-+ {0x484b, 0x05, 0, 0},
-+ {0x4805, 0x03, 0, 0},
-+ {0x3601, 0x01, 0, 0},
-+ {0x3745, 0xc0, 0, 0},
-+ {0x3798, 0x1b, 0, 0},
-+// {0x0100, 0x01, 0, 0},
-+ {0xffff, 0x0a, 0, 0},
-+ {0x3105, 0x11, 0, 0},
-+ {0x301a, 0xf1, 0, 0},
-+ {0x4805, 0x00, 0, 0},
-+ {0x301a, 0xf0, 0, 0},
-+ {0x3208, 0x00, 0, 0},
-+ {0x302a, 0x00, 0, 0},
-+ {0x302a, 0x00, 0, 0},
-+ {0x302a, 0x00, 0, 0},
-+ {0x302a, 0x00, 0, 0},
-+ {0x302a, 0x00, 0, 0},
-+ {0x3601, 0x00, 0, 0},
-+ {0x3638, 0x00, 0, 0},
-+ {0x3208, 0x10, 0, 0},
-+ {0x3208, 0xa0, 0, 0},
-+};
-+
-+/* power-on sensor init reg table */
-+static const struct ov4689_mode_info ov4689_mode_init_data = {
-+
-+};
-+
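-+/*
-+ * Capture modes advertised to userspace. Only the 1920x1080 entry is
-+ * enabled for now and it points at the ov4689_init_setting_30fps_1080P
-+ * table; the 720p and 2688x1520 entries are kept commented out below.
-+ */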
-+static const struct ov4689_mode_info
-+ov4689_mode_data[OV4689_NUM_MODES] = {
-+ // {OV4689_MODE_720P_1280_720, SUBSAMPLING,
-+ // 1280, 0x408, 720, 0x305,
-+ // ov4689_setting_720P_1280_720,
-+ // ARRAY_SIZE(ov4689_setting_720P_1280_720),
-+ // OV4689_150_FPS},
-+ // {OV4689_MODE_1080P_1920_1080, SCALING,
-+ // 1920, 0x6e0, 1080, 0x470,
-+ // ov4689_setting_1080P_1920_1080,
-+ // ARRAY_SIZE(ov4689_setting_1080P_1920_1080),
-+ // OV4689_60_FPS},
-+ // {OV4689_MODE_4M_2688_1520, SCALING,
-+ // 2688, 0xa0a, 1520, 0x612,
-+ // ov4689_setting_4M_2688_1520,
-+ // ARRAY_SIZE(ov4689_setting_4M_2688_1520),
-+ // OV4689_60_FPS},
-+
-+ {OV4689_MODE_1080P_1920_1080, SCALING,
-+ 1920, 0x6e0, 1080, 0x470,
-+ ov4689_init_setting_30fps_1080P,
-+ ARRAY_SIZE(ov4689_init_setting_30fps_1080P),
-+ OV4689_60_FPS},
-+};
-+
-+static int ov4689_write_reg(struct ov4689_dev *sensor, u16 reg, u8 val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg;
-+ u8 buf[3];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+ buf[2] = val;
-+
-+ msg.addr = client->addr;
-+ msg.flags = client->flags;
-+ msg.buf = buf;
-+ msg.len = sizeof(buf);
-+
-+ ret = i2c_transfer(client->adapter, &msg, 1);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x, val=%x\n",
-+ __func__, reg, val);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ov4689_read_reg(struct ov4689_dev *sensor, u16 reg, u8 *val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg[2];
-+ u8 buf[2];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+
-+ msg[0].addr = client->addr;
-+ msg[0].flags = client->flags;
-+ msg[0].buf = buf;
-+ msg[0].len = sizeof(buf);
-+
-+ msg[1].addr = client->addr;
-+ msg[1].flags = client->flags | I2C_M_RD;
-+ msg[1].buf = buf;
-+ msg[1].len = 1;
-+
-+ ret = i2c_transfer(client->adapter, msg, 2);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x\n",
-+ __func__, reg);
-+ return ret;
-+ }
-+
-+ *val = buf[0];
-+ return 0;
-+}
-+
-+static int ov4689_read_reg16(struct ov4689_dev *sensor, u16 reg, u16 *val)
-+{
-+ u8 hi, lo;
-+ int ret;
-+
-+ ret = ov4689_read_reg(sensor, reg, &hi);
-+ if (ret)
-+ return ret;
-+ ret = ov4689_read_reg(sensor, reg + 1, &lo);
-+ if (ret)
-+ return ret;
-+
-+ *val = ((u16)hi << 8) | (u16)lo;
-+ return 0;
-+}
-+
-+static int ov4689_write_reg16(struct ov4689_dev *sensor, u16 reg, u16 val)
-+{
-+ int ret;
-+
-+ ret = ov4689_write_reg(sensor, reg, val >> 8);
-+ if (ret)
-+ return ret;
-+
-+ return ov4689_write_reg(sensor, reg + 1, val & 0xff);
-+}
-+
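-+/*
-+ * Read-modify-write helper: only the bits set in @mask are taken from @val,
-+ * all other bits keep their current value. For example,
-+ * ov4689_mod_reg(sensor, OV4689_REG_TIMING_TC_REG21, BIT(2) | BIT(1), 0)
-+ * clears bits 1-2 of the register and leaves the remaining bits untouched.
-+ */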
-+static int ov4689_mod_reg(struct ov4689_dev *sensor, u16 reg,
-+ u8 mask, u8 val)
-+{
-+ u8 readval;
-+ int ret;
-+
-+ ret = ov4689_read_reg(sensor, reg, &readval);
-+ if (ret)
-+ return ret;
-+
-+ readval &= ~mask;
-+ val &= mask;
-+ val |= readval;
-+
-+ return ov4689_write_reg(sensor, reg, val);
-+}
-+
-+static int ov4689_set_timings(struct ov4689_dev *sensor,
-+ const struct ov4689_mode_info *mode)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_load_regs(struct ov4689_dev *sensor,
-+ const struct ov4689_mode_info *mode)
-+{
-+ const struct reg_value *regs = mode->reg_data;
-+ unsigned int i;
-+ u32 delay_ms;
-+ u16 reg_addr;
-+ u8 mask, val;
-+ int ret = 0;
-+
-+ st_info(ST_SENSOR, "%s, mode = 0x%x\n", __func__, mode->id);
-+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
-+ delay_ms = regs->delay_ms;
-+ reg_addr = regs->reg_addr;
-+ val = regs->val;
-+ mask = regs->mask;
-+
-+ if (mask)
-+ ret = ov4689_mod_reg(sensor, reg_addr, mask, val);
-+ else
-+ ret = ov4689_write_reg(sensor, reg_addr, val);
-+ if (ret)
-+ break;
-+
-+ if (delay_ms)
-+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
-+ }
-+
-+ return ov4689_set_timings(sensor, mode);
-+}
-+
-+#ifdef UNUSED_CODE
-+static int ov4689_get_exposure(struct ov4689_dev *sensor)
-+{
-+ int exp, ret;
-+ u8 temp;
-+
-+ ret = ov4689_read_reg(sensor, OV4689_REG_EXPOSURE_HI, &temp);
-+ if (ret)
-+ return ret;
-+ exp = ((int)temp & 0x0f) << 16;
-+ ret = ov4689_read_reg(sensor, OV4689_REG_EXPOSURE_MED, &temp);
-+ if (ret)
-+ return ret;
-+ exp |= ((int)temp << 8);
-+ ret = ov4689_read_reg(sensor, OV4689_REG_EXPOSURE_LO, &temp);
-+ if (ret)
-+ return ret;
-+ exp |= (int)temp;
-+
-+ return exp >> 4;
-+}
-+#endif
-+
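-+/*
-+ * Exposure is programmed across registers 0x3500[3:0] / 0x3501 / 0x3502.
-+ * The driver shifts the caller's value left by four bits before writing
-+ * (and right by four in ov4689_get_exposure()), so the register's low
-+ * nibble is treated as a fractional part.
-+ */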
-+static int ov4689_set_exposure(struct ov4689_dev *sensor, u32 exposure)
-+{
-+ int ret;
-+
-+ st_info(ST_SENSOR, "%s, exposure = 0x%x\n", __func__, exposure);
-+ exposure <<= 4;
-+
-+ ret = ov4689_write_reg(sensor,
-+ OV4689_REG_EXPOSURE_LO,
-+ exposure & 0xff);
-+ if (ret)
-+ return ret;
-+ ret = ov4689_write_reg(sensor,
-+ OV4689_REG_EXPOSURE_MED,
-+ (exposure >> 8) & 0xff);
-+ if (ret)
-+ return ret;
-+ return ov4689_write_reg(sensor,
-+ OV4689_REG_EXPOSURE_HI,
-+ (exposure >> 16) & 0x0f);
-+}
-+
-+static int ov4689_get_gain(struct ov4689_dev *sensor)
-+{
-+ u32 gain = 0;
-+ u8 val;
-+
-+ ov4689_read_reg(sensor, OV4689_REG_GAIN_H, &val);
-+ gain = (val & 0x3) << 16;
-+ ov4689_read_reg(sensor, OV4689_REG_GAIN_M, &val);
-+ gain |= val << 8;
-+ ov4689_read_reg(sensor, OV4689_REG_GAIN_L, &val);
-+ gain |= val;
-+
-+ return gain;
-+}
-+
-+static int ov4689_set_gain(struct ov4689_dev *sensor, int gain)
-+{
-+ ov4689_write_reg(sensor, OV4689_REG_GAIN_H,
-+ (gain >> 16) & 0x3);
-+ ov4689_write_reg(sensor, OV4689_REG_GAIN_M,
-+ (gain >> 8) & 0xff);
-+ ov4689_write_reg(sensor, OV4689_REG_GAIN_L,
-+ gain & 0xff);
-+ return 0;
-+}
-+
-+#ifdef UNUSED_CODE
-+static int ov4689_get_sysclk(struct ov4689_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_set_night_mode(struct ov4689_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_get_hts(struct ov4689_dev *sensor)
-+{
-+ /* read HTS from register settings */
-+ u16 hts;
-+ int ret;
-+
-+ ret = ov4689_read_reg16(sensor, OV4689_REG_TIMING_HTS, &hts);
-+ if (ret)
-+ return ret;
-+ return hts;
-+}
-+#endif
-+
-+static int ov4689_get_vts(struct ov4689_dev *sensor)
-+{
-+ u16 vts;
-+ int ret;
-+
-+ ret = ov4689_read_reg16(sensor, OV4689_REG_TIMING_VTS, &vts);
-+ if (ret)
-+ return ret;
-+ return vts;
-+}
-+
-+#ifdef UNUSED_CODE
-+static int ov4689_set_vts(struct ov4689_dev *sensor, int vts)
-+{
-+ return ov4689_write_reg16(sensor, OV4689_REG_TIMING_VTS, vts);
-+}
-+
-+static int ov4689_get_light_freq(struct ov4689_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_set_bandingfilter(struct ov4689_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_set_ae_target(struct ov4689_dev *sensor, int target)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_get_binning(struct ov4689_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_set_binning(struct ov4689_dev *sensor, bool enable)
-+{
-+ return 0;
-+}
-+#endif
-+
-+static const struct ov4689_mode_info *
-+ov4689_find_mode(struct ov4689_dev *sensor, enum ov4689_frame_rate fr,
-+ int width, int height, bool nearest)
-+{
-+ const struct ov4689_mode_info *mode;
-+
-+ mode = v4l2_find_nearest_size(ov4689_mode_data,
-+ ARRAY_SIZE(ov4689_mode_data),
-+ hact, vact,
-+ width, height);
-+
-+ if (!mode ||
-+ (!nearest && (mode->hact != width || mode->vact != height)))
-+ return NULL;
-+
-+ /* Reject the mode if the requested frame rate exceeds its maximum */
-+ if (ov4689_framerates[fr] > ov4689_framerates[mode->max_fps])
-+ return NULL;
-+
-+ return mode;
-+}
-+
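-+/*
-+ * Rough pixel rate estimate: active width * active height * frame rate.
-+ * It is used to update the pixel_rate control (see ov4689_set_fmt()) and
-+ * ignores horizontal/vertical blanking.
-+ */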
-+static u64 ov4689_calc_pixel_rate(struct ov4689_dev *sensor)
-+{
-+ u64 rate;
-+
-+ rate = sensor->current_mode->vact * sensor->current_mode->hact;
-+ rate *= ov4689_framerates[sensor->current_fr];
-+
-+ return rate;
-+}
-+
-+/*
-+ * After trying the various combinations, reading the documentation
-+ * scattered around the net, and going through the various feedback, the
-+ * clock tree is probably as follows:
-+ *
-+ * +--------------+
-+ * | Ext. Clock |
-+ * +-+------------+
-+ * | +----------+
-+ * +->| PLL1 | - reg 0x030a, bit0 for the pre-dividerp
-+ * +-+--------+ - reg 0x0300, bits 0-2 for the pre-divider
-+ * +-+--------+ - reg 0x0301~0x0302, for the multiplier
-+ * | +--------------+
-+ * +->| MIPI Divider | - reg 0x0303, bits 0-3 for the pre-divider
-+ * | +---------> MIPI PHY CLK
-+ * | +-----+
-+ * | +->| PLL1_DIV_MIPI | - reg 0x0304, bits 0-1 for the divider
-+ * | +----------------> PCLK
-+ * | +-----+
-+ *
-+ * +--------------+
-+ * | Ext. Clock |
-+ * +-+------------+
-+ * | +----------+
-+ * +->| PLL2 | - reg 0x0311, bit0 for the pre-dividerp
-+ * +-+--------+ - reg 0x030b, bits 0-2 for the pre-divider
-+ * +-+--------+ - reg 0x030c~0x030d, for the multiplier
-+ * | +--------------+
-+ * +->| SCLK Divider | - reg 0x030F, bits 0-3 for the pre-divider
-+ * +-+--------+ - reg 0x030E, bits 0-2 for the divider
-+ * | +---------> SCLK
-+ *
-+ * | +-----+
-+ * +->| DAC Divider | - reg 0x0312, bits 0-3 for the divider
-+ * | +----------------> DACCLK
-+ *
-+ */
-+
-+/*
-+ * ov4689_set_mipi_pclk() - Calculate the clock tree configuration values
-+ * for the MIPI CSI-2 output.
-+ *
-+ * @rate: The requested bandwidth per lane in bytes per second.
-+ * 'Bandwidth Per Lane' is calculated as:
-+ * bpl = HTOT * VTOT * FPS * bpp / num_lanes;
-+ *
-+ * This function uses the requested bandwidth to calculate:
-+ *
-+ * - mipi_pclk = bpl / 2; ( / 2 is for CSI-2 DDR)
-+ * - mipi_phy_clk = mipi_pclk * PLL1_DIV_MIPI;
-+ *
-+ * with these fixed parameters:
-+ * PLL1_PREDIVP = 1;
-+ * PLL1_PREDIV = 1; (MIPI_BIT_MODE == 8 ? 2 : 2,5);
-+ * PLL1_DIVM = 1;
-+ * PLL1_DIV_MIPI = 4;
-+ *
-+ * FIXME: this has been tested with a 10-bit raw, 2-lane setup only.
-+ * MIPI_DIV is fixed to 2, but it might need to change, following the
-+ * above formula, for 1-lane setups or image formats with a different bpp.
-+ *
-+ * FIXME: this deviates from the sensor manual, which is quite thin on the
-+ * MIPI clock tree generation part.
-+ */
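-+/*
-+ * Illustrative example of the formula above (hypothetical numbers, not
-+ * taken from the mode tables): HTOT = 2000, VTOT = 1200, FPS = 60,
-+ * bpp = 10 and num_lanes = 2 give
-+ * bpl = 2000 * 1200 * 60 * 10 / 2 = 720000000, hence
-+ * mipi_pclk = bpl / 2 = 360000000.
-+ */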
-+
-+#define PLL1_PREDIVP 1 // bypass
-+#define PLL1_PREDIV 1 // bypass
-+#define PLL1_DIVM 1 // bypass
-+#define PLL1_DIV_MIPI 3 // div
-+#define PLL1_DIV_MIPI_BASE 1 // div
-+
-+#define PLL1_DIVSP 1 // no use
-+#define PLL1_DIVS 1 // no use
-+
-+#define PLL2_PREDIVP 0
-+#define PLL2_PREDIV 0
-+#define PLL2_DIVSP 1
-+#define PLL2_DIVS 4
-+#define PLL2_DIVDAC 1
-+
-+#define OV4689_PLL1_PREDIVP 0x030a // bits[0]
-+#define OV4689_PLL1_PREDIV 0x0300 // bits[2:0]
-+#define OV4689_PLL1_MULTIPLIER 0x0301 // bits[9:8] 0x0302 bits[7:0]
-+#define OV4689_PLL1_DIVM 0x0303 // bits[3:0]
-+#define OV4689_PLL1_DIV_MIPI 0x0304 // bits[1:0]
-+
-+#define OV4689_PLL1_DIVSP 0x0305 //bits[1:0]
-+#define OV4689_PLL1_DIVS 0x0306 // bits[0]
-+
-+#define OV4689_PLL2_PREDIVP 0x0311 // bits[0]
-+#define OV4689_PLL2_PREDIV 0x030b // bits[2:0]
-+#define OV4689_PLL2_MULTIPLIER 0x030c // bits[9:8] 0x030d bits[7:0]
-+#define OV4689_PLL2_DIVSP 0x030f // bits[3:0]
-+#define OV4689_PLL2_DIVS 0x030e // bits[2:0]
-+#define OV4689_PLL2_DIVDAC 0x0312 // bits[3:0]
-+
-+static int ov4689_set_mipi_pclk(struct ov4689_dev *sensor,
-+ unsigned long rate)
-+{
-+ const struct ov4689_mode_info *mode = sensor->current_mode;
-+ //const struct ov4689_mode_info *orig_mode = sensor->last_mode;
-+ u8 val;
-+ int ret = 0;
-+ int fps = ov4689_framerates[sensor->current_fr];
-+ u16 htot, val16;
-+
-+ htot = mode->htot * ov4689_framerates[mode->max_fps] / fps;
-+
-+ ret = ov4689_write_reg16(sensor, OV4689_REG_TIMING_HTS, htot);
-+
-+ ret = ov4689_read_reg(sensor, OV4689_REG_TIMING_HTS, &val);
-+ val16 = val << 8;
-+ ret = ov4689_read_reg(sensor, OV4689_REG_TIMING_HTS + 1, &val);
-+ val16 |= val;
-+
-+ st_info(ST_SENSOR, "fps = %d, max_fps = %d\n", fps, mode->max_fps);
-+ st_info(ST_SENSOR, "mode->htot = 0x%x, htot = 0x%x\n", mode->htot,
-+ htot);
-+ st_info(ST_SENSOR, "reg: 0x%x = 0x%x\n", OV4689_REG_TIMING_HTS, val16);
-+
-+ return 0;
-+}
-+
-+/*
-+ * If the sensor change stays within scaling or subsampling,
-+ * switch modes directly.
-+ */
-+static int ov4689_set_mode_direct(struct ov4689_dev *sensor,
-+ const struct ov4689_mode_info *mode)
-+{
-+ if (!mode->reg_data)
-+ return -EINVAL;
-+
-+ /* Write capture setting */
-+ return ov4689_load_regs(sensor, mode);
-+}
-+
-+static int ov4689_set_mode(struct ov4689_dev *sensor)
-+{
-+ const struct ov4689_mode_info *mode = sensor->current_mode;
-+
-+ int ret = 0;
-+
-+ ret = ov4689_set_mode_direct(sensor, mode);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = ov4689_set_mipi_pclk(sensor, 0);
-+ if (ret < 0)
-+ return 0;
-+
-+ sensor->pending_mode_change = false;
-+ sensor->last_mode = mode;
-+ return 0;
-+}
-+
-+/* restore the last set video mode after chip power-on */
-+static int ov4689_restore_mode(struct ov4689_dev *sensor)
-+{
-+ int ret;
-+
-+ /* first load the initial register values */
-+ ret = ov4689_load_regs(sensor, &ov4689_mode_init_data);
-+ if (ret < 0)
-+ return ret;
-+ sensor->last_mode = &ov4689_mode_init_data;
-+
-+ /* now restore the last capture mode */
-+ ret = ov4689_set_mode(sensor);
-+ if (ret < 0)
-+ return ret;
-+
-+ return ret;
-+}
-+
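-+/*
-+ * Drive the optional power-down GPIO: enable == true de-asserts the PWDN
-+ * line (logical 0), enable == false asserts it.
-+ */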
-+static void ov4689_power(struct ov4689_dev *sensor, bool enable)
-+{
-+ if (!sensor->pwdn_gpio)
-+ return;
-+ gpiod_set_value_cansleep(sensor->pwdn_gpio, enable ? 0 : 1);
-+}
-+
-+static void ov4689_reset(struct ov4689_dev *sensor)
-+{
-+ if (!sensor->reset_gpio)
-+ return;
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
-+
-+ usleep_range(5000, 25000);
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
-+ usleep_range(1000, 2000);
-+}
-+
-+static int ov4689_set_power_on(struct device *dev)
-+{
-+ struct i2c_client *client = to_i2c_client(dev);
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ int ret;
-+
-+ ret = clk_prepare_enable(sensor->xclk);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable clock\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ ret = regulator_bulk_enable(OV4689_NUM_SUPPLIES,
-+ sensor->supplies);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable regulators\n",
-+ __func__);
-+ goto xclk_off;
-+ }
-+
-+ ov4689_reset(sensor);
-+ ov4689_power(sensor, true);
-+
-+ return 0;
-+
-+xclk_off:
-+ clk_disable_unprepare(sensor->xclk);
-+ return ret;
-+}
-+
-+static int ov4689_set_power_off(struct device *dev)
-+{
-+ struct i2c_client *client = to_i2c_client(dev);
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+
-+ ov4689_power(sensor, false);
-+ regulator_bulk_disable(OV4689_NUM_SUPPLIES, sensor->supplies);
-+ clk_disable_unprepare(sensor->xclk);
-+
-+ return 0;
-+}
-+
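-+/*
-+ * Clamp the requested frame interval to the closest supported frame rate,
-+ * then verify that a mode exists for the given size at that rate. Returns
-+ * the ov4689_frame_rate index on success or -EINVAL if no mode matches.
-+ */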
-+static int ov4689_try_frame_interval(struct ov4689_dev *sensor,
-+ struct v4l2_fract *fi,
-+ u32 width, u32 height)
-+{
-+ const struct ov4689_mode_info *mode;
-+ enum ov4689_frame_rate rate = OV4689_15_FPS;
-+ int minfps, maxfps, best_fps, fps;
-+ int i;
-+
-+ minfps = ov4689_framerates[OV4689_15_FPS];
-+ maxfps = ov4689_framerates[OV4689_NUM_FRAMERATES - 1];
-+
-+ if (fi->numerator == 0) {
-+ fi->denominator = maxfps;
-+ fi->numerator = 1;
-+ rate = OV4689_60_FPS;
-+ goto find_mode;
-+ }
-+
-+ fps = clamp_val(DIV_ROUND_CLOSEST(fi->denominator, fi->numerator),
-+ minfps, maxfps);
-+
-+ best_fps = minfps;
-+ for (i = 0; i < ARRAY_SIZE(ov4689_framerates); i++) {
-+ int curr_fps = ov4689_framerates[i];
-+
-+ if (abs(curr_fps - fps) < abs(best_fps - fps)) {
-+ best_fps = curr_fps;
-+ rate = i;
-+ }
-+ }
-+ st_info(ST_SENSOR, "best_fps = %d, fps = %d\n", best_fps, fps);
-+
-+ fi->numerator = 1;
-+ fi->denominator = best_fps;
-+
-+find_mode:
-+ mode = ov4689_find_mode(sensor, rate, width, height, false);
-+ return mode ? rate : -EINVAL;
-+}
-+
-+static int ov4689_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ if (code->pad != 0)
-+ return -EINVAL;
-+
-+ if (code->index)
-+ return -EINVAL;
-+
-+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-+ return 0;
-+}
-+
-+static int ov4689_get_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(&sensor->sd, state,
-+ format->pad);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ format->format = *fmt;
-+
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int ov4689_try_fmt_internal(struct v4l2_subdev *sd,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum ov4689_frame_rate fr,
-+ const struct ov4689_mode_info **new_mode)
-+{
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ const struct ov4689_mode_info *mode;
-+
-+ mode = ov4689_find_mode(sensor, fr, fmt->width, fmt->height, true);
-+ if (!mode)
-+ return -EINVAL;
-+ fmt->width = mode->hact;
-+ fmt->height = mode->vact;
-+
-+ if (new_mode)
-+ *new_mode = mode;
-+
-+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-+
-+ return 0;
-+}
-+
-+static int ov4689_set_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ const struct ov4689_mode_info *new_mode;
-+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
-+ struct v4l2_mbus_framefmt *fmt;
-+ int ret;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ ret = ov4689_try_fmt_internal(sd, mbus_fmt, 0, &new_mode);
-+ if (ret)
-+ goto out;
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(sd, state, 0);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ *fmt = *mbus_fmt;
-+
-+ if (new_mode != sensor->current_mode) {
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+ if (new_mode->max_fps < sensor->current_fr) {
-+ sensor->current_fr = new_mode->max_fps;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator =
-+ ov4689_framerates[sensor->current_fr];
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ ov4689_calc_pixel_rate(sensor));
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+/*
-+ * Sensor Controls.
-+ */
-+
-+static int ov4689_set_ctrl_hue(struct ov4689_dev *sensor, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov4689_set_ctrl_contrast(struct ov4689_dev *sensor, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov4689_set_ctrl_saturation(struct ov4689_dev *sensor, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int ov4689_set_ctrl_white_balance(struct ov4689_dev *sensor, int awb)
-+{
-+ struct ov4689_ctrls *ctrls = &sensor->ctrls;
-+ int ret = 0;
-+
-+ if (!awb && (ctrls->red_balance->is_new
-+ || ctrls->blue_balance->is_new)) {
-+ u16 red = (u16)ctrls->red_balance->val;
-+ u16 blue = (u16)ctrls->blue_balance->val;
-+
-+ st_info(ST_SENSOR, "red = 0x%x, blue = 0x%x\n", red, blue);
-+ ret = ov4689_write_reg16(sensor, OV4689_REG_AWB_R_GAIN, red);
-+ if (ret)
-+ return ret;
-+ ret = ov4689_write_reg16(sensor, OV4689_REG_AWB_B_GAIN, blue);
-+ }
-+ return ret;
-+}
-+
-+static int ov4689_set_ctrl_exposure(struct ov4689_dev *sensor,
-+ enum v4l2_exposure_auto_type auto_exposure)
-+{
-+ struct ov4689_ctrls *ctrls = &sensor->ctrls;
-+ bool auto_exp = (auto_exposure == V4L2_EXPOSURE_AUTO);
-+ int ret = 0;
-+
-+ if (!auto_exp && ctrls->exposure->is_new) {
-+ u16 max_exp = 0;
-+
-+ ret = ov4689_read_reg16(sensor, OV4689_REG_V_OUTPUT_SIZE,
-+ &max_exp);
-+
-+ ret = ov4689_get_vts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ max_exp += ret;
-+ ret = 0;
-+
-+ st_info(ST_SENSOR, "%s, max_exp = 0x%x\n", __func__, max_exp);
-+ if (ctrls->exposure->val < max_exp)
-+ ret = ov4689_set_exposure(sensor, ctrls->exposure->val);
-+ }
-+
-+ return ret;
-+}
-+
-+static const s64 link_freq_menu_items[] = {
-+ OV4689_LINK_FREQ_500MHZ
-+};
-+
-+static const char * const test_pattern_menu[] = {
-+ "Disabled",
-+ "Color bars",
-+ "Color bars w/ rolling bar",
-+ "Color squares",
-+ "Color squares w/ rolling bar",
-+};
-+
-+#define OV4689_TEST_ENABLE BIT(7)
-+#define OV4689_TEST_ROLLING BIT(6) /* rolling horizontal bar */
-+#define OV4689_TEST_TRANSPARENT BIT(5)
-+#define OV4689_TEST_SQUARE_BW BIT(4) /* black & white squares */
-+#define OV4689_TEST_BAR_STANDARD (0 << 2)
-+#define OV4689_TEST_BAR_DARKER_1 (1 << 2)
-+#define OV4689_TEST_BAR_DARKER_2 (2 << 2)
-+#define OV4689_TEST_BAR_DARKER_3 (3 << 2)
-+#define OV4689_TEST_BAR (0 << 0)
-+#define OV4689_TEST_RANDOM (1 << 0)
-+#define OV4689_TEST_SQUARE (2 << 0)
-+#define OV4689_TEST_BLACK (3 << 0)
-+
-+static const u8 test_pattern_val[] = {
-+ 0,
-+ OV4689_TEST_ENABLE | OV4689_TEST_BAR_STANDARD |
-+ OV4689_TEST_BAR,
-+ OV4689_TEST_ENABLE | OV4689_TEST_ROLLING |
-+ OV4689_TEST_BAR_DARKER_1 | OV4689_TEST_BAR,
-+ OV4689_TEST_ENABLE | OV4689_TEST_SQUARE,
-+ OV4689_TEST_ENABLE | OV4689_TEST_ROLLING | OV4689_TEST_SQUARE,
-+};
-+
-+static int ov4689_set_ctrl_test_pattern(struct ov4689_dev *sensor, int value)
-+{
-+ return ov4689_write_reg(sensor, OV4689_REG_TEST_PATTERN,
-+ test_pattern_val[value]);
-+}
-+
-+static int ov4689_set_ctrl_light_freq(struct ov4689_dev *sensor, int value)
-+{
-+ return 0;
-+}
-+
-+static int ov4689_set_ctrl_hflip(struct ov4689_dev *sensor, int value)
-+{
-+ /*
-+ * TIMING TC REG21:
-+ * - [2]: Digital mirror
-+ * - [1]: Array mirror
-+ */
-+ return ov4689_mod_reg(sensor, OV4689_REG_TIMING_TC_REG21,
-+ BIT(2) | BIT(1),
-+ (!(value ^ sensor->upside_down)) ?
-+ (BIT(2) | BIT(1)) : 0);
-+}
-+
-+static int ov4689_set_ctrl_vflip(struct ov4689_dev *sensor, int value)
-+{
-+ /*
-+ * TIMING TC REG20:
-+ * - [2]: Digital vflip
-+ * - [1]: Array vflip
-+ */
-+ return ov4689_mod_reg(sensor, OV4689_REG_TIMING_TC_REG20,
-+ BIT(2) | BIT(1),
-+ (value ^ sensor->upside_down) ?
-+ (BIT(2) | BIT(1)) : 0);
-+}
-+
-+static int ov4689_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ int val;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_ANALOGUE_GAIN:
-+ val = ov4689_get_gain(sensor);
-+ if (val >= 0)
-+ ctrl->val = val;
-+ break;
-+ }
-+
-+ pm_runtime_put(&sensor->i2c_client->dev);
-+
-+ return 0;
-+}
-+
-+static int ov4689_s_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ int ret;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ /*
-+ * If the device is not powered up by the host driver, do
-+ * not apply any controls to the hardware at this time. Instead,
-+ * the controls will be restored when streaming starts.
-+ */
-+ if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_ANALOGUE_GAIN:
-+ ret = ov4689_set_gain(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_EXPOSURE:
-+ ret = ov4689_set_ctrl_exposure(sensor, V4L2_EXPOSURE_MANUAL);
-+ break;
-+ case V4L2_CID_AUTO_WHITE_BALANCE:
-+ ret = ov4689_set_ctrl_white_balance(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HUE:
-+ ret = ov4689_set_ctrl_hue(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_CONTRAST:
-+ ret = ov4689_set_ctrl_contrast(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_SATURATION:
-+ ret = ov4689_set_ctrl_saturation(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN:
-+ ret = ov4689_set_ctrl_test_pattern(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_POWER_LINE_FREQUENCY:
-+ ret = ov4689_set_ctrl_light_freq(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HFLIP:
-+ ret = ov4689_set_ctrl_hflip(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_VFLIP:
-+ ret = ov4689_set_ctrl_vflip(sensor, ctrl->val);
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ pm_runtime_put(&sensor->i2c_client->dev);
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ctrl_ops ov4689_ctrl_ops = {
-+ .g_volatile_ctrl = ov4689_g_volatile_ctrl,
-+ .s_ctrl = ov4689_s_ctrl,
-+};
-+
-+static int ov4689_init_controls(struct ov4689_dev *sensor)
-+{
-+ const struct v4l2_ctrl_ops *ops = &ov4689_ctrl_ops;
-+ struct ov4689_ctrls *ctrls = &sensor->ctrls;
-+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
-+ int ret;
-+
-+ v4l2_ctrl_handler_init(hdl, 32);
-+
-+ /* we can use our own mutex for the ctrl lock */
-+ hdl->lock = &sensor->lock;
-+
-+ /* Clock related controls */
-+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
-+ 0, INT_MAX, 1,
-+ ov4689_calc_pixel_rate(sensor));
-+
-+ /* Auto/manual white balance */
-+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
-+ V4L2_CID_AUTO_WHITE_BALANCE,
-+ 0, 1, 1, 0);
-+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
-+ 0, 4095, 1, 1024);
-+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
-+ 0, 4095, 1, 1024);
-+
-+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
-+ 4, 0xfff8, 1, 0x4c00);
-+ ctrls->anal_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
-+ 0x10, 0xfff8, 1, 0x0080);
-+ ctrls->test_pattern =
-+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
-+ ARRAY_SIZE(test_pattern_menu) - 1,
-+ 0, 0, test_pattern_menu);
-+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
-+ 0, 1, 1, 0);
-+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
-+ 0, 1, 1, 0);
-+ ctrls->light_freq =
-+ v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_POWER_LINE_FREQUENCY,
-+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
-+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
-+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ,
-+ 0, 0, link_freq_menu_items);
-+ if (hdl->error) {
-+ ret = hdl->error;
-+ goto free_ctrls;
-+ }
-+
-+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ // ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+ // ctrls->anal_gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+
-+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
-+
-+ sensor->sd.ctrl_handler = hdl;
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(hdl);
-+ return ret;
-+}
-+
-+static int ov4689_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ if (fse->pad != 0)
-+ return -EINVAL;
-+ if (fse->index >= OV4689_NUM_MODES)
-+ return -EINVAL;
-+
-+ fse->min_width =
-+ ov4689_mode_data[fse->index].hact;
-+ fse->max_width = fse->min_width;
-+ fse->min_height =
-+ ov4689_mode_data[fse->index].vact;
-+ fse->max_height = fse->min_height;
-+
-+ return 0;
-+}
-+
-+static int ov4689_enum_frame_interval(
-+ struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_interval_enum *fie)
-+{
-+ //struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ struct v4l2_fract tpf;
-+ int i = 0;
-+
-+ if (fie->pad != 0)
-+ return -EINVAL;
-+ if (fie->index >= OV4689_NUM_FRAMERATES)
-+ return -EINVAL;
-+
-+ tpf.numerator = 1;
-+ tpf.denominator = ov4689_framerates[fie->index];
-+
-+ // ret = ov4689_try_frame_interval(sensor, &tpf,
-+ // fie->width, fie->height);
-+ // if (ret < 0)
-+ // return -EINVAL;
-+
-+ pr_debug("fie->width = %d, fie->height = %d\n", fie->width, fie->height);
-+ for (i = 0; i < OV4689_NUM_MODES; i++) {
-+ if (fie->width == ov4689_mode_data[i].hact &&
-+ fie->height == ov4689_mode_data[i].vact)
-+ break;
-+ }
-+ if (i == OV4689_NUM_MODES)
-+ return -ENOTTY;
-+
-+ fie->interval = tpf;
-+ fie->width = ov4689_mode_data[i].hact;
-+ fie->height = ov4689_mode_data[i].vact;
-+
-+ return 0;
-+}
-+
-+static int ov4689_g_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+
-+ mutex_lock(&sensor->lock);
-+ fi->interval = sensor->frame_interval;
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int ov4689_s_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ const struct ov4689_mode_info *mode;
-+ int frame_rate, ret = 0;
-+
-+ if (fi->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ mode = sensor->current_mode;
-+
-+ frame_rate = ov4689_try_frame_interval(sensor, &fi->interval,
-+ mode->hact, mode->vact);
-+ if (frame_rate < 0) {
-+ /* Always return a valid frame interval value */
-+ fi->interval = sensor->frame_interval;
-+ goto out;
-+ }
-+
-+ mode = ov4689_find_mode(sensor, frame_rate, mode->hact,
-+ mode->vact, true);
-+ if (!mode) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (mode != sensor->current_mode ||
-+ frame_rate != sensor->current_fr) {
-+ sensor->current_fr = frame_rate;
-+ sensor->frame_interval = fi->interval;
-+ sensor->current_mode = mode;
-+ sensor->pending_mode_change = true;
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ ov4689_calc_pixel_rate(sensor));
-+ }
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov4689_stream_start(struct ov4689_dev *sensor, int enable)
-+{
-+ return ov4689_write_reg(sensor, OV4689_REG_STREAM_ON, enable);
-+}
-+
-+static int ov4689_s_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+ int ret = 0;
-+
-+ if (enable) {
-+ pm_runtime_get_sync(&sensor->i2c_client->dev);
-+
-+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
-+ if (ret) {
-+ pm_runtime_put_sync(&sensor->i2c_client->dev);
-+ return ret;
-+ }
-+ }
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming == !enable) {
-+ if (enable) {
-+ ret = ov4689_restore_mode(sensor);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ if (enable && sensor->pending_mode_change) {
-+ ret = ov4689_set_mode(sensor);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ if (sensor->ep.bus.mipi_csi2.num_data_lanes == 2) {
-+ ov4689_write_reg(sensor, OV4689_REG_MIPI_SC_CTRL_HI, 0x32);
-+ ov4689_write_reg(sensor, OV4689_REG_MIPI_SC_CTRL_LOW, 0x0c);
-+ } else if (sensor->ep.bus.mipi_csi2.num_data_lanes == 4) {
-+ ov4689_write_reg(sensor, OV4689_REG_MIPI_SC_CTRL_HI, 0x72);
-+ ov4689_write_reg(sensor, OV4689_REG_MIPI_SC_CTRL_LOW, 0x00);
-+ } else {
-+ dev_err(&sensor->i2c_client->dev, "Unsupported number of data lanes\n");
-+ }
-+
-+ ret = ov4689_stream_start(sensor, enable);
-+ if (ret)
-+ goto out;
-+ }
-+ sensor->streaming += enable ? 1 : -1;
-+ WARN_ON(sensor->streaming < 0);
-+out:
-+ mutex_unlock(&sensor->lock);
-+
-+ if (!enable || ret)
-+ pm_runtime_put_sync(&sensor->i2c_client->dev);
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_subdev_core_ops ov4689_core_ops = {
-+ .log_status = v4l2_ctrl_subdev_log_status,
-+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops ov4689_video_ops = {
-+ .g_frame_interval = ov4689_g_frame_interval,
-+ .s_frame_interval = ov4689_s_frame_interval,
-+ .s_stream = ov4689_s_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops ov4689_pad_ops = {
-+ .enum_mbus_code = ov4689_enum_mbus_code,
-+ .get_fmt = ov4689_get_fmt,
-+ .set_fmt = ov4689_set_fmt,
-+ .enum_frame_size = ov4689_enum_frame_size,
-+ .enum_frame_interval = ov4689_enum_frame_interval,
-+};
-+
-+static const struct v4l2_subdev_ops ov4689_subdev_ops = {
-+ .core = &ov4689_core_ops,
-+ .video = &ov4689_video_ops,
-+ .pad = &ov4689_pad_ops,
-+};
-+
-+static int ov4689_get_regulators(struct ov4689_dev *sensor)
-+{
-+ int i;
-+
-+ for (i = 0; i < OV4689_NUM_SUPPLIES; i++)
-+ sensor->supplies[i].supply = ov4689_supply_name[i];
-+
-+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
-+ OV4689_NUM_SUPPLIES,
-+ sensor->supplies);
-+}
-+
-+static int ov4689_check_chip_id(struct ov4689_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ int ret = 0;
-+ u16 chip_id;
-+
-+ ret = ov4689_read_reg16(sensor, OV4689_REG_CHIP_ID, &chip_id);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to read chip identifier\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ if (chip_id != OV4689_CHIP_ID) {
-+ dev_err(&client->dev, "%s: wrong chip identifier, expected 0x%x, got 0x%x\n",
-+ __func__, OV4689_CHIP_ID, chip_id);
-+ return -ENXIO;
-+ }
-+ dev_err(&client->dev, "%s: chip identifier, got 0x%x\n",
-+ __func__, chip_id);
-+
-+ return 0;
-+}
-+
-+static int ov4689_probe(struct i2c_client *client)
-+{
-+ struct device *dev = &client->dev;
-+ struct fwnode_handle *endpoint;
-+ struct ov4689_dev *sensor;
-+ struct v4l2_mbus_framefmt *fmt;
-+ u32 rotation;
-+ int ret;
-+
-+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
-+ if (!sensor)
-+ return -ENOMEM;
-+
-+ sensor->i2c_client = client;
-+
-+ fmt = &sensor->fmt;
-+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+ fmt->width = 1920;
-+ fmt->height = 1080;
-+ fmt->field = V4L2_FIELD_NONE;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator = ov4689_framerates[OV4689_30_FPS];
-+ sensor->current_fr = OV4689_30_FPS;
-+ sensor->current_mode =
-+ &ov4689_mode_data[OV4689_MODE_1080P_1920_1080];
-+ sensor->last_mode = sensor->current_mode;
-+
-+ /* optional indication of physical rotation of sensor */
-+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
-+ &rotation);
-+ if (!ret) {
-+ switch (rotation) {
-+ case 180:
-+ sensor->upside_down = true;
-+ fallthrough;
-+ case 0:
-+ break;
-+ default:
-+ dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
-+ rotation);
-+ }
-+ }
-+
-+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
-+ NULL);
-+ if (!endpoint) {
-+ dev_err(dev, "endpoint node not found\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = v4l2_fwnode_endpoint_parse(endpoint, &sensor->ep);
-+ fwnode_handle_put(endpoint);
-+ if (ret) {
-+ dev_err(dev, "Could not parse endpoint\n");
-+ return ret;
-+ }
-+
-+ if (sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
-+ dev_err(dev, "Unsupported bus type %d\n", sensor->ep.bus_type);
-+ return -EINVAL;
-+ }
-+
-+ /* get system clock (xclk) */
-+ sensor->xclk = devm_clk_get(dev, "xclk");
-+ if (IS_ERR(sensor->xclk)) {
-+ dev_err(dev, "failed to get xclk\n");
-+ return PTR_ERR(sensor->xclk);
-+ }
-+
-+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
-+ if (sensor->xclk_freq < OV4689_XCLK_MIN ||
-+ sensor->xclk_freq > OV4689_XCLK_MAX) {
-+ dev_err(dev, "xclk frequency out of range: %d Hz\n",
-+ sensor->xclk_freq);
-+ return -EINVAL;
-+ }
-+
-+ /* request optional power down pin */
-+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->pwdn_gpio))
-+ return PTR_ERR(sensor->pwdn_gpio);
-+
-+ /* request optional reset pin */
-+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->reset_gpio))
-+ return PTR_ERR(sensor->reset_gpio);
-+
-+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov4689_subdev_ops);
-+
-+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
-+ V4L2_SUBDEV_FL_HAS_EVENTS;
-+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
-+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov4689_get_regulators(sensor);
-+ if (ret)
-+ return ret;
-+
-+ mutex_init(&sensor->lock);
-+
-+ ret = ov4689_set_power_on(dev);
-+ if (ret) {
-+ dev_err(dev, "failed to power on\n");
-+ goto entity_cleanup;
-+ }
-+
-+ ret = ov4689_check_chip_id(sensor);
-+ if (ret)
-+ goto error_power_off;
-+
-+ ret = ov4689_init_controls(sensor);
-+ if (ret)
-+ goto error_power_off;
-+
-+ ret = v4l2_async_register_subdev_sensor(&sensor->sd);
-+ if (ret)
-+ goto free_ctrls;
-+
-+ pm_runtime_set_active(dev);
-+ pm_runtime_enable(dev);
-+
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+error_power_off:
-+ ov4689_set_power_off(dev);
-+entity_cleanup:
-+ media_entity_cleanup(&sensor->sd.entity);
-+ mutex_destroy(&sensor->lock);
-+ return ret;
-+}
-+
-+static void ov4689_remove(struct i2c_client *client)
-+{
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct ov4689_dev *sensor = to_ov4689_dev(sd);
-+
-+ v4l2_async_unregister_subdev(&sensor->sd);
-+ media_entity_cleanup(&sensor->sd.entity);
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+ mutex_destroy(&sensor->lock);
-+
-+ pm_runtime_disable(&client->dev);
-+ if (!pm_runtime_status_suspended(&client->dev))
-+ ov4689_set_power_off(&client->dev);
-+ pm_runtime_set_suspended(&client->dev);
-+}
-+
-+static const struct i2c_device_id ov4689_id[] = {
-+ { "ov4689", 0 },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(i2c, ov4689_id);
-+
-+static const struct of_device_id ov4689_dt_ids[] = {
-+ { .compatible = "ovti,ov4689" },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, ov4689_dt_ids);
-+
-+static const struct dev_pm_ops ov4689_pm_ops = {
-+ SET_RUNTIME_PM_OPS(ov4689_set_power_off, ov4689_set_power_on, NULL)
-+};
-+
-+static struct i2c_driver ov4689_i2c_driver = {
-+ .driver = {
-+ .name = "ov4689",
-+ .of_match_table = ov4689_dt_ids,
-+ .pm = &ov4689_pm_ops,
-+ },
-+ .id_table = ov4689_id,
-+ .probe = ov4689_probe,
-+ .remove = ov4689_remove,
-+};
-+
-+module_i2c_driver(ov4689_i2c_driver);
-+
-+MODULE_DESCRIPTION("OV4689 MIPI Camera Subdev Driver");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/ov5640.c
-@@ -0,0 +1,3227 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
-+ * Copyright (C) 2014-2017 Mentor Graphics Inc.
-+ *
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/clk-provider.h>
-+#include <linux/clkdev.h>
-+#include <linux/ctype.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/gpio/consumer.h>
-+#include <linux/i2c.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/of_device.h>
-+#include <linux/regulator/consumer.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+#include "stfcamss.h"
-+
-+/* min/max system clock (xclk) frequencies */
-+#define OV5640_XCLK_MIN 6000000
-+#define OV5640_XCLK_MAX 54000000
-+
-+#define OV5640_SKIP_FRAMES 4
-+
-+#define OV5640_CHIP_ID 0x5640
-+#define OV5640_DEFAULT_SLAVE_ID 0x3c
-+
-+#define OV5640_REG_SYS_RESET02 0x3002
-+#define OV5640_REG_SYS_CLOCK_ENABLE02 0x3006
-+#define OV5640_REG_SYS_CTRL0 0x3008
-+#define OV5640_REG_SYS_CTRL0_SW_PWDN 0x42
-+#define OV5640_REG_SYS_CTRL0_SW_PWUP 0x02
-+#define OV5640_REG_CHIP_ID 0x300a
-+#define OV5640_REG_IO_MIPI_CTRL00 0x300e
-+#define OV5640_REG_PAD_OUTPUT_ENABLE01 0x3017
-+#define OV5640_REG_PAD_OUTPUT_ENABLE02 0x3018
-+#define OV5640_REG_PAD_OUTPUT00 0x3019
-+#define OV5640_REG_SYSTEM_CONTROL1 0x302e
-+#define OV5640_REG_SC_PLL_CTRL0 0x3034
-+#define OV5640_REG_SC_PLL_CTRL1 0x3035
-+#define OV5640_REG_SC_PLL_CTRL2 0x3036
-+#define OV5640_REG_SC_PLL_CTRL3 0x3037
-+#define OV5640_REG_SLAVE_ID 0x3100
-+#define OV5640_REG_SCCB_SYS_CTRL1 0x3103
-+#define OV5640_REG_SYS_ROOT_DIVIDER 0x3108
-+#define OV5640_REG_AWB_R_GAIN 0x3400
-+#define OV5640_REG_AWB_G_GAIN 0x3402
-+#define OV5640_REG_AWB_B_GAIN 0x3404
-+#define OV5640_REG_AWB_MANUAL_CTRL 0x3406
-+#define OV5640_REG_AEC_PK_EXPOSURE_HI 0x3500
-+#define OV5640_REG_AEC_PK_EXPOSURE_MED 0x3501
-+#define OV5640_REG_AEC_PK_EXPOSURE_LO 0x3502
-+#define OV5640_REG_AEC_PK_MANUAL 0x3503
-+#define OV5640_REG_AEC_PK_REAL_GAIN 0x350a
-+#define OV5640_REG_AEC_PK_VTS 0x350c
-+#define OV5640_REG_TIMING_DVPHO 0x3808
-+#define OV5640_REG_TIMING_DVPVO 0x380a
-+#define OV5640_REG_TIMING_HTS 0x380c
-+#define OV5640_REG_TIMING_VTS 0x380e
-+#define OV5640_REG_TIMING_TC_REG20 0x3820
-+#define OV5640_REG_TIMING_TC_REG21 0x3821
-+#define OV5640_REG_AEC_CTRL00 0x3a00
-+#define OV5640_REG_AEC_B50_STEP 0x3a08
-+#define OV5640_REG_AEC_B60_STEP 0x3a0a
-+#define OV5640_REG_AEC_CTRL0D 0x3a0d
-+#define OV5640_REG_AEC_CTRL0E 0x3a0e
-+#define OV5640_REG_AEC_CTRL0F 0x3a0f
-+#define OV5640_REG_AEC_CTRL10 0x3a10
-+#define OV5640_REG_AEC_CTRL11 0x3a11
-+#define OV5640_REG_AEC_CTRL1B 0x3a1b
-+#define OV5640_REG_AEC_CTRL1E 0x3a1e
-+#define OV5640_REG_AEC_CTRL1F 0x3a1f
-+#define OV5640_REG_HZ5060_CTRL00 0x3c00
-+#define OV5640_REG_HZ5060_CTRL01 0x3c01
-+#define OV5640_REG_SIGMADELTA_CTRL0C 0x3c0c
-+#define OV5640_REG_FRAME_CTRL01 0x4202
-+#define OV5640_REG_FORMAT_CONTROL00 0x4300
-+#define OV5640_REG_VFIFO_HSIZE 0x4602
-+#define OV5640_REG_VFIFO_VSIZE 0x4604
-+#define OV5640_REG_JPG_MODE_SELECT 0x4713
-+#define OV5640_REG_CCIR656_CTRL00 0x4730
-+#define OV5640_REG_POLARITY_CTRL00 0x4740
-+#define OV5640_REG_MIPI_CTRL00 0x4800
-+#define OV5640_REG_DEBUG_MODE 0x4814
-+#define OV5640_REG_ISP_FORMAT_MUX_CTRL 0x501f
-+#define OV5640_REG_PRE_ISP_TEST_SET1 0x503d
-+#define OV5640_REG_SDE_CTRL0 0x5580
-+#define OV5640_REG_SDE_CTRL1 0x5581
-+#define OV5640_REG_SDE_CTRL3 0x5583
-+#define OV5640_REG_SDE_CTRL4 0x5584
-+#define OV5640_REG_SDE_CTRL5 0x5585
-+#define OV5640_REG_AVG_READOUT 0x56a1
-+
-+enum ov5640_mode_id {
-+ OV5640_MODE_QCIF_176_144 = 0,
-+ OV5640_MODE_QVGA_320_240,
-+ OV5640_MODE_VGA_640_480,
-+ OV5640_MODE_NTSC_720_480,
-+ OV5640_MODE_PAL_720_576,
-+ OV5640_MODE_XGA_1024_768,
-+ OV5640_MODE_720P_1280_720,
-+ OV5640_MODE_1080P_1920_1080,
-+ OV5640_MODE_QSXGA_2592_1944,
-+ OV5640_NUM_MODES,
-+};
-+
-+enum ov5640_frame_rate {
-+ OV5640_15_FPS = 0,
-+ OV5640_30_FPS,
-+ OV5640_60_FPS,
-+ OV5640_NUM_FRAMERATES,
-+};
-+
-+enum ov5640_format_mux {
-+ OV5640_FMT_MUX_YUV422 = 0,
-+ OV5640_FMT_MUX_RGB,
-+ OV5640_FMT_MUX_DITHER,
-+ OV5640_FMT_MUX_RAW_DPC,
-+ OV5640_FMT_MUX_SNR_RAW,
-+ OV5640_FMT_MUX_RAW_CIP,
-+};
-+
-+struct ov5640_pixfmt {
-+ u32 code;
-+ u32 colorspace;
-+};
-+
-+static const struct ov5640_pixfmt ov5640_formats[] = {
-+ { MEDIA_BUS_FMT_JPEG_1X8, V4L2_COLORSPACE_JPEG, },
-+ { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_COLORSPACE_SRGB, },
-+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_COLORSPACE_SRGB, },
-+};
-+
-+/*
-+ * FIXME: remove this when a subdev API becomes available
-+ * to set the MIPI CSI-2 virtual channel.
-+ */
-+static unsigned int virtual_channel;
-+module_param(virtual_channel, uint, 0444);
-+MODULE_PARM_DESC(virtual_channel,
-+ "MIPI CSI-2 virtual channel (0..3), default 0");
-+
-+static const int ov5640_framerates[] = {
-+ [OV5640_15_FPS] = 15,
-+ [OV5640_30_FPS] = 30,
-+ [OV5640_60_FPS] = 60,
-+};
-+
-+/* regulator supplies */
-+static const char * const ov5640_supply_name[] = {
-+ "DOVDD", /* Digital I/O (1.8V) supply */
-+ "AVDD", /* Analog (2.8V) supply */
-+ "DVDD", /* Digital Core (1.5V) supply */
-+};
-+
-+#define OV5640_NUM_SUPPLIES ARRAY_SIZE(ov5640_supply_name)
-+
-+/*
-+ * Image sizes under 1280 * 960 use SUBSAMPLING
-+ * Image sizes above 1280 * 960 use SCALING
-+ */
-+enum ov5640_downsize_mode {
-+ SUBSAMPLING,
-+ SCALING,
-+};
-+
-+struct reg_value {
-+ u16 reg_addr;
-+ u8 val;
-+ u8 mask;
-+ u32 delay_ms;
-+};
-+
-+struct ov5640_mode_info {
-+ enum ov5640_mode_id id;
-+ enum ov5640_downsize_mode dn_mode;
-+ u32 hact;
-+ u32 htot;
-+ u32 vact;
-+ u32 vtot;
-+ const struct reg_value *reg_data;
-+ u32 reg_data_size;
-+ u32 max_fps;
-+};
-+
-+struct ov5640_ctrls {
-+ struct v4l2_ctrl_handler handler;
-+ struct v4l2_ctrl *pixel_rate;
-+ struct {
-+ struct v4l2_ctrl *auto_exp;
-+ struct v4l2_ctrl *exposure;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_wb;
-+ struct v4l2_ctrl *blue_balance;
-+ struct v4l2_ctrl *red_balance;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_gain;
-+ struct v4l2_ctrl *gain;
-+ };
-+ struct v4l2_ctrl *brightness;
-+ struct v4l2_ctrl *light_freq;
-+ struct v4l2_ctrl *saturation;
-+ struct v4l2_ctrl *contrast;
-+ struct v4l2_ctrl *hue;
-+ struct v4l2_ctrl *test_pattern;
-+ struct v4l2_ctrl *hflip;
-+ struct v4l2_ctrl *vflip;
-+};
-+
-+struct ov5640_dev {
-+ struct i2c_client *i2c_client;
-+ struct v4l2_subdev sd;
-+ struct media_pad pad;
-+ struct v4l2_fwnode_endpoint ep; /* the parsed DT endpoint info */
-+ struct clk *xclk; /* system clock to OV5640 */
-+ u32 xclk_freq;
-+
-+ struct regulator_bulk_data supplies[OV5640_NUM_SUPPLIES];
-+ struct gpio_desc *reset_gpio;
-+ struct gpio_desc *pwdn_gpio;
-+ bool upside_down;
-+
-+ /* lock to protect all members below */
-+ struct mutex lock;
-+
-+ int power_count;
-+
-+ struct v4l2_mbus_framefmt fmt;
-+ bool pending_fmt_change;
-+
-+ const struct ov5640_mode_info *current_mode;
-+ const struct ov5640_mode_info *last_mode;
-+ enum ov5640_frame_rate current_fr;
-+ struct v4l2_fract frame_interval;
-+
-+ struct ov5640_ctrls ctrls;
-+
-+ u32 prev_sysclk, prev_hts;
-+ u32 ae_low, ae_high, ae_target;
-+
-+ bool pending_mode_change;
-+ int streaming;
-+};
-+
-+static inline struct ov5640_dev *to_ov5640_dev(struct v4l2_subdev *sd)
-+{
-+ return container_of(sd, struct ov5640_dev, sd);
-+}
-+
-+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-+{
-+ return &container_of(ctrl->handler, struct ov5640_dev,
-+ ctrls.handler)->sd;
-+}
-+
-+/*
-+ * FIXME: all of these register tables are likely filled with
-+ * entries that set registers to their power-on default values,
-+ * and which are otherwise not touched by this driver. Those entries
-+ * should be identified and removed to speed register load time
-+ * over i2c.
-+ */
-+/* YUV422 UYVY VGA@30fps */
-+static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
-+ {0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
-+ {0x3103, 0x03, 0, 0}, {0x3630, 0x36, 0, 0},
-+ {0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
-+ {0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
-+ {0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
-+ {0x3705, 0x1a, 0, 0}, {0x3905, 0x02, 0, 0}, {0x3906, 0x10, 0, 0},
-+ {0x3901, 0x0a, 0, 0}, {0x3731, 0x12, 0, 0}, {0x3600, 0x08, 0, 0},
-+ {0x3601, 0x33, 0, 0}, {0x302d, 0x60, 0, 0}, {0x3620, 0x52, 0, 0},
-+ {0x371b, 0x20, 0, 0}, {0x471c, 0x50, 0, 0}, {0x3a13, 0x43, 0, 0},
-+ {0x3a18, 0x00, 0, 0}, {0x3a19, 0xf8, 0, 0}, {0x3635, 0x13, 0, 0},
-+ {0x3636, 0x03, 0, 0}, {0x3634, 0x40, 0, 0}, {0x3622, 0x01, 0, 0},
-+ {0x3c01, 0xa4, 0, 0}, {0x3c04, 0x28, 0, 0}, {0x3c05, 0x98, 0, 0},
-+ {0x3c06, 0x00, 0, 0}, {0x3c07, 0x08, 0, 0}, {0x3c08, 0x00, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3820, 0x41, 0, 0}, {0x3821, 0x07, 0, 0}, {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
-+ {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
-+ {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
-+ {0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
-+ {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
-+ {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
-+ {0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
-+ {0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
-+ {0x5187, 0x09, 0, 0}, {0x5188, 0x09, 0, 0}, {0x5189, 0x88, 0, 0},
-+ {0x518a, 0x54, 0, 0}, {0x518b, 0xee, 0, 0}, {0x518c, 0xb2, 0, 0},
-+ {0x518d, 0x50, 0, 0}, {0x518e, 0x34, 0, 0}, {0x518f, 0x6b, 0, 0},
-+ {0x5190, 0x46, 0, 0}, {0x5191, 0xf8, 0, 0}, {0x5192, 0x04, 0, 0},
-+ {0x5193, 0x70, 0, 0}, {0x5194, 0xf0, 0, 0}, {0x5195, 0xf0, 0, 0},
-+ {0x5196, 0x03, 0, 0}, {0x5197, 0x01, 0, 0}, {0x5198, 0x04, 0, 0},
-+ {0x5199, 0x6c, 0, 0}, {0x519a, 0x04, 0, 0}, {0x519b, 0x00, 0, 0},
-+ {0x519c, 0x09, 0, 0}, {0x519d, 0x2b, 0, 0}, {0x519e, 0x38, 0, 0},
-+ {0x5381, 0x1e, 0, 0}, {0x5382, 0x5b, 0, 0}, {0x5383, 0x08, 0, 0},
-+ {0x5384, 0x0a, 0, 0}, {0x5385, 0x7e, 0, 0}, {0x5386, 0x88, 0, 0},
-+ {0x5387, 0x7c, 0, 0}, {0x5388, 0x6c, 0, 0}, {0x5389, 0x10, 0, 0},
-+ {0x538a, 0x01, 0, 0}, {0x538b, 0x98, 0, 0}, {0x5300, 0x08, 0, 0},
-+ {0x5301, 0x30, 0, 0}, {0x5302, 0x10, 0, 0}, {0x5303, 0x00, 0, 0},
-+ {0x5304, 0x08, 0, 0}, {0x5305, 0x30, 0, 0}, {0x5306, 0x08, 0, 0},
-+ {0x5307, 0x16, 0, 0}, {0x5309, 0x08, 0, 0}, {0x530a, 0x30, 0, 0},
-+ {0x530b, 0x04, 0, 0}, {0x530c, 0x06, 0, 0}, {0x5480, 0x01, 0, 0},
-+ {0x5481, 0x08, 0, 0}, {0x5482, 0x14, 0, 0}, {0x5483, 0x28, 0, 0},
-+ {0x5484, 0x51, 0, 0}, {0x5485, 0x65, 0, 0}, {0x5486, 0x71, 0, 0},
-+ {0x5487, 0x7d, 0, 0}, {0x5488, 0x87, 0, 0}, {0x5489, 0x91, 0, 0},
-+ {0x548a, 0x9a, 0, 0}, {0x548b, 0xaa, 0, 0}, {0x548c, 0xb8, 0, 0},
-+ {0x548d, 0xcd, 0, 0}, {0x548e, 0xdd, 0, 0}, {0x548f, 0xea, 0, 0},
-+ {0x5490, 0x1d, 0, 0}, {0x5580, 0x02, 0, 0}, {0x5583, 0x40, 0, 0},
-+ {0x5584, 0x10, 0, 0}, {0x5589, 0x10, 0, 0}, {0x558a, 0x00, 0, 0},
-+ {0x558b, 0xf8, 0, 0}, {0x5800, 0x23, 0, 0}, {0x5801, 0x14, 0, 0},
-+ {0x5802, 0x0f, 0, 0}, {0x5803, 0x0f, 0, 0}, {0x5804, 0x12, 0, 0},
-+ {0x5805, 0x26, 0, 0}, {0x5806, 0x0c, 0, 0}, {0x5807, 0x08, 0, 0},
-+ {0x5808, 0x05, 0, 0}, {0x5809, 0x05, 0, 0}, {0x580a, 0x08, 0, 0},
-+ {0x580b, 0x0d, 0, 0}, {0x580c, 0x08, 0, 0}, {0x580d, 0x03, 0, 0},
-+ {0x580e, 0x00, 0, 0}, {0x580f, 0x00, 0, 0}, {0x5810, 0x03, 0, 0},
-+ {0x5811, 0x09, 0, 0}, {0x5812, 0x07, 0, 0}, {0x5813, 0x03, 0, 0},
-+ {0x5814, 0x00, 0, 0}, {0x5815, 0x01, 0, 0}, {0x5816, 0x03, 0, 0},
-+ {0x5817, 0x08, 0, 0}, {0x5818, 0x0d, 0, 0}, {0x5819, 0x08, 0, 0},
-+ {0x581a, 0x05, 0, 0}, {0x581b, 0x06, 0, 0}, {0x581c, 0x08, 0, 0},
-+ {0x581d, 0x0e, 0, 0}, {0x581e, 0x29, 0, 0}, {0x581f, 0x17, 0, 0},
-+ {0x5820, 0x11, 0, 0}, {0x5821, 0x11, 0, 0}, {0x5822, 0x15, 0, 0},
-+ {0x5823, 0x28, 0, 0}, {0x5824, 0x46, 0, 0}, {0x5825, 0x26, 0, 0},
-+ {0x5826, 0x08, 0, 0}, {0x5827, 0x26, 0, 0}, {0x5828, 0x64, 0, 0},
-+ {0x5829, 0x26, 0, 0}, {0x582a, 0x24, 0, 0}, {0x582b, 0x22, 0, 0},
-+ {0x582c, 0x24, 0, 0}, {0x582d, 0x24, 0, 0}, {0x582e, 0x06, 0, 0},
-+ {0x582f, 0x22, 0, 0}, {0x5830, 0x40, 0, 0}, {0x5831, 0x42, 0, 0},
-+ {0x5832, 0x24, 0, 0}, {0x5833, 0x26, 0, 0}, {0x5834, 0x24, 0, 0},
-+ {0x5835, 0x22, 0, 0}, {0x5836, 0x22, 0, 0}, {0x5837, 0x26, 0, 0},
-+ {0x5838, 0x44, 0, 0}, {0x5839, 0x24, 0, 0}, {0x583a, 0x26, 0, 0},
-+ {0x583b, 0x28, 0, 0}, {0x583c, 0x42, 0, 0}, {0x583d, 0xce, 0, 0},
-+ {0x5025, 0x00, 0, 0}, {0x3a0f, 0x30, 0, 0}, {0x3a10, 0x28, 0, 0},
-+ {0x3a1b, 0x30, 0, 0}, {0x3a1e, 0x26, 0, 0}, {0x3a11, 0x60, 0, 0},
-+ {0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
-+};
-+
-+static const struct reg_value ov5640_setting_VGA_640_480[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_XGA_1024_768[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_QVGA_320_240[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_QCIF_176_144[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_NTSC_720_480[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_PAL_720_576[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_720P_1280_720[] = {
-+ {0x3c07, 0x07, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x31, 0, 0},
-+ {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
-+ {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
-+ {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
-+ {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
-+ {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
-+ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x11, 0, 0},
-+ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
-+ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
-+ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
-+ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0},
-+ {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
-+ {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
-+ {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0},
-+ {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
-+ {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
-+ {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
-+ {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
-+ {0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0},
-+ {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
-+ {0x4005, 0x1a, 0, 0},
-+};
-+
-+static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
-+ {0x3c07, 0x08, 0, 0},
-+ {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
-+ {0x3814, 0x11, 0, 0},
-+ {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
-+ {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
-+ {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
-+ {0x3810, 0x00, 0, 0},
-+ {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
-+ {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
-+ {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
-+ {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
-+ {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
-+ {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
-+ {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0},
-+ {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
-+ {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 70},
-+};
-+
-+/* power-on sensor init reg table */
-+static const struct ov5640_mode_info ov5640_mode_init_data = {
-+ 0, SUBSAMPLING, 640, 1896, 480, 984,
-+ ov5640_init_setting_30fps_VGA,
-+ ARRAY_SIZE(ov5640_init_setting_30fps_VGA),
-+ OV5640_30_FPS,
-+};
-+
-+static const struct ov5640_mode_info
-+ov5640_mode_data[OV5640_NUM_MODES] = {
-+ {OV5640_MODE_QCIF_176_144, SUBSAMPLING,
-+ 176, 1896, 144, 984,
-+ ov5640_setting_QCIF_176_144,
-+ ARRAY_SIZE(ov5640_setting_QCIF_176_144),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_QVGA_320_240, SUBSAMPLING,
-+ 320, 1896, 240, 984,
-+ ov5640_setting_QVGA_320_240,
-+ ARRAY_SIZE(ov5640_setting_QVGA_320_240),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_VGA_640_480, SUBSAMPLING,
-+ 640, 1896, 480, 1080,
-+ ov5640_setting_VGA_640_480,
-+ ARRAY_SIZE(ov5640_setting_VGA_640_480),
-+ OV5640_60_FPS},
-+ {OV5640_MODE_NTSC_720_480, SUBSAMPLING,
-+ 720, 1896, 480, 984,
-+ ov5640_setting_NTSC_720_480,
-+ ARRAY_SIZE(ov5640_setting_NTSC_720_480),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_PAL_720_576, SUBSAMPLING,
-+ 720, 1896, 576, 984,
-+ ov5640_setting_PAL_720_576,
-+ ARRAY_SIZE(ov5640_setting_PAL_720_576),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_XGA_1024_768, SUBSAMPLING,
-+ 1024, 1896, 768, 1080,
-+ ov5640_setting_XGA_1024_768,
-+ ARRAY_SIZE(ov5640_setting_XGA_1024_768),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_720P_1280_720, SUBSAMPLING,
-+ 1280, 1892, 720, 740,
-+ ov5640_setting_720P_1280_720,
-+ ARRAY_SIZE(ov5640_setting_720P_1280_720),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_1080P_1920_1080, SCALING,
-+ 1920, 2500, 1080, 1120,
-+ ov5640_setting_1080P_1920_1080,
-+ ARRAY_SIZE(ov5640_setting_1080P_1920_1080),
-+ OV5640_30_FPS},
-+ {OV5640_MODE_QSXGA_2592_1944, SCALING,
-+ 2592, 2844, 1944, 1968,
-+ ov5640_setting_QSXGA_2592_1944,
-+ ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944),
-+ OV5640_15_FPS},
-+};
-+
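-+/*
-+ * If the sensor is wired for a non-default I2C address, program that
-+ * address into OV5640_REG_SLAVE_ID while still addressing the chip at
-+ * its default address 0x3c.
-+ */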
-+static int ov5640_init_slave_id(struct ov5640_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg;
-+ u8 buf[3];
-+ int ret;
-+
-+ if (client->addr == OV5640_DEFAULT_SLAVE_ID)
-+ return 0;
-+
-+ buf[0] = OV5640_REG_SLAVE_ID >> 8;
-+ buf[1] = OV5640_REG_SLAVE_ID & 0xff;
-+ buf[2] = client->addr << 1;
-+
-+ msg.addr = OV5640_DEFAULT_SLAVE_ID;
-+ msg.flags = 0;
-+ msg.buf = buf;
-+ msg.len = sizeof(buf);
-+
-+ ret = i2c_transfer(client->adapter, &msg, 1);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: failed with %d\n", __func__, ret);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
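-+/*
-+ * Register access helpers: OV5640 registers use 16-bit addresses, sent
-+ * MSB first, followed by the 8-bit register value.
-+ */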
-+static int ov5640_write_reg(struct ov5640_dev *sensor, u16 reg, u8 val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg;
-+ u8 buf[3];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+ buf[2] = val;
-+
-+ msg.addr = client->addr;
-+ msg.flags = client->flags;
-+ msg.buf = buf;
-+ msg.len = sizeof(buf);
-+
-+ ret = i2c_transfer(client->adapter, &msg, 1);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x, val=%x\n",
-+ __func__, reg, val);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ov5640_read_reg(struct ov5640_dev *sensor, u16 reg, u8 *val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg[2];
-+ u8 buf[2];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+
-+ msg[0].addr = client->addr;
-+ msg[0].flags = client->flags;
-+ msg[0].buf = buf;
-+ msg[0].len = sizeof(buf);
-+
-+ msg[1].addr = client->addr;
-+ msg[1].flags = client->flags | I2C_M_RD;
-+ msg[1].buf = buf;
-+ msg[1].len = 1;
-+
-+ ret = i2c_transfer(client->adapter, msg, 2);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x\n",
-+ __func__, reg);
-+ return ret;
-+ }
-+
-+ *val = buf[0];
-+ return 0;
-+}
-+
-+static int ov5640_read_reg16(struct ov5640_dev *sensor, u16 reg, u16 *val)
-+{
-+ u8 hi, lo;
-+ int ret;
-+
-+ ret = ov5640_read_reg(sensor, reg, &hi);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_read_reg(sensor, reg + 1, &lo);
-+ if (ret)
-+ return ret;
-+
-+ *val = ((u16)hi << 8) | (u16)lo;
-+ return 0;
-+}
-+
-+static int ov5640_write_reg16(struct ov5640_dev *sensor, u16 reg, u16 val)
-+{
-+ int ret;
-+
-+ ret = ov5640_write_reg(sensor, reg, val >> 8);
-+ if (ret)
-+ return ret;
-+
-+ return ov5640_write_reg(sensor, reg + 1, val & 0xff);
-+}
-+
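-+/* Read-modify-write: update only the bits selected by mask with val. */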
-+static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
-+ u8 mask, u8 val)
-+{
-+ u8 readval;
-+ int ret;
-+
-+ ret = ov5640_read_reg(sensor, reg, &readval);
-+ if (ret)
-+ return ret;
-+
-+ readval &= ~mask;
-+ val &= mask;
-+ val |= readval;
-+
-+ return ov5640_write_reg(sensor, reg, val);
-+}
-+
-+/*
-+ * After trying the various combinations, reading the various pieces of
-+ * documentation spread around the net, and gathering feedback, the
-+ * clock tree is probably as follows:
-+ *
-+ * +--------------+
-+ * | Ext. Clock |
-+ * +-+------------+
-+ * | +----------+
-+ * +->| PLL1 | - reg 0x3036, for the multiplier
-+ * +-+--------+ - reg 0x3037, bits 0-3 for the pre-divider
-+ * | +--------------+
-+ * +->| System Clock | - reg 0x3035, bits 4-7
-+ * +-+------------+
-+ * | +--------------+
-+ * +->| MIPI Divider | - reg 0x3035, bits 0-3
-+ * | +-+------------+
-+ * | +----------------> MIPI SCLK
-+ * | + +-----+
-+ * | +->| / 2 |-------> MIPI BIT CLK
-+ * | +-----+
-+ * | +--------------+
-+ * +->| PLL Root Div | - reg 0x3037, bit 4
-+ * +-+------------+
-+ * | +---------+
-+ * +->| Bit Div | - reg 0x3034, bits 0-3
-+ * +-+-------+
-+ * | +-------------+
-+ * +->| SCLK Div | - reg 0x3108, bits 0-1
-+ * | +-+-----------+
-+ * | +---------------> SCLK
-+ * | +-------------+
-+ * +->| SCLK 2X Div | - reg 0x3108, bits 2-3
-+ * | +-+-----------+
-+ * | +---------------> SCLK 2X
-+ * | +-------------+
-+ * +->| PCLK Div | - reg 0x3108, bits 4-5
-+ * ++------------+
-+ * + +-----------+
-+ * +->| P_DIV | - reg 0x3035, bits 0-3
-+ * +-----+-----+
-+ * +------------> PCLK
-+ *
-+ * This deviates from the datasheet at least for register 0x3108,
-+ * since the datasheet says the PCLK would be clocked from the PLL.
-+ *
-+ * There also seem to be (unverified) constraints:
-+ * - the PLL pre-divider output rate should be in the 4-27MHz range
-+ * - the PLL multiplier output rate should be in the 500-1000MHz range
-+ * - PCLK >= SCLK * 2 in YUV, >= SCLK in Raw or JPEG
-+ *
-+ * In the two latter cases, these constraints are met since our
-+ * factors are hardcoded. If we were to change that, we would need to
-+ * take this into account. The only varying parts are the PLL
-+ * multiplier and the system clock divider, which are shared between
-+ * all these clocks so won't cause any issue.
-+ */
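-+
-+/*
-+ * Worked example, for illustration only (the 24 MHz xclk, multiplier of
-+ * 84 and system divider of 2 are assumed values; the other factors are
-+ * the hardcoded ones defined below): a 24 MHz external clock with a
-+ * pre-divider of 3 gives 8 MHz at the PLL input (within the 4-27 MHz
-+ * range); a multiplier of 84 gives a 672 MHz PLL output (within the
-+ * 500-1000 MHz range); a system divider of 2 gives a 336 MHz system
-+ * clock; the PLL root divider (2), bit divider (2) and PCLK divider (1)
-+ * then yield an 84 MHz pixel clock.
-+ */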
-+
-+/*
-+ * This is supposed to range from 1 to 8, but the value is always
-+ * set to 3 in the vendor kernels.
-+ */
-+#define OV5640_PLL_PREDIV 3
-+
-+#define OV5640_PLL_MULT_MIN 4
-+#define OV5640_PLL_MULT_MAX 252
-+
-+/*
-+ * This is supposed to range from 1 to 16, but the value is
-+ * always set to either 1 or 2 in the vendor kernels.
-+ */
-+#define OV5640_SYSDIV_MIN 1
-+#define OV5640_SYSDIV_MAX 16
-+
-+/*
-+ * Hardcode these values for scaler and non-scaler modes.
-+ * FIXME: to be re-calculated for 1 data lane setups
-+ */
-+#define OV5640_MIPI_DIV_PCLK 2
-+#define OV5640_MIPI_DIV_SCLK 1
-+
-+/*
-+ * This is supposed to range from 1 to 2, but the value is always
-+ * set to 2 in the vendor kernels.
-+ */
-+#define OV5640_PLL_ROOT_DIV 2
-+#define OV5640_PLL_CTRL3_PLL_ROOT_DIV_2 BIT(4)
-+
-+/*
-+ * We only support 8-bit formats at the moment
-+ */
-+#define OV5640_BIT_DIV 2
-+#define OV5640_PLL_CTRL0_MIPI_MODE_8BIT 0x08
-+
-+/*
-+ * This is supposed to range from 1 to 8, but the value is always
-+ * set to 2 in the vendor kernels.
-+ */
-+#define OV5640_SCLK_ROOT_DIV 2
-+
-+/*
-+ * This is hardcoded so that consistency is maintained between SCLK and
-+ * SCLK 2x.
-+ */
-+#define OV5640_SCLK2X_ROOT_DIV (OV5640_SCLK_ROOT_DIV / 2)
-+
-+/*
-+ * This is supposed to range from 1 to 8, but the value is always
-+ * set to 1 in the vendor kernels.
-+ */
-+#define OV5640_PCLK_ROOT_DIV 1
-+#define OV5640_PLL_SYS_ROOT_DIVIDER_BYPASS 0x00
-+
-+static unsigned long ov5640_compute_sys_clk(struct ov5640_dev *sensor,
-+ u8 pll_prediv, u8 pll_mult,
-+ u8 sysdiv)
-+{
-+ unsigned long sysclk = sensor->xclk_freq / pll_prediv * pll_mult;
-+
-+ /* PLL1 output cannot exceed 1GHz. */
-+ if (sysclk / 1000000 > 1000)
-+ return 0;
-+
-+ return sysclk / sysdiv;
-+}
-+
-+static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor,
-+ unsigned long rate,
-+ u8 *pll_prediv, u8 *pll_mult,
-+ u8 *sysdiv)
-+{
-+ unsigned long best = ~0;
-+ u8 best_sysdiv = 1, best_mult = 1;
-+ u8 _sysdiv, _pll_mult;
-+
-+ for (_sysdiv = OV5640_SYSDIV_MIN;
-+ _sysdiv <= OV5640_SYSDIV_MAX;
-+ _sysdiv++) {
-+ for (_pll_mult = OV5640_PLL_MULT_MIN;
-+ _pll_mult <= OV5640_PLL_MULT_MAX;
-+ _pll_mult++) {
-+ unsigned long _rate;
-+
-+ /*
-+ * The PLL multiplier cannot be odd if above
-+ * 127.
-+ */
-+ if (_pll_mult > 127 && (_pll_mult % 2))
-+ continue;
-+
-+ _rate = ov5640_compute_sys_clk(sensor,
-+ OV5640_PLL_PREDIV,
-+ _pll_mult, _sysdiv);
-+
-+ /*
-+ * We have reached the maximum allowed PLL1 output,
-+ * increase sysdiv.
-+ */
-+ if (!_rate)
-+ break;
-+
-+ /*
-+ * Prefer rates above the expected clock rate over rates
-+ * below it, even if that means being less precise.
-+ */
-+ if (_rate < rate)
-+ continue;
-+
-+ if (abs(rate - _rate) < abs(rate - best)) {
-+ best = _rate;
-+ best_sysdiv = _sysdiv;
-+ best_mult = _pll_mult;
-+ }
-+
-+ if (_rate == rate)
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ *sysdiv = best_sysdiv;
-+ *pll_prediv = OV5640_PLL_PREDIV;
-+ *pll_mult = best_mult;
-+
-+ return best;
-+}
-+
-+/*
-+ * ov5640_set_mipi_pclk() - Calculate the clock tree configuration values
-+ * for the MIPI CSI-2 output.
-+ *
-+ * @rate: The requested bandwidth per lane in bytes per second.
-+ * 'Bandwidth Per Lane' is calculated as:
-+ * bpl = HTOT * VTOT * FPS * bpp / num_lanes;
-+ *
-+ * This function uses the requested bandwidth to calculate:
-+ * - sample_rate = bpl / (bpp / num_lanes);
-+ * = bpl / (PLL_RDIV * BIT_DIV * PCLK_DIV * MIPI_DIV / num_lanes);
-+ *
-+ * - mipi_sclk = bpl / MIPI_DIV / 2; ( / 2 is for CSI-2 DDR)
-+ *
-+ * with these fixed parameters:
-+ * PLL_RDIV = 2;
-+ * BIT_DIVIDER = 2; (MIPI_BIT_MODE == 8 ? 2 : 2.5);
-+ * PCLK_DIV = 1;
-+ *
-+ * The MIPI clock generation differs for modes that use the scaler and modes
-+ * that do not. In case the scaler is in use, the MIPI_SCLK generates the MIPI
-+ * BIT CLK, and thus:
-+ *
-+ * - mipi_sclk = bpl / MIPI_DIV / 2;
-+ * MIPI_DIV = 1;
-+ *
-+ * For modes that do not go through the scaler, the MIPI BIT CLOCK is generated
-+ * from the pixel clock, and thus:
-+ *
-+ * - sample_rate = bpl / (bpp / num_lanes);
-+ * = bpl / (2 * 2 * 1 * MIPI_DIV / num_lanes);
-+ * = bpl / (4 * MIPI_DIV / num_lanes);
-+ * - MIPI_DIV = bpp / (4 * num_lanes);
-+ *
-+ * FIXME: this has been tested with a 16bpp and 2 lanes setup only.
-+ * MIPI_DIV is fixed to value 2, but it -might- be changed according to the
-+ * above formula for setups with 1 lane or image formats with different bpp.
-+ *
-+ * FIXME: this deviates from the sensor manual documentation which is quite
-+ * thin on the MIPI clock tree generation part.
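-+ *
-+ * As a rough illustration (assumed example values, not taken from a
-+ * measured setup): for the 1920x1080 mode above (HTOT = 2500,
-+ * VTOT = 1120) at 30 fps with 16 bpp over 2 lanes, the formula above
-+ * gives bpl = 2500 * 1120 * 30 * 16 / 2 = 672000000.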
-+ */
-+static int ov5640_set_mipi_pclk(struct ov5640_dev *sensor,
-+ unsigned long rate)
-+{
-+ const struct ov5640_mode_info *mode = sensor->current_mode;
-+ u8 prediv, mult, sysdiv;
-+ u8 mipi_div;
-+ int ret;
-+
-+ /*
-+ * 1280x720 is reported to use 'SUBSAMPLING' only,
-+ * but according to the sensor manual it goes through the
-+ * scaler before subsampling.
-+ */
-+ if (mode->dn_mode == SCALING ||
-+ (mode->id == OV5640_MODE_720P_1280_720))
-+ mipi_div = OV5640_MIPI_DIV_SCLK;
-+ else
-+ mipi_div = OV5640_MIPI_DIV_PCLK;
-+
-+ ov5640_calc_sys_clk(sensor, rate, &prediv, &mult, &sysdiv);
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL0,
-+ 0x0f, OV5640_PLL_CTRL0_MIPI_MODE_8BIT);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
-+ 0xff, sysdiv << 4 | mipi_div);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL2, 0xff, mult);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
-+ 0x1f, OV5640_PLL_CTRL3_PLL_ROOT_DIV_2 | prediv);
-+ if (ret)
-+ return ret;
-+
-+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER,
-+ 0x30, OV5640_PLL_SYS_ROOT_DIVIDER_BYPASS);
-+}
-+
-+static unsigned long ov5640_calc_pclk(struct ov5640_dev *sensor,
-+ unsigned long rate,
-+ u8 *pll_prediv, u8 *pll_mult, u8 *sysdiv,
-+ u8 *pll_rdiv, u8 *bit_div, u8 *pclk_div)
-+{
-+ unsigned long _rate = rate * OV5640_PLL_ROOT_DIV * OV5640_BIT_DIV *
-+ OV5640_PCLK_ROOT_DIV;
-+
-+ _rate = ov5640_calc_sys_clk(sensor, _rate, pll_prediv, pll_mult,
-+ sysdiv);
-+ *pll_rdiv = OV5640_PLL_ROOT_DIV;
-+ *bit_div = OV5640_BIT_DIV;
-+ *pclk_div = OV5640_PCLK_ROOT_DIV;
-+
-+ return _rate / *pll_rdiv / *bit_div / *pclk_div;
-+}
-+
-+static int ov5640_set_dvp_pclk(struct ov5640_dev *sensor, unsigned long rate)
-+{
-+ u8 prediv, mult, sysdiv, pll_rdiv, bit_div, pclk_div;
-+ int ret;
-+
-+ ov5640_calc_pclk(sensor, rate, &prediv, &mult, &sysdiv, &pll_rdiv,
-+ &bit_div, &pclk_div);
-+
-+#ifndef CONFIG_VIN_SENSOR_OV5640
-+ if (bit_div == 2)
-+ bit_div = 8;
-+#else
-+ bit_div = 0xa;
-+#endif
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL0,
-+ 0x0f, bit_div);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * We need to set sysdiv according to the clock, and to clear
-+ * the MIPI divider.
-+ */
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
-+ 0xff, sysdiv << 4);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL2,
-+ 0xff, mult);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
-+ 0x1f, prediv | ((pll_rdiv - 1) << 4));
-+ if (ret)
-+ return ret;
-+
-+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x30,
-+ (ilog2(pclk_div) << 4));
-+}
-+
-+/* set JPEG framing sizes */
-+static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor,
-+ const struct ov5640_mode_info *mode)
-+{
-+ int ret;
-+
-+ /*
-+ * compression mode 3 timing
-+ *
-+ * Data is transmitted with programmable width (VFIFO_HSIZE).
-+ * No padding done. Last line may have less data. Varying
-+ * number of lines per frame, depending on amount of data.
-+ */
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_JPG_MODE_SELECT, 0x7, 0x3);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->hact);
-+ if (ret < 0)
-+ return ret;
-+
-+ return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact);
-+}
-+
-+/* download ov5640 settings to sensor through i2c */
-+static int ov5640_set_timings(struct ov5640_dev *sensor,
-+ const struct ov5640_mode_info *mode)
-+{
-+ int ret;
-+
-+ if (sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8) {
-+ ret = ov5640_set_jpeg_timings(sensor, mode);
-+ if (ret < 0)
-+ return ret;
-+ }
-+
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPVO, mode->vact);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_HTS, mode->htot);
-+ if (ret < 0)
-+ return ret;
-+
-+ return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, mode->vtot);
-+}
-+
-+static int ov5640_load_regs(struct ov5640_dev *sensor,
-+ const struct ov5640_mode_info *mode)
-+{
-+ const struct reg_value *regs = mode->reg_data;
-+ unsigned int i;
-+ u32 delay_ms;
-+ u16 reg_addr;
-+ u8 mask, val;
-+ int ret = 0;
-+
-+ st_info(ST_SENSOR, "%s, mode = 0x%x\n", __func__, mode->id);
-+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
-+ delay_ms = regs->delay_ms;
-+ reg_addr = regs->reg_addr;
-+ val = regs->val;
-+ mask = regs->mask;
-+
-+ /* remain in power down mode for DVP */
-+ if (regs->reg_addr == OV5640_REG_SYS_CTRL0 &&
-+ val == OV5640_REG_SYS_CTRL0_SW_PWUP &&
-+ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY)
-+ continue;
-+
-+ if (mask)
-+ ret = ov5640_mod_reg(sensor, reg_addr, mask, val);
-+ else
-+ ret = ov5640_write_reg(sensor, reg_addr, val);
-+ if (ret)
-+ break;
-+
-+ if (delay_ms)
-+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
-+ }
-+
-+ return ov5640_set_timings(sensor, mode);
-+}
-+
-+static int ov5640_set_autoexposure(struct ov5640_dev *sensor, bool on)
-+{
-+ return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL,
-+ BIT(0), on ? 0 : BIT(0));
-+}
-+
-+/* read exposure, in number of line periods */
-+static int ov5640_get_exposure(struct ov5640_dev *sensor)
-+{
-+ int exp, ret;
-+ u8 temp;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_PK_EXPOSURE_HI, &temp);
-+ if (ret)
-+ return ret;
-+ exp = ((int)temp & 0x0f) << 16;
-+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_PK_EXPOSURE_MED, &temp);
-+ if (ret)
-+ return ret;
-+ exp |= ((int)temp << 8);
-+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_PK_EXPOSURE_LO, &temp);
-+ if (ret)
-+ return ret;
-+ exp |= (int)temp;
-+
-+ return exp >> 4;
-+}
-+
-+/* write exposure, given number of line periods */
-+static int ov5640_set_exposure(struct ov5640_dev *sensor, u32 exposure)
-+{
-+ int ret;
-+
-+ exposure <<= 4;
-+
-+ ret = ov5640_write_reg(sensor,
-+ OV5640_REG_AEC_PK_EXPOSURE_LO,
-+ exposure & 0xff);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor,
-+ OV5640_REG_AEC_PK_EXPOSURE_MED,
-+ (exposure >> 8) & 0xff);
-+ if (ret)
-+ return ret;
-+ return ov5640_write_reg(sensor,
-+ OV5640_REG_AEC_PK_EXPOSURE_HI,
-+ (exposure >> 16) & 0x0f);
-+}
-+
-+static int ov5640_get_gain(struct ov5640_dev *sensor)
-+{
-+ u16 gain;
-+ int ret;
-+
-+ ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN, &gain);
-+ if (ret)
-+ return ret;
-+
-+ return gain & 0x3ff;
-+}
-+
-+static int ov5640_set_gain(struct ov5640_dev *sensor, int gain)
-+{
-+ return ov5640_write_reg16(sensor, OV5640_REG_AEC_PK_REAL_GAIN,
-+ (u16)gain & 0x3ff);
-+}
-+
-+static int ov5640_set_autogain(struct ov5640_dev *sensor, bool on)
-+{
-+ return ov5640_mod_reg(sensor, OV5640_REG_AEC_PK_MANUAL,
-+ BIT(1), on ? 0 : BIT(1));
-+}
-+
-+static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
-+{
-+ return ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0, on ?
-+ OV5640_REG_SYS_CTRL0_SW_PWUP :
-+ OV5640_REG_SYS_CTRL0_SW_PWDN);
-+}
-+
-+static int ov5640_set_stream_mipi(struct ov5640_dev *sensor, bool on)
-+{
-+ int ret;
-+
-+ /*
-+ * Enable/disable the MIPI interface
-+ *
-+ * 0x300e = on ? 0x45 : 0x40
-+ *
-+ * FIXME: the sensor manual (version 2.03) reports
-+ * [7:5] = 000 : 1 data lane mode
-+ * [7:5] = 001 : 2 data lanes mode
-+ * But these settings do not work, while the following ones
-+ * have been validated for 2 data lanes mode.
-+ *
-+ * [7:5] = 010 : 2 data lanes mode
-+ * [4] = 0 : Power up MIPI HS Tx
-+ * [3] = 0 : Power up MIPI LS Rx
-+ * [2] = 1/0 : MIPI interface enable/disable
-+ * [1:0] = 01/00: FIXME: 'debug'
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00,
-+ on ? 0x45 : 0x40);
-+ if (ret)
-+ return ret;
-+
-+ return ov5640_write_reg(sensor, OV5640_REG_FRAME_CTRL01,
-+ on ? 0x00 : 0x0f);
-+}
-+
-+static int ov5640_get_sysclk(struct ov5640_dev *sensor)
-+{
-+ /* calculate sysclk */
-+ u32 xvclk = sensor->xclk_freq / 10000;
-+ u32 multiplier, prediv, VCO, sysdiv, pll_rdiv;
-+ u32 sclk_rdiv_map[] = {1, 2, 4, 8};
-+ u32 bit_div2x = 1, sclk_rdiv, sysclk;
-+ u8 temp1, temp2;
-+ int ret;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL0, &temp1);
-+ if (ret)
-+ return ret;
-+ temp2 = temp1 & 0x0f;
-+ if (temp2 == 8 || temp2 == 10)
-+ bit_div2x = temp2 / 2;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL1, &temp1);
-+ if (ret)
-+ return ret;
-+ sysdiv = temp1 >> 4;
-+ if (sysdiv == 0)
-+ sysdiv = 16;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL2, &temp1);
-+ if (ret)
-+ return ret;
-+ multiplier = temp1;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_SC_PLL_CTRL3, &temp1);
-+ if (ret)
-+ return ret;
-+ prediv = temp1 & 0x0f;
-+ pll_rdiv = ((temp1 >> 4) & 0x01) + 1;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, &temp1);
-+ if (ret)
-+ return ret;
-+ temp2 = temp1 & 0x03;
-+ sclk_rdiv = sclk_rdiv_map[temp2];
-+
-+ if (!prediv || !sysdiv || !pll_rdiv || !bit_div2x)
-+ return -EINVAL;
-+
-+ VCO = xvclk * multiplier / prediv;
-+
-+ sysclk = VCO / sysdiv / pll_rdiv * 2 / bit_div2x / sclk_rdiv;
-+
-+ return sysclk;
-+}
-+
-+static int ov5640_set_night_mode(struct ov5640_dev *sensor)
-+{
-+ /* turn off night mode: clear AEC CTRL00 bit[2] */
-+ u8 mode;
-+ int ret;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_AEC_CTRL00, &mode);
-+ if (ret)
-+ return ret;
-+ mode &= 0xfb;
-+ return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL00, mode);
-+}
-+
-+static int ov5640_get_hts(struct ov5640_dev *sensor)
-+{
-+ /* read HTS from register settings */
-+ u16 hts;
-+ int ret;
-+
-+ ret = ov5640_read_reg16(sensor, OV5640_REG_TIMING_HTS, &hts);
-+ if (ret)
-+ return ret;
-+ return hts;
-+}
-+
-+static int ov5640_get_vts(struct ov5640_dev *sensor)
-+{
-+ u16 vts;
-+ int ret;
-+
-+ ret = ov5640_read_reg16(sensor, OV5640_REG_TIMING_VTS, &vts);
-+ if (ret)
-+ return ret;
-+ return vts;
-+}
-+
-+static int ov5640_set_vts(struct ov5640_dev *sensor, int vts)
-+{
-+ return ov5640_write_reg16(sensor, OV5640_REG_TIMING_VTS, vts);
-+}
-+
-+static int ov5640_get_light_freq(struct ov5640_dev *sensor)
-+{
-+ /* get the light frequency used for the banding filter */
-+ int ret, light_freq = 0;
-+ u8 temp, temp1;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_HZ5060_CTRL01, &temp);
-+ if (ret)
-+ return ret;
-+
-+ if (temp & 0x80) {
-+ /* manual */
-+ ret = ov5640_read_reg(sensor, OV5640_REG_HZ5060_CTRL00,
-+ &temp1);
-+ if (ret)
-+ return ret;
-+ if (temp1 & 0x04) {
-+ /* 50Hz */
-+ light_freq = 50;
-+ } else {
-+ /* 60Hz */
-+ light_freq = 60;
-+ }
-+ } else {
-+ /* auto */
-+ ret = ov5640_read_reg(sensor, OV5640_REG_SIGMADELTA_CTRL0C,
-+ &temp1);
-+ if (ret)
-+ return ret;
-+
-+ if (temp1 & 0x01) {
-+ /* 50Hz */
-+ light_freq = 50;
-+ } else {
-+ /* 60Hz */
-+ light_freq = 60;
-+ }
-+ }
-+
-+ return light_freq;
-+}
-+
-+static int ov5640_set_bandingfilter(struct ov5640_dev *sensor)
-+{
-+ u32 band_step60, max_band60, band_step50, max_band50, prev_vts;
-+ int ret;
-+
-+ /* read preview sysclk */
-+ ret = ov5640_get_sysclk(sensor);
-+ if (ret < 0)
-+ return ret;
-+ if (ret == 0)
-+ return -EINVAL;
-+ sensor->prev_sysclk = ret;
-+ /* read preview HTS */
-+ ret = ov5640_get_hts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ if (ret == 0)
-+ return -EINVAL;
-+ sensor->prev_hts = ret;
-+
-+ /* read preview VTS */
-+ ret = ov5640_get_vts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ prev_vts = ret;
-+
-+ /* calculate banding filter */
-+ /* 60Hz */
-+ band_step60 = sensor->prev_sysclk * 100 / sensor->prev_hts * 100 / 120;
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_B60_STEP, band_step60);
-+ if (ret)
-+ return ret;
-+ if (!band_step60)
-+ return -EINVAL;
-+ max_band60 = (int)((prev_vts - 4) / band_step60);
-+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL0D, max_band60);
-+ if (ret)
-+ return ret;
-+
-+ /* 50Hz */
-+ band_step50 = sensor->prev_sysclk * 100 / sensor->prev_hts;
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_AEC_B50_STEP, band_step50);
-+ if (ret)
-+ return ret;
-+ if (!band_step50)
-+ return -EINVAL;
-+ max_band50 = (int)((prev_vts - 4) / band_step50);
-+ return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL0E, max_band50);
-+}
-+
-+static int ov5640_set_ae_target(struct ov5640_dev *sensor, int target)
-+{
-+ /* set the AE stable range and fast-response thresholds */
-+ u32 fast_high, fast_low;
-+ int ret;
-+
-+ sensor->ae_low = target * 23 / 25; /* 0.92 */
-+ sensor->ae_high = target * 27 / 25; /* 1.08 */
-+
-+ fast_high = sensor->ae_high << 1;
-+ if (fast_high > 255)
-+ fast_high = 255;
-+
-+ fast_low = sensor->ae_low >> 1;
-+
-+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL0F, sensor->ae_high);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL10, sensor->ae_low);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1B, sensor->ae_high);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1E, sensor->ae_low);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL11, fast_high);
-+ if (ret)
-+ return ret;
-+ return ov5640_write_reg(sensor, OV5640_REG_AEC_CTRL1F, fast_low);
-+}
-+
-+static int ov5640_get_binning(struct ov5640_dev *sensor)
-+{
-+ u8 temp;
-+ int ret;
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_TIMING_TC_REG21, &temp);
-+ if (ret)
-+ return ret;
-+
-+ return temp & BIT(0);
-+}
-+
-+static int ov5640_set_binning(struct ov5640_dev *sensor, bool enable)
-+{
-+ int ret;
-+
-+ /*
-+ * TIMING TC REG21:
-+ * - [0]: Horizontal binning enable
-+ */
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
-+ BIT(0), enable ? BIT(0) : 0);
-+ if (ret)
-+ return ret;
-+ /*
-+ * TIMING TC REG20:
-+ * - [0]: Undocumented, but hardcoded init sequences
-+ * are always setting REG21/REG20 bit 0 to the same value...
-+ */
-+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG20,
-+ BIT(0), enable ? BIT(0) : 0);
-+}
-+
-+static int ov5640_set_virtual_channel(struct ov5640_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ u8 temp, channel = virtual_channel;
-+ int ret;
-+
-+ if (channel > 3) {
-+ dev_err(&client->dev,
-+ "%s: wrong virtual_channel parameter, expected (0..3), got %d\n",
-+ __func__, channel);
-+ return -EINVAL;
-+ }
-+
-+ ret = ov5640_read_reg(sensor, OV5640_REG_DEBUG_MODE, &temp);
-+ if (ret)
-+ return ret;
-+ temp &= ~(3 << 6);
-+ temp |= (channel << 6);
-+ return ov5640_write_reg(sensor, OV5640_REG_DEBUG_MODE, temp);
-+}
-+
-+static const struct ov5640_mode_info *
-+ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
-+ int width, int height, bool nearest)
-+{
-+ const struct ov5640_mode_info *mode;
-+
-+ mode = v4l2_find_nearest_size(ov5640_mode_data,
-+ ARRAY_SIZE(ov5640_mode_data),
-+ hact, vact,
-+ width, height);
-+
-+ if (!mode ||
-+ (!nearest && (mode->hact != width || mode->vact != height)))
-+ return NULL;
-+
-+ /* Check to see if the current mode exceeds the max frame rate */
-+ if (ov5640_framerates[fr] > ov5640_framerates[mode->max_fps])
-+ return NULL;
-+
-+ return mode;
-+}
-+
-+static u64 ov5640_calc_pixel_rate(struct ov5640_dev *sensor)
-+{
-+ u64 rate;
-+
-+ rate = sensor->current_mode->vtot * sensor->current_mode->htot;
-+ rate *= ov5640_framerates[sensor->current_fr];
-+
-+ return rate;
-+}
-+
-+/*
-+ * sensor changes between scaling and subsampling, go through
-+ * exposure calculation
-+ */
-+static int ov5640_set_mode_exposure_calc(struct ov5640_dev *sensor,
-+ const struct ov5640_mode_info *mode)
-+{
-+ u32 prev_shutter, prev_gain16;
-+ u32 cap_shutter, cap_gain16;
-+ u32 cap_sysclk, cap_hts, cap_vts;
-+ u32 light_freq, cap_bandfilt, cap_maxband;
-+ u32 cap_gain16_shutter;
-+ u8 average;
-+ int ret;
-+
-+ if (!mode->reg_data)
-+ return -EINVAL;
-+
-+ /* read preview shutter */
-+ ret = ov5640_get_exposure(sensor);
-+ if (ret < 0)
-+ return ret;
-+ prev_shutter = ret;
-+ ret = ov5640_get_binning(sensor);
-+ if (ret < 0)
-+ return ret;
-+ if (ret && mode->id != OV5640_MODE_720P_1280_720 &&
-+ mode->id != OV5640_MODE_1080P_1920_1080)
-+ prev_shutter *= 2;
-+
-+ /* read preview gain */
-+ ret = ov5640_get_gain(sensor);
-+ if (ret < 0)
-+ return ret;
-+ prev_gain16 = ret;
-+
-+ /* get average */
-+ ret = ov5640_read_reg(sensor, OV5640_REG_AVG_READOUT, &average);
-+ if (ret)
-+ return ret;
-+
-+ /* turn off night mode for capture */
-+ ret = ov5640_set_night_mode(sensor);
-+ if (ret < 0)
-+ return ret;
-+
-+ /* Write capture setting */
-+ ret = ov5640_load_regs(sensor, mode);
-+ if (ret < 0)
-+ return ret;
-+
-+ /* read capture VTS */
-+ ret = ov5640_get_vts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ cap_vts = ret;
-+ ret = ov5640_get_hts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ if (ret == 0)
-+ return -EINVAL;
-+ cap_hts = ret;
-+
-+ ret = ov5640_get_sysclk(sensor);
-+ if (ret < 0)
-+ return ret;
-+ if (ret == 0)
-+ return -EINVAL;
-+ cap_sysclk = ret;
-+
-+ /* calculate capture banding filter */
-+ ret = ov5640_get_light_freq(sensor);
-+ if (ret < 0)
-+ return ret;
-+ light_freq = ret;
-+
-+ if (light_freq == 60) {
-+ /* 60Hz */
-+ cap_bandfilt = cap_sysclk * 100 / cap_hts * 100 / 120;
-+ } else {
-+ /* 50Hz */
-+ cap_bandfilt = cap_sysclk * 100 / cap_hts;
-+ }
-+
-+ if (!sensor->prev_sysclk) {
-+ ret = ov5640_get_sysclk(sensor);
-+ if (ret < 0)
-+ return ret;
-+ if (ret == 0)
-+ return -EINVAL;
-+ sensor->prev_sysclk = ret;
-+ }
-+
-+ if (!cap_bandfilt)
-+ return -EINVAL;
-+
-+ cap_maxband = (int)((cap_vts - 4) / cap_bandfilt);
-+
-+ /* calculate capture shutter/gain16 */
-+ if (average > sensor->ae_low && average < sensor->ae_high) {
-+ /* in stable range */
-+ cap_gain16_shutter =
-+ prev_gain16 * prev_shutter *
-+ cap_sysclk / sensor->prev_sysclk *
-+ sensor->prev_hts / cap_hts *
-+ sensor->ae_target / average;
-+ } else {
-+ cap_gain16_shutter =
-+ prev_gain16 * prev_shutter *
-+ cap_sysclk / sensor->prev_sysclk *
-+ sensor->prev_hts / cap_hts;
-+ }
-+
-+ /* gain to shutter */
-+ if (cap_gain16_shutter < (cap_bandfilt * 16)) {
-+ /* shutter < 1/100 */
-+ cap_shutter = cap_gain16_shutter / 16;
-+ if (cap_shutter < 1)
-+ cap_shutter = 1;
-+
-+ cap_gain16 = cap_gain16_shutter / cap_shutter;
-+ if (cap_gain16 < 16)
-+ cap_gain16 = 16;
-+ } else {
-+ if (cap_gain16_shutter > (cap_bandfilt * cap_maxband * 16)) {
-+ /* exposure reach max */
-+ cap_shutter = cap_bandfilt * cap_maxband;
-+ if (!cap_shutter)
-+ return -EINVAL;
-+
-+ cap_gain16 = cap_gain16_shutter / cap_shutter;
-+ } else {
-+ /* 1/100 < (cap_shutter = n/100) <= max */
-+ cap_shutter =
-+ ((int)(cap_gain16_shutter / 16 / cap_bandfilt))
-+ * cap_bandfilt;
-+ if (!cap_shutter)
-+ return -EINVAL;
-+
-+ cap_gain16 = cap_gain16_shutter / cap_shutter;
-+ }
-+ }
-+
-+ /* set capture gain */
-+ ret = ov5640_set_gain(sensor, cap_gain16);
-+ if (ret)
-+ return ret;
-+
-+ /* write capture shutter */
-+ if (cap_shutter > (cap_vts - 4)) {
-+ cap_vts = cap_shutter + 4;
-+ ret = ov5640_set_vts(sensor, cap_vts);
-+ if (ret < 0)
-+ return ret;
-+ }
-+
-+ /* set exposure */
-+ return ov5640_set_exposure(sensor, cap_shutter);
-+}
-+
-+/*
-+ * if sensor changes inside scaling or subsampling
-+ * change mode directly
-+ */
-+static int ov5640_set_mode_direct(struct ov5640_dev *sensor,
-+ const struct ov5640_mode_info *mode)
-+{
-+ if (!mode->reg_data)
-+ return -EINVAL;
-+
-+ /* Write capture setting */
-+ return ov5640_load_regs(sensor, mode);
-+}
-+
-+static int ov5640_set_mode(struct ov5640_dev *sensor)
-+{
-+ const struct ov5640_mode_info *mode = sensor->current_mode;
-+ const struct ov5640_mode_info *orig_mode = sensor->last_mode;
-+ enum ov5640_downsize_mode dn_mode, orig_dn_mode;
-+ bool auto_gain = sensor->ctrls.auto_gain->val == 1;
-+ bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO;
-+ unsigned long rate;
-+ int ret;
-+
-+ dn_mode = mode->dn_mode;
-+ orig_dn_mode = orig_mode->dn_mode;
-+
-+ /* auto gain and exposure must be turned off when changing modes */
-+ if (auto_gain) {
-+ ret = ov5640_set_autogain(sensor, false);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (auto_exp) {
-+ ret = ov5640_set_autoexposure(sensor, false);
-+ if (ret)
-+ goto restore_auto_gain;
-+ }
-+
-+ /*
-+ * All the formats we support have 16 bits per pixel and seem to require
-+ * the same rate as YUV, so we can just use 16 bpp all the time.
-+ */
-+ rate = ov5640_calc_pixel_rate(sensor) * 16;
-+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
-+ rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
-+ ret = ov5640_set_mipi_pclk(sensor, rate);
-+ } else {
-+ rate = rate / sensor->ep.bus.parallel.bus_width;
-+ ret = ov5640_set_dvp_pclk(sensor, rate);
-+ }
-+
-+ if (ret < 0)
-+ return ret;
-+
-+ if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) ||
-+ (dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) {
-+ /*
-+ * change between subsampling and scaling
-+ * go through exposure calculation
-+ */
-+ ret = ov5640_set_mode_exposure_calc(sensor, mode);
-+ } else {
-+ /*
-+ * change inside subsampling or scaling
-+ * download firmware directly
-+ */
-+ ret = ov5640_set_mode_direct(sensor, mode);
-+ }
-+ if (ret < 0)
-+ goto restore_auto_exp_gain;
-+
-+ /* restore auto gain and exposure */
-+ if (auto_gain)
-+ ov5640_set_autogain(sensor, true);
-+ if (auto_exp)
-+ ov5640_set_autoexposure(sensor, true);
-+
-+ ret = ov5640_set_binning(sensor, dn_mode != SCALING);
-+ if (ret < 0)
-+ return ret;
-+ ret = ov5640_set_ae_target(sensor, sensor->ae_target);
-+ if (ret < 0)
-+ return ret;
-+ ret = ov5640_get_light_freq(sensor);
-+ if (ret < 0)
-+ return ret;
-+ ret = ov5640_set_bandingfilter(sensor);
-+ if (ret < 0)
-+ return ret;
-+ ret = ov5640_set_virtual_channel(sensor);
-+ if (ret < 0)
-+ return ret;
-+
-+ sensor->pending_mode_change = false;
-+ sensor->last_mode = mode;
-+
-+ return 0;
-+
-+restore_auto_exp_gain:
-+ if (auto_exp)
-+ ov5640_set_autoexposure(sensor, true);
-+restore_auto_gain:
-+ if (auto_gain)
-+ ov5640_set_autogain(sensor, true);
-+
-+ return ret;
-+}
-+
-+static int ov5640_set_framefmt(struct ov5640_dev *sensor,
-+ struct v4l2_mbus_framefmt *format);
-+
-+/* restore the last set video mode after chip power-on */
-+static int ov5640_restore_mode(struct ov5640_dev *sensor)
-+{
-+ int ret;
-+
-+ /* first load the initial register values */
-+ ret = ov5640_load_regs(sensor, &ov5640_mode_init_data);
-+ if (ret < 0)
-+ return ret;
-+ sensor->last_mode = &ov5640_mode_init_data;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f,
-+ (ilog2(OV5640_SCLK2X_ROOT_DIV) << 2) |
-+ ilog2(OV5640_SCLK_ROOT_DIV));
-+ if (ret)
-+ return ret;
-+
-+ /* now restore the last capture mode */
-+ ret = ov5640_set_mode(sensor);
-+ if (ret < 0)
-+ return ret;
-+
-+ return ov5640_set_framefmt(sensor, &sensor->fmt);
-+}
-+
-+static void ov5640_power(struct ov5640_dev *sensor, bool enable)
-+{
-+ if (!sensor->pwdn_gpio)
-+ return;
-+ gpiod_set_value_cansleep(sensor->pwdn_gpio, enable ? 0 : 1);
-+}
-+
-+static void ov5640_reset(struct ov5640_dev *sensor)
-+{
-+ if (!sensor->reset_gpio)
-+ return;
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
-+
-+ /* camera power cycle */
-+ ov5640_power(sensor, false);
-+ usleep_range(5000, 10000);
-+ ov5640_power(sensor, true);
-+ usleep_range(5000, 10000);
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
-+ usleep_range(1000, 2000);
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
-+ usleep_range(20000, 25000);
-+}
-+
-+static int ov5640_set_power_on(struct ov5640_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ int ret;
-+
-+ ret = clk_prepare_enable(sensor->xclk);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable clock\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ ret = regulator_bulk_enable(OV5640_NUM_SUPPLIES,
-+ sensor->supplies);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable regulators\n",
-+ __func__);
-+ goto xclk_off;
-+ }
-+
-+ ov5640_reset(sensor);
-+ ov5640_power(sensor, true);
-+
-+ ret = ov5640_init_slave_id(sensor);
-+ if (ret)
-+ goto power_off;
-+
-+ return 0;
-+
-+power_off:
-+ ov5640_power(sensor, false);
-+ regulator_bulk_disable(OV5640_NUM_SUPPLIES, sensor->supplies);
-+xclk_off:
-+ clk_disable_unprepare(sensor->xclk);
-+ return ret;
-+}
-+
-+static void ov5640_set_power_off(struct ov5640_dev *sensor)
-+{
-+ ov5640_power(sensor, false);
-+ regulator_bulk_disable(OV5640_NUM_SUPPLIES, sensor->supplies);
-+ clk_disable_unprepare(sensor->xclk);
-+}
-+
-+static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
-+{
-+ int ret;
-+
-+ if (!on) {
-+ /* Reset MIPI bus settings to their default values. */
-+ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
-+ ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x04);
-+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x00);
-+ return 0;
-+ }
-+
-+ /*
-+ * Power up MIPI HS Tx and LS Rx; 2 data lanes mode
-+ *
-+ * 0x300e = 0x40
-+ * [7:5] = 010 : 2 data lanes mode (see FIXME note in
-+ * "ov5640_set_stream_mipi()")
-+ * [4] = 0 : Power up MIPI HS Tx
-+ * [3] = 0 : Power up MIPI LS Rx
-+ * [2] = 0 : MIPI interface disabled
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Gate clock and set LP11 in 'no packets mode' (idle)
-+ *
-+ * 0x4800 = 0x24
-+ * [5] = 1 : Gate clock when 'no packets'
-+ * [2] = 1 : MIPI bus in LP11 when 'no packets'
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_MIPI_CTRL00, 0x24);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Set data lanes and clock in LP11 when 'sleeping'
-+ *
-+ * 0x3019 = 0x70
-+ * [6] = 1 : MIPI data lane 2 in LP11 when 'sleeping'
-+ * [5] = 1 : MIPI data lane 1 in LP11 when 'sleeping'
-+ * [4] = 1 : MIPI clock lane in LP11 when 'sleeping'
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT00, 0x70);
-+ if (ret)
-+ return ret;
-+
-+ /* Give the lanes some time to settle into the LP11 state. */
-+ usleep_range(500, 1000);
-+
-+ return 0;
-+}
-+
-+static int ov5640_set_power_dvp(struct ov5640_dev *sensor, bool on)
-+{
-+ unsigned int flags = sensor->ep.bus.parallel.flags;
-+ bool bt656 = sensor->ep.bus_type == V4L2_MBUS_BT656;
-+ u8 polarities = 0;
-+ int ret;
-+
-+ if (!on) {
-+ /* Reset settings to their default values. */
-+ ov5640_write_reg(sensor, OV5640_REG_CCIR656_CTRL00, 0x00);
-+ ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
-+ ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, 0x20);
-+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01, 0x00);
-+ ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0x00);
-+ return 0;
-+ }
-+
-+ /*
-+ * Note about parallel port configuration.
-+ *
-+ * When configured in parallel mode, the OV5640 will
-+ * output 10-bit data on the DVP data lines [9:0].
-+ * If only 8-bit data is wanted, the 8 data lines
-+ * of the camera interface must be physically connected
-+ * to the DVP data lines [9:2].
-+ *
-+ * Control lines polarity can be configured through
-+ * devicetree endpoint control lines properties.
-+ * If no endpoint control lines properties are set,
-+ * polarity will be as below:
-+ * - VSYNC: active high
-+ * - HREF: active low
-+ * - PCLK: active low
-+ *
-+ * VSYNC & HREF are not configured if BT656 bus mode is selected
-+ */
-+
-+ /*
-+ * BT656 embedded synchronization configuration
-+ *
-+ * CCIR656 CTRL00
-+ * - [7]: SYNC code selection (0: auto generate sync code,
-+ * 1: sync code from regs 0x4732-0x4735)
-+ * - [6]: f value in CCIR656 SYNC code when fixed f value
-+ * - [5]: Fixed f value
-+ * - [4:3]: Blank toggle data options (00: data=1'h040/1'h200,
-+ * 01: data from regs 0x4736-0x4738, 10: always keep 0)
-+ * - [1]: Clip data disable
-+ * - [0]: CCIR656 mode enable
-+ *
-+ * Default CCIR656 SAV/EAV mode with default codes
-+ * SAV=0xff000080 & EAV=0xff00009d is enabled here with settings:
-+ * - CCIR656 mode enable
-+ * - auto generation of sync codes
-+ * - blank toggle data 1'h040/1'h200
-+ * - clip reserved data (0x00 & 0xff changed to 0x01 & 0xfe)
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_CCIR656_CTRL00,
-+ bt656 ? 0x01 : 0x00);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * configure parallel port control lines polarity
-+ *
-+ * POLARITY CTRL0
-+ * - [5]: PCLK polarity (0: active low, 1: active high)
-+ * - [1]: HREF polarity (0: active low, 1: active high)
-+ * - [0]: VSYNC polarity (mismatch here between
-+ * datasheet and hardware, 0 is active high
-+ * and 1 is active low...)
-+ */
-+ if (!bt656) {
-+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
-+ polarities |= BIT(1);
-+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
-+ polarities |= BIT(0);
-+ }
-+ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
-+ polarities |= BIT(5);
-+
-+ ret = ov5640_write_reg(sensor, OV5640_REG_POLARITY_CTRL00, polarities);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * powerdown MIPI TX/RX PHY & enable DVP
-+ *
-+ * MIPI CONTROL 00
-+ * [4] = 1 : Power down MIPI HS Tx
-+ * [3] = 1 : Power down MIPI LS Rx
-+ * [2] = 0 : DVP enable (MIPI disable)
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x58);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * enable VSYNC/HREF/PCLK DVP control lines
-+ * & D[9:6] DVP data lines
-+ *
-+ * PAD OUTPUT ENABLE 01
-+ * - 6: VSYNC output enable
-+ * - 5: HREF output enable
-+ * - 4: PCLK output enable
-+ * - [3:0]: D[9:6] output enable
-+ */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE01,
-+ bt656 ? 0x1f : 0x7f);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * enable D[5:0] DVP data lines
-+ *
-+ * PAD OUTPUT ENABLE 02
-+ * - [7:2]: D[5:0] output enable
-+ */
-+ return ov5640_write_reg(sensor, OV5640_REG_PAD_OUTPUT_ENABLE02, 0xfc);
-+}
-+
-+static int ov5640_set_power(struct ov5640_dev *sensor, bool on)
-+{
-+ int ret = 0;
-+
-+ if (on) {
-+ ret = ov5640_set_power_on(sensor);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_restore_mode(sensor);
-+ if (ret)
-+ goto power_off;
-+ }
-+
-+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
-+ ret = ov5640_set_power_mipi(sensor, on);
-+ else
-+ ret = ov5640_set_power_dvp(sensor, on);
-+ if (ret)
-+ goto power_off;
-+
-+ if (!on)
-+ ov5640_set_power_off(sensor);
-+
-+ return 0;
-+
-+power_off:
-+ ov5640_set_power_off(sensor);
-+ return ret;
-+}
-+
-+/* --------------- Subdev Operations --------------- */
-+
-+static int ov5640_s_power(struct v4l2_subdev *sd, int on)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ int ret = 0;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ /*
-+ * If the power count is modified from 0 to != 0 or from != 0 to 0,
-+ * update the power state.
-+ */
-+ if (sensor->power_count == !on) {
-+ ret = ov5640_set_power(sensor, !!on);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ /* Update the power count. */
-+ sensor->power_count += on ? 1 : -1;
-+ WARN_ON(sensor->power_count < 0);
-+out:
-+ mutex_unlock(&sensor->lock);
-+
-+ if (on && !ret && sensor->power_count == 1) {
-+ /* restore controls */
-+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
-+ struct v4l2_fract *fi,
-+ u32 width, u32 height)
-+{
-+ const struct ov5640_mode_info *mode;
-+ enum ov5640_frame_rate rate = OV5640_15_FPS;
-+ int minfps, maxfps, best_fps, fps;
-+ int i;
-+
-+ minfps = ov5640_framerates[OV5640_15_FPS];
-+ maxfps = ov5640_framerates[OV5640_60_FPS];
-+
-+ if (fi->numerator == 0) {
-+ fi->denominator = maxfps;
-+ fi->numerator = 1;
-+ rate = OV5640_60_FPS;
-+ goto find_mode;
-+ }
-+
-+ fps = clamp_val(DIV_ROUND_CLOSEST(fi->denominator, fi->numerator),
-+ minfps, maxfps);
-+
-+ best_fps = minfps;
-+ for (i = 0; i < ARRAY_SIZE(ov5640_framerates); i++) {
-+ int curr_fps = ov5640_framerates[i];
-+
-+ if (abs(curr_fps - fps) < abs(best_fps - fps)) {
-+ best_fps = curr_fps;
-+ rate = i;
-+ }
-+ }
-+
-+ fi->numerator = 1;
-+ fi->denominator = best_fps;
-+
-+find_mode:
-+ mode = ov5640_find_mode(sensor, rate, width, height, false);
-+ return mode ? rate : -EINVAL;
-+}
-+
-+static int ov5640_get_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(&sensor->sd, state,
-+ format->pad);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ format->format = *fmt;
-+
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum ov5640_frame_rate fr,
-+ const struct ov5640_mode_info **new_mode)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ const struct ov5640_mode_info *mode;
-+ int i;
-+
-+ mode = ov5640_find_mode(sensor, fr, fmt->width, fmt->height, true);
-+ if (!mode)
-+ return -EINVAL;
-+ fmt->width = mode->hact;
-+ fmt->height = mode->vact;
-+
-+ if (new_mode)
-+ *new_mode = mode;
-+
-+ for (i = 0; i < ARRAY_SIZE(ov5640_formats); i++)
-+ if (ov5640_formats[i].code == fmt->code)
-+ break;
-+ if (i >= ARRAY_SIZE(ov5640_formats))
-+ i = 0;
-+
-+ fmt->code = ov5640_formats[i].code;
-+ fmt->colorspace = ov5640_formats[i].colorspace;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+
-+ return 0;
-+}
-+
-+static int ov5640_set_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ const struct ov5640_mode_info *new_mode;
-+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
-+ struct v4l2_mbus_framefmt *fmt;
-+ int ret;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ ret = ov5640_try_fmt_internal(sd, mbus_fmt, 0, &new_mode);
-+ if (ret)
-+ goto out;
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(sd, state, 0);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ if (mbus_fmt->code != sensor->fmt.code)
-+ sensor->pending_fmt_change = true;
-+
-+ *fmt = *mbus_fmt;
-+
-+ if (new_mode != sensor->current_mode) {
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+ if (new_mode->max_fps < sensor->current_fr) {
-+ sensor->current_fr = new_mode->max_fps;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator =
-+ ov5640_framerates[sensor->current_fr];
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ ov5640_calc_pixel_rate(sensor));
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov5640_set_framefmt(struct ov5640_dev *sensor,
-+ struct v4l2_mbus_framefmt *format)
-+{
-+ int ret = 0;
-+ bool is_jpeg = false;
-+ u8 fmt, mux;
-+
-+ switch (format->code) {
-+ case MEDIA_BUS_FMT_UYVY8_2X8:
-+ /* YUV422, UYVY */
-+ fmt = 0x3f;
-+ mux = OV5640_FMT_MUX_YUV422;
-+ break;
-+ case MEDIA_BUS_FMT_YUYV8_2X8:
-+ /* YUV422, YUYV */
-+ fmt = 0x30;
-+ mux = OV5640_FMT_MUX_YUV422;
-+ break;
-+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
-+ /* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */
-+ fmt = 0x6F;
-+ mux = OV5640_FMT_MUX_RGB;
-+ break;
-+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
-+ /* RGB565 {r[4:0],g[5:3]},{g[2:0],b[4:0]} */
-+ fmt = 0x61;
-+ mux = OV5640_FMT_MUX_RGB;
-+ break;
-+ case MEDIA_BUS_FMT_JPEG_1X8:
-+ /* YUV422, YUYV */
-+ fmt = 0x30;
-+ mux = OV5640_FMT_MUX_YUV422;
-+ is_jpeg = true;
-+ break;
-+ case MEDIA_BUS_FMT_SBGGR8_1X8:
-+ /* Raw, BGBG... / GRGR... */
-+ fmt = 0x00;
-+ mux = OV5640_FMT_MUX_RAW_DPC;
-+ break;
-+ case MEDIA_BUS_FMT_SGBRG8_1X8:
-+ /* Raw bayer, GBGB... / RGRG... */
-+ fmt = 0x01;
-+ mux = OV5640_FMT_MUX_RAW_DPC;
-+ break;
-+ case MEDIA_BUS_FMT_SGRBG8_1X8:
-+ /* Raw bayer, GRGR... / BGBG... */
-+ fmt = 0x02;
-+ mux = OV5640_FMT_MUX_RAW_DPC;
-+ break;
-+ case MEDIA_BUS_FMT_SRGGB8_1X8:
-+ /* Raw bayer, RGRG... / GBGB... */
-+ fmt = 0x03;
-+ mux = OV5640_FMT_MUX_RAW_DPC;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ /* FORMAT CONTROL00: YUV and RGB formatting */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00, fmt);
-+ if (ret)
-+ return ret;
-+
-+ /* FORMAT MUX CONTROL: ISP YUV or RGB */
-+ ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL, mux);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * TIMING TC REG21:
-+ * - [5]: JPEG enable
-+ */
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
-+ BIT(5), is_jpeg ? BIT(5) : 0);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * SYSTEM RESET02:
-+ * - [4]: Reset JFIFO
-+ * - [3]: Reset SFIFO
-+ * - [2]: Reset JPEG
-+ */
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_RESET02,
-+ BIT(4) | BIT(3) | BIT(2),
-+ is_jpeg ? 0 : (BIT(4) | BIT(3) | BIT(2)));
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * CLOCK ENABLE02:
-+ * - [5]: Enable JPEG 2x clock
-+ * - [3]: Enable JPEG clock
-+ */
-+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_CLOCK_ENABLE02,
-+ BIT(5) | BIT(3),
-+ is_jpeg ? (BIT(5) | BIT(3)) : 0);
-+}
-+
-+/*
-+ * Sensor Controls.
-+ */
-+
-+static int ov5640_set_ctrl_hue(struct ov5640_dev *sensor, int value)
-+{
-+ int ret;
-+
-+ if (value) {
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0,
-+ BIT(0), BIT(0));
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_SDE_CTRL1, value);
-+ } else {
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0, BIT(0), 0);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov5640_set_ctrl_contrast(struct ov5640_dev *sensor, int value)
-+{
-+ int ret;
-+
-+ if (value) {
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0,
-+ BIT(2), BIT(2));
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_SDE_CTRL5,
-+ value & 0xff);
-+ } else {
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0, BIT(2), 0);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov5640_set_ctrl_saturation(struct ov5640_dev *sensor, int value)
-+{
-+ int ret;
-+
-+ if (value) {
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0,
-+ BIT(1), BIT(1));
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_SDE_CTRL3,
-+ value & 0xff);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg(sensor, OV5640_REG_SDE_CTRL4,
-+ value & 0xff);
-+ } else {
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_SDE_CTRL0, BIT(1), 0);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov5640_set_ctrl_white_balance(struct ov5640_dev *sensor, int awb)
-+{
-+ int ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_AWB_MANUAL_CTRL,
-+ BIT(0), awb ? 0 : 1);
-+ if (ret)
-+ return ret;
-+
-+ if (!awb) {
-+ u16 red = (u16)sensor->ctrls.red_balance->val;
-+ u16 blue = (u16)sensor->ctrls.blue_balance->val;
-+
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_AWB_R_GAIN, red);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_write_reg16(sensor, OV5640_REG_AWB_B_GAIN, blue);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov5640_set_ctrl_exposure(struct ov5640_dev *sensor,
-+ enum v4l2_exposure_auto_type auto_exposure)
-+{
-+ struct ov5640_ctrls *ctrls = &sensor->ctrls;
-+ bool auto_exp = (auto_exposure == V4L2_EXPOSURE_AUTO);
-+ int ret = 0;
-+
-+ if (ctrls->auto_exp->is_new) {
-+ ret = ov5640_set_autoexposure(sensor, auto_exp);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (!auto_exp && ctrls->exposure->is_new) {
-+ u16 max_exp;
-+
-+ ret = ov5640_read_reg16(sensor, OV5640_REG_AEC_PK_VTS,
-+ &max_exp);
-+ if (ret)
-+ return ret;
-+ ret = ov5640_get_vts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ max_exp += ret;
-+ ret = 0;
-+
-+ if (ctrls->exposure->val < max_exp)
-+ ret = ov5640_set_exposure(sensor, ctrls->exposure->val);
-+ }
-+
-+ return ret;
-+}
-+
-+static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, bool auto_gain)
-+{
-+ struct ov5640_ctrls *ctrls = &sensor->ctrls;
-+ int ret = 0;
-+
-+ if (ctrls->auto_gain->is_new) {
-+ ret = ov5640_set_autogain(sensor, auto_gain);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (!auto_gain && ctrls->gain->is_new)
-+ ret = ov5640_set_gain(sensor, ctrls->gain->val);
-+
-+ return ret;
-+}
-+
-+static const char * const test_pattern_menu[] = {
-+ "Disabled",
-+ "Color bars",
-+ "Color bars w/ rolling bar",
-+ "Color squares",
-+ "Color squares w/ rolling bar",
-+};
-+
-+#define OV5640_TEST_ENABLE BIT(7)
-+#define OV5640_TEST_ROLLING BIT(6) /* rolling horizontal bar */
-+#define OV5640_TEST_TRANSPARENT BIT(5)
-+#define OV5640_TEST_SQUARE_BW BIT(4) /* black & white squares */
-+#define OV5640_TEST_BAR_STANDARD (0 << 2)
-+#define OV5640_TEST_BAR_VERT_CHANGE_1 (1 << 2)
-+#define OV5640_TEST_BAR_HOR_CHANGE (2 << 2)
-+#define OV5640_TEST_BAR_VERT_CHANGE_2 (3 << 2)
-+#define OV5640_TEST_BAR (0 << 0)
-+#define OV5640_TEST_RANDOM (1 << 0)
-+#define OV5640_TEST_SQUARE (2 << 0)
-+#define OV5640_TEST_BLACK (3 << 0)
-+
-+static const u8 test_pattern_val[] = {
-+ 0,
-+ OV5640_TEST_ENABLE | OV5640_TEST_BAR_VERT_CHANGE_1 |
-+ OV5640_TEST_BAR,
-+ OV5640_TEST_ENABLE | OV5640_TEST_ROLLING |
-+ OV5640_TEST_BAR_VERT_CHANGE_1 | OV5640_TEST_BAR,
-+ OV5640_TEST_ENABLE | OV5640_TEST_SQUARE,
-+ OV5640_TEST_ENABLE | OV5640_TEST_ROLLING | OV5640_TEST_SQUARE,
-+};
-+
-+static int ov5640_set_ctrl_test_pattern(struct ov5640_dev *sensor, int value)
-+{
-+ return ov5640_write_reg(sensor, OV5640_REG_PRE_ISP_TEST_SET1,
-+ test_pattern_val[value]);
-+}
-+
-+static int ov5640_set_ctrl_light_freq(struct ov5640_dev *sensor, int value)
-+{
-+ int ret;
-+
-+ ret = ov5640_mod_reg(sensor, OV5640_REG_HZ5060_CTRL01, BIT(7),
-+ (value == V4L2_CID_POWER_LINE_FREQUENCY_AUTO) ?
-+ 0 : BIT(7));
-+ if (ret)
-+ return ret;
-+
-+ return ov5640_mod_reg(sensor, OV5640_REG_HZ5060_CTRL00, BIT(2),
-+ (value == V4L2_CID_POWER_LINE_FREQUENCY_50HZ) ?
-+ BIT(2) : 0);
-+}
-+
-+static int ov5640_set_ctrl_hflip(struct ov5640_dev *sensor, int value)
-+{
-+ /*
-+ * If the sensor is mounted upside down, the mirror logic is inverted.
-+ *
-+ * The sensor is a BSI (Back Side Illuminated) one,
-+ * so the captured image is physically mirrored.
-+ * This is why the mirror logic is inverted in
-+ * order to cancel this mirror effect.
-+ */
-+
-+ /*
-+ * TIMING TC REG21:
-+ * - [2]: ISP mirror
-+ * - [1]: Sensor mirror
-+ */
-+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG21,
-+ BIT(2) | BIT(1),
-+ (!(value ^ sensor->upside_down)) ?
-+ (BIT(2) | BIT(1)) : 0);
-+}
-+
-+static int ov5640_set_ctrl_vflip(struct ov5640_dev *sensor, int value)
-+{
-+ /* If the sensor is mounted upside down, the flip logic is inverted */
-+
-+ /*
-+ * TIMING TC REG20:
-+ * - [2]: ISP vflip
-+ * - [1]: Sensor vflip
-+ */
-+ return ov5640_mod_reg(sensor, OV5640_REG_TIMING_TC_REG20,
-+ BIT(2) | BIT(1),
-+ (value ^ sensor->upside_down) ?
-+ (BIT(2) | BIT(1)) : 0);
-+}
-+
-+static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ int val;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_AUTOGAIN:
-+ val = ov5640_get_gain(sensor);
-+ if (val < 0)
-+ return val;
-+ sensor->ctrls.gain->val = val;
-+ break;
-+ case V4L2_CID_EXPOSURE_AUTO:
-+ val = ov5640_get_exposure(sensor);
-+ if (val < 0)
-+ return val;
-+ sensor->ctrls.exposure->val = val;
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ int ret;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ /*
-+ * If the device is not powered up by the host driver, do
-+ * not apply any controls to H/W at this time. Instead,
-+ * the controls will be restored right after power-up.
-+ */
-+ if (sensor->power_count == 0)
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_AUTOGAIN:
-+ ret = ov5640_set_ctrl_gain(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_EXPOSURE_AUTO:
-+ ret = ov5640_set_ctrl_exposure(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_AUTO_WHITE_BALANCE:
-+ ret = ov5640_set_ctrl_white_balance(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HUE:
-+ ret = ov5640_set_ctrl_hue(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_CONTRAST:
-+ ret = ov5640_set_ctrl_contrast(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_SATURATION:
-+ ret = ov5640_set_ctrl_saturation(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN:
-+ ret = ov5640_set_ctrl_test_pattern(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_POWER_LINE_FREQUENCY:
-+ ret = ov5640_set_ctrl_light_freq(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HFLIP:
-+ ret = ov5640_set_ctrl_hflip(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_VFLIP:
-+ ret = ov5640_set_ctrl_vflip(sensor, ctrl->val);
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ctrl_ops ov5640_ctrl_ops = {
-+ .g_volatile_ctrl = ov5640_g_volatile_ctrl,
-+ .s_ctrl = ov5640_s_ctrl,
-+};
-+
-+static int ov5640_init_controls(struct ov5640_dev *sensor)
-+{
-+ const struct v4l2_ctrl_ops *ops = &ov5640_ctrl_ops;
-+ struct ov5640_ctrls *ctrls = &sensor->ctrls;
-+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
-+ int ret;
-+
-+ v4l2_ctrl_handler_init(hdl, 32);
-+
-+ /* we can use our own mutex for the ctrl lock */
-+ hdl->lock = &sensor->lock;
-+
-+ /* Clock related controls */
-+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
-+ 0, INT_MAX, 1,
-+ ov5640_calc_pixel_rate(sensor));
-+
-+ /* Auto/manual white balance */
-+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
-+ V4L2_CID_AUTO_WHITE_BALANCE,
-+ 0, 1, 1, 1);
-+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
-+ 0, 4095, 1, 0);
-+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
-+ 0, 4095, 1, 0);
-+ /* Auto/manual exposure */
-+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_EXPOSURE_AUTO,
-+ V4L2_EXPOSURE_MANUAL, 0,
-+ V4L2_EXPOSURE_AUTO);
-+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
-+ 0, 65535, 1, 0);
-+ /* Auto/manual gain */
-+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
-+ 0, 1, 1, 1);
-+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
-+ 0, 1023, 1, 0);
-+
-+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
-+ 0, 255, 1, 64);
-+ ctrls->hue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HUE,
-+ 0, 359, 1, 0);
-+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST,
-+ 0, 255, 1, 0);
-+ ctrls->test_pattern =
-+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
-+ ARRAY_SIZE(test_pattern_menu) - 1,
-+ 0, 0, test_pattern_menu);
-+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
-+ 0, 1, 1, 0);
-+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
-+ 0, 1, 1, 0);
-+
-+ ctrls->light_freq =
-+ v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_POWER_LINE_FREQUENCY,
-+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
-+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
-+
-+ if (hdl->error) {
-+ ret = hdl->error;
-+ goto free_ctrls;
-+ }
-+
-+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+
-+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
-+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
-+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
-+
-+ sensor->sd.ctrl_handler = hdl;
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(hdl);
-+ return ret;
-+}
-+
-+static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ if (fse->pad != 0)
-+ return -EINVAL;
-+ if (fse->index >= OV5640_NUM_MODES)
-+ return -EINVAL;
-+
-+ fse->min_width =
-+ ov5640_mode_data[fse->index].hact;
-+ fse->max_width = fse->min_width;
-+ fse->min_height =
-+ ov5640_mode_data[fse->index].vact;
-+ fse->max_height = fse->min_height;
-+
-+ return 0;
-+}
-+
-+static int ov5640_enum_frame_interval(
-+ struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_interval_enum *fie)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ struct v4l2_fract tpf;
-+ int ret;
-+
-+ if (fie->pad != 0)
-+ return -EINVAL;
-+ if (fie->index >= OV5640_NUM_FRAMERATES)
-+ return -EINVAL;
-+
-+ tpf.numerator = 1;
-+ tpf.denominator = ov5640_framerates[fie->index];
-+
-+ ret = ov5640_try_frame_interval(sensor, &tpf,
-+ fie->width, fie->height);
-+ if (ret < 0)
-+ return -EINVAL;
-+
-+ fie->interval = tpf;
-+ return 0;
-+}
-+
-+static int ov5640_g_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+
-+ mutex_lock(&sensor->lock);
-+ fi->interval = sensor->frame_interval;
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ const struct ov5640_mode_info *mode;
-+ int frame_rate, ret = 0;
-+
-+ if (fi->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ mode = sensor->current_mode;
-+
-+ frame_rate = ov5640_try_frame_interval(sensor, &fi->interval,
-+ mode->hact, mode->vact);
-+ if (frame_rate < 0) {
-+ /* Always return a valid frame interval value */
-+ fi->interval = sensor->frame_interval;
-+ goto out;
-+ }
-+
-+ mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
-+ mode->vact, true);
-+ if (!mode) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (mode != sensor->current_mode ||
-+ frame_rate != sensor->current_fr) {
-+ sensor->current_fr = frame_rate;
-+ sensor->frame_interval = fi->interval;
-+ sensor->current_mode = mode;
-+ sensor->pending_mode_change = true;
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ ov5640_calc_pixel_rate(sensor));
-+ }
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov5640_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ if (code->pad != 0)
-+ return -EINVAL;
-+ if (code->index >= ARRAY_SIZE(ov5640_formats))
-+ return -EINVAL;
-+
-+ code->code = ov5640_formats[code->index].code;
-+ return 0;
-+}
-+
-+static int ov5640_s_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+ int ret = 0;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming == !enable) {
-+ if (enable && sensor->pending_mode_change) {
-+ ret = ov5640_set_mode(sensor);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ if (enable && sensor->pending_fmt_change) {
-+ ret = ov5640_set_framefmt(sensor, &sensor->fmt);
-+ if (ret)
-+ goto out;
-+ sensor->pending_fmt_change = false;
-+ }
-+
-+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY)
-+ ret = ov5640_set_stream_mipi(sensor, enable);
-+ else
-+ ret = ov5640_set_stream_dvp(sensor, enable);
-+
-+ if (ret)
-+ goto out;
-+ }
-+ sensor->streaming += enable ? 1 : -1;
-+ WARN_ON(sensor->streaming < 0);
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+int ov5640_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-+{
-+ *frames = OV5640_SKIP_FRAMES;
-+ return 0;
-+}
-+
-+static const struct v4l2_subdev_core_ops ov5640_core_ops = {
-+ .s_power = ov5640_s_power,
-+ .log_status = v4l2_ctrl_subdev_log_status,
-+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops ov5640_video_ops = {
-+ .g_frame_interval = ov5640_g_frame_interval,
-+ .s_frame_interval = ov5640_s_frame_interval,
-+ .s_stream = ov5640_s_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops ov5640_pad_ops = {
-+ .enum_mbus_code = ov5640_enum_mbus_code,
-+ .get_fmt = ov5640_get_fmt,
-+ .set_fmt = ov5640_set_fmt,
-+ .enum_frame_size = ov5640_enum_frame_size,
-+ .enum_frame_interval = ov5640_enum_frame_interval,
-+};
-+
-+static const struct v4l2_subdev_sensor_ops ov5640_sensor_ops = {
-+ .g_skip_frames = ov5640_skip_frames,
-+};
-+
-+static const struct v4l2_subdev_ops ov5640_subdev_ops = {
-+ .core = &ov5640_core_ops,
-+ .video = &ov5640_video_ops,
-+ .pad = &ov5640_pad_ops,
-+ .sensor = &ov5640_sensor_ops,
-+};
-+
-+static int ov5640_get_regulators(struct ov5640_dev *sensor)
-+{
-+ int i;
-+
-+ for (i = 0; i < OV5640_NUM_SUPPLIES; i++)
-+ sensor->supplies[i].supply = ov5640_supply_name[i];
-+
-+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
-+ OV5640_NUM_SUPPLIES,
-+ sensor->supplies);
-+}
-+
-+static int ov5640_check_chip_id(struct ov5640_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ int ret = 0;
-+ u16 chip_id;
-+
-+ ret = ov5640_set_power_on(sensor);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_read_reg16(sensor, OV5640_REG_CHIP_ID, &chip_id);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to read chip identifier\n",
-+ __func__);
-+ goto power_off;
-+ }
-+
-+ if (chip_id != OV5640_CHIP_ID) {
-+ dev_err(&client->dev, "%s: wrong chip identifier, expected 0x%x, got 0x%x\n",
-+ __func__, OV5640_CHIP_ID, chip_id);
-+ ret = -ENXIO;
-+ }
-+ dev_err(&client->dev, "%s: chip identifier, got 0x%x\n",
-+ __func__, chip_id);
-+
-+power_off:
-+ ov5640_set_power_off(sensor);
-+ return ret;
-+}
-+
-+static int ov5640_probe(struct i2c_client *client)
-+{
-+ struct device *dev = &client->dev;
-+ struct fwnode_handle *endpoint;
-+ struct ov5640_dev *sensor;
-+ struct v4l2_mbus_framefmt *fmt;
-+ u32 rotation;
-+ int ret;
-+
-+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
-+ if (!sensor)
-+ return -ENOMEM;
-+
-+ sensor->i2c_client = client;
-+
-+ /*
-+ * The default init sequence initializes the sensor to
-+ * YUV422 UYVY VGA@30fps.
-+ */
-+ fmt = &sensor->fmt;
-+ fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+ fmt->width = 640;
-+ fmt->height = 480;
-+ fmt->field = V4L2_FIELD_NONE;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator = ov5640_framerates[OV5640_30_FPS];
-+ sensor->current_fr = OV5640_30_FPS;
-+ sensor->current_mode =
-+ &ov5640_mode_data[OV5640_MODE_VGA_640_480];
-+ sensor->last_mode = sensor->current_mode;
-+
-+ sensor->ae_target = 52;
-+
-+ /* optional indication of physical rotation of sensor */
-+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
-+ &rotation);
-+ if (!ret) {
-+ switch (rotation) {
-+ case 180:
-+ sensor->upside_down = true;
-+ fallthrough;
-+ case 0:
-+ break;
-+ default:
-+ dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
-+ rotation);
-+ }
-+ }
-+
-+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
-+ NULL);
-+ if (!endpoint) {
-+ dev_err(dev, "endpoint node not found\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = v4l2_fwnode_endpoint_parse(endpoint, &sensor->ep);
-+ fwnode_handle_put(endpoint);
-+ if (ret) {
-+ dev_err(dev, "Could not parse endpoint\n");
-+ return ret;
-+ }
-+
-+ if (sensor->ep.bus_type != V4L2_MBUS_PARALLEL &&
-+ sensor->ep.bus_type != V4L2_MBUS_CSI2_DPHY &&
-+ sensor->ep.bus_type != V4L2_MBUS_BT656) {
-+ dev_err(dev, "Unsupported bus type %d\n", sensor->ep.bus_type);
-+ return -EINVAL;
-+ }
-+
-+ /* get system clock (xclk) */
-+ sensor->xclk = devm_clk_get(dev, "xclk");
-+ if (IS_ERR(sensor->xclk)) {
-+ dev_err(dev, "failed to get xclk\n");
-+ return PTR_ERR(sensor->xclk);
-+ }
-+
-+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
-+ if (sensor->xclk_freq < OV5640_XCLK_MIN ||
-+ sensor->xclk_freq > OV5640_XCLK_MAX) {
-+ dev_err(dev, "xclk frequency out of range: %d Hz\n",
-+ sensor->xclk_freq);
-+ return -EINVAL;
-+ }
-+
-+ /* request optional power down pin */
-+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->pwdn_gpio))
-+ return PTR_ERR(sensor->pwdn_gpio);
-+
-+ /* request optional reset pin */
-+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->reset_gpio))
-+ return PTR_ERR(sensor->reset_gpio);
-+
-+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
-+
-+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
-+ V4L2_SUBDEV_FL_HAS_EVENTS;
-+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
-+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
-+ if (ret)
-+ return ret;
-+
-+ ret = ov5640_get_regulators(sensor);
-+ if (ret)
-+ return ret;
-+
-+ mutex_init(&sensor->lock);
-+
-+ ret = ov5640_check_chip_id(sensor);
-+ if (ret)
-+ goto entity_cleanup;
-+
-+ ret = ov5640_init_controls(sensor);
-+ if (ret)
-+ goto entity_cleanup;
-+
-+ ret = v4l2_async_register_subdev_sensor(&sensor->sd);
-+ if (ret)
-+ goto free_ctrls;
-+
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+entity_cleanup:
-+ media_entity_cleanup(&sensor->sd.entity);
-+ mutex_destroy(&sensor->lock);
-+ return ret;
-+}
-+
-+static int ov5640_remove(struct i2c_client *client)
-+{
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct ov5640_dev *sensor = to_ov5640_dev(sd);
-+
-+ v4l2_async_unregister_subdev(&sensor->sd);
-+ media_entity_cleanup(&sensor->sd.entity);
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+ mutex_destroy(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static const struct i2c_device_id ov5640_id[] = {
-+ {"ov5640", 0},
-+ {},
-+};
-+MODULE_DEVICE_TABLE(i2c, ov5640_id);
-+
-+static const struct of_device_id ov5640_dt_ids[] = {
-+ { .compatible = "ovti,ov5640" },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, ov5640_dt_ids);
-+
-+static struct i2c_driver ov5640_i2c_driver = {
-+ .driver = {
-+ .name = "ov5640",
-+ .of_match_table = ov5640_dt_ids,
-+ },
-+ .id_table = ov5640_id,
-+ .probe_new = ov5640_probe,
-+ .remove = ov5640_remove,
-+};
-+
-+module_i2c_driver(ov5640_i2c_driver);
-+
-+MODULE_DESCRIPTION("OV5640 MIPI Camera Subdev Driver");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/sc2235.c
-@@ -0,0 +1,1914 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/clk.h>
-+#include <linux/clk-provider.h>
-+#include <linux/clkdev.h>
-+#include <linux/ctype.h>
-+#include <linux/delay.h>
-+#include <linux/device.h>
-+#include <linux/gpio/consumer.h>
-+#include <linux/i2c.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/of_device.h>
-+#include <linux/regulator/consumer.h>
-+#include <linux/slab.h>
-+#include <linux/types.h>
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+#include "stfcamss.h"
-+
-+/* min/typical/max system clock (xclk) frequencies */
-+#define SC2235_XCLK_MIN 6000000
-+#define SC2235_XCLK_MAX 27000000
-+
-+#define SC2235_CHIP_ID (0x2235)
-+
-+#define SC2235_REG_CHIP_ID 0x3107
-+#define SC2235_REG_AEC_PK_MANUAL 0x3e03
-+#define SC2235_REG_AEC_PK_EXPOSURE_HI 0x3e01
-+#define SC2235_REG_AEC_PK_EXPOSURE_LO 0x3e02
-+#define SC2235_REG_AEC_PK_REAL_GAIN 0x3e08
-+#define SC2235_REG_TIMING_HTS 0x320c
-+#define SC2235_REG_TIMING_VTS 0x320e
-+#define SC2235_REG_TEST_SET0 0x4501
-+#define SC2235_REG_TEST_SET1 0x3902
-+#define SC2235_REG_TIMING_TC_REG21 0x3221
-+#define SC2235_REG_SC_PLL_CTRL0 0x3039
-+#define SC2235_REG_SC_PLL_CTRL1 0x303a
-+#define SC2235_REG_STREAM_ON 0x0100
-+
-+enum sc2235_mode_id {
-+ SC2235_MODE_1080P_1920_1080 = 0,
-+ SC2235_NUM_MODES,
-+};
-+
-+enum sc2235_frame_rate {
-+ SC2235_15_FPS = 0,
-+ SC2235_30_FPS,
-+ SC2235_NUM_FRAMERATES,
-+};
-+
-+struct sc2235_pixfmt {
-+ u32 code;
-+ u32 colorspace;
-+};
-+
-+static const struct sc2235_pixfmt sc2235_formats[] = {
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB, },
-+};
-+
-+static const int sc2235_framerates[] = {
-+ [SC2235_15_FPS] = 15,
-+ [SC2235_30_FPS] = 30,
-+};
-+
-+/* regulator supplies */
-+static const char * const sc2235_supply_name[] = {
-+ "DOVDD", /* Digital I/O (1.8V) supply */
-+ "AVDD", /* Analog (2.8V) supply */
-+ "DVDD", /* Digital Core (1.5V) supply */
-+};
-+
-+#define SC2235_NUM_SUPPLIES ARRAY_SIZE(sc2235_supply_name)
-+
-+struct reg_value {
-+ u16 reg_addr;
-+ u8 val;
-+ u8 mask;
-+ u32 delay_ms;
-+};
-+
-+struct sc2235_mode_info {
-+ enum sc2235_mode_id id;
-+ u32 hact;
-+ u32 htot;
-+ u32 vact;
-+ u32 vtot;
-+ const struct reg_value *reg_data;
-+ u32 reg_data_size;
-+ u32 max_fps;
-+};
-+
-+struct sc2235_ctrls {
-+ struct v4l2_ctrl_handler handler;
-+ struct v4l2_ctrl *pixel_rate;
-+ struct {
-+ struct v4l2_ctrl *auto_exp;
-+ struct v4l2_ctrl *exposure;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_wb;
-+ struct v4l2_ctrl *blue_balance;
-+ struct v4l2_ctrl *red_balance;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_gain;
-+ struct v4l2_ctrl *gain;
-+ };
-+ struct v4l2_ctrl *brightness;
-+ struct v4l2_ctrl *light_freq;
-+ struct v4l2_ctrl *saturation;
-+ struct v4l2_ctrl *contrast;
-+ struct v4l2_ctrl *hue;
-+ struct v4l2_ctrl *test_pattern;
-+ struct v4l2_ctrl *hflip;
-+ struct v4l2_ctrl *vflip;
-+};
-+
-+struct sc2235_dev {
-+ struct i2c_client *i2c_client;
-+ struct v4l2_subdev sd;
-+ struct media_pad pad;
-+ struct v4l2_fwnode_endpoint ep; /* the parsed DT endpoint info */
-+ struct clk *xclk; /* system clock to SC2235 */
-+ u32 xclk_freq;
-+
-+ struct regulator_bulk_data supplies[SC2235_NUM_SUPPLIES];
-+ struct gpio_desc *reset_gpio;
-+ struct gpio_desc *pwdn_gpio;
-+ bool upside_down;
-+
-+ /* lock to protect all members below */
-+ struct mutex lock;
-+
-+ struct v4l2_mbus_framefmt fmt;
-+ bool pending_fmt_change;
-+
-+ const struct sc2235_mode_info *current_mode;
-+ const struct sc2235_mode_info *last_mode;
-+ enum sc2235_frame_rate current_fr;
-+ struct v4l2_fract frame_interval;
-+
-+ struct sc2235_ctrls ctrls;
-+
-+ bool pending_mode_change;
-+ int streaming;
-+};
-+
-+static inline struct sc2235_dev *to_sc2235_dev(struct v4l2_subdev *sd)
-+{
-+ return container_of(sd, struct sc2235_dev, sd);
-+}
-+
-+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-+{
-+ return &container_of(ctrl->handler, struct sc2235_dev,
-+ ctrls.handler)->sd;
-+}
-+
-+/* sc2235 initial register table, 30 fps */
-+static struct reg_value sc2235_init_regs_tbl_1080[] = {
-+ {0x0103, 0x01, 0, 50},
-+ {0x0100, 0x00, 0, 0},
-+ {0x3039, 0x80, 0, 0},
-+ {0x3621, 0x28, 0, 0},
-+
-+ {0x3309, 0x60, 0, 0},
-+ {0x331f, 0x4d, 0, 0},
-+ {0x3321, 0x4f, 0, 0},
-+ {0x33b5, 0x10, 0, 0},
-+
-+ {0x3303, 0x20, 0, 0},
-+ {0x331e, 0x0d, 0, 0},
-+ {0x3320, 0x0f, 0, 0},
-+
-+ {0x3622, 0x02, 0, 0},
-+ {0x3633, 0x42, 0, 0},
-+ {0x3634, 0x42, 0, 0},
-+
-+ {0x3306, 0x66, 0, 0},
-+ {0x330b, 0xd1, 0, 0},
-+
-+ {0x3301, 0x0e, 0, 0},
-+
-+ {0x320c, 0x08, 0, 0},
-+ {0x320d, 0x98, 0, 0},
-+
-+ {0x3364, 0x05, 0, 0}, // [2] 1: write at sampling ending
-+
-+ {0x363c, 0x28, 0, 0}, //bypass nvdd
-+ {0x363b, 0x0a, 0, 0}, //HVDD
-+ {0x3635, 0xa0, 0, 0}, //TXVDD
-+
-+ {0x4500, 0x59, 0, 0},
-+ {0x3d08, 0x00, 0, 0},
-+ {0x3908, 0x11, 0, 0},
-+
-+ {0x363c, 0x08, 0, 0},
-+
-+ {0x3e03, 0x03, 0, 0},
-+ {0x3e01, 0x46, 0, 0},
-+
-+ //0703
-+ {0x3381, 0x0a, 0, 0},
-+ {0x3348, 0x09, 0, 0},
-+ {0x3349, 0x50, 0, 0},
-+ {0x334a, 0x02, 0, 0},
-+ {0x334b, 0x60, 0, 0},
-+
-+ {0x3380, 0x04, 0, 0},
-+ {0x3340, 0x06, 0, 0},
-+ {0x3341, 0x50, 0, 0},
-+ {0x3342, 0x02, 0, 0},
-+ {0x3343, 0x60, 0, 0},
-+
-+ //0707
-+
-+ {0x3632, 0x88, 0, 0}, //anti sm
-+ {0x3309, 0xa0, 0, 0},
-+ {0x331f, 0x8d, 0, 0},
-+ {0x3321, 0x8f, 0, 0},
-+
-+ {0x335e, 0x01, 0, 0}, //ana dithering
-+ {0x335f, 0x03, 0, 0},
-+ {0x337c, 0x04, 0, 0},
-+ {0x337d, 0x06, 0, 0},
-+ {0x33a0, 0x05, 0, 0},
-+ {0x3301, 0x05, 0, 0},
-+
-+ {0x337f, 0x03, 0, 0},
-+ {0x3368, 0x02, 0, 0},
-+ {0x3369, 0x00, 0, 0},
-+ {0x336a, 0x00, 0, 0},
-+ {0x336b, 0x00, 0, 0},
-+ {0x3367, 0x08, 0, 0},
-+ {0x330e, 0x30, 0, 0},
-+
-+ {0x3366, 0x7c, 0, 0}, // div_rst gap
-+
-+ {0x3635, 0xc1, 0, 0},
-+ {0x363b, 0x09, 0, 0},
-+ {0x363c, 0x07, 0, 0},
-+
-+ {0x391e, 0x00, 0, 0},
-+
-+ {0x3637, 0x14, 0, 0}, //fullwell 7K
-+
-+ {0x3306, 0x54, 0, 0},
-+ {0x330b, 0xd8, 0, 0},
-+ {0x366e, 0x08, 0, 0}, // ofs auto en [3]
-+ {0x366f, 0x2f, 0, 0},
-+
-+ {0x3631, 0x84, 0, 0},
-+ {0x3630, 0x48, 0, 0},
-+ {0x3622, 0x06, 0, 0},
-+
-+ //ramp by sc
-+ {0x3638, 0x1f, 0, 0},
-+ {0x3625, 0x02, 0, 0},
-+ {0x3636, 0x24, 0, 0},
-+
-+ //0714
-+ {0x3348, 0x08, 0, 0},
-+ {0x3e03, 0x0b, 0, 0},
-+
-+ //7.17 fpn
-+ {0x3342, 0x03, 0, 0},
-+ {0x3343, 0xa0, 0, 0},
-+ {0x334a, 0x03, 0, 0},
-+ {0x334b, 0xa0, 0, 0},
-+
-+ //0718
-+ {0x3343, 0xb0, 0, 0},
-+ {0x334b, 0xb0, 0, 0},
-+
-+ //0720
-+ //digital ctrl
-+ {0x3802, 0x01, 0, 0},
-+ {0x3235, 0x04, 0, 0},
-+ {0x3236, 0x63, 0, 0}, // vts-2
-+
-+ //fpn
-+ {0x3343, 0xd0, 0, 0},
-+ {0x334b, 0xd0, 0, 0},
-+ {0x3348, 0x07, 0, 0},
-+ {0x3349, 0x80, 0, 0},
-+
-+ //0724
-+ {0x391b, 0x4d, 0, 0},
-+
-+ {0x3342, 0x04, 0, 0},
-+ {0x3343, 0x20, 0, 0},
-+ {0x334a, 0x04, 0, 0},
-+ {0x334b, 0x20, 0, 0},
-+
-+ //0804
-+ {0x3222, 0x29, 0, 0},
-+ {0x3901, 0x02, 0, 0},
-+
-+ //0808
-+
-+ // auto blc
-+ {0x3900, 0xD5, 0, 0}, // Bit[0]: blc_enable
-+ {0x3902, 0x45, 0, 0}, // Bit[6]: blc_auto_en
-+
-+ // blc target
-+ {0x3907, 0x00, 0, 0},
-+ {0x3908, 0x00, 0, 0},
-+
-+ // auto dpc
-+ {0x5000, 0x00, 0, 0}, // Bit[2]: white dead pixel cancel enable, Bit[1]: black dead pixel cancel enable
-+
-+ //digital ctrl
-+ {0x3f00, 0x07, 0, 0}, // bit[2] = 1
-+ {0x3f04, 0x08, 0, 0},
-+ {0x3f05, 0x74, 0, 0}, // hts - 0x24
-+
-+ //0809
-+ {0x330b, 0xc8, 0, 0},
-+
-+ //0817
-+ {0x3306, 0x4a, 0, 0},
-+ {0x330b, 0xca, 0, 0},
-+ {0x3639, 0x09, 0, 0},
-+
-+ //manual DPC
-+ {0x5780, 0xff, 0, 0},
-+ {0x5781, 0x04, 0, 0},
-+ {0x5785, 0x18, 0, 0},
-+
-+ //0822
-+ {0x3039, 0x35, 0, 0}, //fps
-+ {0x303a, 0x2e, 0, 0},
-+ {0x3034, 0x05, 0, 0},
-+ {0x3035, 0x2a, 0, 0},
-+
-+ {0x320c, 0x08, 0, 0},
-+ {0x320d, 0xca, 0, 0},
-+ {0x320e, 0x04, 0, 0},
-+ {0x320f, 0xb0, 0, 0},
-+
-+ {0x3f04, 0x08, 0, 0},
-+ {0x3f05, 0xa6, 0, 0}, // hts - 0x24
-+
-+ {0x3235, 0x04, 0, 0},
-+ {0x3236, 0xae, 0, 0}, // vts-2
-+
-+ //0825
-+ {0x3313, 0x05, 0, 0},
-+ {0x3678, 0x42, 0, 0},
-+
-+ //for AE control per frame
-+ {0x3670, 0x00, 0, 0},
-+ {0x3633, 0x42, 0, 0},
-+
-+ {0x3802, 0x00, 0, 0},
-+
-+ //20180126
-+ {0x3677, 0x3f, 0, 0},
-+ {0x3306, 0x44, 0, 0}, //20180126 [3c, 4a]
-+ {0x330b, 0xca, 0, 0}, //20180126 [c2, d3]
-+
-+ //20180202
-+ {0x3237, 0x08, 0, 0},
-+ {0x3238, 0x9a, 0, 0}, //hts-0x30
-+
-+ //20180417
-+ {0x3640, 0x01, 0, 0},
-+ {0x3641, 0x02, 0, 0},
-+
-+ {0x3301, 0x12, 0, 0}, //[8, 15] 20180126
-+ {0x3631, 0x84, 0, 0},
-+ {0x366f, 0x2f, 0, 0},
-+ {0x3622, 0xc6, 0, 0}, //20180117
-+
-+ {0x3e03, 0x03, 0, 0}, // Bit[3]: AGC table mapping method, Bit[1]: AGC manual, BIt[0]: AEC manual
-+
-+ // {0x0100, 0x00, 0, 0},
-+ // {0x4501, 0xc8, 0, 0}, //bar testing
-+ // {0x3902, 0x45, 0, 0},
-+};
-+
-+static struct reg_value sc2235_setting_1080P_1920_1080[] = {
-+
-+};
-+
-+/* power-on sensor init reg table */
-+static const struct sc2235_mode_info sc2235_mode_init_data = {
-+ SC2235_MODE_1080P_1920_1080,
-+ 1920, 0x8ca, 1080, 0x4b0,
-+ sc2235_init_regs_tbl_1080,
-+ ARRAY_SIZE(sc2235_init_regs_tbl_1080),
-+ SC2235_30_FPS,
-+};
-+
-+static const struct sc2235_mode_info
-+sc2235_mode_data[SC2235_NUM_MODES] = {
-+ {SC2235_MODE_1080P_1920_1080,
-+ 1920, 0x8ca, 1080, 0x4b0,
-+ sc2235_setting_1080P_1920_1080,
-+ ARRAY_SIZE(sc2235_setting_1080P_1920_1080),
-+ SC2235_30_FPS},
-+};
-+
-+static int sc2235_write_reg(struct sc2235_dev *sensor, u16 reg, u8 val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg;
-+ u8 buf[3];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+ buf[2] = val;
-+
-+ msg.addr = client->addr;
-+ msg.flags = client->flags;
-+ msg.buf = buf;
-+ msg.len = sizeof(buf);
-+
-+ ret = i2c_transfer(client->adapter, &msg, 1);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x, val=%x\n",
-+ __func__, reg, val);
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
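-+/*
-+ * Register reads are done with a two-message transfer: first write the
-+ * 16-bit register address (big-endian), then read back one data byte.
-+ */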
-+static int sc2235_read_reg(struct sc2235_dev *sensor, u16 reg, u8 *val)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ struct i2c_msg msg[2];
-+ u8 buf[2];
-+ int ret;
-+
-+ buf[0] = reg >> 8;
-+ buf[1] = reg & 0xff;
-+
-+ msg[0].addr = client->addr;
-+ msg[0].flags = client->flags;
-+ msg[0].buf = buf;
-+ msg[0].len = sizeof(buf);
-+
-+ msg[1].addr = client->addr;
-+ msg[1].flags = client->flags | I2C_M_RD;
-+ msg[1].buf = buf;
-+ msg[1].len = 1;
-+
-+ ret = i2c_transfer(client->adapter, msg, 2);
-+ if (ret < 0) {
-+ dev_err(&client->dev, "%s: error: reg=%x\n",
-+ __func__, reg);
-+ return ret;
-+ }
-+
-+ *val = buf[0];
-+ return 0;
-+}
-+
-+static int sc2235_read_reg16(struct sc2235_dev *sensor, u16 reg, u16 *val)
-+{
-+ u8 hi, lo;
-+ int ret;
-+
-+ ret = sc2235_read_reg(sensor, reg, &hi);
-+ if (ret)
-+ return ret;
-+ ret = sc2235_read_reg(sensor, reg + 1, &lo);
-+ if (ret)
-+ return ret;
-+
-+ *val = ((u16)hi << 8) | (u16)lo;
-+ return 0;
-+}
-+
-+static int sc2235_write_reg16(struct sc2235_dev *sensor, u16 reg, u16 val)
-+{
-+ int ret;
-+
-+ ret = sc2235_write_reg(sensor, reg, val >> 8);
-+ if (ret)
-+ return ret;
-+
-+ return sc2235_write_reg(sensor, reg + 1, val & 0xff);
-+}
-+
-+static int sc2235_mod_reg(struct sc2235_dev *sensor, u16 reg,
-+ u8 mask, u8 val)
-+{
-+ u8 readval;
-+ int ret;
-+
-+ ret = sc2235_read_reg(sensor, reg, &readval);
-+ if (ret)
-+ return ret;
-+
-+ readval &= ~mask;
-+ val &= mask;
-+ val |= readval;
-+
-+ return sc2235_write_reg(sensor, reg, val);
-+}
-+
-+#define SC2235_PLL_PREDIV 3
-+
-+#define SC2235_SYSDIV_MIN 0
-+#define SC2235_SYSDIV_MAX 7
-+
-+#define SC2235_PLL_MULT_MIN 0
-+#define SC2235_PLL_MULT_MAX 63
-+
-+#ifdef UNUSED_CODE
-+static unsigned long sc2235_compute_sys_clk(struct sc2235_dev *sensor,
-+ u8 pll_pre, u8 pll_mult,
-+ u8 sysdiv)
-+{
-+ unsigned long sysclk =
-+ sensor->xclk_freq * (64 - pll_mult) / (pll_pre * (sysdiv + 1));
-+
-+ /* PLL1 output cannot exceed 1GHz. */
-+ if (sysclk / 1000000 > 1000)
-+ return 0;
-+
-+ return sysclk;
-+}
-+
-+static unsigned long sc2235_calc_sys_clk(struct sc2235_dev *sensor,
-+ unsigned long rate,
-+ u8 *pll_prediv, u8 *pll_mult,
-+ u8 *sysdiv)
-+{
-+ unsigned long best = ~0;
-+ u8 best_sysdiv = 1, best_mult = 1;
-+ u8 _sysdiv, _pll_mult;
-+
-+ for (_sysdiv = SC2235_SYSDIV_MIN;
-+ _sysdiv <= SC2235_SYSDIV_MAX;
-+ _sysdiv++) {
-+ for (_pll_mult = SC2235_PLL_MULT_MIN;
-+ _pll_mult <= SC2235_PLL_MULT_MAX;
-+ _pll_mult++) {
-+ unsigned long _rate;
-+
-+ _rate = sc2235_compute_sys_clk(sensor,
-+ SC2235_PLL_PREDIV,
-+ _pll_mult, _sysdiv);
-+
-+ /*
-+ * We have reached the maximum allowed PLL1 output,
-+ * increase sysdiv.
-+ */
-+ if (!_rate)
-+ break;
-+
-+ /*
-+ * Prefer rates above the expected clock rate rather than below,
-+ * even if that means being less precise.
-+ */
-+ if (_rate < rate)
-+ continue;
-+
-+ if (abs(rate - _rate) < abs(rate - best)) {
-+ best = _rate;
-+ best_sysdiv = _sysdiv;
-+ best_mult = _pll_mult;
-+ }
-+
-+ if (_rate == rate)
-+ goto out;
-+ }
-+ }
-+
-+out:
-+ *sysdiv = best_sysdiv;
-+ *pll_prediv = SC2235_PLL_PREDIV;
-+ *pll_mult = best_mult;
-+
-+ return best;
-+}
-+#endif
-+
-+static int sc2235_set_timings(struct sc2235_dev *sensor,
-+ const struct sc2235_mode_info *mode)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int sc2235_load_regs(struct sc2235_dev *sensor,
-+ const struct sc2235_mode_info *mode)
-+{
-+ const struct reg_value *regs = mode->reg_data;
-+ unsigned int i;
-+ u32 delay_ms;
-+ u16 reg_addr;
-+ u8 mask, val;
-+ int ret = 0;
-+
-+ for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
-+ delay_ms = regs->delay_ms;
-+ reg_addr = regs->reg_addr;
-+ val = regs->val;
-+ mask = regs->mask;
-+
-+ if (mask)
-+ ret = sc2235_mod_reg(sensor, reg_addr, mask, val);
-+ else
-+ ret = sc2235_write_reg(sensor, reg_addr, val);
-+ if (ret)
-+ break;
-+
-+ if (delay_ms)
-+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
-+ }
-+
-+ return sc2235_set_timings(sensor, mode);
-+}
-+
-+static int sc2235_set_autoexposure(struct sc2235_dev *sensor, bool on)
-+{
-+ return sc2235_mod_reg(sensor, SC2235_REG_AEC_PK_MANUAL,
-+ BIT(0), on ? 0 : BIT(0));
-+}
-+
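-+/*
-+ * The exposure value programmed into 0x3e01/0x3e02 is kept left-shifted
-+ * by four bits; the get/set helpers below undo and reapply that shift.
-+ */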
-+static int sc2235_get_exposure(struct sc2235_dev *sensor)
-+{
-+ int exp = 0, ret = 0;
-+ u8 temp;
-+
-+ ret = sc2235_read_reg(sensor, SC2235_REG_AEC_PK_EXPOSURE_HI, &temp);
-+ if (ret)
-+ return ret;
-+ exp |= (int)temp << 8;
-+ ret = sc2235_read_reg(sensor, SC2235_REG_AEC_PK_EXPOSURE_LO, &temp);
-+ if (ret)
-+ return ret;
-+ exp |= (int)temp;
-+
-+ return exp >> 4;
-+}
-+
-+static int sc2235_set_exposure(struct sc2235_dev *sensor, u32 exposure)
-+{
-+ int ret;
-+
-+ exposure <<= 4;
-+
-+ ret = sc2235_write_reg(sensor,
-+ SC2235_REG_AEC_PK_EXPOSURE_LO,
-+ exposure & 0xff);
-+ if (ret)
-+ return ret;
-+ return sc2235_write_reg(sensor,
-+ SC2235_REG_AEC_PK_EXPOSURE_HI,
-+ (exposure >> 8) & 0xff);
-+}
-+
-+static int sc2235_get_gain(struct sc2235_dev *sensor)
-+{
-+ u16 gain;
-+ int ret;
-+
-+ ret = sc2235_read_reg16(sensor, SC2235_REG_AEC_PK_REAL_GAIN, &gain);
-+ if (ret)
-+ return ret;
-+
-+ return gain & 0x1fff;
-+}
-+
-+static int sc2235_set_gain(struct sc2235_dev *sensor, int gain)
-+{
-+ return sc2235_write_reg16(sensor, SC2235_REG_AEC_PK_REAL_GAIN,
-+ (u16)gain & 0x1fff);
-+}
-+
-+static int sc2235_set_autogain(struct sc2235_dev *sensor, bool on)
-+{
-+ return sc2235_mod_reg(sensor, SC2235_REG_AEC_PK_MANUAL,
-+ BIT(1), on ? 0 : BIT(1));
-+}
-+
-+#ifdef UNUSED_CODE
-+static int sc2235_get_sysclk(struct sc2235_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int sc2235_set_night_mode(struct sc2235_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int sc2235_get_hts(struct sc2235_dev *sensor)
-+{
-+ u16 hts;
-+ int ret;
-+
-+ ret = sc2235_read_reg16(sensor, SC2235_REG_TIMING_HTS, &hts);
-+ if (ret)
-+ return ret;
-+ return hts;
-+}
-+#endif
-+
-+static int sc2235_get_vts(struct sc2235_dev *sensor)
-+{
-+ u16 vts;
-+ int ret;
-+
-+ ret = sc2235_read_reg16(sensor, SC2235_REG_TIMING_VTS, &vts);
-+ if (ret)
-+ return ret;
-+ return vts;
-+}
-+
-+#ifdef UNUSED_CODE
-+static int sc2235_set_vts(struct sc2235_dev *sensor, int vts)
-+{
-+ return sc2235_write_reg16(sensor, SC2235_REG_TIMING_VTS, vts);
-+}
-+
-+static int sc2235_get_light_freq(struct sc2235_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int sc2235_set_bandingfilter(struct sc2235_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int sc2235_set_ae_target(struct sc2235_dev *sensor, int target)
-+{
-+ return 0;
-+}
-+
-+static int sc2235_get_binning(struct sc2235_dev *sensor)
-+{
-+ return 0;
-+}
-+
-+static int sc2235_set_binning(struct sc2235_dev *sensor, bool enable)
-+{
-+ return 0;
-+}
-+
-+#endif
-+
-+static const struct sc2235_mode_info *
-+sc2235_find_mode(struct sc2235_dev *sensor, enum sc2235_frame_rate fr,
-+ int width, int height, bool nearest)
-+{
-+ const struct sc2235_mode_info *mode;
-+
-+ mode = v4l2_find_nearest_size(sc2235_mode_data,
-+ ARRAY_SIZE(sc2235_mode_data),
-+ hact, vact,
-+ width, height);
-+
-+ if (!mode ||
-+ (!nearest && (mode->hact != width || mode->vact != height)))
-+ return NULL;
-+
-+ /* Reject the mode if the requested frame rate exceeds its maximum */
-+ if (sc2235_framerates[fr] > sc2235_framerates[mode->max_fps])
-+ return NULL;
-+
-+ return mode;
-+}
-+
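-+/* Pixel rate is the total frame size (htot * vtot) times the frame rate. */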
-+static u64 sc2235_calc_pixel_rate(struct sc2235_dev *sensor)
-+{
-+ u64 rate;
-+
-+ rate = sensor->current_mode->vtot * sensor->current_mode->htot;
-+ rate *= sc2235_framerates[sensor->current_fr];
-+
-+ return rate;
-+}
-+
-+#ifdef UNUSED_CODE
-+/*
-+ * sc2235_set_dvp_pclk() - Calculate the clock tree configuration values
-+ * for the dvp output.
-+ *
-+ * @rate: The requested bandwidth per lane in bytes per second.
-+ * 'Bandwidth Per Lane' is calculated as:
-+ * rate = HTOT * VTOT * FPS;
-+ *
-+ * This function uses the requested bandwidth to calculate:
-+ * - rate = xclk * (64 - M) / (N * (S + 1));
-+ *
-+ */
-+
-+#define PLL_PREDIV 1
-+#define PLL_SYSEL 0
-+
-+static int sc2235_set_dvp_pclk(struct sc2235_dev *sensor,
-+ unsigned long rate)
-+{
-+ u8 prediv, mult, sysdiv;
-+ int ret = 0;
-+
-+ sc2235_calc_sys_clk(sensor, rate, &prediv, &mult,
-+ &sysdiv);
-+
-+
-+ return ret;
-+}
-+
-+/*
-+ * if sensor changes inside scaling or subsampling
-+ * change mode directly
-+ */
-+static int sc2235_set_mode_direct(struct sc2235_dev *sensor,
-+ const struct sc2235_mode_info *mode)
-+{
-+ if (!mode->reg_data)
-+ return -EINVAL;
-+
-+ /* Write capture setting */
-+ return sc2235_load_regs(sensor, mode);
-+}
-+#endif
-+
-+static int sc2235_set_mode(struct sc2235_dev *sensor)
-+{
-+#ifdef UNUSED_CODE
-+ bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO;
-+ const struct sc2235_mode_info *mode = sensor->current_mode;
-+#endif
-+ bool auto_gain = sensor->ctrls.auto_gain->val == 1;
-+ int ret = 0;
-+
-+ /* auto gain and exposure must be turned off when changing modes */
-+ if (auto_gain) {
-+ ret = sc2235_set_autogain(sensor, false);
-+ if (ret)
-+ return ret;
-+ }
-+#ifdef UNUSED_CODE
-+ /* This issue will be addressed on the EVB board. */
-+ /* This action results in poor image display (2021-11-11). */
-+ if (auto_exp) {
-+ ret = sc2235_set_autoexposure(sensor, false);
-+ if (ret)
-+ goto restore_auto_gain;
-+ }
-+
-+ rate = sc2235_calc_pixel_rate(sensor);
-+
-+ ret = sc2235_set_dvp_pclk(sensor, rate);
-+ if (ret < 0)
-+ return 0;
-+
-+ ret = sc2235_set_mode_direct(sensor, mode);
-+ if (ret < 0)
-+ goto restore_auto_exp_gain;
-+
-+ /* restore auto gain and exposure */
-+ if (auto_gain)
-+ sc2235_set_autogain(sensor, true);
-+ if (auto_exp)
-+ sc2235_set_autoexposure(sensor, true);
-+
-+
-+ sensor->pending_mode_change = false;
-+ sensor->last_mode = mode;
-+ return 0;
-+
-+restore_auto_exp_gain:
-+ if (auto_exp)
-+ sc2235_set_autoexposure(sensor, true);
-+restore_auto_gain:
-+ if (auto_gain)
-+ sc2235_set_autogain(sensor, true);
-+#endif
-+ return ret;
-+}
-+
-+static int sc2235_set_framefmt(struct sc2235_dev *sensor,
-+ struct v4l2_mbus_framefmt *format);
-+
-+/* restore the last set video mode after chip power-on */
-+static int sc2235_restore_mode(struct sc2235_dev *sensor)
-+{
-+ int ret;
-+
-+ /* first load the initial register values */
-+ ret = sc2235_load_regs(sensor, &sc2235_mode_init_data);
-+ if (ret < 0)
-+ return ret;
-+ sensor->last_mode = &sc2235_mode_init_data;
-+ /* now restore the last capture mode */
-+ ret = sc2235_set_mode(sensor);
-+ if (ret < 0)
-+ return ret;
-+
-+ return sc2235_set_framefmt(sensor, &sensor->fmt);
-+}
-+
-+static void sc2235_power(struct sc2235_dev *sensor, bool enable)
-+{
-+ if (!sensor->pwdn_gpio)
-+ return;
-+ gpiod_set_value_cansleep(sensor->pwdn_gpio, enable ? 0 : 1);
-+}
-+
-+static void sc2235_reset(struct sc2235_dev *sensor)
-+{
-+ if (!sensor->reset_gpio)
-+ return;
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
-+
-+ /* camera power cycle */
-+ sc2235_power(sensor, false);
-+ usleep_range(5000, 10000);
-+ sc2235_power(sensor, true);
-+ usleep_range(5000, 10000);
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
-+ usleep_range(1000, 2000);
-+
-+ gpiod_set_value_cansleep(sensor->reset_gpio, 0);
-+ usleep_range(20000, 25000);
-+}
-+
-+static int sc2235_set_power_on(struct device *dev)
-+{
-+ struct i2c_client *client = to_i2c_client(dev);
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ int ret;
-+
-+ ret = clk_prepare_enable(sensor->xclk);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable clock\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ ret = regulator_bulk_enable(SC2235_NUM_SUPPLIES,
-+ sensor->supplies);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to enable regulators\n",
-+ __func__);
-+ goto xclk_off;
-+ }
-+
-+ sc2235_reset(sensor);
-+ sc2235_power(sensor, true);
-+
-+ return 0;
-+
-+xclk_off:
-+ clk_disable_unprepare(sensor->xclk);
-+ return ret;
-+}
-+
-+static int sc2235_set_power_off(struct device *dev)
-+{
-+ struct i2c_client *client = to_i2c_client(dev);
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+
-+ sc2235_power(sensor, false);
-+ regulator_bulk_disable(SC2235_NUM_SUPPLIES, sensor->supplies);
-+ clk_disable_unprepare(sensor->xclk);
-+
-+ return 0;
-+}
-+
-+static int sc2235_set_power(struct sc2235_dev *sensor, bool on)
-+{
-+ int ret = 0;
-+
-+ if (on) {
-+ pm_runtime_get_sync(&sensor->i2c_client->dev);
-+
-+ ret = sc2235_restore_mode(sensor);
-+ if (ret)
-+ goto power_off;
-+ }
-+
-+ if (!on)
-+ pm_runtime_put_sync(&sensor->i2c_client->dev);
-+
-+ return 0;
-+
-+power_off:
-+ pm_runtime_put_sync(&sensor->i2c_client->dev);
-+
-+ return ret;
-+}
-+
-+static int sc2235_s_power(struct v4l2_subdev *sd, int on)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ int ret = 0;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ ret = sc2235_set_power(sensor, !!on);
-+ if (ret)
-+ goto out;
-+
-+ mutex_unlock(&sensor->lock);
-+ return 0;
-+
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
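-+/*
-+ * Snap the requested frame interval to the nearest supported rate
-+ * (15 or 30 fps) and check that a matching mode exists. Returns the
-+ * chosen frame rate index or -EINVAL.
-+ */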
-+static int sc2235_try_frame_interval(struct sc2235_dev *sensor,
-+ struct v4l2_fract *fi,
-+ u32 width, u32 height)
-+{
-+ const struct sc2235_mode_info *mode;
-+ enum sc2235_frame_rate rate = SC2235_15_FPS;
-+ int minfps, maxfps, best_fps, fps;
-+ int i;
-+
-+ minfps = sc2235_framerates[SC2235_15_FPS];
-+ maxfps = sc2235_framerates[SC2235_30_FPS];
-+
-+ if (fi->numerator == 0) {
-+ fi->denominator = maxfps;
-+ fi->numerator = 1;
-+ rate = SC2235_30_FPS;
-+ goto find_mode;
-+ }
-+
-+ fps = clamp_val(DIV_ROUND_CLOSEST(fi->denominator, fi->numerator),
-+ minfps, maxfps);
-+
-+ best_fps = minfps;
-+ for (i = 0; i < ARRAY_SIZE(sc2235_framerates); i++) {
-+ int curr_fps = sc2235_framerates[i];
-+
-+ if (abs(curr_fps - fps) < abs(best_fps - fps)) {
-+ best_fps = curr_fps;
-+ rate = i;
-+ }
-+ }
-+
-+ fi->numerator = 1;
-+ fi->denominator = best_fps;
-+
-+find_mode:
-+ mode = sc2235_find_mode(sensor, rate, width, height, false);
-+ return mode ? rate : -EINVAL;
-+}
-+
-+static int sc2235_get_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(&sensor->sd, state,
-+ format->pad);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ format->format = *fmt;
-+
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int sc2235_try_fmt_internal(struct v4l2_subdev *sd,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum sc2235_frame_rate fr,
-+ const struct sc2235_mode_info **new_mode)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ const struct sc2235_mode_info *mode;
-+ int i;
-+
-+ mode = sc2235_find_mode(sensor, fr, fmt->width, fmt->height, true);
-+ if (!mode)
-+ return -EINVAL;
-+ fmt->width = mode->hact;
-+ fmt->height = mode->vact;
-+
-+ if (new_mode)
-+ *new_mode = mode;
-+
-+ for (i = 0; i < ARRAY_SIZE(sc2235_formats); i++)
-+ if (sc2235_formats[i].code == fmt->code)
-+ break;
-+ if (i >= ARRAY_SIZE(sc2235_formats))
-+ i = 0;
-+
-+ fmt->code = sc2235_formats[i].code;
-+ fmt->colorspace = sc2235_formats[i].colorspace;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+
-+ return 0;
-+}
-+
-+static int sc2235_set_fmt(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *format)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ const struct sc2235_mode_info *new_mode;
-+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
-+ struct v4l2_mbus_framefmt *fmt;
-+ int ret;
-+
-+ if (format->pad != 0)
-+ return -EINVAL;
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ ret = sc2235_try_fmt_internal(sd, mbus_fmt, 0, &new_mode);
-+ if (ret)
-+ goto out;
-+
-+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
-+ fmt = v4l2_subdev_get_try_format(sd, state, 0);
-+ else
-+ fmt = &sensor->fmt;
-+
-+ if (mbus_fmt->code != sensor->fmt.code)
-+ sensor->pending_fmt_change = true;
-+
-+ *fmt = *mbus_fmt;
-+
-+ if (new_mode != sensor->current_mode) {
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+ if (new_mode->max_fps < sensor->current_fr) {
-+ sensor->current_fr = new_mode->max_fps;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator =
-+ sc2235_framerates[sensor->current_fr];
-+ sensor->current_mode = new_mode;
-+ sensor->pending_mode_change = true;
-+ }
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ sc2235_calc_pixel_rate(sensor));
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int sc2235_set_framefmt(struct sc2235_dev *sensor,
-+ struct v4l2_mbus_framefmt *format)
-+{
-+ int ret = 0;
-+
-+ switch (format->code) {
-+ default:
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+/*
-+ * Sensor Controls.
-+ */
-+
-+static int sc2235_set_ctrl_hue(struct sc2235_dev *sensor, int value)
-+{
-+ int ret = 0;
-+ return ret;
-+}
-+
-+static int sc2235_set_ctrl_contrast(struct sc2235_dev *sensor, int value)
-+{
-+ int ret = 0;
-+ return ret;
-+}
-+
-+static int sc2235_set_ctrl_saturation(struct sc2235_dev *sensor, int value)
-+{
-+ int ret = 0;
-+ return ret;
-+}
-+
-+static int sc2235_set_ctrl_white_balance(struct sc2235_dev *sensor, int awb)
-+{
-+ int ret = 0;
-+ return ret;
-+}
-+
-+static int sc2235_set_ctrl_exposure(struct sc2235_dev *sensor,
-+ enum v4l2_exposure_auto_type auto_exposure)
-+{
-+ struct sc2235_ctrls *ctrls = &sensor->ctrls;
-+ bool auto_exp = (auto_exposure == V4L2_EXPOSURE_AUTO);
-+ int ret = 0;
-+
-+ if (ctrls->auto_exp->is_new) {
-+ ret = sc2235_set_autoexposure(sensor, auto_exp);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (!auto_exp && ctrls->exposure->is_new) {
-+ u16 max_exp = 0;
-+
-+ ret = sc2235_get_vts(sensor);
-+ if (ret < 0)
-+ return ret;
-+ max_exp += ret - 4;
-+ ret = 0;
-+
-+ if (ctrls->exposure->val < max_exp)
-+ ret = sc2235_set_exposure(sensor, ctrls->exposure->val);
-+ }
-+
-+ return ret;
-+}
-+
-+static int sc2235_set_ctrl_gain(struct sc2235_dev *sensor, bool auto_gain)
-+{
-+ struct sc2235_ctrls *ctrls = &sensor->ctrls;
-+ int ret = 0;
-+
-+ if (ctrls->auto_gain->is_new) {
-+ ret = sc2235_set_autogain(sensor, auto_gain);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ if (!auto_gain && ctrls->gain->is_new)
-+ ret = sc2235_set_gain(sensor, ctrls->gain->val);
-+
-+ return ret;
-+}
-+
-+static const char * const test_pattern_menu[] = {
-+ "Disabled",
-+ "Black bars",
-+ "Auto Black bars",
-+};
-+
-+#define SC2235_TEST_ENABLE BIT(3)
-+#define SC2235_TEST_BLACK (3 << 0)
-+
-+static int sc2235_set_ctrl_test_pattern(struct sc2235_dev *sensor, int value)
-+{
-+ int ret = 0;
-+ /*
-+ * For the 7110 platform, refer to the 1125 FW code configuration:
-+ * this operation causes the image to go white, so it is left disabled.
-+ */
-+#ifdef UNUSED_CODE
-+ ret = sc2235_mod_reg(sensor, SC2235_REG_TEST_SET0, BIT(3),
-+ !!value << 3);
-+
-+ ret |= sc2235_mod_reg(sensor, SC2235_REG_TEST_SET1, BIT(6),
-+ (value >> 1) << 6);
-+#endif
-+ return ret;
-+}
-+
-+static int sc2235_set_ctrl_light_freq(struct sc2235_dev *sensor, int value)
-+{
-+ return 0;
-+}
-+
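-+/*
-+ * Mirror and flip are controlled by register 0x3221: bits [2:1] select
-+ * horizontal mirror and bits [6:5] vertical flip. The sensor's physical
-+ * mounting (upside_down) is folded into the requested value.
-+ */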
-+static int sc2235_set_ctrl_hflip(struct sc2235_dev *sensor, int value)
-+{
-+ return sc2235_mod_reg(sensor, SC2235_REG_TIMING_TC_REG21,
-+ BIT(2) | BIT(1),
-+ (!(value ^ sensor->upside_down)) ?
-+ (BIT(2) | BIT(1)) : 0);
-+}
-+
-+static int sc2235_set_ctrl_vflip(struct sc2235_dev *sensor, int value)
-+{
-+ return sc2235_mod_reg(sensor, SC2235_REG_TIMING_TC_REG21,
-+ BIT(6) | BIT(5),
-+ (value ^ sensor->upside_down) ?
-+ (BIT(6) | BIT(5)) : 0);
-+}
-+
-+static int sc2235_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ int val;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_AUTOGAIN:
-+ val = sc2235_get_gain(sensor);
-+ if (val < 0)
-+ return val;
-+ sensor->ctrls.gain->val = val;
-+ break;
-+ case V4L2_CID_EXPOSURE_AUTO:
-+ val = sc2235_get_exposure(sensor);
-+ if (val < 0)
-+ return val;
-+ sensor->ctrls.exposure->val = val;
-+ break;
-+ }
-+
-+ pm_runtime_put(&sensor->i2c_client->dev);
-+
-+ return 0;
-+}
-+
-+static int sc2235_s_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ int ret;
-+
-+ /* v4l2_ctrl_lock() locks our own mutex */
-+
-+ /*
-+ * If the device is not powered up by the host driver do
-+ * not apply any controls to H/W at this time. Instead
-+ * the controls will be restored at start streaming time.
-+ */
-+ if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev))
-+ return 0;
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_AUTOGAIN:
-+ ret = sc2235_set_ctrl_gain(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_EXPOSURE_AUTO:
-+ ret = sc2235_set_ctrl_exposure(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_AUTO_WHITE_BALANCE:
-+ ret = sc2235_set_ctrl_white_balance(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HUE:
-+ ret = sc2235_set_ctrl_hue(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_CONTRAST:
-+ ret = sc2235_set_ctrl_contrast(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_SATURATION:
-+ ret = sc2235_set_ctrl_saturation(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN:
-+ ret = sc2235_set_ctrl_test_pattern(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_POWER_LINE_FREQUENCY:
-+ ret = sc2235_set_ctrl_light_freq(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_HFLIP:
-+ ret = sc2235_set_ctrl_hflip(sensor, ctrl->val);
-+ break;
-+ case V4L2_CID_VFLIP:
-+ ret = sc2235_set_ctrl_vflip(sensor, ctrl->val);
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ pm_runtime_put(&sensor->i2c_client->dev);
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ctrl_ops sc2235_ctrl_ops = {
-+ .g_volatile_ctrl = sc2235_g_volatile_ctrl,
-+ .s_ctrl = sc2235_s_ctrl,
-+};
-+
-+static int sc2235_init_controls(struct sc2235_dev *sensor)
-+{
-+ const struct v4l2_ctrl_ops *ops = &sc2235_ctrl_ops;
-+ struct sc2235_ctrls *ctrls = &sensor->ctrls;
-+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
-+ int ret;
-+
-+ v4l2_ctrl_handler_init(hdl, 32);
-+
-+ /* we can use our own mutex for the ctrl lock */
-+ hdl->lock = &sensor->lock;
-+
-+ /* Clock related controls */
-+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
-+ 0, INT_MAX, 1,
-+ sc2235_calc_pixel_rate(sensor));
-+
-+ /* Auto/manual white balance */
-+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
-+ V4L2_CID_AUTO_WHITE_BALANCE,
-+ 0, 1, 1, 1);
-+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
-+ 0, 4095, 1, 0);
-+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
-+ 0, 4095, 1, 0);
-+ /* Auto/manual exposure */
-+#ifdef UNUSED_CODE
-+ /*
-+ * For the 7110 platform, this operation causes the image to go white.
-+ */
-+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_EXPOSURE_AUTO,
-+ V4L2_EXPOSURE_MANUAL, 0,
-+ V4L2_EXPOSURE_AUTO);
-+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
-+ 0, 65535, 1, 0);
-+ /* Auto/manual gain */
-+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
-+ 0, 1, 1, 1);
-+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
-+ 0, 1023, 1, 0);
-+#else
-+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_EXPOSURE_AUTO,
-+ V4L2_EXPOSURE_MANUAL, 0,
-+ 1);
-+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
-+ 0, 65535, 1, 720);
-+ /* Auto/manual gain */
-+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
-+ 0, 1, 1, 0);
-+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
-+ 0, 1023, 1, 0x10);
-+#endif
-+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
-+ 0, 255, 1, 64);
-+ ctrls->hue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HUE,
-+ 0, 359, 1, 0);
-+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST,
-+ 0, 255, 1, 0);
-+ ctrls->test_pattern =
-+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
-+ ARRAY_SIZE(test_pattern_menu) - 1,
-+ 0, 0, test_pattern_menu); //0x02
-+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
-+ 0, 1, 1, 1);
-+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
-+ 0, 1, 1, 0);
-+
-+ ctrls->light_freq =
-+ v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_POWER_LINE_FREQUENCY,
-+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
-+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
-+
-+ if (hdl->error) {
-+ ret = hdl->error;
-+ goto free_ctrls;
-+ }
-+
-+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+
-+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
-+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
-+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
-+
-+ sensor->sd.ctrl_handler = hdl;
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(hdl);
-+ return ret;
-+}
-+
-+static int sc2235_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ if (fse->pad != 0)
-+ return -EINVAL;
-+ if (fse->index >= SC2235_NUM_MODES)
-+ return -EINVAL;
-+
-+ fse->min_width =
-+ sc2235_mode_data[fse->index].hact;
-+ fse->max_width = fse->min_width;
-+ fse->min_height =
-+ sc2235_mode_data[fse->index].vact;
-+ fse->max_height = fse->min_height;
-+
-+ return 0;
-+}
-+
-+static int sc2235_enum_frame_interval(
-+ struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_interval_enum *fie)
-+{
-+ struct v4l2_fract tpf;
-+ int i;
-+
-+ if (fie->pad != 0)
-+ return -EINVAL;
-+
-+ if (fie->index >= SC2235_NUM_FRAMERATES)
-+ return -EINVAL;
-+
-+ tpf.numerator = 1;
-+ tpf.denominator = sc2235_framerates[fie->index];
-+
-+ for (i = 0; i < SC2235_NUM_MODES; i++) {
-+ if (fie->width == sc2235_mode_data[i].hact &&
-+ fie->height == sc2235_mode_data[i].vact)
-+ break;
-+ }
-+ if (i == SC2235_NUM_MODES)
-+ return -ENOTTY;
-+
-+ fie->interval = tpf;
-+ return 0;
-+}
-+
-+static int sc2235_g_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+
-+ mutex_lock(&sensor->lock);
-+ fi->interval = sensor->frame_interval;
-+ mutex_unlock(&sensor->lock);
-+
-+ return 0;
-+}
-+
-+static int sc2235_s_frame_interval(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_frame_interval *fi)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ const struct sc2235_mode_info *mode;
-+ int frame_rate, ret = 0;
-+
-+ if (fi->pad != 0)
-+ return -EINVAL;
-+
-+ mutex_lock(&sensor->lock);
-+
-+ if (sensor->streaming) {
-+ ret = -EBUSY;
-+ goto out;
-+ }
-+
-+ mode = sensor->current_mode;
-+
-+ frame_rate = sc2235_try_frame_interval(sensor, &fi->interval,
-+ mode->hact, mode->vact);
-+ if (frame_rate < 0) {
-+ /* Always return a valid frame interval value */
-+ fi->interval = sensor->frame_interval;
-+ goto out;
-+ }
-+
-+ mode = sc2235_find_mode(sensor, frame_rate, mode->hact,
-+ mode->vact, true);
-+ if (!mode) {
-+ ret = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (mode != sensor->current_mode ||
-+ frame_rate != sensor->current_fr) {
-+ sensor->current_fr = frame_rate;
-+ sensor->frame_interval = fi->interval;
-+ sensor->current_mode = mode;
-+ sensor->pending_mode_change = true;
-+
-+ __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
-+ sc2235_calc_pixel_rate(sensor));
-+ }
-+out:
-+ mutex_unlock(&sensor->lock);
-+ return ret;
-+}
-+
-+static int sc2235_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ if (code->pad != 0)
-+ return -EINVAL;
-+ if (code->index >= ARRAY_SIZE(sc2235_formats))
-+ return -EINVAL;
-+
-+ code->code = sc2235_formats[code->index].code;
-+ return 0;
-+}
-+
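-+/* Streaming is started/stopped through bit 0 of register 0x0100. */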
-+static int sc2235_stream_start(struct sc2235_dev *sensor, int enable)
-+{
-+ return sc2235_mod_reg(sensor, SC2235_REG_STREAM_ON, BIT(0), !!enable);
-+}
-+
-+static int sc2235_s_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+ int ret = 0;
-+
-+ if (enable) {
-+ ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
-+ if (ret)
-+ return ret;
-+ }
-+
-+ mutex_lock(&sensor->lock);
-+
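-+ /*
-+ * streaming is a use count: the hardware is only programmed on the
-+ * first enable (count 0 -> 1) and the last disable (count 1 -> 0).
-+ */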
-+ if (sensor->streaming == !enable) {
-+ if (enable && sensor->pending_mode_change) {
-+ ret = sc2235_set_mode(sensor);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ if (enable && sensor->pending_fmt_change) {
-+ ret = sc2235_set_framefmt(sensor, &sensor->fmt);
-+ if (ret)
-+ goto out;
-+ sensor->pending_fmt_change = false;
-+ }
-+
-+ ret = sc2235_stream_start(sensor, enable);
-+ if (ret)
-+ goto out;
-+ }
-+ sensor->streaming += enable ? 1 : -1;
-+ WARN_ON(sensor->streaming < 0);
-+out:
-+ mutex_unlock(&sensor->lock);
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_subdev_core_ops sc2235_core_ops = {
-+ .s_power = sc2235_s_power,
-+ .log_status = v4l2_ctrl_subdev_log_status,
-+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops sc2235_video_ops = {
-+ .g_frame_interval = sc2235_g_frame_interval,
-+ .s_frame_interval = sc2235_s_frame_interval,
-+ .s_stream = sc2235_s_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops sc2235_pad_ops = {
-+ .enum_mbus_code = sc2235_enum_mbus_code,
-+ .get_fmt = sc2235_get_fmt,
-+ .set_fmt = sc2235_set_fmt,
-+ .enum_frame_size = sc2235_enum_frame_size,
-+ .enum_frame_interval = sc2235_enum_frame_interval,
-+};
-+
-+static const struct v4l2_subdev_ops sc2235_subdev_ops = {
-+ .core = &sc2235_core_ops,
-+ .video = &sc2235_video_ops,
-+ .pad = &sc2235_pad_ops,
-+};
-+
-+static int sc2235_get_regulators(struct sc2235_dev *sensor)
-+{
-+ int i;
-+
-+ for (i = 0; i < SC2235_NUM_SUPPLIES; i++)
-+ sensor->supplies[i].supply = sc2235_supply_name[i];
-+
-+ return devm_regulator_bulk_get(&sensor->i2c_client->dev,
-+ SC2235_NUM_SUPPLIES,
-+ sensor->supplies);
-+}
-+
-+static int sc2235_check_chip_id(struct sc2235_dev *sensor)
-+{
-+ struct i2c_client *client = sensor->i2c_client;
-+ int ret = 0;
-+ u16 chip_id;
-+
-+ ret = sc2235_read_reg16(sensor, SC2235_REG_CHIP_ID, &chip_id);
-+ if (ret) {
-+ dev_err(&client->dev, "%s: failed to read chip identifier\n",
-+ __func__);
-+ return ret;
-+ }
-+
-+ if (chip_id != SC2235_CHIP_ID) {
-+ dev_err(&client->dev, "%s: wrong chip identifier, expected 0x%x, got 0x%x\n",
-+ __func__, SC2235_CHIP_ID, chip_id);
-+ return -ENXIO;
-+ }
-+ dev_err(&client->dev, "%s: chip identifier, got 0x%x\n",
-+ __func__, chip_id);
-+
-+ return 0;
-+}
-+
-+static int sc2235_probe(struct i2c_client *client)
-+{
-+ struct device *dev = &client->dev;
-+ struct fwnode_handle *endpoint;
-+ struct sc2235_dev *sensor;
-+ struct v4l2_mbus_framefmt *fmt;
-+ u32 rotation;
-+ int ret;
-+
-+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
-+ if (!sensor)
-+ return -ENOMEM;
-+
-+ sensor->i2c_client = client;
-+
-+ fmt = &sensor->fmt;
-+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
-+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-+ fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-+ fmt->width = 1920;
-+ fmt->height = 1080;
-+ fmt->field = V4L2_FIELD_NONE;
-+ sensor->frame_interval.numerator = 1;
-+ sensor->frame_interval.denominator = sc2235_framerates[SC2235_30_FPS];
-+ sensor->current_fr = SC2235_30_FPS;
-+ sensor->current_mode =
-+ &sc2235_mode_data[SC2235_MODE_1080P_1920_1080];
-+ sensor->last_mode = sensor->current_mode;
-+
-+ /* optional indication of physical rotation of sensor */
-+ ret = fwnode_property_read_u32(dev_fwnode(&client->dev), "rotation",
-+ &rotation);
-+ if (!ret) {
-+ switch (rotation) {
-+ case 180:
-+ sensor->upside_down = true;
-+ fallthrough;
-+ case 0:
-+ break;
-+ default:
-+ dev_warn(dev, "%u degrees rotation is not supported, ignoring...\n",
-+ rotation);
-+ }
-+ }
-+
-+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev),
-+ NULL);
-+ if (!endpoint) {
-+ dev_err(dev, "endpoint node not found\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = v4l2_fwnode_endpoint_parse(endpoint, &sensor->ep);
-+ fwnode_handle_put(endpoint);
-+ if (ret) {
-+ dev_err(dev, "Could not parse endpoint\n");
-+ return ret;
-+ }
-+
-+ if (sensor->ep.bus_type != V4L2_MBUS_PARALLEL) {
-+ dev_err(dev, "Unsupported bus type %d\n", sensor->ep.bus_type);
-+ return -EINVAL;
-+ }
-+
-+ /* get system clock (xclk) */
-+ sensor->xclk = devm_clk_get(dev, "xclk");
-+ if (IS_ERR(sensor->xclk)) {
-+ dev_err(dev, "failed to get xclk\n");
-+ return PTR_ERR(sensor->xclk);
-+ }
-+
-+ sensor->xclk_freq = clk_get_rate(sensor->xclk);
-+ if (sensor->xclk_freq < SC2235_XCLK_MIN ||
-+ sensor->xclk_freq > SC2235_XCLK_MAX) {
-+ dev_err(dev, "xclk frequency out of range: %d Hz\n",
-+ sensor->xclk_freq);
-+ return -EINVAL;
-+ }
-+
-+ sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->pwdn_gpio))
-+ return PTR_ERR(sensor->pwdn_gpio);
-+
-+ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
-+ GPIOD_OUT_HIGH);
-+ if (IS_ERR(sensor->reset_gpio))
-+ return PTR_ERR(sensor->reset_gpio);
-+
-+ v4l2_i2c_subdev_init(&sensor->sd, client, &sc2235_subdev_ops);
-+
-+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
-+ V4L2_SUBDEV_FL_HAS_EVENTS;
-+ sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
-+ sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-+ ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
-+ if (ret)
-+ return ret;
-+
-+ ret = sc2235_get_regulators(sensor);
-+ if (ret)
-+ return ret;
-+ mutex_init(&sensor->lock);
-+
-+ ret = sc2235_set_power_on(dev);
-+ if (ret) {
-+ dev_err(dev, "failed to power on\n");
-+ goto entity_cleanup;
-+ }
-+
-+ ret = sc2235_check_chip_id(sensor);
-+ if (ret)
-+ goto entity_power_off;
-+
-+ ret = sc2235_init_controls(sensor);
-+ if (ret)
-+ goto entity_power_off;
-+
-+ ret = v4l2_async_register_subdev_sensor(&sensor->sd);
-+ if (ret)
-+ goto free_ctrls;
-+
-+ pm_runtime_set_active(dev);
-+ pm_runtime_enable(dev);
-+
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+entity_power_off:
-+ sc2235_set_power_off(dev);
-+entity_cleanup:
-+ media_entity_cleanup(&sensor->sd.entity);
-+ mutex_destroy(&sensor->lock);
-+ return ret;
-+}
-+
-+static void sc2235_remove(struct i2c_client *client)
-+{
-+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
-+ struct sc2235_dev *sensor = to_sc2235_dev(sd);
-+
-+ v4l2_async_unregister_subdev(&sensor->sd);
-+ media_entity_cleanup(&sensor->sd.entity);
-+ v4l2_ctrl_handler_free(&sensor->ctrls.handler);
-+ mutex_destroy(&sensor->lock);
-+
-+ pm_runtime_disable(&client->dev);
-+ if (!pm_runtime_status_suspended(&client->dev))
-+ sc2235_set_power_off(&client->dev);
-+ pm_runtime_set_suspended(&client->dev);
-+}
-+
-+static const struct i2c_device_id sc2235_id[] = {
-+ { "sc2235", 0 },
-+ {},
-+};
-+MODULE_DEVICE_TABLE(i2c, sc2235_id);
-+
-+static const struct of_device_id sc2235_dt_ids[] = {
-+ { .compatible = "smartsens,sc2235" },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, sc2235_dt_ids);
-+
-+static const struct dev_pm_ops sc2235_pm_ops = {
-+ SET_RUNTIME_PM_OPS(sc2235_set_power_off, sc2235_set_power_on, NULL)
-+};
-+
-+static struct i2c_driver sc2235_i2c_driver = {
-+ .driver = {
-+ .name = "sc2235",
-+ .of_match_table = sc2235_dt_ids,
-+ .pm = &sc2235_pm_ops,
-+ },
-+ .id_table = sc2235_id,
-+ .probe = sc2235_probe,
-+ .remove = sc2235_remove,
-+};
-+
-+module_i2c_driver(sc2235_i2c_driver);
-+
-+MODULE_DESCRIPTION("SC2235 Camera Subdev Driver");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_common.h
-@@ -0,0 +1,185 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_COMMON_H
-+#define STF_COMMON_H
-+
-+#include <linux/kern_levels.h>
-+
-+// #define STF_DEBUG
-+
-+// #define USE_CSIDPHY_ONE_CLK_MODE 1
-+
-+enum {
-+ ST_DVP = 0x0001,
-+ ST_CSIPHY = 0x0002,
-+ ST_CSI = 0x0004,
-+ ST_ISP = 0x0008,
-+ ST_VIN = 0x0010,
-+ ST_VIDEO = 0x0020,
-+ ST_CAMSS = 0x0040,
-+ ST_SENSOR = 0x0080,
-+};
-+
-+enum {
-+ ST_NONE = 0x00,
-+ ST_ERR = 0x01,
-+ ST_WARN = 0x02,
-+ ST_INFO = 0x03,
-+ ST_DEBUG = 0x04,
-+};
-+
-+extern unsigned int stdbg_level;
-+extern unsigned int stdbg_mask;
-+
-+#define ST_MODULE2STRING(__module) ({ \
-+ char *__str; \
-+ \
-+ switch (__module) { \
-+ case ST_DVP: \
-+ __str = "st_dvp"; \
-+ break; \
-+ case ST_CSIPHY: \
-+ __str = "st_csiphy"; \
-+ break; \
-+ case ST_CSI: \
-+ __str = "st_csi"; \
-+ break; \
-+ case ST_ISP: \
-+ __str = "st_isp"; \
-+ break; \
-+ case ST_VIN: \
-+ __str = "st_vin"; \
-+ break; \
-+ case ST_VIDEO: \
-+ __str = "st_video"; \
-+ break; \
-+ case ST_CAMSS: \
-+ __str = "st_camss"; \
-+ break; \
-+ case ST_SENSOR: \
-+ __str = "st_sensor"; \
-+ break; \
-+ default: \
-+ __str = "unknow"; \
-+ break; \
-+ } \
-+ \
-+ __str; \
-+ })
-+
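-+/*
-+ * Logging helpers: a message is printed only when stdbg_level is above
-+ * the severity of the message and the module bit is set in stdbg_mask.
-+ */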
-+#define st_debug(module, __fmt, arg...) \
-+ do { \
-+ if (stdbg_level > ST_INFO) { \
-+ if (stdbg_mask & module) \
-+ pr_err("[%s] debug: " __fmt, \
-+ ST_MODULE2STRING(module), \
-+ ## arg); \
-+ } \
-+ } while (0)
-+
-+#define st_info(module, __fmt, arg...) \
-+ do { \
-+ if (stdbg_level > ST_WARN) { \
-+ if (stdbg_mask & module) \
-+ pr_err("[%s] info: " __fmt, \
-+ ST_MODULE2STRING(module), \
-+ ## arg); \
-+ } \
-+ } while (0)
-+
-+#define st_warn(module, __fmt, arg...) \
-+ do { \
-+ if (stdbg_level > ST_ERR) { \
-+ if (stdbg_mask & module) \
-+ pr_err("[%s] warn: " __fmt, \
-+ ST_MODULE2STRING(module), \
-+ ## arg); \
-+ } \
-+ } while (0)
-+
-+#define st_err(module, __fmt, arg...) \
-+ do { \
-+ if (stdbg_level > ST_NONE) { \
-+ if (stdbg_mask & module) \
-+ pr_err("[%s] error: " __fmt, \
-+ ST_MODULE2STRING(module), \
-+ ## arg); \
-+ } \
-+ } while (0)
-+
-+#define st_err_ratelimited(module, fmt, ...) \
-+ do { \
-+ static DEFINE_RATELIMIT_STATE(_rs, \
-+ DEFAULT_RATELIMIT_INTERVAL, \
-+ DEFAULT_RATELIMIT_BURST); \
-+ if (__ratelimit(&_rs) && (stdbg_level > ST_NONE)) { \
-+ if (stdbg_mask & module) \
-+ pr_err("[%s] error: " fmt, \
-+ ST_MODULE2STRING(module), \
-+ ##__VA_ARGS__); \
-+ } \
-+ } while (0)
-+
-+#define set_bits(p, v, b, m) (((p) & ~(m)) | ((v) << (b)))
-+
-+static inline u32 reg_read(void __iomem *base, u32 reg)
-+{
-+ return ioread32(base + reg);
-+}
-+
-+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
-+{
-+ iowrite32(val, base + reg);
-+}
-+
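-+/* Read-modify-write helper: update only the bits selected by @mask. */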
-+static inline void reg_set_bit(void __iomem *base, u32 reg, u32 mask, u32 val)
-+{
-+ u32 value;
-+
-+ value = ioread32(base + reg) & ~mask;
-+ val &= mask;
-+ val |= value;
-+ iowrite32(val, base + reg);
-+}
-+
-+static inline void reg_set(void __iomem *base, u32 reg, u32 mask)
-+{
-+ iowrite32(ioread32(base + reg) | mask, base + reg);
-+}
-+
-+static inline void reg_clear(void __iomem *base, u32 reg, u32 mask)
-+{
-+ iowrite32(ioread32(base + reg) & ~mask, base + reg);
-+}
-+
-+static inline void reg_set_highest_bit(void __iomem *base, u32 reg)
-+{
-+ u32 val;
-+
-+ val = ioread32(base + reg);
-+ val &= ~(0x1 << 31);
-+ val |= (0x1 & 0x1) << 31;
-+ iowrite32(val, base + reg);
-+}
-+
-+static inline void reg_clr_highest_bit(void __iomem *base, u32 reg)
-+{
-+ u32 val;
-+
-+ val = ioread32(base + reg);
-+ val &= ~(0x1 << 31);
-+ val |= (0x0 & 0x1) << 31;
-+ iowrite32(val, base + reg);
-+}
-+
-+static inline void print_reg(unsigned int module, void __iomem *base, u32 reg)
-+{
-+ //st_debug(module, "REG 0x%x = 0x%x\n",
-+ // base + reg, ioread32(base + reg));
-+}
-+
-+#endif /* STF_COMMON_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_csi.c
-@@ -0,0 +1,465 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+
-+static const struct csi_format csi_formats_sink[] = {
-+ { MEDIA_BUS_FMT_UYVY8_2X8, 16},
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+};
-+
-+/* these bpp values must match the CSI controller configuration */
-+static const struct csi_format csi_formats_src[] = {
-+ { MEDIA_BUS_FMT_AYUV8_1X32, 32},
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 16},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 16},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 16},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 16},
-+};
-+
-+static int csi_find_format(u32 code,
-+ const struct csi_format *formats,
-+ unsigned int nformats)
-+{
-+ int i;
-+
-+ for (i = 0; i < nformats; i++)
-+ if (formats[i].code == code)
-+ return i;
-+ return -EINVAL;
-+}
-+
-+int stf_csi_subdev_init(struct stfcamss *stfcamss)
-+{
-+ struct stf_csi_dev *csi_dev = stfcamss->csi_dev;
-+
-+ csi_dev->s_type = SENSOR_VIN;
-+ csi_dev->hw_ops = &csi_ops;
-+ csi_dev->stfcamss = stfcamss;
-+ csi_dev->formats_sink = csi_formats_sink;
-+ csi_dev->nformats_sink = ARRAY_SIZE(csi_formats_sink);
-+ csi_dev->formats_src = csi_formats_src;
-+ csi_dev->nformats_src = ARRAY_SIZE(csi_formats_src);
-+ mutex_init(&csi_dev->stream_lock);
-+ return 0;
-+}
-+
-+static int csi_set_power(struct v4l2_subdev *sd, int on)
-+{
-+ struct stf_csi_dev *csi_dev = v4l2_get_subdevdata(sd);
-+
-+ csi_dev->hw_ops->csi_power_on(csi_dev, (u8)on);
-+ return 0;
-+}
-+
-+static struct v4l2_mbus_framefmt *
-+__csi_get_format(struct stf_csi_dev *csi_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_format(&csi_dev->subdev, state, pad);
-+
-+ return &csi_dev->fmt[pad];
-+}
-+
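-+/*
-+ * Map a media bus code to its MIPI CSI-2 data type: 0x2b is RAW10 and
-+ * 0x1e is YUV422 8-bit. Unknown codes fall back to RAW10.
-+ */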
-+static u32 code_to_data_type(int code)
-+{
-+ switch (code) {
-+ case MEDIA_BUS_FMT_SRGGB10_1X10:
-+ case MEDIA_BUS_FMT_SGRBG10_1X10:
-+ case MEDIA_BUS_FMT_SGBRG10_1X10:
-+ case MEDIA_BUS_FMT_SBGGR10_1X10:
-+ return 0x2b;
-+ case MEDIA_BUS_FMT_UYVY8_2X8:
-+ return 0x1E;
-+ default:
-+ return 0x2b;
-+ }
-+}
-+
-+static int csi_set_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct stf_csi_dev *csi_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+ int ret = 0;
-+ u32 code, width, dt;
-+ u8 bpp;
-+
-+ format = __csi_get_format(csi_dev, NULL, STF_CSI_PAD_SINK,
-+ V4L2_SUBDEV_FORMAT_ACTIVE);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ width = format->width;
-+
-+ ret = csi_find_format(format->code,
-+ csi_dev->formats_sink,
-+ csi_dev->nformats_sink);
-+ if (ret < 0)
-+ return ret;
-+
-+ code = csi_dev->formats_sink[ret].code;
-+ bpp = csi_dev->formats_src[ret].bpp;
-+ dt = code_to_data_type(code);
-+
-+ mutex_lock(&csi_dev->stream_lock);
-+ if (enable) {
-+ if (csi_dev->stream_count == 0) {
-+ csi_dev->hw_ops->csi_clk_enable(csi_dev);
-+ csi_dev->hw_ops->csi_stream_set(csi_dev, enable, dt, width, bpp);
-+ }
-+ csi_dev->stream_count++;
-+ } else {
-+ if (csi_dev->stream_count == 0)
-+ goto exit;
-+ if (csi_dev->stream_count == 1) {
-+ csi_dev->hw_ops->csi_stream_set(csi_dev, enable, dt, width, bpp);
-+ csi_dev->hw_ops->csi_clk_disable(csi_dev);
-+ }
-+ csi_dev->stream_count--;
-+ }
-+exit:
-+ mutex_unlock(&csi_dev->stream_lock);
-+ return 0;
-+}
-+
-+static void csi_try_format(struct stf_csi_dev *csi_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ unsigned int i;
-+
-+ switch (pad) {
-+ case STF_CSI_PAD_SINK:
-+ /* Set format on sink pad */
-+
-+ for (i = 0; i < csi_dev->nformats_sink; i++)
-+ if (fmt->code == csi_dev->formats_sink[i].code)
-+ break;
-+
-+ if (i >= csi_dev->nformats_sink)
-+ fmt->code = csi_dev->formats_sink[0].code;
-+
-+ fmt->width = clamp_t(u32,
-+ fmt->width,
-+ STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ fmt->height = clamp_t(u32,
-+ fmt->height,
-+ STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+
-+
-+ fmt->field = V4L2_FIELD_NONE;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->flags = 0;
-+
-+ break;
-+
-+ case STF_CSI_PAD_SRC:
-+ /* Set format on src pad */
-+
-+ for (i = 0; i < csi_dev->nformats_src; i++)
-+ if (fmt->code == csi_dev->formats_src[i].code)
-+ break;
-+
-+ if (i >= csi_dev->nformats_src)
-+ fmt->code = csi_dev->formats_src[0].code;
-+
-+ fmt->width = clamp_t(u32,
-+ fmt->width,
-+ STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ fmt->height = clamp_t(u32,
-+ fmt->height,
-+ STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+
-+ fmt->field = V4L2_FIELD_NONE;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->flags = 0;
-+
-+ break;
-+ }
-+}
-+
-+static int csi_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ struct stf_csi_dev *csi_dev = v4l2_get_subdevdata(sd);
-+
-+ if (code->index >= csi_dev->nformats_sink)
-+ return -EINVAL;
-+ if (code->pad == STF_CSI_PAD_SINK) {
-+ code->code = csi_dev->formats_sink[code->index].code;
-+ } else {
-+ struct v4l2_mbus_framefmt *sink_fmt;
-+
-+ sink_fmt = __csi_get_format(csi_dev, state, STF_CSI_PAD_SINK,
-+ code->which);
-+
-+ code->code = sink_fmt->code;
-+ if (!code->code)
-+ return -EINVAL;
-+ }
-+ code->flags = 0;
-+
-+ return 0;
-+}
-+
-+static int csi_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ struct stf_csi_dev *csi_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt format;
-+
-+ if (fse->index != 0)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = 1;
-+ format.height = 1;
-+ csi_try_format(csi_dev, state, fse->pad, &format, fse->which);
-+ fse->min_width = format.width;
-+ fse->min_height = format.height;
-+
-+ if (format.code != fse->code)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = -1;
-+ format.height = -1;
-+ csi_try_format(csi_dev, state, fse->pad, &format, fse->which);
-+ fse->max_width = format.width;
-+ fse->max_height = format.height;
-+
-+ return 0;
-+}
-+
-+static int csi_get_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_csi_dev *csi_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __csi_get_format(csi_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ fmt->format = *format;
-+
-+ return 0;
-+}
-+
-+static int csi_set_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_csi_dev *csi_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+ struct v4l2_mbus_framefmt *format_src;
-+ int ret;
-+
-+ format = __csi_get_format(csi_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&csi_dev->stream_lock);
-+ if (csi_dev->stream_count) {
-+ fmt->format = *format;
-+ mutex_unlock(&csi_dev->stream_lock);
-+ goto out;
-+ } else {
-+ csi_try_format(csi_dev, state, fmt->pad, &fmt->format, fmt->which);
-+ *format = fmt->format;
-+ }
-+ mutex_unlock(&csi_dev->stream_lock);
-+
-+ if (fmt->pad == STF_CSI_PAD_SINK) {
-+ format_src = __csi_get_format(csi_dev, state, STF_DVP_PAD_SRC,
-+ fmt->which);
-+
-+ ret = csi_find_format(format->code, csi_dev->formats_sink,
-+ csi_dev->nformats_sink);
-+ if (ret < 0)
-+ return ret;
-+
-+ format_src->code = csi_dev->formats_src[ret].code;
-+ csi_try_format(csi_dev, state, STF_DVP_PAD_SRC, format_src,
-+ fmt->which);
-+ }
-+out:
-+ return 0;
-+}
-+
-+static int csi_init_formats(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_fh *fh)
-+{
-+ struct v4l2_subdev_format format = {
-+ .pad = STF_CSI_PAD_SINK,
-+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
-+ V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .format = {
-+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
-+ .width = 1920,
-+ .height = 1080
-+ }
-+ };
-+
-+ return csi_set_format(sd, fh ? fh->state : NULL, &format);
-+}
-+
-+static int csi_link_setup(struct media_entity *entity,
-+ const struct media_pad *local,
-+ const struct media_pad *remote, u32 flags)
-+{
-+ if ((local->flags & MEDIA_PAD_FL_SOURCE) &&
-+ (flags & MEDIA_LNK_FL_ENABLED)) {
-+ struct v4l2_subdev *sd;
-+ struct stf_csi_dev *csi_dev;
-+ struct vin_line *line;
-+
-+ if (media_pad_remote_pad_first(local))
-+ return -EBUSY;
-+
-+ sd = media_entity_to_v4l2_subdev(entity);
-+ csi_dev = v4l2_get_subdevdata(sd);
-+
-+ sd = media_entity_to_v4l2_subdev(remote->entity);
-+ line = v4l2_get_subdevdata(sd);
-+ if (line->sdev_type == VIN_DEV_TYPE)
-+ csi_dev->s_type = SENSOR_VIN;
-+ if (line->sdev_type == ISP_DEV_TYPE)
-+ csi_dev->s_type = SENSOR_ISP;
-+ st_info(ST_CSI, "CSI device sensor type: %d\n", csi_dev->s_type);
-+ }
-+
-+ if ((local->flags & MEDIA_PAD_FL_SINK) &&
-+ (flags & MEDIA_LNK_FL_ENABLED)) {
-+ struct v4l2_subdev *sd;
-+ struct stf_csi_dev *csi_dev;
-+ struct stf_csiphy_dev *csiphy_dev;
-+
-+ if (media_pad_remote_pad_first(local))
-+ return -EBUSY;
-+
-+ sd = media_entity_to_v4l2_subdev(entity);
-+ csi_dev = v4l2_get_subdevdata(sd);
-+
-+ sd = media_entity_to_v4l2_subdev(remote->entity);
-+ csiphy_dev = v4l2_get_subdevdata(sd);
-+
-+ st_info(ST_CSI, "CSI0 link to csiphy0\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static const struct v4l2_subdev_core_ops csi_core_ops = {
-+ .s_power = csi_set_power,
-+};
-+
-+static const struct v4l2_subdev_video_ops csi_video_ops = {
-+ .s_stream = csi_set_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops csi_pad_ops = {
-+ .enum_mbus_code = csi_enum_mbus_code,
-+ .enum_frame_size = csi_enum_frame_size,
-+ .get_fmt = csi_get_format,
-+ .set_fmt = csi_set_format,
-+};
-+
-+static const struct v4l2_subdev_ops csi_v4l2_ops = {
-+ .core = &csi_core_ops,
-+ .video = &csi_video_ops,
-+ .pad = &csi_pad_ops,
-+};
-+
-+static const struct v4l2_subdev_internal_ops csi_v4l2_internal_ops = {
-+ .open = csi_init_formats,
-+};
-+
-+static const struct media_entity_operations csi_media_ops = {
-+ .link_setup = csi_link_setup,
-+ .link_validate = v4l2_subdev_link_validate,
-+};
-+
-+int stf_csi_register(struct stf_csi_dev *csi_dev, struct v4l2_device *v4l2_dev)
-+{
-+ struct v4l2_subdev *sd = &csi_dev->subdev;
-+ struct device *dev = csi_dev->stfcamss->dev;
-+ struct media_pad *pads = csi_dev->pads;
-+ int ret;
-+
-+ csi_dev->mipirx_1p8 = devm_regulator_get(dev, "mipirx_1p8");
-+ if (IS_ERR(csi_dev->mipirx_1p8))
-+ return PTR_ERR(csi_dev->mipirx_1p8);
-+
-+ csi_dev->mipirx_0p9 = devm_regulator_get(dev, "mipirx_0p9");
-+ if (IS_ERR(csi_dev->mipirx_0p9))
-+ return PTR_ERR(csi_dev->mipirx_0p9);
-+
-+ v4l2_subdev_init(sd, &csi_v4l2_ops);
-+ sd->internal_ops = &csi_v4l2_internal_ops;
-+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
-+ STF_CSI_NAME, 0);
-+ v4l2_set_subdevdata(sd, csi_dev);
-+
-+ ret = csi_init_formats(sd, NULL);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to init format: %d\n", ret);
-+ return ret;
-+ }
-+
-+ pads[STF_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
-+ pads[STF_CSI_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
-+
-+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
-+ sd->entity.ops = &csi_media_ops;
-+ ret = media_entity_pads_init(&sd->entity, STF_CSI_PADS_NUM, pads);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to init media entity: %d\n", ret);
-+ return ret;
-+ }
-+
-+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to register subdev: %d\n", ret);
-+ goto err_sreg;
-+ }
-+
-+ return 0;
-+
-+err_sreg:
-+ media_entity_cleanup(&sd->entity);
-+ return ret;
-+}
-+
-+int stf_csi_unregister(struct stf_csi_dev *csi_dev)
-+{
-+ v4l2_device_unregister_subdev(&csi_dev->subdev);
-+ media_entity_cleanup(&csi_dev->subdev.entity);
-+ mutex_destroy(&csi_dev->stream_lock);
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_csi.h
-@@ -0,0 +1,61 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_CSI_H
-+#define STF_CSI_H
-+
-+#include <linux/regulator/consumer.h>
-+#include <media/v4l2-subdev.h>
-+#include <media/v4l2-device.h>
-+#include <media/media-entity.h>
-+#include <video/stf-vin.h>
-+
-+#define STF_CSI_NAME "stf_csi"
-+
-+#define STF_CSI_PAD_SINK 0
-+#define STF_CSI_PAD_SRC 1
-+#define STF_CSI_PADS_NUM 2
-+
-+struct csi_format {
-+ u32 code;
-+ u8 bpp;
-+};
-+
-+struct stf_csi_dev;
-+
-+struct csi_hw_ops {
-+ int (*csi_power_on)(struct stf_csi_dev *csi_dev, u8 on);
-+ int (*csi_clk_enable)(struct stf_csi_dev *csi_dev);
-+ int (*csi_clk_disable)(struct stf_csi_dev *csi_dev);
-+ int (*csi_stream_set)(struct stf_csi_dev *csi_dev, int on,
-+ u32 dt, u32 width, u8 bpp);
-+};
-+
-+struct stf_csi_dev {
-+ struct stfcamss *stfcamss;
-+ enum sensor_type s_type;
-+ struct v4l2_subdev subdev;
-+ struct media_pad pads[STF_CSI_PADS_NUM];
-+ struct v4l2_mbus_framefmt fmt[STF_CSI_PADS_NUM];
-+ const struct csi_format *formats_sink;
-+ unsigned int nformats_sink;
-+ const struct csi_format *formats_src;
-+ unsigned int nformats_src;
-+ struct csi_hw_ops *hw_ops;
-+ struct mutex stream_lock;
-+ int stream_count;
-+ struct regulator *mipirx_1p8;
-+ struct regulator *mipirx_0p9;
-+};
-+
-+extern int stf_csi_subdev_init(struct stfcamss *stfcamss);
-+extern int stf_csi_register(struct stf_csi_dev *csi_dev,
-+ struct v4l2_device *v4l2_dev);
-+extern int stf_csi_unregister(struct stf_csi_dev *csi_dev);
-+extern struct csi_hw_ops csi_ops;
-+extern void dump_csi_reg(void __iomem *csibase);
-+
-+#endif /* STF_CSI_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_csi_hw_ops.c
-@@ -0,0 +1,310 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <linux/regmap.h>
-+
-+#define CSI2RX_DEVICE_CFG_REG 0x000
-+
-+#define CSI2RX_SOFT_RESET_REG 0x004
-+#define CSI2RX_SOFT_RESET_PROTOCOL BIT(1)
-+#define CSI2RX_SOFT_RESET_FRONT BIT(0)
-+
-+#define CSI2RX_DPHY_LANE_CONTROL 0x040
-+
-+#define CSI2RX_STATIC_CFG_REG 0x008
-+#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane) \
-+ ((plane) << (16 + (llane) * 4))
-+#define CSI2RX_STATIC_CFG_LANES_MASK GENMASK(11, 8)
-+
-+#define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100)
-+
-+#define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000)
-+#define CSI2RX_STREAM_CTRL_START BIT(0)
-+
-+#define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x008)
-+#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT BIT(31)
-+#define CSI2RX_STREAM_DATA_CFG_EN_DATA_TYPE_0 BIT(7)
-+#define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16)
-+
-+#define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c)
-+#define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8)
-+
-+#define CSI2RX_LANES_MAX 4
-+#define CSI2RX_STREAMS_MAX 4
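The per-stream register banks start at offset 0x100 and are 0x100 bytes apart, and CSI2RX_STATIC_CFG_DLANE_MAP() packs one physical-lane number per logical lane into the upper half of the static-configuration word. A small host-side check of the macro arithmetic (values only, no hardware access):

#include <stdio.h>

#define STREAM_BASE(n)                      (((n) + 1) * 0x100)
#define STATIC_CFG_DLANE_MAP(llane, plane)  ((plane) << (16 + (llane) * 4))

int main(void)
{
        int n;

        for (n = 0; n < 4; n++)
                printf("stream %d: ctrl 0x%03x data_cfg 0x%03x cfg 0x%03x\n",
                       n, STREAM_BASE(n) + 0x000, STREAM_BASE(n) + 0x008,
                       STREAM_BASE(n) + 0x00c);

        /* logical lane 2 mapped to physical lane 3 -> bits [27:24] = 3 */
        printf("dlane_map(2, 3) = 0x%08x\n", STATIC_CFG_DLANE_MAP(2, 3));
        return 0;
}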
-+
-+static int stf_csi_power_on(struct stf_csi_dev *csi_dev, u8 on)
-+{
-+ struct stfcamss *stfcamss = csi_dev->stfcamss;
-+ int ret;
-+
-+ if (on) {
-+ ret = regulator_enable(csi_dev->mipirx_1p8);
-+ if (ret) {
-+ st_err(ST_CSI, "Cannot enable mipirx_1p8 regulator\n");
-+ goto err_1p8;
-+ }
-+
-+ ret = regulator_enable(csi_dev->mipirx_0p9);
-+ if (ret) {
-+ st_err(ST_CSI, "Cannot enable mipirx_0p9 regulator\n");
-+ goto err_0p9;
-+ }
-+ } else {
-+ regulator_disable(csi_dev->mipirx_1p8);
-+ regulator_disable(csi_dev->mipirx_0p9);
-+ }
-+
-+ regmap_update_bits(stfcamss->stf_aon_syscon, stfcamss->aon_gp_reg,
-+ BIT(31), BIT(31));
-+
-+ return 0;
-+
-+err_0p9:
-+ regulator_disable(csi_dev->mipirx_1p8);
-+err_1p8:
-+ return ret;
-+}
-+
-+static int stf_csi_clk_enable(struct stf_csi_dev *csi_dev)
-+{
-+ struct stfcamss *stfcamss = csi_dev->stfcamss;
-+
-+ clk_set_rate(stfcamss->sys_clk[STFCLK_MIPI_RX0_PXL].clk, 198000000);
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF0].clk);
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF1].clk);
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF2].clk);
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF3].clk);
-+
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF0].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF1].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF2].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF3].rstc);
-+
-+ switch (csi_dev->s_type) {
-+ case SENSOR_VIN:
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_AXIWR].rstc);
-+ clk_set_parent(stfcamss->sys_clk[STFCLK_AXIWR].clk,
-+ stfcamss->sys_clk[STFCLK_MIPI_RX0_PXL].clk);
-+ break;
-+ case SENSOR_ISP:
-+ clk_set_parent(stfcamss->sys_clk[STFCLK_WRAPPER_CLK_C].clk,
-+ stfcamss->sys_clk[STFCLK_MIPI_RX0_PXL].clk);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int stf_csi_clk_disable(struct stf_csi_dev *csi_dev)
-+{
-+ struct stfcamss *stfcamss = csi_dev->stfcamss;
-+
-+ switch (csi_dev->s_type) {
-+ case SENSOR_VIN:
-+ reset_control_assert(stfcamss->sys_rst[STFRST_AXIWR].rstc);
-+ break;
-+ case SENSOR_ISP:
-+ break;
-+ }
-+
-+ reset_control_assert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF3].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF2].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF1].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_PIXEL_CLK_IF0].rstc);
-+
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF3].clk);
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF2].clk);
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF1].clk);
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_PIXEL_CLK_IF0].clk);
-+
-+ return 0;
-+}
-+
-+static void csi2rx_reset(void __iomem *reg_base)
-+{
-+ writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
-+ reg_base + CSI2RX_SOFT_RESET_REG);
-+
-+ udelay(10);
-+
-+ writel(0, reg_base + CSI2RX_SOFT_RESET_REG);
-+}
-+
-+static int csi2rx_start(struct stf_csi_dev *csi_dev, void __iomem *reg_base, u32 dt)
-+{
-+ struct stfcamss *stfcamss = csi_dev->stfcamss;
-+ struct csi2phy_cfg *csiphy =
-+ stfcamss->csiphy_dev->csiphy;
-+ unsigned int i;
-+ unsigned long lanes_used = 0;
-+ u32 reg;
-+
-+ if (!csiphy) {
-+ st_err(ST_CSI, "csiphy0 config does not exist\n");
-+ return -EINVAL;
-+ }
-+
-+ csi2rx_reset(reg_base);
-+
-+ reg = csiphy->num_data_lanes << 8;
-+ for (i = 0; i < csiphy->num_data_lanes; i++) {
-+#ifndef USE_CSIDPHY_ONE_CLK_MODE
-+ reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csiphy->data_lanes[i]);
-+ set_bit(csiphy->data_lanes[i] - 1, &lanes_used);
-+#else
-+ reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
-+ set_bit(i, &lanes_used);
-+#endif
-+ }
-+
-+ /*
-+ * Even the unused lanes need to be mapped. To avoid mapping
-+ * the same physical lane twice, keep track of the lanes used
-+ * in the previous loop and map only the still-unused physical
-+ * lanes to the rest of our logical lanes.
-+ */
-+ for (i = csiphy->num_data_lanes; i < CSI2RX_LANES_MAX; i++) {
-+ unsigned int idx = find_first_zero_bit(&lanes_used,
-+ CSI2RX_LANES_MAX);
-+
-+ set_bit(idx, &lanes_used);
-+ reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, idx + 1);
-+ }
-+
-+ writel(reg, reg_base + CSI2RX_STATIC_CFG_REG);
-+
-+ // 0x40 DPHY_LANE_CONTROL
-+ reg = 0;
-+#ifndef USE_CSIDPHY_ONE_CLK_MODE
-+ for (i = 0; i < csiphy->num_data_lanes; i++)
-+ reg |= 1 << (csiphy->data_lanes[i] - 1)
-+ | 1 << (csiphy->data_lanes[i] + 11);
-+#else
-+ for (i = 0; i < csiphy->num_data_lanes; i++)
-+ reg |= 1 << i | 1 << (i + 12); //data_lane
-+#endif
-+
-+ reg |= 1 << 4 | 1 << 16; //clk_lane
-+ writel(reg, reg_base + CSI2RX_DPHY_LANE_CONTROL);
-+
-+ /*
-+ * Create a static mapping between the CSI virtual channels
-+ * and the output stream.
-+ *
-+ * This should be enhanced, but v4l2 lacks the support for
-+ * changing that mapping dynamically.
-+ *
-+ * We also cannot enable and disable independent streams here,
-+ * hence the reference counting.
-+ */
-+ for (i = 0; i < CSI2RX_STREAMS_MAX; i++) {
-+ writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
-+ reg_base + CSI2RX_STREAM_CFG_REG(i));
-+
-+ writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
-+ CSI2RX_STREAM_DATA_CFG_VC_SELECT(i) |
-+ CSI2RX_STREAM_DATA_CFG_EN_DATA_TYPE_0 | dt,
-+ reg_base + CSI2RX_STREAM_DATA_CFG_REG(i));
-+
-+ writel(CSI2RX_STREAM_CTRL_START,
-+ reg_base + CSI2RX_STREAM_CTRL_REG(i));
-+ }
-+
-+ return 0;
-+}
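The two loops above first record which physical lanes are taken by the active logical lanes, then hand each remaining logical lane the first free physical lane, so no physical lane is programmed twice. The same algorithm as a stand-alone sketch, using a plain bitmask in place of set_bit()/find_first_zero_bit() (the two-lane configuration is made up):

#include <stdio.h>

#define MAX_LANES 4

int main(void)
{
        /* active configuration: two data lanes on physical lanes 2 and 1 */
        const unsigned char data_lanes[] = { 2, 1 };
        const unsigned int num = 2;
        unsigned int used = 0, map[MAX_LANES], i;

        for (i = 0; i < num; i++) {
                map[i] = data_lanes[i];
                used |= 1u << (data_lanes[i] - 1);
        }

        /* give the unused logical lanes the remaining physical lanes */
        for (i = num; i < MAX_LANES; i++) {
                unsigned int idx = 0;

                while (used & (1u << idx))
                        idx++;
                used |= 1u << idx;
                map[i] = idx + 1;
        }

        for (i = 0; i < MAX_LANES; i++)
                printf("logical lane %u -> physical lane %u\n", i, map[i]);
        return 0;       /* prints 2, 1, 3, 4 */
}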
-+
-+static void csi2rx_stop(struct stf_csi_dev *csi_dev, void __iomem *reg_base)
-+{
-+ unsigned int i;
-+
-+ for (i = 0; i < CSI2RX_STREAMS_MAX; i++)
-+ writel(0, reg_base + CSI2RX_STREAM_CTRL_REG(i));
-+}
-+
-+static void csi_set_vin_axiwr_pix(struct stf_csi_dev *csi_dev, u32 width, u8 bpp)
-+{
-+ struct stf_vin_dev *vin = csi_dev->stfcamss->vin;
-+ u32 value = 0;
-+ int cnfg_axiwr_pix_ct = 64 / bpp;
-+
-+ if (cnfg_axiwr_pix_ct == 2)
-+ value = 0;
-+ else if (cnfg_axiwr_pix_ct == 4)
-+ value = 1;
-+ else if (cnfg_axiwr_pix_ct == 8)
-+ value = 2;
-+
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ BIT(14)|BIT(13), value << 13); //u0_vin_cnfg_axiwr0_pix_ct
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ BIT(12)|BIT(11)|BIT(10)|BIT(9)|BIT(8)|BIT(7)|BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2),
-+ (width / cnfg_axiwr_pix_ct - 1)<<2); //u0_vin_cnfg_axiwr0_pix_cnt_end
-+}
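The AXI writer moves 64 bits per beat, so the pix_ct field encodes 64/bpp pixels per beat (2, 4 or 8 pixels map to field values 0, 1 or 2) and the end count is width / (64/bpp) - 1. A quick numeric check, assuming a 1920-pixel line:

#include <stdio.h>

int main(void)
{
        const unsigned int width = 1920;
        const unsigned char bpp[] = { 8, 16, 32 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                unsigned int pix_per_beat = 64 / bpp[i];
                unsigned int field = pix_per_beat == 2 ? 0 :
                                     pix_per_beat == 4 ? 1 : 2;

                printf("bpp %2u: %u pixels/beat, pix_ct field %u, cnt_end %u\n",
                       bpp[i], pix_per_beat, field,
                       width / pix_per_beat - 1);
        }
        return 0;
}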
-+
-+static int stf_csi_stream_set(struct stf_csi_dev *csi_dev,
-+ int on, u32 dt, u32 width, u8 bpp)
-+{
-+ struct stf_vin_dev *vin = csi_dev->stfcamss->vin;
-+ void __iomem *reg_base = vin->csi2rx_base;
-+
-+ switch (csi_dev->s_type) {
-+ case SENSOR_VIN:
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_20,
-+ BIT(3)|BIT(2)|BIT(1)|BIT(0),
-+ 0<<0); //u0_vin_cnfg_axiwr0_channel_sel
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ BIT(16)|BIT(15),
-+ 0<<15); //u0_vin_cnfg_axiwr0_pixel_high_bit_sel
-+ csi_set_vin_axiwr_pix(csi_dev, width, bpp);
-+ break;
-+ case SENSOR_ISP:
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ BIT(7)|BIT(6),
-+ 0<<6); //u0_vin_cnfg_mipi_byte_en_isp
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ BIT(11)|BIT(10)|BIT(9)|BIT(8),
-+ 0<<8); //u0_vin_cnfg_mipi_channel_sel0
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ BIT(16)|BIT(15)|BIT(14)|BIT(13),
-+ 0<<13); //u0_vin_cnfg_pix_num
-+
-+ if (dt == 0x2b)
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ BIT(12),
-+ 1<<12); //u0_vin_cnfg_p_i_mipi_header_en0
-+ break;
-+ }
-+
-+ if (on)
-+ csi2rx_start(csi_dev, reg_base, dt);
-+ else
-+ csi2rx_stop(csi_dev, reg_base);
-+
-+ return 0;
-+}
-+
-+void dump_csi_reg(void __iomem *csibase)
-+{
-+ st_info(ST_CSI, "DUMP CSI register:\n");
-+ print_reg(ST_CSI, csibase, 0x00);
-+ print_reg(ST_CSI, csibase, 0x04);
-+ print_reg(ST_CSI, csibase, 0x08);
-+ print_reg(ST_CSI, csibase, 0x10);
-+
-+ print_reg(ST_CSI, csibase, 0x40);
-+ print_reg(ST_CSI, csibase, 0x48);
-+ print_reg(ST_CSI, csibase, 0x4c);
-+ print_reg(ST_CSI, csibase, 0x50);
-+}
-+
-+struct csi_hw_ops csi_ops = {
-+ .csi_power_on = stf_csi_power_on,
-+ .csi_clk_enable = stf_csi_clk_enable,
-+ .csi_clk_disable = stf_csi_clk_disable,
-+ .csi_stream_set = stf_csi_stream_set,
-+};
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_csiphy.c
-@@ -0,0 +1,357 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+
-+static const struct csiphy_format csiphy_formats_st7110[] = {
-+ { MEDIA_BUS_FMT_UYVY8_2X8, 16},
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+};
-+
-+int stf_csiphy_subdev_init(struct stfcamss *stfcamss)
-+{
-+ struct stf_csiphy_dev *csiphy_dev = stfcamss->csiphy_dev;
-+
-+ csiphy_dev->hw_ops = &csiphy_ops;
-+ csiphy_dev->stfcamss = stfcamss;
-+ csiphy_dev->formats = csiphy_formats_st7110;
-+ csiphy_dev->nformats = ARRAY_SIZE(csiphy_formats_st7110);
-+ mutex_init(&csiphy_dev->stream_lock);
-+ return 0;
-+}
-+
-+static int csiphy_set_power(struct v4l2_subdev *sd, int on)
-+{
-+ return 0;
-+}
-+
-+static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct stf_csiphy_dev *csiphy_dev = v4l2_get_subdevdata(sd);
-+
-+ mutex_lock(&csiphy_dev->stream_lock);
-+ if (enable) {
-+ if (csiphy_dev->stream_count == 0) {
-+ csiphy_dev->hw_ops->csiphy_clk_enable(csiphy_dev);
-+ csiphy_dev->hw_ops->csiphy_config_set(csiphy_dev);
-+ csiphy_dev->hw_ops->csiphy_stream_set(csiphy_dev, 1);
-+ }
-+ csiphy_dev->stream_count++;
-+ } else {
-+ if (csiphy_dev->stream_count == 0)
-+ goto exit;
-+ if (csiphy_dev->stream_count == 1) {
-+ csiphy_dev->hw_ops->csiphy_clk_disable(csiphy_dev);
-+ csiphy_dev->hw_ops->csiphy_stream_set(csiphy_dev, 0);
-+ }
-+ csiphy_dev->stream_count--;
-+ }
-+exit:
-+ mutex_unlock(&csiphy_dev->stream_lock);
-+
-+ return 0;
-+}
-+
-+static struct v4l2_mbus_framefmt *
-+__csiphy_get_format(struct stf_csiphy_dev *csiphy_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_format(
-+ &csiphy_dev->subdev,
-+ state,
-+ pad);
-+
-+ return &csiphy_dev->fmt[pad];
-+}
-+
-+static void csiphy_try_format(struct stf_csiphy_dev *csiphy_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ unsigned int i;
-+
-+ switch (pad) {
-+ case STF_CSIPHY_PAD_SINK:
-+ /* Set format on sink pad */
-+
-+ for (i = 0; i < csiphy_dev->nformats; i++)
-+ if (fmt->code == csiphy_dev->formats[i].code)
-+ break;
-+
-+ if (i >= csiphy_dev->nformats)
-+ fmt->code = csiphy_dev->formats[0].code;
-+
-+ fmt->width = clamp_t(u32,
-+ fmt->width,
-+ STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ fmt->height = clamp_t(u32,
-+ fmt->height,
-+ STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+
-+ fmt->field = V4L2_FIELD_NONE;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->flags = 0;
-+
-+ break;
-+
-+ case STF_CSIPHY_PAD_SRC:
-+
-+ *fmt = *__csiphy_get_format(csiphy_dev,
-+ state,
-+ STF_CSIPHY_PAD_SINK, which);
-+
-+ break;
-+ }
-+}
-+
-+static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ struct stf_csiphy_dev *csiphy_dev = v4l2_get_subdevdata(sd);
-+
-+ if (code->index >= csiphy_dev->nformats)
-+ return -EINVAL;
-+
-+ if (code->pad == STF_CSIPHY_PAD_SINK) {
-+ code->code = csiphy_dev->formats[code->index].code;
-+ } else {
-+ struct v4l2_mbus_framefmt *sink_fmt;
-+
-+ sink_fmt = __csiphy_get_format(csiphy_dev, state,
-+ STF_CSIPHY_PAD_SINK,
-+ code->which);
-+
-+ code->code = sink_fmt->code;
-+ if (!code->code)
-+ return -EINVAL;
-+ }
-+ code->flags = 0;
-+ return 0;
-+}
-+
-+static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ struct stf_csiphy_dev *csiphy_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt format;
-+
-+ if (fse->index != 0)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = 1;
-+ format.height = 1;
-+ csiphy_try_format(csiphy_dev, state, fse->pad, &format, fse->which);
-+ fse->min_width = format.width;
-+ fse->min_height = format.height;
-+
-+ if (format.code != fse->code)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = -1;
-+ format.height = -1;
-+ csiphy_try_format(csiphy_dev, state, fse->pad, &format, fse->which);
-+ fse->max_width = format.width;
-+ fse->max_height = format.height;
-+
-+ return 0;
-+}
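csiphy_enum_frame_size() discovers the limits by running the try-format handler on deliberately out-of-range sizes: a width/height of 1 is clamped up to the minimum, and -1 (which wraps to U32_MAX in the u32 fields) is clamped down to the maximum. A small illustration of that clamp; MIN_W/MAX_W are placeholders for the STFCAMSS_FRAME_* limits defined elsewhere:

#include <stdio.h>
#include <stdint.h>

#define MIN_W 64u       /* placeholder for STFCAMSS_FRAME_MIN_WIDTH */
#define MAX_W 3840u     /* placeholder for STFCAMSS_FRAME_MAX_WIDTH */

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
        uint32_t probe_min = 1;
        uint32_t probe_max = (uint32_t)-1;      /* the -1 used by enum_frame_size */

        printf("min width = %u\n", clamp_u32(probe_min, MIN_W, MAX_W));
        printf("max width = %u\n", clamp_u32(probe_max, MIN_W, MAX_W));
        return 0;
}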
-+
-+static int csiphy_get_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_csiphy_dev *csiphy_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __csiphy_get_format(csiphy_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ fmt->format = *format;
-+
-+ return 0;
-+}
-+
-+static int csiphy_set_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_csiphy_dev *csiphy_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __csiphy_get_format(csiphy_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&csiphy_dev->stream_lock);
-+ if (csiphy_dev->stream_count) {
-+ fmt->format = *format;
-+ mutex_unlock(&csiphy_dev->stream_lock);
-+ goto out;
-+ } else {
-+ csiphy_try_format(csiphy_dev, state, fmt->pad, &fmt->format, fmt->which);
-+ *format = fmt->format;
-+ }
-+ mutex_unlock(&csiphy_dev->stream_lock);
-+
-+ /* Propagate the format from sink to source */
-+ if (fmt->pad == STF_CSIPHY_PAD_SINK) {
-+ format = __csiphy_get_format(csiphy_dev,
-+ state,
-+ STF_CSIPHY_PAD_SRC,
-+ fmt->which);
-+
-+ *format = fmt->format;
-+ csiphy_try_format(csiphy_dev, state, STF_CSIPHY_PAD_SRC, format,
-+ fmt->which);
-+ }
-+out:
-+ return 0;
-+}
-+
-+static int csiphy_init_formats(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_fh *fh)
-+{
-+ struct v4l2_subdev_format format = {
-+ .pad = STF_CSIPHY_PAD_SINK,
-+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
-+ V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .format = {
-+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
-+ .width = 1920,
-+ .height = 1080
-+ }
-+ };
-+
-+ return csiphy_set_format(sd, fh ? fh->state : NULL, &format);
-+}
-+
-+static int csiphy_link_setup(struct media_entity *entity,
-+ const struct media_pad *local,
-+ const struct media_pad *remote, u32 flags)
-+{
-+ if ((local->flags & MEDIA_PAD_FL_SOURCE) &&
-+ (flags & MEDIA_LNK_FL_ENABLED)) {
-+ struct v4l2_subdev *sd;
-+ struct stf_csiphy_dev *csiphy_dev;
-+ struct stf_csi_dev *csi_dev;
-+
-+ if (media_pad_remote_pad_first(local))
-+ return -EBUSY;
-+
-+ sd = media_entity_to_v4l2_subdev(entity);
-+ csiphy_dev = v4l2_get_subdevdata(sd);
-+
-+ sd = media_entity_to_v4l2_subdev(remote->entity);
-+ csi_dev = v4l2_get_subdevdata(sd);
-+ st_info(ST_CSIPHY, "CSIPHY0 link to CSI0\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static const struct v4l2_subdev_core_ops csiphy_core_ops = {
-+ .s_power = csiphy_set_power,
-+};
-+
-+static const struct v4l2_subdev_video_ops csiphy_video_ops = {
-+ .s_stream = csiphy_set_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops csiphy_pad_ops = {
-+ .enum_mbus_code = csiphy_enum_mbus_code,
-+ .enum_frame_size = csiphy_enum_frame_size,
-+ .get_fmt = csiphy_get_format,
-+ .set_fmt = csiphy_set_format,
-+};
-+
-+static const struct v4l2_subdev_ops csiphy_v4l2_ops = {
-+ .core = &csiphy_core_ops,
-+ .video = &csiphy_video_ops,
-+ .pad = &csiphy_pad_ops,
-+};
-+
-+static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops = {
-+ .open = csiphy_init_formats,
-+};
-+
-+static const struct media_entity_operations csiphy_media_ops = {
-+ .link_setup = csiphy_link_setup,
-+ .link_validate = v4l2_subdev_link_validate,
-+};
-+
-+int stf_csiphy_register(struct stf_csiphy_dev *csiphy_dev,
-+ struct v4l2_device *v4l2_dev)
-+{
-+ struct v4l2_subdev *sd = &csiphy_dev->subdev;
-+ struct device *dev = csiphy_dev->stfcamss->dev;
-+ struct media_pad *pads = csiphy_dev->pads;
-+ int ret;
-+
-+ v4l2_subdev_init(sd, &csiphy_v4l2_ops);
-+ sd->internal_ops = &csiphy_v4l2_internal_ops;
-+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
-+ STF_CSIPHY_NAME, 0);
-+ v4l2_set_subdevdata(sd, csiphy_dev);
-+
-+ ret = csiphy_init_formats(sd, NULL);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to init format: %d\n", ret);
-+ return ret;
-+ }
-+
-+ pads[STF_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
-+ pads[STF_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
-+
-+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
-+ sd->entity.ops = &csiphy_media_ops;
-+ ret = media_entity_pads_init(&sd->entity, STF_CSIPHY_PADS_NUM, pads);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to init media entity: %d\n", ret);
-+ return ret;
-+ }
-+
-+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
-+ if (ret < 0) {
-+ dev_err(dev, "Failed to register subdev: %d\n", ret);
-+ goto err_sreg;
-+ }
-+
-+ return 0;
-+
-+err_sreg:
-+ media_entity_cleanup(&sd->entity);
-+ return ret;
-+}
-+
-+int stf_csiphy_unregister(struct stf_csiphy_dev *csiphy_dev)
-+{
-+ v4l2_device_unregister_subdev(&csiphy_dev->subdev);
-+ media_entity_cleanup(&csiphy_dev->subdev.entity);
-+ mutex_destroy(&csiphy_dev->stream_lock);
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_csiphy.h
-@@ -0,0 +1,188 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_CSIPHY_H
-+#define STF_CSIPHY_H
-+
-+#include <media/v4l2-subdev.h>
-+#include <media/v4l2-device.h>
-+#include <media/media-entity.h>
-+#include <video/stf-vin.h>
-+
-+#define STF_CSIPHY_NAME "stf_csiphy"
-+
-+#define STF_CSIPHY_PAD_SINK 0
-+#define STF_CSIPHY_PAD_SRC 1
-+#define STF_CSIPHY_PADS_NUM 2
-+
-+#define STF_CSI2_MAX_DATA_LANES 4
-+
-+union static_config {
-+ u32 raw;
-+ struct {
-+ u32 sel : 2;
-+ u32 rsvd_6 : 2;
-+ u32 v2p0_support_enable : 1;
-+ u32 rsvd_5 : 3;
-+ u32 lane_nb : 3;
-+ u32 rsvd_4 : 5;
-+ u32 dl0_map : 3;
-+ u32 rsvd_3 : 1;
-+ u32 dl1_map : 3;
-+ u32 rsvd_2 : 1;
-+ u32 dl2_map : 3;
-+ u32 rsvd_1 : 1;
-+ u32 dl3_map : 3;
-+ u32 rsvd_0 : 1;
-+ } bits;
-+};
-+
-+union error_bypass_cfg {
-+ u32 value;
-+ struct {
-+ u32 crc : 1;
-+ u32 ecc : 1;
-+ u32 data_id : 1;
-+ u32 rsvd_0 : 29;
-+ };
-+};
-+
-+union stream_monitor_ctrl {
-+ u32 value;
-+ struct {
-+ u32 lb_vc : 4;
-+ u32 lb_en : 1;
-+ u32 timer_vc : 4;
-+ u32 timer_en : 1;
-+ u32 timer_eof : 1;
-+ u32 frame_mon_vc : 4;
-+ u32 frame_mon_en : 1;
-+ u32 frame_length : 16;
-+ };
-+};
-+
-+union stream_cfg {
-+ u32 value;
-+ struct {
-+ u32 interface_mode : 1;
-+ u32 ls_le_mode : 1;
-+ u32 rsvd_3 : 2;
-+ u32 num_pixels : 2;
-+ u32 rsvd_2 : 2;
-+ u32 fifo_mode : 2;
-+ u32 rsvd_1 : 2;
-+ u32 bpp_bypass : 3;
-+ u32 rsvd_0 : 1;
-+ u32 fifo_fill : 16;
-+ };
-+};
-+
-+union dphy_lane_ctrl {
-+ u32 raw;
-+ struct {
-+ u32 dl0_en : 1;
-+ u32 dl1_en : 1;
-+ u32 dl2_en : 1;
-+ u32 dl3_en : 1;
-+ u32 cl_en : 1;
-+ u32 rsvd_1 : 7;
-+ u32 dl0_reset : 1;
-+ u32 dl1_reset : 1;
-+ u32 dl2_reset : 1;
-+ u32 dl3_reset : 1;
-+ u32 cl_reset : 1;
-+ u32 rsvd_0 : 15;
-+ } bits;
-+};
-+
-+union dphy_lane_swap {
-+ u32 raw;
-+ struct {
-+ u32 rx_1c2c_sel : 1;
-+ u32 lane_swap_clk : 3;
-+ u32 lane_swap_clk1 : 3;
-+ u32 lane_swap_lan0 : 3;
-+ u32 lane_swap_lan1 : 3;
-+ u32 lane_swap_lan2 : 3;
-+ u32 lane_swap_lan3 : 3;
-+ u32 dpdn_swap_clk : 1;
-+ u32 dpdn_swap_clk1 : 1;
-+ u32 dpdn_swap_lan0 : 1;
-+ u32 dpdn_swap_lan1 : 1;
-+ u32 dpdn_swap_lan2 : 1;
-+ u32 dpdn_swap_lan3 : 1;
-+ u32 hs_freq_chang_clk0 : 1;
-+ u32 hs_freq_chang_clk1 : 1;
-+ u32 reserved : 5;
-+ } bits;
-+};
-+
-+union dphy_lane_en {
-+ u32 raw;
-+ struct {
-+ u32 gpio_en : 6;
-+ u32 mp_test_mode_sel : 5;
-+ u32 mp_test_en : 1;
-+ u32 dphy_enable_lan0 : 1;
-+ u32 dphy_enable_lan1 : 1;
-+ u32 dphy_enable_lan2 : 1;
-+ u32 dphy_enable_lan3 : 1;
-+ u32 rsvd_0 : 16;
-+ } bits;
-+};
-+
-+struct csiphy_format {
-+ u32 code;
-+ u8 bpp;
-+};
-+
-+struct csi2phy_cfg {
-+ unsigned int flags;
-+ unsigned char data_lanes[STF_CSI2_MAX_DATA_LANES];
-+ unsigned char clock_lane;
-+ unsigned char num_data_lanes;
-+ bool lane_polarities[1 + STF_CSI2_MAX_DATA_LANES];
-+};
-+
-+struct csi2phy_cfg2 {
-+ unsigned char data_lanes[STF_CSI2_MAX_DATA_LANES];
-+ unsigned char num_data_lanes;
-+ unsigned char num_clks;
-+ unsigned char clock_lane;
-+ unsigned char clock1_lane;
-+ bool lane_polarities[2 + STF_CSI2_MAX_DATA_LANES];
-+};
-+
-+struct stf_csiphy_dev;
-+
-+struct csiphy_hw_ops {
-+ int (*csiphy_clk_enable)(struct stf_csiphy_dev *csiphy_dev);
-+ int (*csiphy_clk_disable)(struct stf_csiphy_dev *csiphy_dev);
-+ int (*csiphy_config_set)(struct stf_csiphy_dev *csiphy_dev);
-+ int (*csiphy_stream_set)(struct stf_csiphy_dev *csiphy_dev, int on);
-+};
-+
-+struct stf_csiphy_dev {
-+ struct stfcamss *stfcamss;
-+ struct csi2phy_cfg *csiphy;
-+ struct v4l2_subdev subdev;
-+ struct media_pad pads[STF_CSIPHY_PADS_NUM];
-+ struct v4l2_mbus_framefmt fmt[STF_CSIPHY_PADS_NUM];
-+ const struct csiphy_format *formats;
-+ unsigned int nformats;
-+ struct csiphy_hw_ops *hw_ops;
-+ struct mutex stream_lock;
-+ int stream_count;
-+};
-+
-+extern int stf_csiphy_subdev_init(struct stfcamss *stfcamss);
-+extern int stf_csiphy_register(struct stf_csiphy_dev *csiphy_dev,
-+ struct v4l2_device *v4l2_dev);
-+extern int stf_csiphy_unregister(struct stf_csiphy_dev *csiphy_dev);
-+
-+extern struct csiphy_hw_ops csiphy_ops;
-+
-+#endif /* STF_CSIPHY_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_csiphy_hw_ops.c
-@@ -0,0 +1,335 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <linux/sort.h>
-+
-+static int stf_csiphy_clk_set(struct stf_csiphy_dev *csiphy_dev, int on)
-+{
-+ struct stfcamss *stfcamss = csiphy_dev->stfcamss;
-+ static int init_flag;
-+ static struct mutex count_lock;
-+ static int count;
-+
-+ if (!init_flag) {
-+ init_flag = 1;
-+ mutex_init(&count_lock);
-+ }
-+ mutex_lock(&count_lock);
-+ if (on) {
-+ clk_set_rate(stfcamss->sys_clk[STFCLK_M31DPHY_CFGCLK_IN].clk,
-+ 99000000);
-+ clk_set_rate(stfcamss->sys_clk[STFCLK_M31DPHY_REFCLK_IN].clk,
-+ 49500000);
-+ clk_set_rate(stfcamss->sys_clk[STFCLK_M31DPHY_TXCLKESC_LAN0].clk,
-+ 19800000);
-+
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_M31DPHY_HW].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_M31DPHY_B09_ALWAYS_ON].rstc);
-+
-+ count++;
-+ } else {
-+ if (count == 0)
-+ goto exit;
-+ if (count == 1) {
-+ reset_control_assert(stfcamss->sys_rst[STFRST_M31DPHY_HW].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_M31DPHY_B09_ALWAYS_ON].rstc);
-+ }
-+ count--;
-+ }
-+exit:
-+ mutex_unlock(&count_lock);
-+ return 0;
-+}
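stf_csiphy_clk_set() keeps a file-scope use count so the D-PHY resets are only asserted when the last user goes away, but the mutex guarding that count is initialised lazily behind a plain flag, which can itself race if two streams start at the same time. A statically initialised mutex avoids that; a minimal sketch of the same refcounting with DEFINE_MUTEX() (names are illustrative, clock and reset calls elided):

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(phy_count_lock);    /* no lazy init needed */
static int phy_use_count;

/* Illustrative refcount helper; the clock/reset programming is elided. */
static void phy_refcount(bool on)
{
        mutex_lock(&phy_count_lock);
        if (on) {
                if (phy_use_count++ == 0)
                        ; /* set clock rates, deassert resets here */
        } else if (phy_use_count > 0) {
                if (--phy_use_count == 0)
                        ; /* assert resets here */
        }
        mutex_unlock(&phy_count_lock);
}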
-+
-+static int stf_csiphy_clk_enable(struct stf_csiphy_dev *csiphy_dev)
-+{
-+ return stf_csiphy_clk_set(csiphy_dev, 1);
-+}
-+
-+static int stf_csiphy_clk_disable(struct stf_csiphy_dev *csiphy_dev)
-+{
-+ return stf_csiphy_clk_set(csiphy_dev, 0);
-+}
-+
-+#ifndef USE_CSIDPHY_ONE_CLK_MODE
-+static int cmp_func(const void *x1, const void *x2)
-+{
-+ return *((unsigned char *)x1) - *((unsigned char *)x2);
-+}
-+#endif
-+
-+int try_cfg(struct csi2phy_cfg2 *cfg, struct csi2phy_cfg *cfg0,
-+ struct csi2phy_cfg *cfg1)
-+{
-+ int i = 0;
-+
-+ cfg->clock_lane = 0;
-+ cfg->clock1_lane = 5;
-+ cfg->data_lanes[0] = 1;
-+ cfg->data_lanes[1] = 2;
-+ cfg->data_lanes[2] = 3;
-+ cfg->data_lanes[3] = 4;
-+
-+ if (cfg0 && cfg1) {
-+ st_debug(ST_CSIPHY, "CSIPHY use 2 clk mode\n");
-+ cfg->num_clks = 2;
-+ cfg->num_data_lanes =
-+ cfg1->num_data_lanes + cfg0->num_data_lanes;
-+ if (cfg->num_data_lanes > STF_CSI2_MAX_DATA_LANES)
-+ return -EINVAL;
-+ cfg->clock_lane = cfg0->clock_lane;
-+ cfg->lane_polarities[0] = cfg0->lane_polarities[0];
-+ cfg->clock1_lane = cfg1->clock_lane;
-+ cfg->lane_polarities[1] = cfg1->lane_polarities[0];
-+ for (i = 0; i < cfg0->num_data_lanes; i++) {
-+ cfg->data_lanes[i] = cfg0->data_lanes[i];
-+ cfg->lane_polarities[i + 2] =
-+ cfg0->lane_polarities[i + 1];
-+ }
-+
-+ for (i = cfg0->num_data_lanes; i < cfg->num_data_lanes; i++) {
-+ cfg->data_lanes[i] =
-+ cfg1->data_lanes[i - cfg0->num_data_lanes];
-+ cfg->lane_polarities[i + 2] =
-+ cfg1->lane_polarities[i - cfg0->num_data_lanes + 1];
-+ }
-+ } else if (cfg0 && !cfg1) {
-+ st_debug(ST_CSIPHY, "CSIPHY cfg0 use 1 clk mode\n");
-+ cfg->num_clks = 1;
-+ cfg->num_data_lanes = cfg0->num_data_lanes;
-+ cfg->clock_lane = cfg->clock1_lane = cfg0->clock_lane;
-+ cfg->lane_polarities[0] = cfg->lane_polarities[1] =
-+ cfg0->lane_polarities[0];
-+ for (i = 0; i < cfg0->num_data_lanes; i++) {
-+ cfg->data_lanes[i] = cfg0->data_lanes[i];
-+ cfg->lane_polarities[i + 2] = cfg0->lane_polarities[i + 1];
-+ }
-+ } else if (!cfg0 && cfg1) {
-+ st_debug(ST_CSIPHY, "CSIPHY cfg1 use 1 clk mode\n");
-+ cfg->num_clks = 1;
-+ cfg->num_data_lanes = cfg1->num_data_lanes;
-+ cfg->clock_lane = cfg->clock1_lane = cfg1->clock_lane;
-+ cfg->lane_polarities[0] = cfg->lane_polarities[1] =
-+ cfg1->lane_polarities[0];
-+ for (i = 0; i < cfg1->num_data_lanes; i++) {
-+ cfg->data_lanes[i] = cfg1->data_lanes[i];
-+ cfg->lane_polarities[i + 2] = cfg1->lane_polarities[i + 1];
-+ }
-+ } else {
-+ return -EINVAL;
-+ }
-+
-+#ifndef USE_CSIDPHY_ONE_CLK_MODE
-+ sort(cfg->data_lanes, cfg->num_data_lanes,
-+ sizeof(cfg->data_lanes[0]), cmp_func, NULL);
-+#endif
-+ for (i = 0; i < cfg->num_data_lanes; i++)
-+ st_debug(ST_CSIPHY, "%d: %d\n", i, cfg->data_lanes[i]);
-+ return 0;
-+}
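try_cfg() flattens one or two per-port PHY descriptions into a single csi2phy_cfg2: with both ports present the data lanes are concatenated and two clock lanes are kept, while with a single port its clock lane is duplicated into both slots. A hedged usage sketch for the single-port case (the lane numbers are made up; only the structures and try_cfg() itself come from this driver):

#include "stf_csiphy.h"

int try_cfg(struct csi2phy_cfg2 *cfg, struct csi2phy_cfg *cfg0,
            struct csi2phy_cfg *cfg1);          /* defined above */

/* Hedged usage sketch (not part of the driver): one port, two data lanes. */
static int example_single_port(void)
{
        struct csi2phy_cfg port0 = {
                .clock_lane = 0,
                .num_data_lanes = 2,
                .data_lanes = { 1, 2 },
        };
        struct csi2phy_cfg2 merged = { 0 };
        int ret = try_cfg(&merged, &port0, NULL);

        /*
         * On success: merged.num_clks == 1, clock_lane == clock1_lane == 0,
         * data_lanes[] starts with { 1, 2 } and the remaining entries keep
         * the 3/4 defaults filled in at the top of try_cfg().
         */
        return ret;
}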
-+
-+static int csi2rx_dphy_config(struct stf_vin_dev *vin,
-+ struct stf_csiphy_dev *csiphy_dev)
-+{
-+ struct csi2phy_cfg2 cfg2 = {0};
-+ struct csi2phy_cfg2 *cfg = &cfg2;
-+ struct csi2phy_cfg *phycfg = csiphy_dev->csiphy;
-+
-+ if (!phycfg)
-+ return -EINVAL;
-+
-+ if (try_cfg(cfg, phycfg, NULL))
-+ return -EINVAL;
-+
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_4, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_8, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_12, 0xfff0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_16, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_20, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_24, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_28, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_32, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_36, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_40, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_44, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_48, 0x24000000);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_52, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_56, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_60, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_64, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_68, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_72, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_76, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_80, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_84, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_88, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_92, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_96, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_100, 0x02000000);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_104, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_108, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_112, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_116, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_120, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_124, 0xc);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_128, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_132, 0xcc500000);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_136, 0xcc);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_140, 0x0);
-+ reg_write(vin->rstgen_base, M31DPHY_APBCFGSAIF__SYSCFG_144, 0x0);
-+
-+ reg_set_bit(vin->rstgen_base, //r100_ctrl0_2d1c_efuse_en
-+ M31DPHY_APBCFGSAIF__SYSCFG_0,
-+ BIT(6), 1<<6);
-+ reg_set_bit(vin->rstgen_base, //r100_ctrl0_2d1c_efuse_in
-+ M31DPHY_APBCFGSAIF__SYSCFG_0,
-+ BIT(12)|BIT(11)|BIT(10)|BIT(9)|BIT(8)|BIT(7), 0x1b<<7);
-+ reg_set_bit(vin->rstgen_base, //r100_ctrl1_2d1c_efuse_en
-+ M31DPHY_APBCFGSAIF__SYSCFG_0,
-+ BIT(13), 1<<13);
-+ reg_set_bit(vin->rstgen_base, //r100_ctrl1_2d1c_efuse_in
-+ M31DPHY_APBCFGSAIF__SYSCFG_0,
-+ BIT(19)|BIT(18)|BIT(17)|BIT(16)|BIT(15)|BIT(14), 0x1b<<14);
-+
-+ reg_set_bit(vin->rstgen_base, //data_bus16_8
-+ M31DPHY_APBCFGSAIF__SYSCFG_184,
-+ BIT(8), 0<<8);
-+
-+ reg_set_bit(vin->rstgen_base, //debug_mode_sel
-+ M31DPHY_APBCFGSAIF__SYSCFG_184,
-+ BIT(15)|BIT(14)|BIT(13)|BIT(12)|BIT(11)|BIT(10)|BIT(9), 0x5a<<9);
-+
-+ reg_set_bit(vin->rstgen_base, //dpdn_swap_clk0
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(0), cfg->lane_polarities[0]<<0);
-+ reg_set_bit(vin->rstgen_base, //dpdn_swap_clk1
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(1), cfg->lane_polarities[1]<<1);
-+ reg_set_bit(vin->rstgen_base, //dpdn_swap_lan0
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(2), cfg->lane_polarities[2]<<2);
-+ reg_set_bit(vin->rstgen_base, //dpdn_swap_lan1
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(3), cfg->lane_polarities[3]<<3);
-+ reg_set_bit(vin->rstgen_base, //dpdn_swap_lan2
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(4), cfg->lane_polarities[4]<<4);
-+ reg_set_bit(vin->rstgen_base, //dpdn_swap_lan3
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(5), cfg->lane_polarities[5]<<5);
-+ reg_set_bit(vin->rstgen_base, //enable clk0
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(6), 1<<6);
-+ reg_set_bit(vin->rstgen_base, //enable clk1
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(7), 1<<7);
-+ reg_set_bit(vin->rstgen_base, //enable lan0
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(8), 1<<8);
-+ reg_set_bit(vin->rstgen_base, //enable lan1
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(9), 1<<9);
-+ reg_set_bit(vin->rstgen_base, //enable lan2
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(10), 1<<10);
-+ reg_set_bit(vin->rstgen_base, //enable lan3
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(11), 1<<11);
-+ reg_set_bit(vin->rstgen_base, //gpi_en
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(17)|BIT(16)|BIT(15)|BIT(14)|BIT(13)|BIT(12),
-+ 0<<12);
-+ reg_set_bit(vin->rstgen_base, //hs_freq_change_clk0
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(18), 0<<18);
-+ reg_set_bit(vin->rstgen_base, //hs_freq_change_clk1
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(19), 0<<19);
-+
-+ reg_set_bit(vin->rstgen_base,
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(22)|BIT(21)|BIT(20), cfg->clock_lane<<20); //clock lane 0
-+ reg_set_bit(vin->rstgen_base,
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(25)|BIT(24)|BIT(23), cfg->clock1_lane<<23); //clock lane 1
-+
-+ reg_set_bit(vin->rstgen_base,
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(28)|BIT(27)|BIT(26), cfg->data_lanes[0]<<26); //data lane 0
-+ reg_set_bit(vin->rstgen_base,
-+ M31DPHY_APBCFGSAIF__SYSCFG_188,
-+ BIT(31)|BIT(30)|BIT(29), cfg->data_lanes[1]<<29); //data lane 1
-+ reg_set_bit(vin->rstgen_base,
-+ M31DPHY_APBCFGSAIF__SYSCFG_192,
-+ BIT(2)|BIT(1)|BIT(0), cfg->data_lanes[2]<<0); //data lane 2
-+ reg_set_bit(vin->rstgen_base,
-+ M31DPHY_APBCFGSAIF__SYSCFG_192,
-+ BIT(5)|BIT(4)|BIT(3), cfg->data_lanes[3]<<3); //data lane 3
-+
-+ reg_set_bit(vin->rstgen_base, //mp_test_en
-+ M31DPHY_APBCFGSAIF__SYSCFG_192,
-+ BIT(6), 0<<6);
-+ reg_set_bit(vin->rstgen_base, //mp_test_mode_sel
-+ M31DPHY_APBCFGSAIF__SYSCFG_192,
-+ BIT(11)|BIT(10)|BIT(9)|BIT(8)|BIT(7), 0<<7);
-+
-+ reg_set_bit(vin->rstgen_base, //pll_clk_sel
-+ M31DPHY_APBCFGSAIF__SYSCFG_192,
-+ BIT(20)|BIT(19)|BIT(18)|BIT(17)|BIT(16)|BIT(15)|BIT(14)|BIT(13)|BIT(12),
-+ 0x37c<<12);
-+
-+ reg_set_bit(vin->rstgen_base, //rx_1c2c_sel
-+ M31DPHY_APBCFGSAIF__SYSCFG_200,
-+ BIT(8), 0<<8);
-+
-+ reg_set_bit(vin->rstgen_base, //precounter in clk0
-+ M31DPHY_APBCFGSAIF__SYSCFG_192,
-+ BIT(29)|BIT(28)|BIT(27)|BIT(26)|BIT(25)|BIT(24)|BIT(23)|BIT(22),
-+ 8<<22);
-+ reg_set_bit(vin->rstgen_base, //precounter in clk1
-+ M31DPHY_APBCFGSAIF__SYSCFG_196,
-+ BIT(7)|BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-+ 8<<0);
-+ reg_set_bit(vin->rstgen_base, //precounter in lan0
-+ M31DPHY_APBCFGSAIF__SYSCFG_196,
-+ BIT(15)|BIT(14)|BIT(13)|BIT(12)|BIT(11)|BIT(10)|BIT(9)|BIT(8),
-+ 7<<8);
-+ reg_set_bit(vin->rstgen_base, //precounter in lan1
-+ M31DPHY_APBCFGSAIF__SYSCFG_196,
-+ BIT(23)|BIT(22)|BIT(21)|BIT(20)|BIT(19)|BIT(18)|BIT(17)|BIT(16),
-+ 7<<16);
-+ reg_set_bit(vin->rstgen_base, //precounter in lan2
-+ M31DPHY_APBCFGSAIF__SYSCFG_196,
-+ BIT(31)|BIT(30)|BIT(29)|BIT(28)|BIT(27)|BIT(26)|BIT(25)|BIT(24),
-+ 7<<24);
-+ reg_set_bit(vin->rstgen_base, //precounter in lan3
-+ M31DPHY_APBCFGSAIF__SYSCFG_200,
-+ BIT(7)|BIT(6)|BIT(5)|BIT(4)|BIT(3)|BIT(2)|BIT(1)|BIT(0),
-+ 7<<0);
-+
-+ return 0;
-+}
-+
-+static int stf_csiphy_config_set(struct stf_csiphy_dev *csiphy_dev)
-+{
-+ struct stf_vin_dev *vin = csiphy_dev->stfcamss->vin;
-+
-+ csi2rx_dphy_config(vin, csiphy_dev);
-+ return 0;
-+}
-+
-+static int stf_csiphy_stream_set(struct stf_csiphy_dev *csiphy_dev, int on)
-+{
-+ return 0;
-+}
-+
-+struct csiphy_hw_ops csiphy_ops = {
-+ .csiphy_clk_enable = stf_csiphy_clk_enable,
-+ .csiphy_clk_disable = stf_csiphy_clk_disable,
-+ .csiphy_config_set = stf_csiphy_config_set,
-+ .csiphy_stream_set = stf_csiphy_stream_set,
-+};
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_dmabuf.c
-@@ -0,0 +1,123 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/dma-buf.h>
-+#include <media/v4l2-subdev.h>
-+#include <media/videobuf2-dma-contig.h>
-+
-+#include "stf_isp_ioctl.h"
-+#include "stf_dmabuf.h"
-+
-+#define TOTAL_SIZE_LIMIT (64 * 1024 * 1024)
-+
-+static size_t total_size;
-+static struct vb2_queue vb2_queue = {
-+ .dma_attrs = 0,
-+ .gfp_flags = 0,
-+ .dma_dir = DMA_TO_DEVICE,
-+};
-+static struct vb2_buffer vb = {
-+ .vb2_queue = &vb2_queue,
-+};
-+
-+static int dmabuf_create(struct device *dev,
-+ struct dmabuf_create *head)
-+{
-+ struct dma_buf *dmabuf = NULL;
-+ void *mem_priv = NULL;
-+ dma_addr_t *paddr = NULL;
-+ int ret = 0;
-+
-+ mem_priv = vb2_dma_contig_memops.alloc(&vb, dev, head->size);
-+ if (IS_ERR_OR_NULL(mem_priv)) {
-+ ret = mem_priv ? PTR_ERR(mem_priv) : -ENOMEM;
-+ goto exit;
-+ }
-+
-+ dmabuf = vb2_dma_contig_memops.get_dmabuf(&vb, mem_priv, O_RDWR);
-+ if (IS_ERR(dmabuf)) {
-+ ret = PTR_ERR(dmabuf);
-+ goto free;
-+ }
-+
-+ head->fd = dma_buf_fd(dmabuf, O_CLOEXEC);
-+ if (head->fd < 0) {
-+ dma_buf_put(dmabuf);
-+ ret = head->fd;
-+ goto free;
-+ }
-+
-+ paddr = vb2_dma_contig_memops.cookie(&vb, mem_priv);
-+ head->paddr = *paddr;
-+ return 0;
-+free:
-+ vb2_dma_contig_memops.put(mem_priv);
-+exit:
-+ return ret;
-+}
-+
-+int stf_dmabuf_ioctl_alloc(struct device *dev, void *arg)
-+{
-+ struct dmabuf_create *head = arg;
-+ int ret = -EINVAL;
-+
-+ if (IS_ERR_OR_NULL(head))
-+ return -EFAULT;
-+
-+ head->size = PAGE_ALIGN(head->size);
-+ if (!head->size)
-+ return -EINVAL;
-+ if ((head->size + total_size) > TOTAL_SIZE_LIMIT)
-+ return -ENOMEM;
-+
-+ ret = dmabuf_create(dev, head);
-+ if (ret)
-+ return -EFAULT;
-+
-+ total_size += head->size;
-+ return ret;
-+}
-+
-+int stf_dmabuf_ioctl_free(struct device *dev, void *arg)
-+{
-+ struct dmabuf_create *head = arg;
-+ struct dma_buf *dmabuf = NULL;
-+ int ret = 0;
-+
-+ if (IS_ERR_OR_NULL(head))
-+ return -EFAULT;
-+ if (head->size != PAGE_ALIGN(head->size))
-+ return -EINVAL;
-+ if (head->size > total_size)
-+ return -EINVAL;
-+
-+ dmabuf = dma_buf_get(head->fd);
-+ if (IS_ERR_OR_NULL(dmabuf))
-+ return -EINVAL;
-+
-+ dma_buf_put(dmabuf);
-+ vb2_dma_contig_memops.put(dmabuf->priv);
-+ total_size -= head->size;
-+ return ret;
-+}
-+
-+int stf_dmabuf_ioctl(struct device *dev, unsigned int cmd, void *arg)
-+{
-+ int ret = -ENOIOCTLCMD;
-+
-+ switch (cmd) {
-+ case VIDIOC_STF_DMABUF_ALLOC:
-+ ret = stf_dmabuf_ioctl_alloc(dev, arg);
-+ break;
-+ case VIDIOC_STF_DMABUF_FREE:
-+ ret = stf_dmabuf_ioctl_free(dev, arg);
-+ break;
-+ default:
-+ break;
-+ }
-+ return ret;
-+}
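The two ioctls let user space ask the driver to allocate a physically contiguous buffer, export it as a dma-buf fd and report its bus address. A hedged user-space sketch; it assumes struct dmabuf_create and the VIDIOC_STF_DMABUF_* ioctls from stf_isp_ioctl.h are visible to applications and that /dev/video0 is the node wired to stf_dmabuf_ioctl():

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "stf_isp_ioctl.h"      /* struct dmabuf_create, VIDIOC_STF_DMABUF_* */

int main(void)
{
        struct dmabuf_create buf;
        int fd = open("/dev/video0", O_RDWR);   /* assumed ISP video node */
        void *p;

        if (fd < 0)
                return 1;

        memset(&buf, 0, sizeof(buf));
        buf.size = 4 * 1024 * 1024;     /* rounded up to a page by the driver */
        if (ioctl(fd, VIDIOC_STF_DMABUF_ALLOC, &buf) < 0)
                return 1;

        p = mmap(NULL, buf.size, PROT_READ | PROT_WRITE, MAP_SHARED, buf.fd, 0);
        if (p != MAP_FAILED) {
                printf("mapped %llu bytes, paddr 0x%llx\n",
                       (unsigned long long)buf.size,
                       (unsigned long long)buf.paddr);
                munmap(p, buf.size);
        }

        ioctl(fd, VIDIOC_STF_DMABUF_FREE, &buf);
        close(fd);
        return 0;
}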
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_dmabuf.h
-@@ -0,0 +1,12 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_DMABUF_H
-+#define STF_DMABUF_H
-+
-+extern int stf_dmabuf_ioctl(struct device *dev, unsigned int cmd, void *arg);
-+
-+#endif /* STF_DMABUF_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_dvp.c
-@@ -0,0 +1,385 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+
-+static const struct dvp_format dvp_formats_st7110[] = {
-+ { MEDIA_BUS_FMT_YUYV8_2X8, 8},
-+ { MEDIA_BUS_FMT_RGB565_2X8_LE, 8},
-+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8},
-+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8},
-+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8},
-+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8},
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 8},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 8},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 8},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 8},
-+};
-+
-+static int dvp_find_format(u32 code,
-+ const struct dvp_format *formats,
-+ unsigned int nformats)
-+{
-+ int i;
-+
-+ for (i = 0; i < nformats; i++)
-+ if (formats[i].code == code)
-+ return i;
-+ return -EINVAL;
-+}
-+
-+int stf_dvp_subdev_init(struct stfcamss *stfcamss)
-+{
-+ struct stf_dvp_dev *dvp_dev = stfcamss->dvp_dev;
-+
-+ dvp_dev->s_type = SENSOR_VIN;
-+ dvp_dev->hw_ops = &dvp_ops;
-+ dvp_dev->stfcamss = stfcamss;
-+ dvp_dev->formats = dvp_formats_st7110;
-+ dvp_dev->nformats = ARRAY_SIZE(dvp_formats_st7110);
-+ mutex_init(&dvp_dev->stream_lock);
-+ dvp_dev->stream_count = 0;
-+ return 0;
-+}
-+
-+static int dvp_set_power(struct v4l2_subdev *sd, int on)
-+{
-+ return 0;
-+}
-+
-+static struct v4l2_mbus_framefmt *
-+__dvp_get_format(struct stf_dvp_dev *dvp_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_format(
-+ &dvp_dev->subdev, state, pad);
-+ return &dvp_dev->fmt[pad];
-+}
-+
-+static int dvp_set_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct stf_dvp_dev *dvp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+ int ret = 0;
-+
-+ format = __dvp_get_format(dvp_dev, NULL, STF_DVP_PAD_SRC,
-+ V4L2_SUBDEV_FORMAT_ACTIVE);
-+ if (format == NULL)
-+ return -EINVAL;
-+ ret = dvp_find_format(format->code,
-+ dvp_dev->formats,
-+ dvp_dev->nformats);
-+ if (ret < 0)
-+ return ret;
-+
-+ mutex_lock(&dvp_dev->stream_lock);
-+ if (enable) {
-+ if (dvp_dev->stream_count == 0) {
-+ dvp_dev->hw_ops->dvp_clk_enable(dvp_dev);
-+ dvp_dev->hw_ops->dvp_config_set(dvp_dev);
-+ dvp_dev->hw_ops->dvp_set_format(dvp_dev,
-+ format->width, dvp_dev->formats[ret].bpp);
-+ dvp_dev->hw_ops->dvp_stream_set(dvp_dev, 1);
-+ }
-+ dvp_dev->stream_count++;
-+ } else {
-+ if (dvp_dev->stream_count == 0)
-+ goto exit;
-+ if (dvp_dev->stream_count == 1) {
-+ dvp_dev->hw_ops->dvp_stream_set(dvp_dev, 0);
-+ dvp_dev->hw_ops->dvp_clk_disable(dvp_dev);
-+ }
-+ dvp_dev->stream_count--;
-+ }
-+exit:
-+ mutex_unlock(&dvp_dev->stream_lock);
-+ return 0;
-+}
-+
-+static void dvp_try_format(struct stf_dvp_dev *dvp_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ unsigned int i;
-+
-+ switch (pad) {
-+ case STF_DVP_PAD_SINK:
-+ /* Set format on sink pad */
-+
-+ for (i = 0; i < dvp_dev->nformats; i++)
-+ if (fmt->code == dvp_dev->formats[i].code)
-+ break;
-+
-+ if (i >= dvp_dev->nformats)
-+ fmt->code = dvp_dev->formats[0].code;
-+
-+ fmt->width = clamp_t(u32,
-+ fmt->width, STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ fmt->height = clamp_t(u32,
-+ fmt->height, STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+
-+ fmt->field = V4L2_FIELD_NONE;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->flags = 0;
-+
-+ break;
-+
-+ case STF_DVP_PAD_SRC:
-+
-+ *fmt = *__dvp_get_format(dvp_dev, state, STF_DVP_PAD_SINK, which);
-+
-+ break;
-+ }
-+}
-+
-+static int dvp_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ struct stf_dvp_dev *dvp_dev = v4l2_get_subdevdata(sd);
-+
-+ if (code->index >= dvp_dev->nformats)
-+ return -EINVAL;
-+
-+ if (code->pad == STF_DVP_PAD_SINK) {
-+ code->code = dvp_dev->formats[code->index].code;
-+ } else {
-+ struct v4l2_mbus_framefmt *sink_fmt;
-+
-+ sink_fmt = __dvp_get_format(dvp_dev, state, STF_DVP_PAD_SINK,
-+ code->which);
-+
-+ code->code = sink_fmt->code;
-+ if (!code->code)
-+ return -EINVAL;
-+ }
-+ code->flags = 0;
-+
-+ return 0;
-+}
-+
-+static int dvp_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ struct stf_dvp_dev *dvp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt format;
-+
-+ if (fse->index != 0)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = 1;
-+ format.height = 1;
-+ dvp_try_format(dvp_dev, state, fse->pad, &format, fse->which);
-+ fse->min_width = format.width;
-+ fse->min_height = format.height;
-+
-+ if (format.code != fse->code)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = -1;
-+ format.height = -1;
-+ dvp_try_format(dvp_dev, state, fse->pad, &format, fse->which);
-+ fse->max_width = format.width;
-+ fse->max_height = format.height;
-+
-+ return 0;
-+}
-+
-+static int dvp_get_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_dvp_dev *dvp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __dvp_get_format(dvp_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ fmt->format = *format;
-+
-+ return 0;
-+}
-+
-+static int dvp_set_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_dvp_dev *dvp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __dvp_get_format(dvp_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&dvp_dev->stream_lock);
-+ if (dvp_dev->stream_count) {
-+ fmt->format = *format;
-+ mutex_unlock(&dvp_dev->stream_lock);
-+ goto out;
-+ } else {
-+ dvp_try_format(dvp_dev, state, fmt->pad, &fmt->format, fmt->which);
-+ *format = fmt->format;
-+ }
-+ mutex_unlock(&dvp_dev->stream_lock);
-+
-+ /* Propagate the format from sink to source */
-+ if (fmt->pad == STF_DVP_PAD_SINK) {
-+ format = __dvp_get_format(dvp_dev, state, STF_DVP_PAD_SRC,
-+ fmt->which);
-+
-+ *format = fmt->format;
-+ dvp_try_format(dvp_dev, state, STF_DVP_PAD_SRC, format,
-+ fmt->which);
-+ }
-+
-+out:
-+ return 0;
-+}
-+
-+static int dvp_init_formats(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_fh *fh)
-+{
-+ struct v4l2_subdev_format format = {
-+ .pad = STF_DVP_PAD_SINK,
-+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
-+ V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .format = {
-+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
-+ .width = 1920,
-+ .height = 1080
-+ }
-+ };
-+
-+ return dvp_set_format(sd, fh ? fh->state : NULL, &format);
-+}
-+
-+static int dvp_link_setup(struct media_entity *entity,
-+ const struct media_pad *local,
-+ const struct media_pad *remote, u32 flags)
-+{
-+ if ((local->flags & MEDIA_PAD_FL_SOURCE) &&
-+ (flags & MEDIA_LNK_FL_ENABLED)) {
-+ struct v4l2_subdev *sd;
-+ struct stf_dvp_dev *dvp_dev;
-+ struct vin_line *line;
-+
-+ if (media_pad_remote_pad_first(local))
-+ return -EBUSY;
-+
-+ sd = media_entity_to_v4l2_subdev(entity);
-+ dvp_dev = v4l2_get_subdevdata(sd);
-+
-+ sd = media_entity_to_v4l2_subdev(remote->entity);
-+ line = v4l2_get_subdevdata(sd);
-+ if (line->sdev_type == VIN_DEV_TYPE)
-+ dvp_dev->s_type = SENSOR_VIN;
-+ if (line->sdev_type == ISP_DEV_TYPE)
-+ dvp_dev->s_type = SENSOR_ISP;
-+ st_info(ST_DVP, "DVP device sensor type: %d\n", dvp_dev->s_type);
-+ }
-+
-+ return 0;
-+}
-+
-+static const struct v4l2_subdev_core_ops dvp_core_ops = {
-+ .s_power = dvp_set_power,
-+};
-+
-+static const struct v4l2_subdev_video_ops dvp_video_ops = {
-+ .s_stream = dvp_set_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops dvp_pad_ops = {
-+ .enum_mbus_code = dvp_enum_mbus_code,
-+ .enum_frame_size = dvp_enum_frame_size,
-+ .get_fmt = dvp_get_format,
-+ .set_fmt = dvp_set_format,
-+};
-+
-+static const struct v4l2_subdev_ops dvp_v4l2_ops = {
-+ .core = &dvp_core_ops,
-+ .video = &dvp_video_ops,
-+ .pad = &dvp_pad_ops,
-+};
-+
-+static const struct v4l2_subdev_internal_ops dvp_v4l2_internal_ops = {
-+ .open = dvp_init_formats,
-+};
-+
-+static const struct media_entity_operations dvp_media_ops = {
-+ .link_setup = dvp_link_setup,
-+ .link_validate = v4l2_subdev_link_validate,
-+};
-+
-+int stf_dvp_register(struct stf_dvp_dev *dvp_dev,
-+ struct v4l2_device *v4l2_dev)
-+{
-+ struct v4l2_subdev *sd = &dvp_dev->subdev;
-+ struct media_pad *pads = dvp_dev->pads;
-+ int ret;
-+
-+ v4l2_subdev_init(sd, &dvp_v4l2_ops);
-+ sd->internal_ops = &dvp_v4l2_internal_ops;
-+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
-+ STF_DVP_NAME, 0);
-+ v4l2_set_subdevdata(sd, dvp_dev);
-+
-+ ret = dvp_init_formats(sd, NULL);
-+ if (ret < 0) {
-+ st_err(ST_DVP, "Failed to init format: %d\n", ret);
-+ return ret;
-+ }
-+
-+ pads[STF_DVP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
-+ pads[STF_DVP_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
-+
-+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
-+ sd->entity.ops = &dvp_media_ops;
-+ ret = media_entity_pads_init(&sd->entity, STF_DVP_PADS_NUM, pads);
-+ if (ret < 0) {
-+ st_err(ST_DVP, "Failed to init media entity: %d\n", ret);
-+ return ret;
-+ }
-+
-+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
-+ if (ret < 0) {
-+ st_err(ST_DVP, "Failed to register subdev: %d\n", ret);
-+ goto err_sreg;
-+ }
-+
-+ return 0;
-+
-+err_sreg:
-+ media_entity_cleanup(&sd->entity);
-+ return ret;
-+}
-+
-+int stf_dvp_unregister(struct stf_dvp_dev *dvp_dev)
-+{
-+ v4l2_device_unregister_subdev(&dvp_dev->subdev);
-+ media_entity_cleanup(&dvp_dev->subdev.entity);
-+ mutex_destroy(&dvp_dev->stream_lock);
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_dvp.h
-@@ -0,0 +1,67 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_DVP_H
-+#define STF_DVP_H
-+
-+#include <media/v4l2-subdev.h>
-+#include <media/v4l2-device.h>
-+#include <media/media-entity.h>
-+#include <video/stf-vin.h>
-+
-+#define STF_DVP_NAME "stf_dvp"
-+
-+#define STF_DVP_PAD_SINK 0
-+#define STF_DVP_PAD_SRC 1
-+#define STF_DVP_PADS_NUM 2
-+
-+struct dvp_format {
-+ u32 code;
-+ u8 bpp;
-+};
-+
-+enum sensor_type;
-+enum subdev_type;
-+
-+struct dvp_cfg {
-+ unsigned int flags;
-+ unsigned char bus_width;
-+ unsigned char data_shift;
-+};
-+
-+struct stf_dvp_dev;
-+
-+struct dvp_hw_ops {
-+ int (*dvp_clk_enable)(struct stf_dvp_dev *dvp_dev);
-+ int (*dvp_clk_disable)(struct stf_dvp_dev *dvp_dev);
-+ int (*dvp_config_set)(struct stf_dvp_dev *dvp_dev);
-+ int (*dvp_set_format)(struct stf_dvp_dev *dvp_dev,
-+ u32 pix_width, u8 bpp);
-+ int (*dvp_stream_set)(struct stf_dvp_dev *dvp_dev, int on);
-+};
-+
-+struct stf_dvp_dev {
-+ struct stfcamss *stfcamss;
-+ struct dvp_cfg *dvp;
-+ enum sensor_type s_type;
-+ struct v4l2_subdev subdev;
-+ struct media_pad pads[STF_DVP_PADS_NUM];
-+ struct v4l2_mbus_framefmt fmt[STF_DVP_PADS_NUM];
-+ const struct dvp_format *formats;
-+ unsigned int nformats;
-+ struct dvp_hw_ops *hw_ops;
-+ struct mutex stream_lock;
-+ int stream_count;
-+};
-+
-+extern int stf_dvp_subdev_init(struct stfcamss *stfcamss);
-+extern int stf_dvp_register(struct stf_dvp_dev *dvp_dev,
-+ struct v4l2_device *v4l2_dev);
-+extern int stf_dvp_unregister(struct stf_dvp_dev *dvp_dev);
-+
-+extern struct dvp_hw_ops dvp_ops;
-+
-+#endif /* STF_DVP_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_dvp_hw_ops.c
-@@ -0,0 +1,187 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+
-+static int stf_dvp_clk_enable(struct stf_dvp_dev *dvp_dev)
-+{
-+ struct stfcamss *stfcamss = dvp_dev->stfcamss;
-+
-+ switch (dvp_dev->s_type) {
-+ case SENSOR_VIN:
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_AXIWR].rstc);
-+ clk_set_phase(stfcamss->sys_clk[STFCLK_DVP_INV].clk, 0);
-+ clk_set_parent(stfcamss->sys_clk[STFCLK_AXIWR].clk,
-+ stfcamss->sys_clk[STFCLK_DVP_INV].clk);
-+ break;
-+ case SENSOR_ISP:
-+ clk_set_phase(stfcamss->sys_clk[STFCLK_DVP_INV].clk, 0);
-+ clk_set_parent(stfcamss->sys_clk[STFCLK_WRAPPER_CLK_C].clk,
-+ stfcamss->sys_clk[STFCLK_DVP_INV].clk);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int stf_dvp_clk_disable(struct stf_dvp_dev *dvp_dev)
-+{
-+ struct stfcamss *stfcamss = dvp_dev->stfcamss;
-+
-+ switch (dvp_dev->s_type) {
-+ case SENSOR_VIN:
-+ clk_set_parent(stfcamss->sys_clk[STFCLK_AXIWR].clk,
-+ stfcamss->sys_clk[STFCLK_MIPI_RX0_PXL].clk);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_AXIWR].rstc);
-+ break;
-+ case SENSOR_ISP:
-+ clk_set_parent(stfcamss->sys_clk[STFCLK_WRAPPER_CLK_C].clk,
-+ stfcamss->sys_clk[STFCLK_MIPI_RX0_PXL].clk);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int stf_dvp_config_set(struct stf_dvp_dev *dvp_dev)
-+{
-+ struct stf_vin_dev *vin = dvp_dev->stfcamss->vin;
-+ unsigned int flags = 0;
-+ unsigned char data_shift = 0;
-+ u32 polarities = 0;
-+
-+ if (!dvp_dev->dvp)
-+ return -EINVAL;
-+
-+ flags = dvp_dev->dvp->flags;
-+ data_shift = dvp_dev->dvp->data_shift;
-+ st_info(ST_DVP, "%s, polarities = 0x%x, flags = 0x%x\n",
-+ __func__, polarities, flags);
-+
-+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
-+ polarities |= BIT(1);
-+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
-+ polarities |= BIT(3);
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCONSAIF_SYSCFG_36);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ U0_VIN_CNFG_DVP_HS_POS
-+ | U0_VIN_CNFG_DVP_VS_POS,
-+ polarities);
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCONSAIF_SYSCFG_36);
-+
-+ switch (data_shift) {
-+ case 0:
-+ data_shift = 0;
-+ break;
-+ case 2:
-+ data_shift = 1;
-+ break;
-+ case 4:
-+ data_shift = 2;
-+ break;
-+ case 6:
-+ data_shift = 3;
-+ break;
-+ default:
-+ data_shift = 0;
-+ break;
-+ }
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCONSAIF_SYSCFG_28);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ UO_VIN_CNFG_AXIWR0_PIXEL_HEIGH_BIT_SEL,
-+ data_shift << 15);
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCONSAIF_SYSCFG_28);
-+
-+ return 0;
-+}
-+
-+static int set_vin_axiwr_pix_ct(struct stf_vin_dev *vin, u8 bpp)
-+{
-+ u32 value = 0;
-+ int cnfg_axiwr_pix_ct = 64 / bpp;
-+
-+ // FIXME: verify mapping; csi_set_vin_axiwr_pix() uses 2->0, 4->1, 8->2
-+ if (cnfg_axiwr_pix_ct == 2)
-+ value = 1;
-+ else if (cnfg_axiwr_pix_ct == 4)
-+ value = 1;
-+ else if (cnfg_axiwr_pix_ct == 8)
-+ value = 0;
-+ else
-+ return 0;
-+
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCONSAIF_SYSCFG_28);
-+ reg_set_bit(vin->sysctrl_base,
-+ SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_PIX_CT,
-+ value << 13);
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCONSAIF_SYSCFG_28);
-+
-+ return cnfg_axiwr_pix_ct;
-+}
-+
-+static int stf_dvp_set_format(struct stf_dvp_dev *dvp_dev,
-+ u32 pix_width, u8 bpp)
-+{
-+ struct stf_vin_dev *vin = dvp_dev->stfcamss->vin;
-+ int val, pix_ct;
-+
-+ if (dvp_dev->s_type == SENSOR_VIN) {
-+ pix_ct = set_vin_axiwr_pix_ct(vin, bpp);
-+ if (!pix_ct)
-+ return -EINVAL;
-+ val = (pix_width / pix_ct) - 1;
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCTRL_VIN_WR_PIX_TOTAL);
-+ reg_set_bit(vin->sysctrl_base,
-+ SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_PIX_CNT_END,
-+ val << 2);
-+ print_reg(ST_DVP, vin->sysctrl_base, SYSCTRL_VIN_WR_PIX_TOTAL);
-+
-+ }
-+
-+ return 0;
-+}
-+
-+static int stf_dvp_stream_set(struct stf_dvp_dev *dvp_dev, int on)
-+{
-+ struct stf_vin_dev *vin = dvp_dev->stfcamss->vin;
-+
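-+ /* Route the DVP stream either to the AXI write DMA path (SENSOR_VIN) or to the ISP path (SENSOR_ISP); the unused path stays disabled. */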
-+ switch (dvp_dev->s_type) {
-+ case SENSOR_VIN:
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ U0_VIN_CNFG_ISP_DVP_EN0,
-+ 0);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_0,
-+ U0_VIN_CNFG_AXI_DVP_EN,
-+ !!on << 2);
-+ break;
-+ case SENSOR_ISP:
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ U0_VIN_CNFG_ISP_DVP_EN0,
-+ !!on << 5);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_0,
-+ U0_VIN_CNFG_AXI_DVP_EN,
-+ 0);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ U0_VIN_CNFG_DVP_SWAP_EN,
-+ 0);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_36,
-+ U0_VIN_CNFG_GEN_EN_AXIRD,
-+ 0);
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+struct dvp_hw_ops dvp_ops = {
-+ .dvp_clk_enable = stf_dvp_clk_enable,
-+ .dvp_clk_disable = stf_dvp_clk_disable,
-+ .dvp_config_set = stf_dvp_config_set,
-+ .dvp_set_format = stf_dvp_set_format,
-+ .dvp_stream_set = stf_dvp_stream_set,
-+};
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_event.c
-@@ -0,0 +1,36 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/notifier.h>
-+#include <linux/module.h>
-+#include <linux/init.h>
-+#include <linux/kernel.h>
-+#include <linux/fs.h>
-+
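-+/* Global notifier chain used by the StarFive video-in sub-drivers to broadcast events to registered listeners. */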
-+static ATOMIC_NOTIFIER_HEAD(vin_notifier_list);
-+
-+int vin_notifier_register(struct notifier_block *nb)
-+{
-+ return atomic_notifier_chain_register(&vin_notifier_list, nb);
-+}
-+EXPORT_SYMBOL_GPL(vin_notifier_register);
-+
-+void vin_notifier_unregister(struct notifier_block *nb)
-+{
-+ atomic_notifier_chain_unregister(&vin_notifier_list, nb);
-+}
-+EXPORT_SYMBOL_GPL(vin_notifier_unregister);
-+
-+int vin_notifier_call(unsigned long e, void *v)
-+{
-+ return atomic_notifier_call_chain(&vin_notifier_list, e, v);
-+}
-+EXPORT_SYMBOL_GPL(vin_notifier_call);
-+
-+MODULE_AUTHOR("StarFive Technology Co., Ltd.");
-+MODULE_DESCRIPTION("Starfive VIC video in notifier");
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_isp.c
-@@ -0,0 +1,1521 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+#include <linux/firmware.h>
-+#include <linux/jh7110-isp.h>
-+#include "stf_isp_ioctl.h"
-+#include "stf_dmabuf.h"
-+
-+static int user_config_isp;
-+static int isp_set_selection(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_selection *sel);
-+
-+static struct v4l2_rect *
-+__isp_get_compose(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ enum v4l2_subdev_format_whence which);
-+
-+static struct v4l2_rect *
-+__isp_get_crop(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ enum v4l2_subdev_format_whence which);
-+
-+static struct v4l2_rect *
-+__isp_get_scale(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_selection *sel);
-+
-+static struct v4l2_rect *
-+__isp_get_itiws(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ enum v4l2_subdev_format_whence which);
-+
-+/* The sink formats and the raw formats must correspond one-to-one. */
-+static const struct isp_format isp_formats_st7110_sink[] = {
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+};
-+
-+static const struct isp_format isp_formats_st7110_raw[] = {
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12},
-+};
-+
-+static const struct isp_format isp_formats_st7110_compat_10bit_raw[] = {
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+};
-+
-+static const struct isp_format isp_formats_st7110_compat_8bit_raw[] = {
-+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8},
-+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8},
-+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8},
-+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8},
-+};
-+
-+static const struct isp_format isp_formats_st7110_uo[] = {
-+ { MEDIA_BUS_FMT_Y12_1X12, 8},
-+};
-+
-+static const struct isp_format isp_formats_st7110_iti[] = {
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12},
-+ { MEDIA_BUS_FMT_Y12_1X12, 8},
-+ { MEDIA_BUS_FMT_YUV8_1X24, 8},
-+};
-+
-+static const struct isp_format_table isp_formats_st7110[] = {
-+ { isp_formats_st7110_sink, ARRAY_SIZE(isp_formats_st7110_sink) }, /* pad 0 */
-+ { isp_formats_st7110_uo, ARRAY_SIZE(isp_formats_st7110_uo) }, /* pad 1 */
-+ { isp_formats_st7110_uo, ARRAY_SIZE(isp_formats_st7110_uo) }, /* pad 2 */
-+ { isp_formats_st7110_uo, ARRAY_SIZE(isp_formats_st7110_uo) }, /* pad 3 */
-+ { isp_formats_st7110_iti, ARRAY_SIZE(isp_formats_st7110_iti) }, /* pad 4 */
-+ { isp_formats_st7110_iti, ARRAY_SIZE(isp_formats_st7110_iti) }, /* pad 5 */
-+ { isp_formats_st7110_raw, ARRAY_SIZE(isp_formats_st7110_raw) }, /* pad 6 */
-+ { isp_formats_st7110_raw, ARRAY_SIZE(isp_formats_st7110_raw) }, /* pad 7 */
-+};
-+
-+int stf_isp_subdev_init(struct stfcamss *stfcamss)
-+{
-+ struct stf_isp_dev *isp_dev = stfcamss->isp_dev;
-+
-+ isp_dev->sdev_type = ISP_DEV_TYPE;
-+ isp_dev->hw_ops = &isp_ops;
-+ isp_dev->stfcamss = stfcamss;
-+ isp_dev->formats = isp_formats_st7110;
-+ isp_dev->nformats = ARRAY_SIZE(isp_formats_st7110);
-+ mutex_init(&isp_dev->stream_lock);
-+ mutex_init(&isp_dev->power_lock);
-+ mutex_init(&isp_dev->setfile_lock);
-+ atomic_set(&isp_dev->shadow_count, 0);
-+ return 0;
-+}
-+
-+/*
-+ * ISP Controls.
-+ */
-+
-+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-+{
-+ return &container_of(ctrl->handler, struct stf_isp_dev,
-+ ctrls.handler)->subdev;
-+}
-+
-+static u64 isp_calc_pixel_rate(struct stf_isp_dev *isp_dev)
-+{
-+ u64 rate = 0;
-+
-+ return rate;
-+}
-+
-+static int isp_set_ctrl_hue(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_contrast(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_saturation(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_white_balance(struct stf_isp_dev *isp_dev, int awb)
-+{
-+ struct isp_ctrls *ctrls = &isp_dev->ctrls;
-+ int ret = 0;
-+
-+ if (!awb && (ctrls->red_balance->is_new
-+ || ctrls->blue_balance->is_new)) {
-+ u16 red = (u16)ctrls->red_balance->val;
-+ u16 blue = (u16)ctrls->blue_balance->val;
-+
-+ st_debug(ST_ISP, "red = 0x%x, blue = 0x%x\n", red, blue);
-+ //isp_dev->hw_ops->isp_set_awb_r_gain(isp_dev, red);
-+ //if (ret)
-+ // return ret;
-+ //isp_dev->hw_ops->isp_set_awb_b_gain(isp_dev, blue);
-+ }
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_exposure(struct stf_isp_dev *isp_dev,
-+ enum v4l2_exposure_auto_type auto_exposure)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_gain(struct stf_isp_dev *isp_dev, bool auto_gain)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static const char * const test_pattern_menu[] = {
-+ "Disabled",
-+ "Color bars",
-+ "Color bars w/ rolling bar",
-+ "Color squares",
-+ "Color squares w/ rolling bar",
-+};
-+
-+#define ISP_TEST_ENABLE BIT(7)
-+#define ISP_TEST_ROLLING BIT(6) /* rolling horizontal bar */
-+#define ISP_TEST_TRANSPARENT BIT(5)
-+#define ISP_TEST_SQUARE_BW BIT(4) /* black & white squares */
-+#define ISP_TEST_BAR_STANDARD (0 << 2)
-+#define ISP_TEST_BAR_VERT_CHANGE_1 (1 << 2)
-+#define ISP_TEST_BAR_HOR_CHANGE (2 << 2)
-+#define ISP_TEST_BAR_VERT_CHANGE_2 (3 << 2)
-+#define ISP_TEST_BAR (0 << 0)
-+#define ISP_TEST_RANDOM (1 << 0)
-+#define ISP_TEST_SQUARE (2 << 0)
-+#define ISP_TEST_BLACK (3 << 0)
-+
-+static const u8 test_pattern_val[] = {
-+ 0,
-+ ISP_TEST_ENABLE | ISP_TEST_BAR_VERT_CHANGE_1 |
-+ ISP_TEST_BAR,
-+ ISP_TEST_ENABLE | ISP_TEST_ROLLING |
-+ ISP_TEST_BAR_VERT_CHANGE_1 | ISP_TEST_BAR,
-+ ISP_TEST_ENABLE | ISP_TEST_SQUARE,
-+ ISP_TEST_ENABLE | ISP_TEST_ROLLING | ISP_TEST_SQUARE,
-+};
-+
-+static int isp_set_ctrl_test_pattern(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ // return isp_write_reg(isp_dev, ISP_REG_PRE_ISP_TEST_SET1,
-+ // test_pattern_val[value]);
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_light_freq(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_hflip(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_set_ctrl_vflip(struct stf_isp_dev *isp_dev, int value)
-+{
-+ int ret = 0;
-+
-+ return ret;
-+}
-+
-+static int isp_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ switch (ctrl->id) {
-+ case V4L2_CID_AUTOGAIN:
-+ break;
-+ case V4L2_CID_EXPOSURE_AUTO:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int isp_s_ctrl(struct v4l2_ctrl *ctrl)
-+{
-+ struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ int ret = 0;
-+
-+ /*
-+ * If the device is not powered up by the host driver do
-+ * not apply any controls to H/W at this time. Instead
-+ * the controls will be restored right after power-up.
-+ */
-+ mutex_lock(&isp_dev->power_lock);
-+ if (isp_dev->power_count == 0) {
-+ mutex_unlock(&isp_dev->power_lock);
-+ return 0;
-+ }
-+ mutex_unlock(&isp_dev->power_lock);
-+
-+ switch (ctrl->id) {
-+ case V4L2_CID_AUTOGAIN:
-+ ret = isp_set_ctrl_gain(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_EXPOSURE_AUTO:
-+ ret = isp_set_ctrl_exposure(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_AUTO_WHITE_BALANCE:
-+ ret = isp_set_ctrl_white_balance(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_HUE:
-+ ret = isp_set_ctrl_hue(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_CONTRAST:
-+ ret = isp_set_ctrl_contrast(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_SATURATION:
-+ ret = isp_set_ctrl_saturation(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_TEST_PATTERN:
-+ ret = isp_set_ctrl_test_pattern(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_POWER_LINE_FREQUENCY:
-+ ret = isp_set_ctrl_light_freq(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_HFLIP:
-+ ret = isp_set_ctrl_hflip(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_VFLIP:
-+ ret = isp_set_ctrl_vflip(isp_dev, ctrl->val);
-+ break;
-+ case V4L2_CID_USER_JH7110_ISP_WB_SETTING:
-+ break;
-+ case V4L2_CID_USER_JH7110_ISP_CAR_SETTING:
-+ break;
-+ case V4L2_CID_USER_JH7110_ISP_CCM_SETTING:
-+ break;
-+ default:
-+ ret = -EINVAL;
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ctrl_ops isp_ctrl_ops = {
-+ .g_volatile_ctrl = isp_g_volatile_ctrl,
-+ .s_ctrl = isp_s_ctrl,
-+};
-+
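-+/* Custom JH7110 ISP controls; each carries a whole setting block (WB, CAR, CCM) as a U8 payload array. */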
-+struct v4l2_ctrl_config isp_ctrl[] = {
-+ [0] = {
-+ .ops = &isp_ctrl_ops,
-+ .type = V4L2_CTRL_TYPE_U8,
-+ .def = 0,
-+ .min = 0x00,
-+ .max = 0xff,
-+ .step = 1,
-+ .name = "WB Setting",
-+ .id = V4L2_CID_USER_JH7110_ISP_WB_SETTING,
-+ .dims[0] = sizeof(struct jh7110_isp_wb_setting),
-+ .flags = 0,
-+ },
-+ [1] = {
-+ .ops = &isp_ctrl_ops,
-+ .type = V4L2_CTRL_TYPE_U8,
-+ .def = 0,
-+ .min = 0x00,
-+ .max = 0xff,
-+ .step = 1,
-+ .name = "Car Setting",
-+ .id = V4L2_CID_USER_JH7110_ISP_CAR_SETTING,
-+ .dims[0] = sizeof(struct jh7110_isp_car_setting),
-+ .flags = 0,
-+ },
-+ [2] = {
-+ .ops = &isp_ctrl_ops,
-+ .type = V4L2_CTRL_TYPE_U8,
-+ .def = 0,
-+ .min = 0x00,
-+ .max = 0xff,
-+ .step = 1,
-+ .name = "CCM Setting",
-+ .id = V4L2_CID_USER_JH7110_ISP_CCM_SETTING,
-+ .dims[0] = sizeof(struct jh7110_isp_ccm_setting),
-+ .flags = 0,
-+ },
-+};
-+
-+static int isp_init_controls(struct stf_isp_dev *isp_dev)
-+{
-+ const struct v4l2_ctrl_ops *ops = &isp_ctrl_ops;
-+ struct isp_ctrls *ctrls = &isp_dev->ctrls;
-+ struct v4l2_ctrl_handler *hdl = &ctrls->handler;
-+ int ret;
-+ int i;
-+
-+ v4l2_ctrl_handler_init(hdl, 32);
-+
-+ /* Clock related controls */
-+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE,
-+ 0, INT_MAX, 1,
-+ isp_calc_pixel_rate(isp_dev));
-+
-+ /* Auto/manual white balance */
-+ ctrls->auto_wb = v4l2_ctrl_new_std(hdl, ops,
-+ V4L2_CID_AUTO_WHITE_BALANCE,
-+ 0, 1, 1, 1);
-+ ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE,
-+ 0, 4095, 1, 0);
-+ ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE,
-+ 0, 4095, 1, 0);
-+ /* Auto/manual exposure */
-+ ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_EXPOSURE_AUTO,
-+ V4L2_EXPOSURE_MANUAL, 0,
-+ V4L2_EXPOSURE_AUTO);
-+ ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
-+ 0, 65535, 1, 0);
-+ /* Auto/manual gain */
-+ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
-+ 0, 1, 1, 1);
-+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN,
-+ 0, 1023, 1, 0);
-+
-+ ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION,
-+ 0, 255, 1, 64);
-+ ctrls->hue = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HUE,
-+ 0, 359, 1, 0);
-+ ctrls->contrast = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST,
-+ 0, 255, 1, 0);
-+ ctrls->test_pattern =
-+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
-+ ARRAY_SIZE(test_pattern_menu) - 1,
-+ 0, 0, test_pattern_menu);
-+ ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
-+ 0, 1, 1, 0);
-+ ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
-+ 0, 1, 1, 0);
-+
-+ ctrls->light_freq =
-+ v4l2_ctrl_new_std_menu(hdl, ops,
-+ V4L2_CID_POWER_LINE_FREQUENCY,
-+ V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
-+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ);
-+
-+ for (i = 0; i < ARRAY_SIZE(isp_ctrl); i++)
-+ v4l2_ctrl_new_custom(hdl, &isp_ctrl[i], NULL);
-+
-+ if (hdl->error) {
-+ ret = hdl->error;
-+ goto free_ctrls;
-+ }
-+
-+ ctrls->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-+ ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+ ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
-+
-+ v4l2_ctrl_auto_cluster(3, &ctrls->auto_wb, 0, false);
-+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
-+ v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
-+
-+ isp_dev->subdev.ctrl_handler = hdl;
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(hdl);
-+ return ret;
-+}
-+
-+static int isp_set_power(struct v4l2_subdev *sd, int on)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+
-+ st_debug(ST_ISP, "%s, %d\n", __func__, __LINE__);
-+ mutex_lock(&isp_dev->power_lock);
-+ if (on) {
-+ if (isp_dev->power_count == 0)
-+ st_debug(ST_ISP, "turn on isp\n");
-+ isp_dev->power_count++;
-+ } else {
-+ if (isp_dev->power_count == 0)
-+ goto exit;
-+ isp_dev->power_count--;
-+ }
-+exit:
-+ mutex_unlock(&isp_dev->power_lock);
-+
-+ return 0;
-+}
-+
-+static struct v4l2_mbus_framefmt *
-+__isp_get_format(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_format(&isp_dev->subdev, state, pad);
-+
-+ return &isp_dev->fmt[pad];
-+}
-+
-+static int isp_get_interface_type(struct media_entity *entity)
-+{
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad = &entity->pads[0];
-+
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ return -EINVAL;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ return -EINVAL;
-+
-+ subdev = media_entity_to_v4l2_subdev(pad->entity);
-+
-+ st_debug(ST_ISP, "interface subdev name %s\n", subdev->name);
-+ if (!strncmp(subdev->name, STF_CSI_NAME, strlen(STF_CSI_NAME)))
-+ return CSI_SENSOR;
-+ if (!strncmp(subdev->name, STF_DVP_NAME, strlen(STF_DVP_NAME)))
-+ return DVP_SENSOR;
-+ return -EINVAL;
-+}
-+
-+static int isp_set_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ int ret = 0, interface_type;
-+ struct v4l2_mbus_framefmt *fmt;
-+ struct v4l2_event src_ch = { 0 };
-+
-+ fmt = __isp_get_format(isp_dev, NULL, STF_ISP_PAD_SINK, V4L2_SUBDEV_FORMAT_ACTIVE);
-+ mutex_lock(&isp_dev->stream_lock);
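-+ /* Only the first stream-on and the last stream-off touch the hardware; other calls just update the reference count. */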
-+ if (enable) {
-+ if (isp_dev->stream_count == 0) {
-+ isp_dev->hw_ops->isp_clk_enable(isp_dev);
-+ if (!user_config_isp)
-+ isp_dev->hw_ops->isp_config_set(isp_dev);
-+ interface_type = isp_get_interface_type(&sd->entity);
-+ if (interface_type < 0) {
-+ st_err(ST_ISP, "%s, pipeline not configured\n", __func__);
-+ ret = -EINVAL;
-+ goto exit;
-+ }
-+ isp_dev->hw_ops->isp_set_format(isp_dev,
-+ isp_dev->rect, fmt->code, interface_type);
-+ isp_dev->hw_ops->isp_reset(isp_dev);
-+ isp_dev->hw_ops->isp_stream_set(isp_dev, enable);
-+ user_config_isp = 0;
-+ }
-+ isp_dev->stream_count++;
-+ } else {
-+ if (isp_dev->stream_count == 0)
-+ goto exit;
-+ if (isp_dev->stream_count == 1) {
-+ isp_dev->hw_ops->isp_stream_set(isp_dev, enable);
-+ isp_dev->hw_ops->isp_clk_disable(isp_dev);
-+ }
-+ isp_dev->stream_count--;
-+ }
-+ src_ch.type = V4L2_EVENT_SOURCE_CHANGE;
-+ src_ch.u.src_change.changes = isp_dev->stream_count;
-+
-+ v4l2_subdev_notify_event(sd, &src_ch);
-+exit:
-+ mutex_unlock(&isp_dev->stream_lock);
-+
-+ mutex_lock(&isp_dev->power_lock);
-+ /* restore controls */
-+ if (enable && isp_dev->power_count == 1) {
-+ mutex_unlock(&isp_dev->power_lock);
-+ ret = v4l2_ctrl_handler_setup(&isp_dev->ctrls.handler);
-+ } else {
-+ mutex_unlock(&isp_dev->power_lock);
-+ }
-+
-+ return ret;
-+}
-+
-+/* Try to match the sensor format against the sink formats and use the matching index as the default. */
-+static int isp_match_sensor_format_get_index(struct stf_isp_dev *isp_dev)
-+{
-+ int ret, idx;
-+ struct media_entity *sensor;
-+ struct v4l2_subdev *subdev;
-+ struct v4l2_subdev_format fmt = { 0 };
-+ const struct isp_format_table *formats;
-+
-+ if (!isp_dev)
-+ return -EINVAL;
-+
-+ sensor = stfcamss_find_sensor(&isp_dev->subdev.entity);
-+ if (!sensor)
-+ return -EINVAL;
-+
-+ subdev = media_entity_to_v4l2_subdev(sensor);
-+ st_debug(ST_ISP, "Found sensor = %s\n", sensor->name);
-+
-+ fmt.pad = 0;
-+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
-+ if (ret) {
-+ st_warn(ST_ISP, "Sonser get format failed !!\n");
-+ return -EINVAL;
-+ }
-+
-+ st_debug(ST_ISP, "Got sensor format 0x%x !!\n", fmt.format.code);
-+
-+ formats = &isp_dev->formats[0]; /* isp sink format */
-+ for (idx = 0; idx < formats->nfmts; idx++) {
-+ if (formats->fmts[idx].code == fmt.format.code) {
-+ st_info(ST_ISP,
-+ "Match sensor format to isp_formats_st7110_sink index %d !!\n",
-+ idx);
-+ return idx;
-+ }
-+ }
-+ return -ERANGE;
-+}
-+
-+static int isp_match_format_get_index(const struct isp_format_table *f_table,
-+ __u32 mbus_code,
-+ unsigned int pad)
-+{
-+ int i;
-+
-+ for (i = 0; i < f_table->nfmts; i++) {
-+ if (mbus_code == f_table->fmts[i].code) {
-+ break;
-+ } else {
-+ if (pad == STF_ISP_PAD_SRC_RAW || pad == STF_ISP_PAD_SRC_SCD_Y) {
-+ if (mbus_code == isp_formats_st7110_compat_10bit_raw[i].code ||
-+ mbus_code == isp_formats_st7110_compat_8bit_raw[i].code)
-+ break;
-+ }
-+ }
-+ }
-+
-+ return i;
-+}
-+
-+static void isp_try_format(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ const struct isp_format_table *formats;
-+ unsigned int i;
-+ u32 code = fmt->code;
-+ u32 bpp;
-+
-+ if (pad == STF_ISP_PAD_SINK) {
-+ /* Set format on sink pad */
-+
-+ formats = &isp_dev->formats[pad];
-+ fmt->width = clamp_t(u32,
-+ fmt->width, STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ fmt->height = clamp_t(u32,
-+ fmt->height, STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+ fmt->height &= ~0x1;
-+
-+ fmt->field = V4L2_FIELD_NONE;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->flags = 0;
-+ } else {
-+ formats = &isp_dev->formats[pad];
-+ }
-+
-+ i = isp_match_format_get_index(formats, fmt->code, pad);
-+ st_debug(ST_ISP, "%s pad=%d, code=%x isp_match_format_get_index = %d\n",
-+ __func__, pad, code, i);
-+
-+ if (i >= formats->nfmts &&
-+ (pad == STF_ISP_PAD_SRC_RAW || pad == STF_ISP_PAD_SRC_SCD_Y)) {
-+ int sensor_idx;
-+
-+ sensor_idx = isp_match_sensor_format_get_index(isp_dev);
-+ if (sensor_idx >= 0)
-+ i = sensor_idx;
-+ }
-+
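-+ /* Source pads inherit the sink pad geometry; only the media bus code is adjusted below. */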
-+ if (pad != STF_ISP_PAD_SINK)
-+ *fmt = *__isp_get_format(isp_dev, state, STF_ISP_PAD_SINK, which);
-+
-+ if (i >= formats->nfmts) {
-+ fmt->code = formats->fmts[0].code;
-+ bpp = formats->fmts[0].bpp;
-+ st_info(ST_ISP, "Use default index 0 format = 0x%x\n", fmt->code);
-+ } else {
-+ /* sink formats and raw formats correspond one-to-one */
-+ if (pad == STF_ISP_PAD_SRC_RAW || pad == STF_ISP_PAD_SRC_SCD_Y) {
-+ fmt->code = formats->fmts[i].code;
-+ bpp = formats->fmts[i].bpp;
-+ st_info(ST_ISP, "Use mapping format from sink index %d = 0x%x\n",
-+ i, fmt->code);
-+ } else {
-+ fmt->code = code;
-+ bpp = formats->fmts[i].bpp;
-+ st_info(ST_ISP, "Use input format = 0x%x\n", fmt->code);
-+ }
-+ }
-+
-+ switch (pad) {
-+ case STF_ISP_PAD_SINK:
-+ break;
-+ case STF_ISP_PAD_SRC:
-+ isp_dev->rect[ISP_COMPOSE].bpp = bpp;
-+ break;
-+ case STF_ISP_PAD_SRC_SS0:
-+ isp_dev->rect[ISP_SCALE_SS0].bpp = bpp;
-+ break;
-+ case STF_ISP_PAD_SRC_SS1:
-+ isp_dev->rect[ISP_SCALE_SS1].bpp = bpp;
-+ break;
-+ case STF_ISP_PAD_SRC_ITIW:
-+ case STF_ISP_PAD_SRC_ITIR:
-+ isp_dev->rect[ISP_ITIWS].bpp = bpp;
-+ break;
-+ case STF_ISP_PAD_SRC_RAW:
-+ isp_dev->rect[ISP_CROP].bpp = bpp;
-+ break;
-+ case STF_ISP_PAD_SRC_SCD_Y:
-+ break;
-+ }
-+}
-+
-+static int isp_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ const struct isp_format_table *formats;
-+
-+ if (code->index >= isp_dev->formats[code->pad].nfmts)
-+ return -EINVAL;
-+
-+ formats = &isp_dev->formats[code->pad];
-+ code->code = formats->fmts[code->index].code;
-+ code->flags = 0;
-+
-+ return 0;
-+}
-+
-+static int isp_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt format;
-+
-+ if (fse->index != 0)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = 1;
-+ format.height = 1;
-+ isp_try_format(isp_dev, state, fse->pad, &format, fse->which);
-+ fse->min_width = format.width;
-+ fse->min_height = format.height;
-+
-+ if (format.code != fse->code)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = -1;
-+ format.height = -1;
-+ isp_try_format(isp_dev, state, fse->pad, &format, fse->which);
-+ fse->max_width = format.width;
-+ fse->max_height = format.height;
-+
-+ return 0;
-+}
-+
-+static int isp_get_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __isp_get_format(isp_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ fmt->format = *format;
-+
-+ return 0;
-+}
-+
-+static int isp_set_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+ struct v4l2_subdev_selection sel = { 0 };
-+ struct v4l2_rect *rect = NULL;
-+ int ret;
-+
-+ st_debug(ST_ISP, "%s pad=%d, code=%x, which=%d\n",
-+ __func__, fmt->reserved[0], fmt->format.code, fmt->which);
-+ format = __isp_get_format(isp_dev, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count) {
-+ fmt->format = *format;
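-+ /* While streaming the format is fixed; fmt->reserved[0] is repurposed by this driver to request the selection rectangle of a specific source pad instead. */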
-+ if (fmt->reserved[0] != 0) {
-+ sel.which = fmt->which;
-+ sel.pad = fmt->reserved[0];
-+
-+ switch (fmt->reserved[0]) {
-+ case STF_ISP_PAD_SRC:
-+ rect = __isp_get_compose(isp_dev, state, fmt->which);
-+ break;
-+ case STF_ISP_PAD_SRC_SS0:
-+ case STF_ISP_PAD_SRC_SS1:
-+ rect = __isp_get_scale(isp_dev, state, &sel);
-+ break;
-+ case STF_ISP_PAD_SRC_ITIW:
-+ case STF_ISP_PAD_SRC_ITIR:
-+ rect = __isp_get_itiws(isp_dev, state, fmt->which);
-+ break;
-+ case STF_ISP_PAD_SRC_RAW:
-+ case STF_ISP_PAD_SRC_SCD_Y:
-+ rect = __isp_get_crop(isp_dev, state, fmt->which);
-+ break;
-+ default:
-+ break;
-+ }
-+ if (rect != NULL) {
-+ fmt->format.width = rect->width;
-+ fmt->format.height = rect->height;
-+ }
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+ goto out;
-+ } else {
-+ isp_try_format(isp_dev, state, fmt->pad, &fmt->format, fmt->which);
-+ *format = fmt->format;
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+
-+ /* Propagate the format from sink to source */
-+ if (fmt->pad == STF_ISP_PAD_SINK) {
-+ /* Reset sink pad compose selection */
-+ sel.which = fmt->which;
-+ sel.pad = STF_ISP_PAD_SINK;
-+ sel.target = V4L2_SEL_TGT_CROP;
-+ sel.r.width = fmt->format.width;
-+ sel.r.height = fmt->format.height;
-+ ret = isp_set_selection(sd, state, &sel);
-+ if (ret < 0)
-+ return ret;
-+ }
-+
-+out:
-+ return 0;
-+}
-+
-+static struct v4l2_rect *
-+__isp_get_compose(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_compose(&isp_dev->subdev, state,
-+ STF_ISP_PAD_SINK);
-+
-+
-+ return &isp_dev->rect[ISP_COMPOSE].rect;
-+}
-+
-+static struct v4l2_rect *
-+__isp_get_crop(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_crop(&isp_dev->subdev, state,
-+ STF_ISP_PAD_SINK);
-+
-+ return &isp_dev->rect[ISP_CROP].rect;
-+}
-+
-+static struct v4l2_rect *
-+__isp_get_scale(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_selection *sel)
-+{
-+ int pad;
-+
-+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_compose(&isp_dev->subdev, state,
-+ STF_ISP_PAD_SINK);
-+ if (sel->pad != STF_ISP_PAD_SRC_SS0 && sel->pad != STF_ISP_PAD_SRC_SS1)
-+ return NULL;
-+
-+ pad = sel->pad == STF_ISP_PAD_SRC_SS0 ? ISP_SCALE_SS0 : ISP_SCALE_SS1;
-+ return &isp_dev->rect[pad].rect;
-+}
-+
-+static struct v4l2_rect *
-+__isp_get_itiws(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_crop(&isp_dev->subdev, state, STF_ISP_PAD_SINK);
-+
-+ return &isp_dev->rect[ISP_ITIWS].rect;
-+}
-+
-+static void isp_try_crop(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_rect *rect,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ fmt = __isp_get_format(isp_dev, state, STF_ISP_PAD_SINK, which);
-+
-+ if (rect->width > fmt->width)
-+ rect->width = fmt->width;
-+
-+ if (rect->width + rect->left > fmt->width)
-+ rect->left = fmt->width - rect->width;
-+
-+ if (rect->height > fmt->height)
-+ rect->height = fmt->height;
-+
-+ if (rect->height + rect->top > fmt->height)
-+ rect->top = fmt->height - rect->height;
-+
-+ if (rect->width < STFCAMSS_FRAME_MIN_WIDTH) {
-+ rect->left = 0;
-+ rect->width = STFCAMSS_FRAME_MIN_WIDTH;
-+ }
-+
-+ if (rect->height < STFCAMSS_FRAME_MIN_HEIGHT) {
-+ rect->top = 0;
-+ rect->height = STFCAMSS_FRAME_MIN_HEIGHT;
-+ }
-+ rect->height &= ~0x1;
-+}
-+
-+static void isp_try_compose(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_rect *rect,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ struct v4l2_rect *crop;
-+
-+ crop = __isp_get_crop(isp_dev, state, which);
-+
-+ if (rect->width > crop->width)
-+ rect->width = crop->width;
-+
-+ if (rect->height > crop->height)
-+ rect->height = crop->height;
-+
-+ if (crop->width > rect->width * SCALER_RATIO_MAX)
-+ rect->width = (crop->width + SCALER_RATIO_MAX - 1) /
-+ SCALER_RATIO_MAX;
-+
-+ if (crop->height > rect->height * SCALER_RATIO_MAX)
-+ rect->height = (crop->height + SCALER_RATIO_MAX - 1) /
-+ SCALER_RATIO_MAX;
-+
-+ if (rect->width < STFCAMSS_FRAME_MIN_WIDTH)
-+ rect->width = STFCAMSS_FRAME_MIN_WIDTH;
-+
-+ if (rect->height < STFCAMSS_FRAME_MIN_HEIGHT)
-+ rect->height = STFCAMSS_FRAME_MIN_HEIGHT;
-+ rect->height &= ~0x1;
-+}
-+
-+static void isp_try_scale(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_rect *rect,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ struct v4l2_rect *compose;
-+
-+ compose = __isp_get_compose(isp_dev, state, which);
-+
-+ if (rect->width > compose->width)
-+ rect->width = compose->width;
-+
-+ if (rect->width + rect->left > compose->width)
-+ rect->left = compose->width - rect->width;
-+
-+ if (rect->height > compose->height)
-+ rect->height = compose->height;
-+
-+ if (rect->height + rect->top > compose->height)
-+ rect->top = compose->height - rect->height;
-+
-+ if (rect->width < STFCAMSS_FRAME_MIN_WIDTH) {
-+ rect->left = 0;
-+ rect->width = STFCAMSS_FRAME_MIN_WIDTH;
-+ }
-+
-+ if (rect->height < STFCAMSS_FRAME_MIN_HEIGHT) {
-+ rect->top = 0;
-+ rect->height = STFCAMSS_FRAME_MIN_HEIGHT;
-+ }
-+ rect->height &= ~0x1;
-+}
-+
-+static void isp_try_itiws(struct stf_isp_dev *isp_dev,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_rect *rect,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ struct v4l2_rect *crop;
-+
-+ crop = __isp_get_crop(isp_dev, state, which);
-+
-+ if (rect->width > crop->width)
-+ rect->width = crop->width;
-+
-+ if (rect->width + rect->left > crop->width)
-+ rect->left = crop->width - rect->width;
-+
-+ if (rect->height > crop->height)
-+ rect->height = crop->height;
-+
-+ if (rect->height + rect->top > crop->height)
-+ rect->top = crop->height - rect->height;
-+
-+ if (rect->width < STFCAMSS_FRAME_MIN_WIDTH) {
-+ rect->left = 0;
-+ rect->width = STFCAMSS_FRAME_MIN_WIDTH;
-+ }
-+
-+ if (rect->height < STFCAMSS_FRAME_MIN_HEIGHT) {
-+ rect->top = 0;
-+ rect->height = STFCAMSS_FRAME_MIN_HEIGHT;
-+ }
-+ rect->height &= ~0x1;
-+}
-+
-+static int isp_get_selection(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_selection *sel)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_subdev_format fmt = { 0 };
-+ struct v4l2_rect *rect;
-+ int ret;
-+
-+ switch (sel->target) {
-+ case V4L2_SEL_TGT_CROP_BOUNDS:
-+ case V4L2_SEL_TGT_CROP_DEFAULT:
-+ fmt.pad = sel->pad;
-+ fmt.which = sel->which;
-+ ret = isp_get_format(sd, state, &fmt);
-+ if (ret < 0)
-+ return ret;
-+
-+ sel->r.left = 0;
-+ sel->r.top = 0;
-+ sel->r.width = fmt.format.width;
-+ sel->r.height = fmt.format.height;
-+ break;
-+ case V4L2_SEL_TGT_CROP:
-+ rect = __isp_get_crop(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+
-+ sel->r = *rect;
-+ break;
-+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
-+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
-+ if (sel->pad > STF_ISP_PAD_SRC_ITIR)
-+ return -EINVAL;
-+ rect = __isp_get_crop(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+
-+ sel->r.left = rect->left;
-+ sel->r.top = rect->top;
-+ sel->r.width = rect->width;
-+ sel->r.height = rect->height;
-+ break;
-+ case V4L2_SEL_TGT_COMPOSE:
-+ if (sel->pad > STF_ISP_PAD_SRC_ITIR)
-+ return -EINVAL;
-+ if (sel->pad == STF_ISP_PAD_SRC_SS0
-+ || sel->pad == STF_ISP_PAD_SRC_SS1) {
-+ rect = __isp_get_scale(isp_dev, state, sel);
-+ if (rect == NULL)
-+ return -EINVAL;
-+ } else if (sel->pad == STF_ISP_PAD_SRC_ITIW
-+ || sel->pad == STF_ISP_PAD_SRC_ITIR) {
-+ rect = __isp_get_itiws(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+ } else {
-+ rect = __isp_get_compose(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+ }
-+ sel->r = *rect;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ st_info(ST_ISP, "%s pad = %d, left = %d, %d, %d, %d\n",
-+ __func__, sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
-+ return 0;
-+}
-+
-+static int isp_set_selection(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_selection *sel)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ struct v4l2_rect *rect;
-+ int ret = 0;
-+
-+ if (sel->target == V4L2_SEL_TGT_COMPOSE &&
-+ ((sel->pad == STF_ISP_PAD_SINK)
-+ || (sel->pad == STF_ISP_PAD_SRC))) {
-+ struct v4l2_subdev_format fmt = { 0 };
-+ int i;
-+
-+ rect = __isp_get_compose(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count) {
-+ sel->r = *rect;
-+ mutex_unlock(&isp_dev->stream_lock);
-+ ret = 0;
-+ goto out;
-+ } else {
-+ isp_try_compose(isp_dev, state, &sel->r, sel->which);
-+ *rect = sel->r;
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+
-+ /* Reset source pad format width and height */
-+ fmt.which = sel->which;
-+ fmt.pad = STF_ISP_PAD_SRC;
-+ ret = isp_get_format(sd, state, &fmt);
-+ if (ret < 0)
-+ return ret;
-+
-+ fmt.format.width = rect->width;
-+ fmt.format.height = rect->height;
-+ ret = isp_set_format(sd, state, &fmt);
-+
-+ /* Reset scale */
-+ for (i = STF_ISP_PAD_SRC_SS0; i <= STF_ISP_PAD_SRC_ITIR; i++) {
-+ struct v4l2_subdev_selection scale = { 0 };
-+
-+ scale.which = sel->which;
-+ scale.target = V4L2_SEL_TGT_COMPOSE;
-+ scale.r = *rect;
-+ scale.pad = i;
-+ ret = isp_set_selection(sd, state, &scale);
-+ }
-+ } else if (sel->target == V4L2_SEL_TGT_COMPOSE
-+ && ((sel->pad == STF_ISP_PAD_SRC_SS0)
-+ || (sel->pad == STF_ISP_PAD_SRC_SS1))) {
-+ struct v4l2_subdev_format fmt = { 0 };
-+
-+ rect = __isp_get_scale(isp_dev, state, sel);
-+ if (rect == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count) {
-+ sel->r = *rect;
-+ mutex_unlock(&isp_dev->stream_lock);
-+ ret = 0;
-+ goto out;
-+ } else {
-+ isp_try_scale(isp_dev, state, &sel->r, sel->which);
-+ *rect = sel->r;
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+
-+ /* Reset source pad format width and height */
-+ fmt.which = sel->which;
-+ fmt.pad = sel->pad;
-+ ret = isp_get_format(sd, state, &fmt);
-+ if (ret < 0)
-+ return ret;
-+
-+ fmt.format.width = rect->width;
-+ fmt.format.height = rect->height;
-+ ret = isp_set_format(sd, state, &fmt);
-+ } else if (sel->target == V4L2_SEL_TGT_COMPOSE
-+ && ((sel->pad == STF_ISP_PAD_SRC_ITIW)
-+ || (sel->pad == STF_ISP_PAD_SRC_ITIR))) {
-+ struct v4l2_subdev_format fmt = { 0 };
-+
-+ rect = __isp_get_itiws(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count) {
-+ sel->r = *rect;
-+ mutex_unlock(&isp_dev->stream_lock);
-+ ret = 0;
-+ goto out;
-+ } else {
-+ isp_try_itiws(isp_dev, state, &sel->r, sel->which);
-+ *rect = sel->r;
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+
-+ /* Reset source pad format width and height */
-+ fmt.which = sel->which;
-+ fmt.pad = sel->pad;
-+ ret = isp_get_format(sd, state, &fmt);
-+ if (ret < 0)
-+ return ret;
-+
-+ fmt.format.width = rect->width;
-+ fmt.format.height = rect->height;
-+ ret = isp_set_format(sd, state, &fmt);
-+ } else if (sel->target == V4L2_SEL_TGT_CROP) {
-+ struct v4l2_subdev_selection compose = { 0 };
-+ int i;
-+
-+ rect = __isp_get_crop(isp_dev, state, sel->which);
-+ if (rect == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count) {
-+ sel->r = *rect;
-+ mutex_unlock(&isp_dev->stream_lock);
-+ ret = 0;
-+ goto out;
-+ } else {
-+ isp_try_crop(isp_dev, state, &sel->r, sel->which);
-+ *rect = sel->r;
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+
-+ /* Reset source compose selection */
-+ compose.which = sel->which;
-+ compose.target = V4L2_SEL_TGT_COMPOSE;
-+ compose.r.width = rect->width;
-+ compose.r.height = rect->height;
-+ compose.pad = STF_ISP_PAD_SINK;
-+ ret = isp_set_selection(sd, state, &compose);
-+
-+ /* Reset source pad format width and height */
-+ for (i = STF_ISP_PAD_SRC_RAW; i < STF_ISP_PAD_MAX; i++) {
-+ struct v4l2_subdev_format fmt = { 0 };
-+
-+ fmt.which = sel->which;
-+ fmt.pad = i;
-+ ret = isp_get_format(sd, state, &fmt);
-+ if (ret < 0)
-+ return ret;
-+
-+ fmt.format.width = rect->width;
-+ fmt.format.height = rect->height;
-+ ret = isp_set_format(sd, state, &fmt);
-+ }
-+ } else {
-+ ret = -EINVAL;
-+ }
-+
-+ st_info(ST_ISP, "%s pad = %d, left = %d, %d, %d, %d\n",
-+ __func__, sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
-+out:
-+ return ret;
-+}
-+
-+static int isp_init_formats(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_fh *fh)
-+{
-+ struct v4l2_subdev_format format = {
-+ .pad = STF_ISP_PAD_SINK,
-+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
-+ V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .format = {
-+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
-+ .width = 1920,
-+ .height = 1080
-+ }
-+ };
-+
-+ return isp_set_format(sd, fh ? fh->state : NULL, &format);
-+}
-+
-+static int isp_link_setup(struct media_entity *entity,
-+ const struct media_pad *local,
-+ const struct media_pad *remote, u32 flags)
-+{
-+ if (flags & MEDIA_LNK_FL_ENABLED)
-+ if (media_pad_remote_pad_first(local))
-+ return -EBUSY;
-+ return 0;
-+}
-+
-+static int stf_isp_load_setfile(struct stf_isp_dev *isp_dev, char *file_name)
-+{
-+ struct device *dev = isp_dev->stfcamss->dev;
-+ const struct firmware *fw;
-+ u8 *buf = NULL;
-+ int *regval_num;
-+ int ret;
-+
-+ st_debug(ST_ISP, "%s, file_name %s\n", __func__, file_name);
-+ ret = request_firmware(&fw, file_name, dev);
-+ if (ret < 0) {
-+ st_err(ST_ISP, "firmware request failed (%d)\n", ret);
-+ return ret;
-+ }
-+ buf = devm_kzalloc(dev, fw->size, GFP_KERNEL);
-+ if (!buf) {
-+ release_firmware(fw);
-+ return -ENOMEM;
-+ }
-+ memcpy(buf, fw->data, fw->size);
-+
-+ mutex_lock(&isp_dev->setfile_lock);
-+ if (isp_dev->setfile.state == 1)
-+ devm_kfree(dev, isp_dev->setfile.data);
-+ isp_dev->setfile.data = buf;
-+ isp_dev->setfile.size = fw->size;
-+ isp_dev->setfile.state = 1;
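-+ /* The setfile blob is an array of struct regval_t entries followed by a trailing 32-bit entry count. */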
-+ regval_num = (int *)&buf[fw->size - sizeof(unsigned int)];
-+ isp_dev->setfile.settings.regval_num = *regval_num;
-+ isp_dev->setfile.settings.regval = (struct regval_t *)buf;
-+ mutex_unlock(&isp_dev->setfile_lock);
-+
-+ st_debug(ST_ISP, "stf_isp setfile loaded size: %zu B, reg_nul: %d\n",
-+ fw->size, isp_dev->setfile.settings.regval_num);
-+
-+ release_firmware(fw);
-+ return ret;
-+}
-+
-+static long stf_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+ struct device *dev = isp_dev->stfcamss->dev;
-+ int ret = -ENOIOCTLCMD;
-+
-+ switch (cmd) {
-+ case VIDIOC_STFISP_LOAD_FW: {
-+ struct stfisp_fw_info *fw_info = arg;
-+
-+ if (IS_ERR(fw_info)) {
-+ st_err(ST_ISP, "fw_info failed, params invaild\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = stf_isp_load_setfile(isp_dev, fw_info->filename);
-+ break;
-+ }
-+ case VIDIOC_STF_DMABUF_ALLOC:
-+ case VIDIOC_STF_DMABUF_FREE:
-+ ret = stf_dmabuf_ioctl(dev, cmd, arg);
-+ break;
-+ case VIDIOC_STFISP_GET_REG:
-+ ret = isp_dev->hw_ops->isp_reg_read(isp_dev, arg);
-+ break;
-+ case VIDIOC_STFISP_SET_REG:
-+ ret = isp_dev->hw_ops->isp_reg_write(isp_dev, arg);
-+ break;
-+ case VIDIOC_STFISP_SHADOW_LOCK:
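-+ /* The shadow registers act as a single-owner try-lock: take it only when the count goes 0 -> 1. */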
-+ if (atomic_add_unless(&isp_dev->shadow_count, 1, 1))
-+ ret = 0;
-+ else
-+ ret = -EBUSY;
-+ st_debug(ST_ISP, "%s, %d, ret = %d\n", __func__, __LINE__, ret);
-+ break;
-+ case VIDIOC_STFISP_SHADOW_UNLOCK:
-+ if (atomic_dec_if_positive(&isp_dev->shadow_count) < 0)
-+ ret = -EINVAL;
-+ else
-+ ret = 0;
-+ st_debug(ST_ISP, "%s, %d, ret = %d\n", __func__, __LINE__, ret);
-+ break;
-+ case VIDIOC_STFISP_SHADOW_UNLOCK_N_TRIGGER:
-+ {
-+ isp_dev->hw_ops->isp_shadow_trigger(isp_dev);
-+ if (atomic_dec_if_positive(&isp_dev->shadow_count) < 0)
-+ ret = -EINVAL;
-+ else
-+ ret = 0;
-+ st_debug(ST_ISP, "%s, %d, ret = %d\n", __func__, __LINE__, ret);
-+ }
-+ break;
-+ case VIDIOC_STFISP_SET_USER_CONFIG_ISP:
-+ st_debug(ST_ISP, "%s, %d set user_config_isp\n", __func__, __LINE__);
-+ user_config_isp = 1;
-+ break;
-+ default:
-+ break;
-+ }
-+ return ret;
-+}
-+
-+int isp_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-+{
-+ struct stf_isp_dev *isp_dev = v4l2_get_subdevdata(sd);
-+
-+ st_debug(ST_ISP, "%s, %d\n", __func__, __LINE__);
-+ while (atomic_dec_if_positive(&isp_dev->shadow_count) >= 0)
-+ st_warn(ST_ISP, "user did not unlock the shadow lock, unlocking it in the driver!\n");
-+
-+ return 0;
-+}
-+
-+static int stf_isp_subscribe_event(struct v4l2_subdev *sd,
-+ struct v4l2_fh *fh,
-+ struct v4l2_event_subscription *sub)
-+{
-+ switch (sub->type) {
-+ case V4L2_EVENT_SOURCE_CHANGE:
-+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
-+ case V4L2_EVENT_CTRL:
-+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
-+ default:
-+ st_debug(ST_ISP, "unspport subscribe_event\n");
-+ return -EINVAL;
-+ }
-+}
-+
-+static const struct v4l2_subdev_core_ops isp_core_ops = {
-+ .s_power = isp_set_power,
-+ .ioctl = stf_isp_ioctl,
-+ .log_status = v4l2_ctrl_subdev_log_status,
-+ .subscribe_event = stf_isp_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops isp_video_ops = {
-+ .s_stream = isp_set_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops isp_pad_ops = {
-+ .enum_mbus_code = isp_enum_mbus_code,
-+ .enum_frame_size = isp_enum_frame_size,
-+ .get_fmt = isp_get_format,
-+ .set_fmt = isp_set_format,
-+ .get_selection = isp_get_selection,
-+ .set_selection = isp_set_selection,
-+};
-+
-+static const struct v4l2_subdev_ops isp_v4l2_ops = {
-+ .core = &isp_core_ops,
-+ .video = &isp_video_ops,
-+ .pad = &isp_pad_ops,
-+};
-+
-+static const struct v4l2_subdev_internal_ops isp_v4l2_internal_ops = {
-+ .open = isp_init_formats,
-+ .close = isp_close,
-+};
-+
-+static const struct media_entity_operations isp_media_ops = {
-+ .link_setup = isp_link_setup,
-+ .link_validate = v4l2_subdev_link_validate,
-+};
-+
-+int stf_isp_register(struct stf_isp_dev *isp_dev,
-+ struct v4l2_device *v4l2_dev)
-+{
-+ struct v4l2_subdev *sd = &isp_dev->subdev;
-+ struct media_pad *pads = isp_dev->pads;
-+ int ret;
-+
-+ v4l2_subdev_init(sd, &isp_v4l2_ops);
-+ sd->internal_ops = &isp_v4l2_internal_ops;
-+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
-+ STF_ISP_NAME, 0);
-+ v4l2_set_subdevdata(sd, isp_dev);
-+
-+ ret = isp_init_formats(sd, NULL);
-+ if (ret < 0) {
-+ st_err(ST_ISP, "Failed to init format: %d\n", ret);
-+ return ret;
-+ }
-+
-+ pads[STF_ISP_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
-+ pads[STF_ISP_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
-+ pads[STF_ISP_PAD_SRC_SS0].flags = MEDIA_PAD_FL_SOURCE;
-+ pads[STF_ISP_PAD_SRC_SS1].flags = MEDIA_PAD_FL_SOURCE;
-+ pads[STF_ISP_PAD_SRC_ITIW].flags = MEDIA_PAD_FL_SOURCE;
-+ pads[STF_ISP_PAD_SRC_ITIR].flags = MEDIA_PAD_FL_SOURCE;
-+ pads[STF_ISP_PAD_SRC_RAW].flags = MEDIA_PAD_FL_SOURCE;
-+ pads[STF_ISP_PAD_SRC_SCD_Y].flags = MEDIA_PAD_FL_SOURCE;
-+
-+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
-+ sd->entity.ops = &isp_media_ops;
-+ ret = media_entity_pads_init(&sd->entity, STF_ISP_PAD_MAX, pads);
-+ if (ret < 0) {
-+ st_err(ST_ISP, "Failed to init media entity: %d\n", ret);
-+ return ret;
-+ }
-+
-+ ret = isp_init_controls(isp_dev);
-+ if (ret)
-+ goto err_sreg;
-+
-+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
-+ if (ret < 0) {
-+ st_err(ST_ISP, "Failed to register subdev: %d\n", ret);
-+ goto free_ctrls;
-+ }
-+
-+ return 0;
-+
-+free_ctrls:
-+ v4l2_ctrl_handler_free(&isp_dev->ctrls.handler);
-+err_sreg:
-+ media_entity_cleanup(&sd->entity);
-+ return ret;
-+}
-+
-+int stf_isp_unregister(struct stf_isp_dev *isp_dev)
-+{
-+ v4l2_device_unregister_subdev(&isp_dev->subdev);
-+ media_entity_cleanup(&isp_dev->subdev.entity);
-+ v4l2_ctrl_handler_free(&isp_dev->ctrls.handler);
-+ mutex_destroy(&isp_dev->stream_lock);
-+ mutex_destroy(&isp_dev->power_lock);
-+ mutex_destroy(&isp_dev->setfile_lock);
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_isp.h
-@@ -0,0 +1,222 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_ISP_H
-+#define STF_ISP_H
-+
-+#include <media/v4l2-subdev.h>
-+#include <media/v4l2-ctrls.h>
-+#include <media/v4l2-device.h>
-+#include <media/media-entity.h>
-+#include <video/stf-vin.h>
-+
-+#define STF_ISP_NAME "stf_isp"
-+#define STF_ISP_SETFILE "stf_isp0_fw.bin"
-+
-+#define ISP_SCD_BUFFER_SIZE (19 * 256 * 4) // align 128
-+#define ISP_YHIST_BUFFER_SIZE (64 * 4)
-+#define ISP_SCD_Y_BUFFER_SIZE (ISP_SCD_BUFFER_SIZE + ISP_YHIST_BUFFER_SIZE)
-+#define ISP_RAW_DATA_BITS 12
-+#define SCALER_RATIO_MAX 1 // no compose function
-+#define STF_ISP_REG_OFFSET_MAX 0x0FFF
-+#define STF_ISP_REG_DELAY_MAX 100
-+
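-+/* ISP register offsets, relative to the ISP register base */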
-+#define ISP_REG_CSIINTS_ADDR 0x00000008
-+#define ISP_REG_SENSOR 0x00000014
-+#define ISP_REG_DUMP_CFG_0 0x00000024
-+#define ISP_REG_DUMP_CFG_1 0x00000028
-+#define ISP_REG_SCD_CFG_0 0x00000098
-+#define ISP_REG_SCD_CFG_1 0x0000009C
-+#define ISP_REG_SC_CFG_1 0x000000BC
-+#define ISP_REG_IESHD_ADDR 0x00000A50
-+#define ISP_REG_SS0AY 0x00000A94
-+#define ISP_REG_SS0AUV 0x00000A98
-+#define ISP_REG_SS0S 0x00000A9C
-+#define ISP_REG_SS0IW 0x00000AA8
-+#define ISP_REG_SS1AY 0x00000AAC
-+#define ISP_REG_SS1AUV 0x00000AB0
-+#define ISP_REG_SS1S 0x00000AB4
-+#define ISP_REG_SS1IW 0x00000AC0
-+#define ISP_REG_YHIST_CFG_4 0x00000CD8
-+#define ISP_REG_ITIIWSR 0x00000B20
-+#define ISP_REG_ITIDWLSR 0x00000B24
-+#define ISP_REG_ITIDWYSAR 0x00000B28
-+#define ISP_REG_ITIDWUSAR 0x00000B2C
-+#define ISP_REG_ITIDRYSAR 0x00000B30
-+#define ISP_REG_ITIDRUSAR 0x00000B34
-+#define ISP_REG_ITIPDFR 0x00000B38
-+#define ISP_REG_ITIDRLSR 0x00000B3C
-+#define ISP_REG_ITIBSR 0x00000B40
-+#define ISP_REG_ITIAIR 0x00000B44
-+#define ISP_REG_ITIDPSR 0x00000B48
-+
-+/* The output lines of an ISP controller */
-+enum isp_line_id {
-+ STF_ISP_LINE_INVALID = -1,
-+ STF_ISP_LINE_SRC = 1,
-+ STF_ISP_LINE_SRC_SS0 = 2,
-+ STF_ISP_LINE_SRC_SS1 = 3,
-+ STF_ISP_LINE_SRC_ITIW = 4,
-+ STF_ISP_LINE_SRC_ITIR = 5,
-+ STF_ISP_LINE_SRC_RAW = 6,
-+ STF_ISP_LINE_SRC_SCD_Y = 7,
-+ STF_ISP_LINE_MAX = STF_ISP_LINE_SRC_SCD_Y
-+};
-+
-+/* pad id for media framework */
-+enum isp_pad_id {
-+ STF_ISP_PAD_SINK = 0,
-+ STF_ISP_PAD_SRC = 1,
-+ STF_ISP_PAD_SRC_SS0 = 2,
-+ STF_ISP_PAD_SRC_SS1 = 3,
-+ STF_ISP_PAD_SRC_ITIW = 4,
-+ STF_ISP_PAD_SRC_ITIR = 5,
-+ STF_ISP_PAD_SRC_RAW = 6,
-+ STF_ISP_PAD_SRC_SCD_Y = 7,
-+ STF_ISP_PAD_MAX = 8
-+};
-+
-+enum {
-+ EN_INT_NONE = 0,
-+ EN_INT_ISP_DONE = (0x1 << 24),
-+ EN_INT_CSI_DONE = (0x1 << 25),
-+ EN_INT_SC_DONE = (0x1 << 26),
-+ EN_INT_LINE_INT = (0x1 << 27),
-+ EN_INT_ALL = (0xF << 24),
-+};
-+
-+enum {
-+ DVP_SENSOR = 0,
-+ CSI_SENSOR,
-+};
-+
-+#define ISP_AWB_OECF_SKIP_FRAME 0
-+// 0x0BC [31:30] SEL - sc0 input mux for sc awb
-+// 00 : after DEC, 01 : after OBC, 10 : after OECF, 11 : after AWB
-+enum scd_type {
-+ DEC_TYPE = 0,
-+ OBC_TYPE,
-+ OECF_TYPE,
-+ AWB_TYPE
-+};
-+
-+struct isp_format {
-+ u32 code;
-+ u8 bpp;
-+};
-+
-+struct isp_format_table {
-+ const struct isp_format *fmts;
-+ int nfmts;
-+};
-+
-+struct regval_t {
-+ u32 addr;
-+ u32 val;
-+ u32 mask;
-+ u32 delay_ms;
-+};
-+
-+struct reg_table {
-+ struct regval_t *regval;
-+ int regval_num;
-+};
-+
-+struct isp_stream_format {
-+ struct v4l2_rect rect;
-+ u32 bpp;
-+};
-+
-+struct stf_isp_dev;
-+enum subdev_type;
-+
-+struct isp_hw_ops {
-+ int (*isp_clk_enable)(struct stf_isp_dev *isp_dev);
-+ int (*isp_clk_disable)(struct stf_isp_dev *isp_dev);
-+ int (*isp_reset)(struct stf_isp_dev *isp_dev);
-+ int (*isp_config_set)(struct stf_isp_dev *isp_dev);
-+ int (*isp_set_format)(struct stf_isp_dev *isp_dev,
-+ struct isp_stream_format *crop, u32 mcode,
-+ int type);
-+ int (*isp_stream_set)(struct stf_isp_dev *isp_dev, int on);
-+ int (*isp_reg_read)(struct stf_isp_dev *isp_dev, void *arg);
-+ int (*isp_reg_write)(struct stf_isp_dev *isp_dev, void *arg);
-+ int (*isp_shadow_trigger)(struct stf_isp_dev *isp_dev);
-+};
-+
-+struct isp_ctrls {
-+ struct v4l2_ctrl_handler handler;
-+ struct v4l2_ctrl *pixel_rate;
-+ struct {
-+ struct v4l2_ctrl *auto_exp;
-+ struct v4l2_ctrl *exposure;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_wb;
-+ struct v4l2_ctrl *blue_balance;
-+ struct v4l2_ctrl *red_balance;
-+ };
-+ struct {
-+ struct v4l2_ctrl *auto_gain;
-+ struct v4l2_ctrl *gain;
-+ };
-+ struct v4l2_ctrl *brightness;
-+ struct v4l2_ctrl *light_freq;
-+ struct v4l2_ctrl *saturation;
-+ struct v4l2_ctrl *contrast;
-+ struct v4l2_ctrl *hue;
-+ struct v4l2_ctrl *test_pattern;
-+ struct v4l2_ctrl *hflip;
-+ struct v4l2_ctrl *vflip;
-+};
-+
-+struct isp_setfile {
-+ struct reg_table settings;
-+ const u8 *data;
-+ unsigned int size;
-+ unsigned int state;
-+};
-+
-+enum {
-+ ISP_CROP = 0,
-+ ISP_COMPOSE,
-+ ISP_SCALE_SS0,
-+ ISP_SCALE_SS1,
-+ ISP_ITIWS,
-+ ISP_RECT_MAX
-+};
-+
-+struct stf_isp_dev {
-+ enum subdev_type sdev_type; /* must be first */
-+ struct stfcamss *stfcamss;
-+ struct v4l2_subdev subdev;
-+ struct media_pad pads[STF_ISP_PAD_MAX];
-+ struct v4l2_mbus_framefmt fmt[STF_ISP_PAD_MAX];
-+ struct isp_stream_format rect[ISP_RECT_MAX];
-+ const struct isp_format_table *formats;
-+ unsigned int nformats;
-+ struct isp_hw_ops *hw_ops;
-+ struct mutex power_lock;
-+ int power_count;
-+ struct mutex stream_lock;
-+ int stream_count;
-+ atomic_t shadow_count;
-+
-+ struct isp_ctrls ctrls;
-+ struct mutex setfile_lock;
-+ struct isp_setfile setfile;
-+ struct reg_table *context_regs;
-+};
-+
-+extern int stf_isp_subdev_init(struct stfcamss *stfcamss);
-+extern int stf_isp_register(struct stf_isp_dev *isp_dev,
-+ struct v4l2_device *v4l2_dev);
-+extern int stf_isp_unregister(struct stf_isp_dev *isp_dev);
-+extern struct isp_hw_ops isp_ops;
-+extern void dump_isp_reg(void *__iomem ispbase);
-+
-+#endif /* STF_ISP_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_isp_hw_ops.c
-@@ -0,0 +1,1550 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <linux/io.h>
-+#include <linux/fb.h>
-+#include <linux/module.h>
-+#include <video/stf-vin.h>
-+#include "stf_isp_ioctl.h"
-+#include "stf_isp.h"
-+#include <linux/delay.h>
-+#include <linux/clk.h>
-+#define USE_NEW_CONFIG_SETTING
-+
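-+/* Initial ISP register configuration; each entry is a {addr, val, mask, delay_ms} tuple matching struct regval_t. */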
-+static struct regval_t isp_reg_init_config_list[] = {
-+ /* config DC(0040H~0044H) */
-+ {0x00000044, 0x00000000, 0, 0},
-+ /* config DEC(0030H) */
-+ {0x00000030, 0x00000000, 0, 0},
-+ /* config OBC(0034H, 02E0H~02FCH) */
-+ {0x00000034, 0x000000BB, 0, 0},
-+ {0x000002E0, 0x40404040, 0, 0},
-+ {0x000002E4, 0x40404040, 0, 0},
-+ {0x000002E8, 0x40404040, 0, 0},
-+ {0x000002EC, 0x40404040, 0, 0},
-+ {0x000002F0, 0x00000000, 0, 0},
-+ {0x000002F4, 0x00000000, 0, 0},
-+ {0x000002F8, 0x00000000, 0, 0},
-+ {0x000002FC, 0x00000000, 0, 0},
-+ /* config LCBQ(0074H, 007CH, 0300H~039FH, and 0400H~049FH) */
-+ {0x00000074, 0x00009900, 0, 0},
-+ {0x0000007C, 0x01E40040, 0, 0},
-+ {0x00000300, 0x01000100, 0, 0},
-+ {0x00000304, 0x01000100, 0, 0},
-+ {0x00000308, 0x01000100, 0, 0},
-+ {0x0000030C, 0x01000100, 0, 0},
-+ {0x00000310, 0x01000100, 0, 0},
-+ {0x00000314, 0x01000100, 0, 0},
-+ {0x00000318, 0x01000100, 0, 0},
-+ {0x0000031C, 0x01000100, 0, 0},
-+ {0x00000320, 0x01000100, 0, 0},
-+ {0x00000324, 0x01000100, 0, 0},
-+ {0x00000328, 0x01000100, 0, 0},
-+ {0x0000032C, 0x01000100, 0, 0},
-+ {0x00000330, 0x00000100, 0, 0},
-+ {0x00000334, 0x01000100, 0, 0},
-+ {0x00000338, 0x01000100, 0, 0},
-+ {0x0000033C, 0x01000100, 0, 0},
-+ {0x00000340, 0x01000100, 0, 0},
-+ {0x00000344, 0x01000100, 0, 0},
-+ {0x00000348, 0x01000100, 0, 0},
-+ {0x0000034C, 0x01000100, 0, 0},
-+ {0x00000350, 0x01000100, 0, 0},
-+ {0x00000354, 0x01000100, 0, 0},
-+ {0x00000358, 0x01000100, 0, 0},
-+ {0x0000035C, 0x01000100, 0, 0},
-+ {0x00000360, 0x01000100, 0, 0},
-+ {0x00000364, 0x00000100, 0, 0},
-+ {0x00000368, 0x01000100, 0, 0},
-+ {0x0000036C, 0x01000100, 0, 0},
-+ {0x00000370, 0x01000100, 0, 0},
-+ {0x00000374, 0x01000100, 0, 0},
-+ {0x00000378, 0x01000100, 0, 0},
-+ {0x0000037C, 0x01000100, 0, 0},
-+ {0x00000380, 0x01000100, 0, 0},
-+ {0x00000384, 0x01000100, 0, 0},
-+ {0x00000388, 0x01000100, 0, 0},
-+ {0x0000038C, 0x01000100, 0, 0},
-+ {0x00000390, 0x01000100, 0, 0},
-+ {0x00000394, 0x01000100, 0, 0},
-+ {0x00000398, 0x00000100, 0, 0},
-+ {0x0000039C, 0x01000100, 0, 0},
-+ {0x000003A0, 0x01000100, 0, 0},
-+ {0x000003A4, 0x01000100, 0, 0},
-+ {0x000003A8, 0x01000100, 0, 0},
-+ {0x000003AC, 0x01000100, 0, 0},
-+ {0x000003B0, 0x01000100, 0, 0},
-+ {0x000003B4, 0x01000100, 0, 0},
-+ {0x000003B8, 0x01000100, 0, 0},
-+ {0x000003BC, 0x01000100, 0, 0},
-+ {0x000003C0, 0x01000100, 0, 0},
-+ {0x000003C4, 0x01000100, 0, 0},
-+ {0x000003C8, 0x01000100, 0, 0},
-+ {0x000003CC, 0x00000100, 0, 0},
-+ {0x00000400, 0x00000000, 0, 0},
-+ {0x00000404, 0x00000000, 0, 0},
-+ {0x00000408, 0x00000000, 0, 0},
-+ {0x0000040C, 0x00000000, 0, 0},
-+ {0x00000410, 0x00000000, 0, 0},
-+ {0x00000414, 0x00000000, 0, 0},
-+ {0x00000418, 0x00000000, 0, 0},
-+ {0x0000041C, 0x00000000, 0, 0},
-+ {0x00000420, 0x00000000, 0, 0},
-+ {0x00000424, 0x00000000, 0, 0},
-+ {0x00000428, 0x00000000, 0, 0},
-+ {0x0000042C, 0x00000000, 0, 0},
-+ {0x00000430, 0x00000000, 0, 0},
-+ {0x00000434, 0x00000000, 0, 0},
-+ {0x00000438, 0x00000000, 0, 0},
-+ {0x0000043C, 0x00000000, 0, 0},
-+ {0x00000440, 0x00000000, 0, 0},
-+ {0x00000444, 0x00000000, 0, 0},
-+ {0x00000448, 0x00000000, 0, 0},
-+ {0x0000044C, 0x00000000, 0, 0},
-+ {0x00000450, 0x00000000, 0, 0},
-+ {0x00000454, 0x00000000, 0, 0},
-+ {0x00000458, 0x00000000, 0, 0},
-+ {0x0000045C, 0x00000000, 0, 0},
-+ {0x00000460, 0x00000000, 0, 0},
-+ {0x00000464, 0x00000000, 0, 0},
-+ {0x00000468, 0x00000000, 0, 0},
-+ {0x0000046C, 0x00000000, 0, 0},
-+ {0x00000470, 0x00000000, 0, 0},
-+ {0x00000474, 0x00000000, 0, 0},
-+ {0x00000478, 0x00000000, 0, 0},
-+ {0x0000047C, 0x00000000, 0, 0},
-+ {0x00000480, 0x00000000, 0, 0},
-+ {0x00000484, 0x00000000, 0, 0},
-+ {0x00000488, 0x00000000, 0, 0},
-+ {0x0000048C, 0x00000000, 0, 0},
-+ {0x00000490, 0x00000000, 0, 0},
-+ {0x00000494, 0x00000000, 0, 0},
-+ {0x00000498, 0x00000000, 0, 0},
-+ {0x0000049C, 0x00000000, 0, 0},
-+ {0x000004A0, 0x00000000, 0, 0},
-+ {0x000004A4, 0x00000000, 0, 0},
-+ {0x000004A8, 0x00000000, 0, 0},
-+ {0x000004AC, 0x00000000, 0, 0},
-+ {0x000004B0, 0x00000000, 0, 0},
-+ {0x000004B4, 0x00000000, 0, 0},
-+ {0x000004B8, 0x00000000, 0, 0},
-+ {0x000004BC, 0x00000000, 0, 0},
-+ {0x000004C0, 0x00000000, 0, 0},
-+ {0x000004C4, 0x00000000, 0, 0},
-+ {0x000004C8, 0x00000000, 0, 0},
-+ {0x000004CC, 0x00000000, 0, 0},
-+ /* config OECF(0100H~027CH) */
-+ {0x00000100, 0x00100000, 0, 0},
-+ {0x00000104, 0x00400020, 0, 0},
-+ {0x00000108, 0x00800060, 0, 0},
-+ {0x0000010C, 0x00C000A0, 0, 0},
-+ {0x00000110, 0x010000E0, 0, 0},
-+ {0x00000114, 0x02000180, 0, 0},
-+ {0x00000118, 0x03000280, 0, 0},
-+ {0x0000011C, 0x03FE0380, 0, 0},
-+ {0x00000120, 0x00100000, 0, 0},
-+ {0x00000124, 0x00400020, 0, 0},
-+ {0x00000128, 0x00800060, 0, 0},
-+ {0x0000012C, 0x00C000A0, 0, 0},
-+ {0x00000130, 0x010000E0, 0, 0},
-+ {0x00000134, 0x02000180, 0, 0},
-+ {0x00000138, 0x03000280, 0, 0},
-+ {0x0000013C, 0x03FE0380, 0, 0},
-+ {0x00000140, 0x00100000, 0, 0},
-+ {0x00000144, 0x00400020, 0, 0},
-+ {0x00000148, 0x00800060, 0, 0},
-+ {0x0000014C, 0x00C000A0, 0, 0},
-+ {0x00000150, 0x010000E0, 0, 0},
-+ {0x00000154, 0x02000180, 0, 0},
-+ {0x00000158, 0x03000280, 0, 0},
-+ {0x0000015C, 0x03FE0380, 0, 0},
-+ {0x00000160, 0x00100000, 0, 0},
-+ {0x00000164, 0x00400020, 0, 0},
-+ {0x00000168, 0x00800060, 0, 0},
-+ {0x0000016C, 0x00C000A0, 0, 0},
-+ {0x00000170, 0x010000E0, 0, 0},
-+ {0x00000174, 0x02000180, 0, 0},
-+ {0x00000178, 0x03000280, 0, 0},
-+ {0x0000017C, 0x03FE0380, 0, 0},
-+ {0x00000180, 0x00100000, 0, 0},
-+ {0x00000184, 0x00400020, 0, 0},
-+ {0x00000188, 0x00800060, 0, 0},
-+ {0x0000018C, 0x00C000A0, 0, 0},
-+ {0x00000190, 0x010000E0, 0, 0},
-+ {0x00000194, 0x02000180, 0, 0},
-+ {0x00000198, 0x03000280, 0, 0},
-+ {0x0000019C, 0x03FE0380, 0, 0},
-+ {0x000001A0, 0x00100000, 0, 0},
-+ {0x000001A4, 0x00400020, 0, 0},
-+ {0x000001A8, 0x00800060, 0, 0},
-+ {0x000001AC, 0x00C000A0, 0, 0},
-+ {0x000001B0, 0x010000E0, 0, 0},
-+ {0x000001B4, 0x02000180, 0, 0},
-+ {0x000001B8, 0x03000280, 0, 0},
-+ {0x000001BC, 0x03FE0380, 0, 0},
-+ {0x000001C0, 0x00100000, 0, 0},
-+ {0x000001C4, 0x00400020, 0, 0},
-+ {0x000001C8, 0x00800060, 0, 0},
-+ {0x000001CC, 0x00C000A0, 0, 0},
-+ {0x000001D0, 0x010000E0, 0, 0},
-+ {0x000001D4, 0x02000180, 0, 0},
-+ {0x000001D8, 0x03000280, 0, 0},
-+ {0x000001DC, 0x03FE0380, 0, 0},
-+ {0x000001E0, 0x00100000, 0, 0},
-+ {0x000001E4, 0x00400020, 0, 0},
-+ {0x000001E8, 0x00800060, 0, 0},
-+ {0x000001EC, 0x00C000A0, 0, 0},
-+ {0x000001F0, 0x010000E0, 0, 0},
-+ {0x000001F4, 0x02000180, 0, 0},
-+ {0x000001F8, 0x03000280, 0, 0},
-+ {0x000001FC, 0x03FE0380, 0, 0},
-+ {0x00000200, 0x00800080, 0, 0},
-+ {0x00000204, 0x00800080, 0, 0},
-+ {0x00000208, 0x00800080, 0, 0},
-+ {0x0000020C, 0x00800080, 0, 0},
-+ {0x00000210, 0x00800080, 0, 0},
-+ {0x00000214, 0x00800080, 0, 0},
-+ {0x00000218, 0x00800080, 0, 0},
-+ {0x0000021C, 0x00800080, 0, 0},
-+ {0x00000220, 0x00800080, 0, 0},
-+ {0x00000224, 0x00800080, 0, 0},
-+ {0x00000228, 0x00800080, 0, 0},
-+ {0x0000022C, 0x00800080, 0, 0},
-+ {0x00000230, 0x00800080, 0, 0},
-+ {0x00000234, 0x00800080, 0, 0},
-+ {0x00000238, 0x00800080, 0, 0},
-+ {0x0000023C, 0x00800080, 0, 0},
-+ {0x00000240, 0x00800080, 0, 0},
-+ {0x00000244, 0x00800080, 0, 0},
-+ {0x00000248, 0x00800080, 0, 0},
-+ {0x0000024C, 0x00800080, 0, 0},
-+ {0x00000250, 0x00800080, 0, 0},
-+ {0x00000254, 0x00800080, 0, 0},
-+ {0x00000258, 0x00800080, 0, 0},
-+ {0x0000025C, 0x00800080, 0, 0},
-+ {0x00000260, 0x00800080, 0, 0},
-+ {0x00000264, 0x00800080, 0, 0},
-+ {0x00000268, 0x00800080, 0, 0},
-+ {0x0000026C, 0x00800080, 0, 0},
-+ {0x00000270, 0x00800080, 0, 0},
-+ {0x00000274, 0x00800080, 0, 0},
-+ {0x00000278, 0x00800080, 0, 0},
-+ {0x0000027C, 0x00800080, 0, 0},
-+ /* config OECFHM(03D0H~03E4H) */
-+ {0x000003D0, 0x04000000, 0, 0},
-+ {0x000003D4, 0x0C000800, 0, 0},
-+ {0x000003D8, 0x00000FFF, 0, 0},
-+ {0x000003DC, 0x08000800, 0, 0},
-+ {0x000003E0, 0x08000800, 0, 0},
-+ {0x000003E4, 0x00000800, 0, 0},
-+ /* config LCCF(0050H, 0058H, 00E0H~00ECH) */
-+ {0x00000050, 0x021C03C0, 0, 0},
-+ {0x00000058, 0x0000000B, 0, 0},
-+ {0x000000E0, 0x00000000, 0, 0},
-+ {0x000000E4, 0x00000000, 0, 0},
-+ {0x000000E8, 0x00000000, 0, 0},
-+ {0x000000EC, 0x00000000, 0, 0},
-+ /* config AWB(0280H~02DCH) */
-+ {0x00000280, 0x00000000, 0, 0},
-+ {0x00000284, 0x00000000, 0, 0},
-+ {0x00000288, 0x00000000, 0, 0},
-+ {0x0000028C, 0x00000000, 0, 0},
-+ {0x00000290, 0x00000000, 0, 0},
-+ {0x00000294, 0x00000000, 0, 0},
-+ {0x00000298, 0x00000000, 0, 0},
-+ {0x0000029C, 0x00000000, 0, 0},
-+ {0x000002A0, 0x00000000, 0, 0},
-+ {0x000002A4, 0x00000000, 0, 0},
-+ {0x000002A8, 0x00000000, 0, 0},
-+ {0x000002AC, 0x00000000, 0, 0},
-+ {0x000002B0, 0x00000000, 0, 0},
-+ {0x000002B4, 0x00000000, 0, 0},
-+ {0x000002B8, 0x00000000, 0, 0},
-+ {0x000002BC, 0x00000000, 0, 0},
-+ {0x000002C0, 0x00800080, 0, 0},
-+ {0x000002C4, 0x00800080, 0, 0},
-+ {0x000002C8, 0x00800080, 0, 0},
-+ {0x000002CC, 0x00800080, 0, 0},
-+ {0x000002D0, 0x00800080, 0, 0},
-+ {0x000002D4, 0x00800080, 0, 0},
-+ {0x000002D8, 0x00800080, 0, 0},
-+ {0x000002DC, 0x00800080, 0, 0},
-+ /* config CTC(0A10H) and DBC(0A14H) filter */
-+ {0x00000A10, 0x41400040, 0, 0},
-+ {0x00000A14, 0x02000200, 0, 0},
-+ /* config CFA(0018H, 0A1CH) */
-+ {0x00000018, 0x000011BB, 0, 0},
-+ {0x00000A1C, 0x00000032, 0, 0},
-+ /* config CCM(0C40H~0CA4H) */
-+ {0x00000C40, 0x00060000, 0, 0},
-+ {0x00000C44, 0x00000000, 0, 0},
-+ {0x00000C48, 0x00000000, 0, 0},
-+ {0x00000C4C, 0x00000000, 0, 0},
-+ {0x00000C50, 0x00000000, 0, 0},
-+ {0x00000C54, 0x00000000, 0, 0},
-+ {0x00000C58, 0x00000000, 0, 0},
-+ {0x00000C5C, 0x00000000, 0, 0},
-+ {0x00000C60, 0x00000000, 0, 0},
-+ {0x00000C64, 0x00000000, 0, 0},
-+ {0x00000C68, 0x00000000, 0, 0},
-+ {0x00000C6C, 0x00000000, 0, 0},
-+ {0x00000C70, 0x00000080, 0, 0},
-+ {0x00000C74, 0x00000000, 0, 0},
-+ {0x00000C78, 0x00000000, 0, 0},
-+ {0x00000C7C, 0x00000000, 0, 0},
-+ {0x00000C80, 0x00000080, 0, 0},
-+ {0x00000C84, 0x00000000, 0, 0},
-+ {0x00000C88, 0x00000000, 0, 0},
-+ {0x00000C8C, 0x00000000, 0, 0},
-+ {0x00000C90, 0x00000080, 0, 0},
-+ {0x00000C94, 0x00000000, 0, 0},
-+ {0x00000C98, 0x00000000, 0, 0},
-+ {0x00000C9C, 0x00000000, 0, 0},
-+ {0x00000CA0, 0x00000700, 0, 0},
-+ {0x00000CA4, 0x00000200, 0, 0},
-+ /* config GMARGB(0E00H~0E38H) */
-+ {0x00000E00, 0x24000000, 0, 0},
-+ {0x00000E04, 0x08000020, 0, 0},
-+ {0x00000E08, 0x08000040, 0, 0},
-+ {0x00000E0C, 0x08000060, 0, 0},
-+ {0x00000E10, 0x08000080, 0, 0},
-+ {0x00000E14, 0x080000A0, 0, 0},
-+ {0x00000E18, 0x080000C0, 0, 0},
-+ {0x00000E1C, 0x080000E0, 0, 0},
-+ {0x00000E20, 0x08000100, 0, 0},
-+ {0x00000E24, 0x08000180, 0, 0},
-+ {0x00000E28, 0x08000200, 0, 0},
-+ {0x00000E2C, 0x08000280, 0, 0},
-+ {0x00000E30, 0x08000300, 0, 0},
-+ {0x00000E34, 0x08000380, 0, 0},
-+ {0x00000E38, 0x080003FE, 0, 0},
-+ /* config R2Y(0E40H~0E60H) */
-+ {0x00000E40, 0x0000004C, 0, 0},
-+ {0x00000E44, 0x00000097, 0, 0},
-+ {0x00000E48, 0x0000001D, 0, 0},
-+ {0x00000E4C, 0x000001D5, 0, 0},
-+ {0x00000E50, 0x000001AC, 0, 0},
-+ {0x00000E54, 0x00000080, 0, 0},
-+ {0x00000E58, 0x00000080, 0, 0},
-+ {0x00000E5C, 0x00000194, 0, 0},
-+ {0x00000E60, 0x000001EC, 0, 0},
-+ /* config YCRV(0F00H~0FFCH) */
-+ {0x00000F00, 0x00000000, 0, 0},
-+ {0x00000F04, 0x00000010, 0, 0},
-+ {0x00000F08, 0x00000020, 0, 0},
-+ {0x00000F0C, 0x00000030, 0, 0},
-+ {0x00000F10, 0x00000040, 0, 0},
-+ {0x00000F14, 0x00000050, 0, 0},
-+ {0x00000F18, 0x00000060, 0, 0},
-+ {0x00000F1C, 0x00000070, 0, 0},
-+ {0x00000F20, 0x00000080, 0, 0},
-+ {0x00000F24, 0x00000090, 0, 0},
-+ {0x00000F28, 0x000000A0, 0, 0},
-+ {0x00000F2C, 0x000000B0, 0, 0},
-+ {0x00000F30, 0x000000C0, 0, 0},
-+ {0x00000F34, 0x000000D0, 0, 0},
-+ {0x00000F38, 0x000000E0, 0, 0},
-+ {0x00000F3C, 0x000000F0, 0, 0},
-+ {0x00000F40, 0x00000100, 0, 0},
-+ {0x00000F44, 0x00000110, 0, 0},
-+ {0x00000F48, 0x00000120, 0, 0},
-+ {0x00000F4C, 0x00000130, 0, 0},
-+ {0x00000F50, 0x00000140, 0, 0},
-+ {0x00000F54, 0x00000150, 0, 0},
-+ {0x00000F58, 0x00000160, 0, 0},
-+ {0x00000F5C, 0x00000170, 0, 0},
-+ {0x00000F60, 0x00000180, 0, 0},
-+ {0x00000F64, 0x00000190, 0, 0},
-+ {0x00000F68, 0x000001A0, 0, 0},
-+ {0x00000F6C, 0x000001B0, 0, 0},
-+ {0x00000F70, 0x000001C0, 0, 0},
-+ {0x00000F74, 0x000001D0, 0, 0},
-+ {0x00000F78, 0x000001E0, 0, 0},
-+ {0x00000F7C, 0x000001F0, 0, 0},
-+ {0x00000F80, 0x00000200, 0, 0},
-+ {0x00000F84, 0x00000210, 0, 0},
-+ {0x00000F88, 0x00000220, 0, 0},
-+ {0x00000F8C, 0x00000230, 0, 0},
-+ {0x00000F90, 0x00000240, 0, 0},
-+ {0x00000F94, 0x00000250, 0, 0},
-+ {0x00000F98, 0x00000260, 0, 0},
-+ {0x00000F9C, 0x00000270, 0, 0},
-+ {0x00000FA0, 0x00000280, 0, 0},
-+ {0x00000FA4, 0x00000290, 0, 0},
-+ {0x00000FA8, 0x000002A0, 0, 0},
-+ {0x00000FAC, 0x000002B0, 0, 0},
-+ {0x00000FB0, 0x000002C0, 0, 0},
-+ {0x00000FB4, 0x000002D0, 0, 0},
-+ {0x00000FB8, 0x000002E0, 0, 0},
-+ {0x00000FBC, 0x000002F0, 0, 0},
-+ {0x00000FC0, 0x00000300, 0, 0},
-+ {0x00000FC4, 0x00000310, 0, 0},
-+ {0x00000FC8, 0x00000320, 0, 0},
-+ {0x00000FCC, 0x00000330, 0, 0},
-+ {0x00000FD0, 0x00000340, 0, 0},
-+ {0x00000FD4, 0x00000350, 0, 0},
-+ {0x00000FD8, 0x00000360, 0, 0},
-+ {0x00000FDC, 0x00000370, 0, 0},
-+ {0x00000FE0, 0x00000380, 0, 0},
-+ {0x00000FE4, 0x00000390, 0, 0},
-+ {0x00000FE8, 0x000003A0, 0, 0},
-+ {0x00000FEC, 0x000003B0, 0, 0},
-+ {0x00000FF0, 0x000003C0, 0, 0},
-+ {0x00000FF4, 0x000003D0, 0, 0},
-+ {0x00000FF8, 0x000003E0, 0, 0},
-+ {0x00000FFC, 0x000003F0, 0, 0},
-+ /* config Shrp(0E80H~0EE8H) */
-+ {0x00000E80, 0x00070F00, 0, 0},
-+ {0x00000E84, 0x00180F00, 0, 0},
-+ {0x00000E88, 0x00800F00, 0, 0},
-+ {0x00000E8C, 0x01000F00, 0, 0},
-+ {0x00000E90, 0x00100F00, 0, 0},
-+ {0x00000E94, 0x00600F00, 0, 0},
-+ {0x00000E98, 0x01000F00, 0, 0},
-+ {0x00000E9C, 0x01900F00, 0, 0},
-+ {0x00000EA0, 0x00000F00, 0, 0},
-+ {0x00000EA4, 0x00000F00, 0, 0},
-+ {0x00000EA8, 0x00000F00, 0, 0},
-+ {0x00000EAC, 0x00000F00, 0, 0},
-+ {0x00000EB0, 0x00000F00, 0, 0},
-+ {0x00000EB4, 0x00000F00, 0, 0},
-+ {0x00000EB8, 0x00000F00, 0, 0},
-+ {0x00000EBC, 0x10000000, 0, 0},
-+ {0x00000EC0, 0x10000000, 0, 0},
-+ {0x00000EC4, 0x10000000, 0, 0},
-+ {0x00000EC8, 0x10000000, 0, 0},
-+ {0x00000ECC, 0x10000000, 0, 0},
-+ {0x00000ED0, 0x10000000, 0, 0},
-+ {0x00000ED4, 0x88000D7C, 0, 0},
-+ {0x00000ED8, 0x00C00040, 0, 0},
-+ {0x00000EDC, 0xFF000000, 0, 0},
-+ {0x00000EE0, 0x00A00040, 0, 0},
-+ {0x00000EE4, 0x00000000, 0, 0},
-+ {0x00000EE8, 0x00000000, 0, 0},
-+ /* config DNYUV(0C00H~0C24H) */
-+ {0x00000C00, 0x00777777, 0, 0},
-+ {0x00000C04, 0x00007777, 0, 0},
-+ {0x00000C08, 0x00777777, 0, 0},
-+ {0x00000C0C, 0x00007777, 0, 0},
-+ {0x00000C10, 0x00600040, 0, 0},
-+ {0x00000C14, 0x00D80090, 0, 0},
-+ {0x00000C18, 0x01E60144, 0, 0},
-+ {0x00000C1C, 0x00600040, 0, 0},
-+ {0x00000C20, 0x00D80090, 0, 0},
-+ {0x00000C24, 0x01E60144, 0, 0},
-+ /* config SAT(0A30H~0A40H, 0A54H~0A58H) */
-+ {0x00000A30, 0x00000100, 0, 0},
-+ {0x00000A34, 0x001F0001, 0, 0},
-+ {0x00000A38, 0x00000000, 0, 0},
-+ {0x00000A3C, 0x00000100, 0, 0},
-+ {0x00000A40, 0x00000008, 0, 0},
-+ {0x00000A54, 0x04010001, 0, 0},
-+ {0x00000A58, 0x03FF0001, 0, 0},
-+ /* config OBA(0090H~0094H) */
-+ {0x00000090, 0x04380000, 0, 0},
-+ {0x00000094, 0x04390780, 0, 0},
-+ /* config SC(0098H~009CH, 00B8H~00BCH,
-+ * 00C0H, 0C4H~0D4H, 04D0H~054CH, 5D0H~5D4H)
-+ */
-+ {0x0000009C, 0x01000000, 0, 0},
-+ {0x000000B8, 0x000C0000, 0, 0},
-+ {0x000000BC, 0xC010151D, 0, 0},
-+ {0x000000C0, 0x01F1BF08, 0, 0},
-+ {0x000000C4, 0xFF00FF00, 0, 0},
-+ {0x000000C8, 0xFF00FF00, 0, 0},
-+ {0x000000CC, 0xFFFF0000, 0, 0},
-+ {0x000000D0, 0xFFFF0000, 0, 0},
-+ {0x000000D4, 0xFFFF0000, 0, 0},
-+ {0x000000D8, 0x01050107, 0, 0},
-+ {0x000004D0, 0x00000000, 0, 0},
-+ {0x000004D4, 0x00000000, 0, 0},
-+ {0x000004D8, 0x00000000, 0, 0},
-+ {0x000004DC, 0x00000000, 0, 0},
-+ {0x000004E0, 0x00000000, 0, 0},
-+ {0x000004E4, 0x00000000, 0, 0},
-+ {0x000004E8, 0x00000000, 0, 0},
-+ {0x000004EC, 0x00000000, 0, 0},
-+ {0x000004F0, 0x00100000, 0, 0},
-+ {0x000004F4, 0x00000000, 0, 0},
-+ {0x000004F8, 0x03D20000, 0, 0},
-+ {0x000004FC, 0x00000000, 0, 0},
-+ {0x00000500, 0x00950000, 0, 0},
-+ {0x00000504, 0x00000000, 0, 0},
-+ {0x00000508, 0x00253000, 0, 0},
-+ {0x0000050C, 0x00000000, 0, 0},
-+ {0x00000510, 0x00000000, 0, 0},
-+ {0x00000514, 0x00000000, 0, 0},
-+ {0x00000518, 0x00000000, 0, 0},
-+ {0x0000051C, 0x00000000, 0, 0},
-+ {0x00000520, 0x00000000, 0, 0},
-+ {0x00000524, 0x00000000, 0, 0},
-+ {0x00000528, 0x00000000, 0, 0},
-+ {0x0000052C, 0x00000000, 0, 0},
-+ {0x00000530, 0x00000000, 0, 0},
-+ {0x00000534, 0x00000000, 0, 0},
-+ {0x00000538, 0xFFFFFFF0, 0, 0},
-+ {0x0000053C, 0x8FFFFFFF, 0, 0},
-+ {0x00000540, 0x0000001E, 0, 0},
-+ {0x00000544, 0x00000000, 0, 0},
-+ {0x00000548, 0x00000000, 0, 0},
-+ {0x0000054C, 0xF0F20000, 0, 0},
-+ {0x000005D0, 0xFF00FF00, 0, 0},
-+ {0x000005D4, 0xFF00FF00, 0, 0},
-+ /* config YHIST(0CC8H~0CD8H) */
-+ {0x00000CC8, 0x00000000, 0, 0},
-+ {0x00000CCC, 0x0437077F, 0, 0},
-+ {0x00000CD0, 0x00010002, 0, 0},
-+ {0x00000CD4, 0x00000000, 0, 0},
-+ /* config CBAR(0600H-0653H) */
-+ {0x00000600, 0x043E0782, 0, 0},
-+ {0x00000604, 0x00000000, 0, 0},
-+ {0x00000608, 0x0437077F, 0, 0},
-+ {0x0000060C, 0x00443150, 0, 0},
-+ {0x00000610, 0x00000000, 0, 0},
-+ {0x00000614, 0x08880888, 0, 0},
-+ {0x00000618, 0x02220222, 0, 0},
-+ {0x0000061C, 0x04440444, 0, 0},
-+ {0x00000620, 0x08880888, 0, 0},
-+ {0x00000624, 0x0AAA0AAA, 0, 0},
-+ {0x00000628, 0x0CCC0CCC, 0, 0},
-+ {0x0000062C, 0x0EEE0EEE, 0, 0},
-+ {0x00000630, 0x0FFF0FFF, 0, 0},
-+ {0x00000634, 0x08880888, 0, 0},
-+ {0x00000638, 0x02220222, 0, 0},
-+ {0x0000063C, 0x04440444, 0, 0},
-+ {0x00000640, 0x08880888, 0, 0},
-+ {0x00000644, 0x0AAA0AAA, 0, 0},
-+ {0x00000648, 0x0CCC0CCC, 0, 0},
-+ {0x0000064C, 0x0EEE0EEE, 0, 0},
-+ {0x00000650, 0x0FFF0FFF, 0, 0},
-+ /* config sensor(0014H) */
-+ {0x00000014, 0x0000000c, 0, 0},
-+ /* config CROP(001CH, 0020H) */
-+ {0x0000001C, 0x00000000, 0, 0},
-+ {0x00000020, 0x0437077F, 0, 0},
-+ /* config isp pileline X/Y size(A0CH) */
-+ {0x00000A0C, 0x04380780, 0, 0},
-+ /* config CSI dump (24H/28H) */
-+ {0x00000028, 0x00030B80, 0, 0},
-+ /* Video Output */
-+ /* config UO(0A80H~0A90H) */
-+ {0x00000A88, 0x00000780, 0, 0},
-+ /* NV12 */
-+ {0x00000A8C, 0x00000000, 0, 0},
-+ /* NV21
-+ *{0x00000A8C, 0x00000020, 0, 0},
-+ */
-+ {0x00000A90, 0x00000000, 0, 0},
-+ {0x00000A9C, 0x00000780, 0, 0},
-+ {0x00000AA0, 0x00000002, 0, 0},
-+ {0x00000AA4, 0x00000002, 0, 0},
-+ {0x00000AA8, 0x07800438, 0, 0},
-+ {0x00000AB4, 0x00000780, 0, 0},
-+ {0x00000AB8, 0x00000002, 0, 0},
-+ {0x00000ABC, 0x00000002, 0, 0},
-+ {0x00000AC0, 0x07800438, 0, 0},
-+ {0x00000AC4, 0x00000000, 0, 0},
-+ /* config TIL(0B20H~0B48H) */
-+ {0x00000B20, 0x04380780, 0, 0},
-+ {0x00000B24, 0x00000960, 0, 0},
-+ {0x00000B38, 0x00030003, 0, 0},
-+ {0x00000B3C, 0x00000960, 0, 0},
-+ {0x00000B44, 0x00000000, 0, 0},
-+ {0x00000B48, 0x00000000, 0, 0},
-+ /* Enable DEC/OBC/OECF/LCCF/AWB/SC/DUMP */
-+ {0x00000010, 0x000A00D6, 0x00000000, 0x00},
-+ /* Enable CFA/CAR/CCM/GMARGB/R2Y/SHRP/SAT/DNYUV/YCRV/YHIST/CTC/DBC */
-+ {0x00000A08, 0x107A01BE, 0x00000000, 0x00},
-+};
-+
-+const struct reg_table isp_reg_init_settings[] = {
-+ {isp_reg_init_config_list,
-+ ARRAY_SIZE(isp_reg_init_config_list)},
-+};
-+
-+static struct regval_t isp_reg_start_config_list[] = {
-+#if defined(ENABLE_SS0_SS1)
-+ /* ENABLE UO/SS0/SS1/Multi-Frame and Reset ISP */
-+ {0x00000A00, 0x00121802, 0x00000000, 0x0A},
-+ /* ENABLE UO/SS0/SS1/Multi-Frame and Leave ISP reset */
-+ {0x00000A00, 0x00121800, 0x00000000, 0x0A},
-+#else
-+ /* ENABLE UO/Multi-Frame and Reset ISP */
-+ {0x00000A00, 0x00120002, 0x00000000, 0x0A},
-+ /* ENABLE UO/Multi-Frame and Leave ISP reset */
-+ {0x00000A00, 0x00120000, 0x00000000, 0x0A},
-+#endif
-+ /* Config ISP shadow mode as next-vsync */
-+ {0x00000A50, 0x00000002, 0x00000000, 0x00},
-+#if defined(ENABLE_SS0_SS1)
-+ /* ENABLE UO/SS0/SS1/Multi-Frame and Enable ISP */
-+ {0x00000A00, 0x00121801, 0x00000000, 0x0A},
-+#else
-+ /* ENABLE UO/Multi-Frame and Enable ISP */
-+ {0x00000A00, 0x00120001, 0x00000000, 0x0A},
-+#endif
-+ /* Config CSI shadow mode as immediate to fetch current setting */
-+ {0x00000008, 0x00010004, 0x00000000, 0x0A},
-+ /* Config CSI shadow mode as next-vsync */
-+ {0x00000008, 0x00020004, 0x00000000, 0x00},
-+ /* Enable CSI */
-+ {0x00000000, 0x00000001, 0x00000000, 0x0A},
-+};
-+
-+const struct reg_table isp_reg_start_settings[] = {
-+ {isp_reg_start_config_list,
-+ ARRAY_SIZE(isp_reg_start_config_list)},
-+};
-+
-+static struct regval_t isp_imx_219_reg_config_list[] = {
-+ /* MIPI sensor */
-+ {0x00000014, 0x0000000D, 0, 0},
-+ /* config CFA(0018H, 0A1CH) */
-+ {0x00000A1C, 0x00000032, 0, 0},
-+ {0x00000A8C, 0x00000000, 0, 0},
-+ {0x00000A90, 0x00000000, 0, 0},
-+ /* config R2Y(0E40H~0E60H) */
-+ {0x00000E40, 0x0000004C, 0, 0},
-+ {0x00000E44, 0x00000097, 0, 0},
-+ {0x00000E48, 0x0000001D, 0, 0},
-+ {0x00000E4C, 0x000001D5, 0, 0},
-+ {0x00000E50, 0x000001AC, 0, 0},
-+ {0x00000E54, 0x00000080, 0, 0},
-+ {0x00000E58, 0x00000080, 0, 0},
-+ {0x00000E5C, 0x00000194, 0, 0},
-+ {0x00000E60, 0x000001EC, 0, 0},
-+ /* Config AWB(0280H~02DCH). Fixed WB gain for IMX-219 sensor. */
-+ {0x00000280, 0x00000000, 0, 0},
-+ {0x00000284, 0x00000000, 0, 0},
-+ {0x00000288, 0x00000000, 0, 0},
-+ {0x0000028C, 0x00000000, 0, 0},
-+ {0x00000290, 0x00000000, 0, 0},
-+ {0x00000294, 0x00000000, 0, 0},
-+ {0x00000298, 0x00000000, 0, 0},
-+ {0x0000029C, 0x00000000, 0, 0},
-+ {0x000002A0, 0x00000000, 0, 0},
-+ {0x000002A4, 0x00000000, 0, 0},
-+ {0x000002A8, 0x00000000, 0, 0},
-+ {0x000002AC, 0x00000000, 0, 0},
-+ {0x000002B0, 0x00000000, 0, 0},
-+ {0x000002B4, 0x00000000, 0, 0},
-+ {0x000002B8, 0x00000000, 0, 0},
-+ {0x000002BC, 0x00000000, 0, 0},
-+ {0x000002C0, 0x00F000F0, 0, 0},
-+ {0x000002C4, 0x00F000F0, 0, 0},
-+ {0x000002C8, 0x00800080, 0, 0},
-+ {0x000002CC, 0x00800080, 0, 0},
-+ {0x000002D0, 0x00800080, 0, 0},
-+ {0x000002D4, 0x00800080, 0, 0},
-+ {0x000002D8, 0x00B000B0, 0, 0},
-+ {0x000002DC, 0x00B000B0, 0, 0},
-+ /* config GMARGB(0E00H~0E38H)
-+ * Gamma RGB 1.9 for IMX-219 sensor
-+ */
-+ {0x00000E00, 0x24000000, 0, 0},
-+ {0x00000E04, 0x159500A5, 0, 0},
-+ {0x00000E08, 0x0F9900EE, 0, 0},
-+ {0x00000E0C, 0x0CE40127, 0, 0},
-+ {0x00000E10, 0x0B410157, 0, 0},
-+ {0x00000E14, 0x0A210181, 0, 0},
-+ {0x00000E18, 0x094B01A8, 0, 0},
-+ {0x00000E1C, 0x08A401CC, 0, 0},
-+ {0x00000E20, 0x081D01EE, 0, 0},
-+ {0x00000E24, 0x06B20263, 0, 0},
-+ {0x00000E28, 0x05D802C7, 0, 0},
-+ {0x00000E2C, 0x05420320, 0, 0},
-+ {0x00000E30, 0x04D30370, 0, 0},
-+ {0x00000E34, 0x047C03BB, 0, 0},
-+ {0x00000E38, 0x043703FF, 0, 0},
-+ {0x00000010, 0x00000080, 0, 0},
-+ /* Enable CFA/GMARGB/R2Y */
-+ {0x00000A08, 0x10000032, 0x0FFFFFFF, 0x00},
-+ {0x00000A00, 0x00120002, 0, 0},
-+ {0x00000A00, 0x00120000, 0, 0},
-+ {0x00000A50, 0x00000002, 0, 0},
-+ {0x00000008, 0x00010000, 0, 0},
-+ {0x00000008, 0x0002000A, 0, 0},
-+ {0x00000000, 0x00000001, 0, 0},
-+};
-+
-+const struct reg_table isp_imx_219_settings[] = {
-+ {isp_imx_219_reg_config_list,
-+ ARRAY_SIZE(isp_imx_219_reg_config_list)},
-+};
-+
-+static struct regval_t isp_format_reg_list[] = {
-+ {0x0000001C, 0x00000000, 0x00000000, 0},
-+ {0x00000020, 0x0437077F, 0x00000000, 0},
-+ {0x00000A0C, 0x04380780, 0x00000000, 0},
-+ {0x00000A88, 0x00000780, 0x00000000, 0},
-+ {0x00000018, 0x000011BB, 0x00000000, 0},
-+ {0x00000A08, 0x10000000, 0xF0000000, 0},
-+ {0x00000028, 0x00030B80, 0x0003FFFF, 0},
-+ {0x00000AA8, 0x07800438, 0x00000000, 0},
-+ {0x00000A9C, 0x00000780, 0x00000000, 0},
-+ {0x00000AC0, 0x07800438, 0x00000000, 0},
-+ {0x00000AB4, 0x00000780, 0x00000000, 0},
-+ {0x00000B20, 0x04380780, 0x00000000, 0},
-+ {0x00000B24, 0x00000960, 0x00000000, 0},
-+ {0x00000B3C, 0x00000960, 0x00000000, 0},
-+ {0x00000014, 0x00000008, 0x00000000, 0},
-+};
-+
-+const struct reg_table isp_format_settings[] = {
-+ {isp_format_reg_list,
-+ ARRAY_SIZE(isp_format_reg_list)},
-+};
-+
-+#if !defined(USE_NEW_CONFIG_SETTING)
-+static struct reg_table *isp_settings = (struct reg_table *)isp_imx_219_settings;
-+#endif
-+
-+static void isp_load_regs(void __iomem *ispbase, const struct reg_table *table)
-+{
-+ int j;
-+ u32 delay_ms, reg_addr, mask, val;
-+
-+ for (j = 0; j < table->regval_num; j++) {
-+ delay_ms = table->regval[j].delay_ms;
-+ reg_addr = table->regval[j].addr;
-+ val = table->regval[j].val;
-+ mask = table->regval[j].mask;
-+
-+ if (reg_addr % 4
-+ || reg_addr > STF_ISP_REG_OFFSET_MAX
-+ || delay_ms > STF_ISP_REG_DELAY_MAX)
-+ continue;
-+
-+ if (mask)
-+ reg_set_bit(ispbase, reg_addr, mask, val);
-+ else
-+ reg_write(ispbase, reg_addr, val);
-+ if (delay_ms)
-+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
-+ }
-+}
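For readers following the tables above, the entry layout that isp_load_regs() expects can be summarised with a short sketch. The struct definitions below are inferred from the initializers and field accesses in this file (the driver's real definitions live in its headers, and the sketch reuses the kernel's u32 and ARRAY_SIZE()); example_list and example_settings are purely illustrative.

/* Entry layout inferred from the accesses above; illustration only. */
struct regval_t {
	u32 addr;	/* register offset within the ISP block           */
	u32 val;	/* value to program                               */
	u32 mask;	/* 0 = plain write, non-zero = read-modify-write  */
	u32 delay_ms;	/* optional settle time after the write           */
};

struct reg_table {
	struct regval_t *regval;
	int regval_num;
};

/* A two-entry table: one plain write, one masked write with a 10 ms delay. */
static struct regval_t example_list[] = {
	{0x00000A1C, 0x00000032, 0x00000000, 0x00},
	{0x00000A00, 0x00000001, 0x00000001, 0x0A},
};

static const struct reg_table example_settings[] = {
	{example_list, ARRAY_SIZE(example_list)},
};

/* Replayed with: isp_load_regs(ispbase, example_settings); */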
-+
-+static void isp_load_regs_exclude_csi_isp_enable(
-+ void __iomem *ispbase,
-+ const struct reg_table *table)
-+{
-+ int j;
-+ u32 delay_ms, reg_addr, mask, val;
-+
-+ for (j = 0; j < table->regval_num; j++) {
-+ delay_ms = table->regval[j].delay_ms;
-+ reg_addr = table->regval[j].addr;
-+ val = table->regval[j].val;
-+ mask = table->regval[j].mask;
-+
-+ if (reg_addr % 4
-+ || reg_addr > STF_ISP_REG_OFFSET_MAX
-+ || delay_ms > STF_ISP_REG_DELAY_MAX
-+ || ((reg_addr == ISP_REG_CSI_INPUT_EN_AND_STATUS) && (val & 0x01))
-+ || ((reg_addr == ISP_REG_ISP_CTRL_0) && (val & 0x01)))
-+ continue;
-+
-+ if (mask)
-+ reg_set_bit(ispbase, reg_addr, mask, val);
-+ else
-+ reg_write(ispbase, reg_addr, val);
-+ if (delay_ms)
-+ usleep_range(1000 * delay_ms, 1000 * delay_ms + 100);
-+ }
-+}
-+
-+static int stf_isp_clk_enable(struct stf_isp_dev *isp_dev)
-+{
-+ struct stfcamss *stfcamss = isp_dev->stfcamss;
-+
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_WRAPPER_CLK_C].clk);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_WRAPPER_C].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_WRAPPER_P].rstc);
-+
-+ return 0;
-+}
-+
-+static int stf_isp_clk_disable(struct stf_isp_dev *isp_dev)
-+{
-+ struct stfcamss *stfcamss = isp_dev->stfcamss;
-+
-+ reset_control_assert(stfcamss->sys_rst[STFRST_WRAPPER_C].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_WRAPPER_P].rstc);
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_WRAPPER_CLK_C].clk);
-+
-+ return 0;
-+}
-+
-+static void __iomem *stf_isp_get_ispbase(struct stf_vin_dev *vin)
-+{
-+ void __iomem *base = vin->isp_base;
-+
-+ return base;
-+}
-+
-+static int stf_isp_save_ctx_regs(struct stf_isp_dev *isp_dev)
-+{
-+ int j;
-+ u32 addr, val;
-+ void __iomem *ispbase;
-+ struct device *dev = isp_dev->stfcamss->dev;
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ if (!isp_dev->context_regs) {
-+ int regs_size =
-+ sizeof(struct regval_t) * isp_reg_init_settings->regval_num;
-+ isp_dev->context_regs =
-+ devm_kzalloc(dev, sizeof(struct reg_table), GFP_KERNEL);
-+ if (!isp_dev->context_regs)
-+ return -ENOMEM;
-+ isp_dev->context_regs->regval =
-+ devm_kzalloc(dev, regs_size, GFP_KERNEL);
-+ isp_dev->context_regs->regval_num = isp_reg_init_settings->regval_num;
-+ }
-+
-+ if (!isp_dev->context_regs->regval)
-+ return -ENOMEM;
-+
-+ st_debug(ST_ISP, "Saving ISP context registers\n");
-+ for (j = 0; j < isp_reg_init_settings->regval_num; j++) {
-+ addr = isp_reg_init_settings->regval[j].addr;
-+ val = ioread32(ispbase + addr);
-+ isp_dev->context_regs->regval[j].addr = addr;
-+ isp_dev->context_regs->regval[j].val = val;
-+ }
-+ st_debug(ST_ISP, "ISP context registers have been saved\n");
-+
-+ return 0;
-+}
-+
-+static int stf_isp_restore_ctx_regs(struct stf_isp_dev *isp_dev)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ if (isp_dev->context_regs) {
-+ isp_load_regs(ispbase, isp_dev->context_regs);
-+ st_debug(ST_ISP, "Restored ISP register: isp_reg_init_settings.\n");
-+ }
-+
-+ return 0;
-+}
-+
-+static int stf_isp_reset(struct stf_isp_dev *isp_dev)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ reg_set_bit(ispbase, ISP_REG_ISP_CTRL_0, BIT(1), BIT(1));
-+ reg_set_bit(ispbase, ISP_REG_ISP_CTRL_0, BIT(1), 0);
-+
-+ return 0;
-+}
-+
-+static int stf_isp_config_set(struct stf_isp_dev *isp_dev)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ st_debug(ST_ISP, "%s\n", __func__);
-+
-+#if defined(USE_NEW_CONFIG_SETTING)
-+ mutex_lock(&isp_dev->setfile_lock);
-+
-+ if (isp_dev->context_regs) {
-+ stf_isp_restore_ctx_regs(isp_dev);
-+ st_debug(ST_ISP, "%s context regs restore done\n", __func__);
-+ } else {
-+ isp_load_regs(ispbase, isp_reg_init_settings);
-+ st_debug(ST_ISP, "%s isp_reg_init_settings done\n", __func__);
-+ }
-+ if (isp_dev->setfile.state) {
-+ st_info(ST_ISP, "%s, Program extra ISP setting!\n", __func__);
-+ isp_load_regs_exclude_csi_isp_enable(ispbase,
-+ &isp_dev->setfile.settings);
-+ }
-+
-+ mutex_unlock(&isp_dev->setfile_lock);
-+#else
-+ mutex_lock(&isp_dev->setfile_lock);
-+ if (isp_dev->setfile.state)
-+ isp_load_regs(ispbase, &isp_dev->setfile.settings);
-+ else
-+ isp_load_regs(ispbase, isp_settings);
-+ mutex_unlock(&isp_dev->setfile_lock);
-+
-+ st_debug(ST_ISP, "config 0x%x = 0x%x\n",
-+ isp_format_reg_list[0].addr,
-+ isp_format_reg_list[0].val);
-+ st_debug(ST_ISP, "config 0x%x = 0x%x\n",
-+ isp_format_reg_list[1].addr,
-+ isp_format_reg_list[1].val);
-+ st_debug(ST_ISP, "config 0x%x = 0x%x\n",
-+ isp_format_reg_list[2].addr,
-+ isp_format_reg_list[2].val);
-+ st_debug(ST_ISP, "config 0x%x = 0x%x\n",
-+ isp_format_reg_list[3].addr,
-+ isp_format_reg_list[3].val);
-+#endif
-+
-+ return 0;
-+}
-+
-+static int stf_isp_set_format(struct stf_isp_dev *isp_dev,
-+ struct isp_stream_format *crop_array, u32 mcode,
-+ int type)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ struct stf_dvp_dev *dvp_dev = isp_dev->stfcamss->dvp_dev;
-+ struct v4l2_rect *crop = &crop_array[ISP_COMPOSE].rect;
-+ u32 bpp = crop_array[ISP_COMPOSE].bpp;
-+ void __iomem *ispbase;
-+ u32 val, val1;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ st_debug(ST_ISP, "interface type is %d(%s)\n",
-+ type, type == CSI_SENSOR ? "CSI" : "DVP");
-+
-+ if (type == DVP_SENSOR) {
-+ unsigned int flags = dvp_dev->dvp->flags;
-+
-+ st_debug(ST_ISP, "dvp flags = 0x%x, hsync active is %s, vsync active is %s\n",
-+ flags, flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH ? "high" : "low",
-+ flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH ? "high" : "low");
-+ }
-+
-+ val = crop->left + (crop->top << 16);
-+ isp_format_reg_list[0].addr = ISP_REG_PIC_CAPTURE_START_CFG;
-+ isp_format_reg_list[0].val = val;
-+
-+ val = (crop->width + crop->left - 1)
-+ + ((crop->height + crop->top - 1) << 16);
-+ isp_format_reg_list[1].addr = ISP_REG_PIC_CAPTURE_END_CFG;
-+ isp_format_reg_list[1].val = val;
-+
-+ val = crop->width + (crop->height << 16);
-+ isp_format_reg_list[2].addr = ISP_REG_PIPELINE_XY_SIZE;
-+ isp_format_reg_list[2].val = val;
-+
-+ isp_format_reg_list[3].addr = ISP_REG_STRIDE;
-+ isp_format_reg_list[3].val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+
-+ switch (mcode) {
-+ case MEDIA_BUS_FMT_SRGGB10_1X10:
-+ case MEDIA_BUS_FMT_SRGGB8_1X8:
-+ // 3 2 3 2 1 0 1 0 B Gb B Gb Gr R Gr R
-+ val = 0x0000EE44;
-+ val1 = 0x00000000;
-+ break;
-+ case MEDIA_BUS_FMT_SGRBG10_1X10:
-+ case MEDIA_BUS_FMT_SGRBG8_1X8:
-+ // 2 3 2 3 0 1 0 1, Gb B Gb B R Gr R Gr
-+ val = 0x0000BB11;
-+ val1 = 0x20000000;
-+ break;
-+ case MEDIA_BUS_FMT_SGBRG10_1X10:
-+ case MEDIA_BUS_FMT_SGBRG8_1X8:
-+ // 1 0 1 0 3 2 3 2, Gr R Gr R B Gb B Gb
-+ val = 0x000044EE;
-+ val1 = 0x30000000;
-+ break;
-+ case MEDIA_BUS_FMT_SBGGR10_1X10:
-+ case MEDIA_BUS_FMT_SBGGR8_1X8:
-+ // 0 1 0 1 2 3 2 3 R Gr R Gr Gb B Gb B
-+ val = 0x000011BB;
-+ val1 = 0x10000000;
-+ break;
-+ default:
-+ st_err(ST_ISP, "UNKNOWN format\n");
-+ val = 0x000011BB;
-+ val1 = 0x10000000;
-+ break;
-+ }
-+
-+ isp_format_reg_list[4].addr = ISP_REG_RAW_FORMAT_CFG;
-+ isp_format_reg_list[4].val = val;
-+
-+ isp_format_reg_list[5].addr = ISP_REG_ISP_CTRL_1;
-+ isp_format_reg_list[5].val = val1;
-+ isp_format_reg_list[5].mask = 0xF0000000;
-+
-+ st_info(ST_ISP, "src left: %d, top: %d, width = %d, height = %d, bpp = %d\n",
-+ crop->left, crop->top, crop->width, crop->height, bpp);
-+
-+ crop = &crop_array[ISP_CROP].rect;
-+ bpp = crop_array[ISP_CROP].bpp;
-+ val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_128);
-+ isp_format_reg_list[6].addr = ISP_REG_DUMP_CFG_1;
-+ isp_format_reg_list[6].val = val | 3 << 16;
-+ isp_format_reg_list[6].mask = 0x0003FFFF;
-+
-+ st_info(ST_ISP, "raw left: %d, top: %d, width = %d, height = %d, bpp = %d\n",
-+ crop->left, crop->top, crop->width, crop->height, bpp);
-+
-+ crop = &crop_array[ISP_SCALE_SS0].rect;
-+ bpp = crop_array[ISP_SCALE_SS0].bpp;
-+ isp_format_reg_list[7].addr = ISP_REG_SS0IW;
-+ isp_format_reg_list[7].val = (crop->width << 16) + crop->height;
-+ isp_format_reg_list[8].addr = ISP_REG_SS0S;
-+ isp_format_reg_list[8].val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+
-+ st_info(ST_ISP, "ss0 left: %d, top: %d, width = %d, height = %d, bpp = %d\n",
-+ crop->left, crop->top, crop->width, crop->height, bpp);
-+
-+ crop = &crop_array[ISP_SCALE_SS1].rect;
-+ bpp = crop_array[ISP_SCALE_SS1].bpp;
-+ isp_format_reg_list[9].addr = ISP_REG_SS1IW;
-+ isp_format_reg_list[9].val = (crop->width << 16) + crop->height;
-+ isp_format_reg_list[10].addr = ISP_REG_SS1S;
-+ isp_format_reg_list[10].val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+
-+ crop = &crop_array[ISP_ITIWS].rect;
-+ bpp = crop_array[ISP_ITIWS].bpp;
-+ isp_format_reg_list[11].addr = ISP_REG_ITIIWSR;
-+ isp_format_reg_list[11].val = (crop->height << 16) + crop->width;
-+ isp_format_reg_list[12].addr = ISP_REG_ITIDWLSR;
-+ isp_format_reg_list[12].val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+ isp_format_reg_list[13].addr = ISP_REG_ITIDRLSR;
-+ isp_format_reg_list[13].val = ALIGN(crop->width * bpp / 8, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+
-+ st_info(ST_ISP, "iti left: %d, top: %d, width = %d, height = %d, bpp = %d\n",
-+ crop->left, crop->top, crop->width, crop->height, bpp);
-+
-+ isp_format_reg_list[14].addr = ISP_REG_SENSOR;
-+ isp_format_reg_list[14].val = 0x00000000;
-+ if (type == DVP_SENSOR) {
-+ unsigned int flags = dvp_dev->dvp->flags;
-+
-+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
-+ isp_format_reg_list[14].val |= 0x08;
-+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
-+ isp_format_reg_list[14].val |= 0x04;
-+ } else {
-+ isp_format_reg_list[14].val |= 0x01;
-+ }
-+
-+ isp_load_regs(ispbase, isp_format_settings);
-+ return 0;
-+}
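As a quick sanity check on the stride arithmetic, the standalone snippet below reproduces the two ALIGN() computations above for a 1920-pixel-wide frame (NV12 output and a RAW10 dump). It is illustration only; EXAMPLE_ALIGN() simply mirrors the kernel's ALIGN() for power-of-two alignments.

#include <stdio.h>

/* Mirrors the kernel ALIGN() for power-of-two alignments. */
#define EXAMPLE_ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int width = 1920;

	/* ISP_REG_STRIDE: UO output, NV12 luma, 8 bpp, 8-byte alignment */
	unsigned int uo_stride = EXAMPLE_ALIGN(width * 8 / 8, 8);

	/* ISP_REG_DUMP_CFG_1: RAW10 dump, 10 bpp, 128-byte alignment */
	unsigned int raw_stride = EXAMPLE_ALIGN(width * 10 / 8, 128);

	printf("UO stride  = %u bytes\n", uo_stride);	/* 1920 */
	printf("RAW stride = %u bytes\n", raw_stride);	/* 2400 rounded up to 2432 */
	return 0;
}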
-+
-+static int stf_isp_stream_set(struct stf_isp_dev *isp_dev, int on)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+
-+ void __iomem *ispbase;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ if (on) {
-+#if defined(USE_NEW_CONFIG_SETTING)
-+ isp_load_regs(ispbase, isp_reg_start_settings);
-+#else
-+ reg_set_bit(ispbase, ISP_REG_CSIINTS_ADDR, 0x3FFFF, 0x3000a);
-+ reg_set_bit(ispbase, ISP_REG_IESHD_ADDR, BIT(1) | BIT(0), 0x3);
-+ reg_set_bit(ispbase, ISP_REG_ISP_CTRL_0, BIT(0), 1);
-+#endif //#if defined(USE_NEW_CONFIG_SETTING)
-+ } else {
-+ /* NOTE: Clearing bit 0 of ISP_REG_ISP_CTRL_0 here causes a crash. */
-+ stf_isp_save_ctx_regs(isp_dev);
-+ }
-+
-+ return 0;
-+}
-+
-+static union reg_buf reg_buf;
-+static int stf_isp_reg_read(struct stf_isp_dev *isp_dev, void *arg)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+ struct isp_reg_param *reg_param = arg;
-+ u32 size;
-+ unsigned long r;
-+
-+ if (reg_param->reg_buf == NULL) {
-+ st_err(ST_ISP, "Failed to access register. The pointer is NULL!!!\n");
-+ return -EINVAL;
-+ }
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ size = 0;
-+ switch (reg_param->reg_info.method) {
-+ case STF_ISP_REG_METHOD_ONE_REG:
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SERIES:
-+ if (reg_param->reg_info.length > STF_ISP_REG_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ break;
-+
-+ case STF_ISP_REG_METHOD_MODULE:
-+ /* This mode is not supported in the V4L2 version. */
-+ st_err(ST_ISP, "Reg Read - Failed to access register: method STF_ISP_REG_METHOD_MODULE is not supported\n");
-+ return -ENOTTY;
-+
-+ case STF_ISP_REG_METHOD_TABLE:
-+ if (reg_param->reg_info.length > STF_ISP_REG_TBL_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_TBL_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 2;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_2:
-+ if (reg_param->reg_info.length > STF_ISP_REG_TBL_2_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_TBL_2_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 3;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_3:
-+ if (reg_param->reg_info.length > STF_ISP_REG_TBL_3_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_TBL_3_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 4;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SMPL_PACK:
-+ st_err(ST_ISP, "Reg Read - Failed to access register: method STF_ISP_REG_METHOD_SMPL_PACK is not supported\n");
-+ return -ENOTTY;
-+
-+ case STF_ISP_REG_METHOD_SOFT_RDMA:
-+ // This mode is not supported in the V4L2 version.
-+ st_err(ST_ISP, "Reg Read - Failed to access register: method STF_ISP_REG_METHOD_SOFT_RDMA is not supported\n");
-+ return -ENOTTY;
-+
-+ default:
-+ st_err(ST_ISP, "Failed to access register: method %d is not supported\n",
-+ reg_param->reg_info.method);
-+ return -ENOTTY;
-+ }
-+
-+ memset(&reg_buf, 0, sizeof(union reg_buf));
-+ if (size) {
-+ r = copy_from_user((u8 *)reg_buf.buffer,
-+ (u8 *)reg_param->reg_buf->buffer, size);
-+ if (r) {
-+ st_err(ST_ISP, "Failed to copy the reg_param->reg_buf value from user space\n");
-+ return -EIO;
-+ }
-+ }
-+
-+ size = 0;
-+ switch (reg_param->reg_info.method) {
-+ case STF_ISP_REG_METHOD_ONE_REG:
-+ reg_buf.buffer[0] = reg_read(ispbase, reg_param->reg_info.offset);
-+ size = sizeof(u32);
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SERIES:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ reg_buf.buffer[r] = reg_read(ispbase,
-+ reg_param->reg_info.offset + (r * 4));
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_MODULE:
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ reg_buf.reg_tbl[r].value = reg_read(ispbase,
-+ reg_buf.reg_tbl[r].offset);
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 2;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_2:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ if (reg_buf.reg_tbl2[r].mask) {
-+ reg_buf.reg_tbl2[r].value = (reg_read(ispbase,
-+ reg_buf.reg_tbl2[r].offset)
-+ & reg_buf.reg_tbl2[r].mask);
-+ } else {
-+ reg_buf.reg_tbl2[r].value = reg_read(ispbase,
-+ reg_buf.reg_tbl2[r].offset);
-+ }
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 3;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_3:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ if (reg_buf.reg_tbl3[r].mask) {
-+ reg_buf.reg_tbl3[r].value = (reg_read(ispbase,
-+ reg_buf.reg_tbl3[r].offset)
-+ & reg_buf.reg_tbl3[r].mask);
-+ } else {
-+ reg_buf.reg_tbl3[r].value = reg_read(ispbase,
-+ reg_buf.reg_tbl3[r].offset);
-+ }
-+ if (reg_buf.reg_tbl3[r].delay_ms) {
-+ usleep_range(1000 * reg_buf.reg_tbl3[r].delay_ms,
-+ 1000 * reg_buf.reg_tbl3[r].delay_ms + 100);
-+ }
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 4;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SMPL_PACK:
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SOFT_RDMA:
-+ break;
-+
-+ default:
-+ break;
-+ }
-+
-+ r = copy_to_user((u8 *)reg_param->reg_buf->buffer, (u8 *)reg_buf.buffer,
-+ size);
-+ if (r) {
-+ st_err(ST_ISP, "Failed to copy the register data back to user space\n");
-+ return -EIO;
-+ }
-+
-+ return 0;
-+}
-+
-+static int stf_isp_soft_rdma(struct stf_isp_dev *isp_dev, u32 rdma_addr)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+ struct isp_rdma_info *rdma_info = NULL;
-+ s32 len;
-+ u32 offset;
-+ int ret = 0;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ rdma_info = phys_to_virt(rdma_addr);
-+ while (1) {
-+ if (rdma_info->tag == RDMA_WR_ONE) {
-+ reg_write(ispbase, rdma_info->offset, rdma_info->param);
-+ rdma_info++;
-+ } else if (rdma_info->tag == RDMA_WR_SRL) {
-+ offset = rdma_info->offset;
-+ len = rdma_info->param;
-+ rdma_info++;
-+ while (len > 0) {
-+ reg_write(ispbase, offset, rdma_info->param);
-+ offset += 4;
-+ len--;
-+ if (len > 0) {
-+ reg_write(ispbase, offset, rdma_info->value);
-+ len--;
-+ }
-+ offset += 4;
-+ rdma_info++;
-+ }
-+ } else if (rdma_info->tag == RDMA_LINK) {
-+ rdma_info = phys_to_virt(rdma_info->param);
-+ } else if (rdma_info->tag == RDMA_SINT) {
-+ /* Software does not support this command. */
-+ rdma_info++;
-+ } else if (rdma_info->tag == RDMA_END) {
-+ break;
-+ } else
-+ rdma_info++;
-+ }
-+
-+ return ret;
-+}
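To make the RDMA command format explicit, here is an illustrative chain in the layout this parser (and the SMPL_PACK path below) walks. It relies only on struct isp_rdma_info and the RDMA_* tags declared in stf_isp_ioctl.h later in this patch; the register offsets and values are taken from the configuration tables above and carry no special meaning.

/* Illustration only: writes 0x0A1C once, then 0x0E40..0x0E4C serially. */
static const struct isp_rdma_info example_chain[] = {
	/* RDMA_WR_ONE: write .param to .offset */
	{ .param = 0x00000032, .offset = 0x0A1C, .tag = RDMA_WR_ONE },
	/* RDMA_WR_SRL: write the following .param/.value pairs to 4 consecutive registers */
	{ .param = 4,          .offset = 0x0E40, .tag = RDMA_WR_SRL },
	{ .param = 0x0000004C, .value = 0x00000097 },	/* 0x0E40, 0x0E44 */
	{ .param = 0x0000001D, .value = 0x000001D5 },	/* 0x0E48, 0x0E4C */
	/* RDMA_END terminates the walk */
	{ .tag = RDMA_END },
};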
-+
-+static int stf_isp_reg_write(struct stf_isp_dev *isp_dev, void *arg)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+ struct isp_reg_param *reg_param = arg;
-+ struct isp_rdma_info *rdma_info = NULL;
-+ s32 len;
-+ u32 offset;
-+ u32 size;
-+ unsigned long r;
-+ int ret = 0;
-+
-+ if ((reg_param->reg_buf == NULL)
-+ && (reg_param->reg_info.method != STF_ISP_REG_METHOD_SOFT_RDMA)) {
-+ st_err(ST_ISP, "Failed to access register: the register buffer pointer is NULL\n");
-+ return -EINVAL;
-+ }
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ size = 0;
-+ switch (reg_param->reg_info.method) {
-+ case STF_ISP_REG_METHOD_ONE_REG:
-+ size = sizeof(u32);
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SERIES:
-+ if (reg_param->reg_info.length > STF_ISP_REG_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_MODULE:
-+ // This mode is not supported in the V4L2 version.
-+ st_err(ST_ISP, "Reg Write - Failed to access register: method STF_ISP_REG_METHOD_MODULE is not supported\n");
-+ return -ENOTTY;
-+
-+ case STF_ISP_REG_METHOD_TABLE:
-+ if (reg_param->reg_info.length > STF_ISP_REG_TBL_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_TBL_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 2;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_2:
-+ if (reg_param->reg_info.length > STF_ISP_REG_TBL_2_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_TBL_2_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 3;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_3:
-+ if (reg_param->reg_info.length > STF_ISP_REG_TBL_3_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_TBL_3_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 4;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SMPL_PACK:
-+ if (reg_param->reg_info.length > STF_ISP_REG_SMPL_PACK_BUF_SIZE) {
-+ st_err(ST_ISP, "Failed to access register: length 0x%08X exceeds the maximum 0x%08X\n",
-+ reg_param->reg_info.length, STF_ISP_REG_SMPL_PACK_BUF_SIZE);
-+ return -EINVAL;
-+ }
-+ size = sizeof(u32) * reg_param->reg_info.length * 2;
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SOFT_RDMA:
-+ break;
-+
-+ default:
-+ st_err(ST_ISP, "Failed to access register: method %d is not supported\n",
-+ reg_param->reg_info.method);
-+ return -ENOTTY;
-+ }
-+
-+ memset(&reg_buf, 0, sizeof(union reg_buf));
-+ if (size) {
-+ r = copy_from_user((u8 *)reg_buf.buffer,
-+ (u8 *)reg_param->reg_buf->buffer, size);
-+ if (r) {
-+ st_err(ST_ISP, "Failed to copy the reg_param->reg_buf value from user space\n");
-+ return -EIO;
-+ }
-+ }
-+
-+ switch (reg_param->reg_info.method) {
-+ case STF_ISP_REG_METHOD_ONE_REG:
-+ reg_write(ispbase, reg_param->reg_info.offset, reg_buf.buffer[0]);
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SERIES:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ reg_write(ispbase, reg_param->reg_info.offset + (r * 4),
-+ reg_buf.buffer[r]);
-+ }
-+ break;
-+
-+ case STF_ISP_REG_METHOD_MODULE:
-+ /* This mode is not supported in the V4L2 version. */
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ reg_write(ispbase, reg_buf.reg_tbl[r].offset,
-+ reg_buf.reg_tbl[r].value);
-+ }
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_2:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ if (reg_buf.reg_tbl2[r].mask) {
-+ reg_set_bit(ispbase, reg_buf.reg_tbl2[r].offset,
-+ reg_buf.reg_tbl2[r].mask, reg_buf.reg_tbl2[r].value);
-+ } else {
-+ reg_write(ispbase, reg_buf.reg_tbl2[r].offset,
-+ reg_buf.reg_tbl2[r].value);
-+ }
-+ }
-+ break;
-+
-+ case STF_ISP_REG_METHOD_TABLE_3:
-+ for (r = 0; r < reg_param->reg_info.length; r++) {
-+ if (reg_buf.reg_tbl3[r].mask) {
-+ reg_set_bit(ispbase, reg_buf.reg_tbl3[r].offset,
-+ reg_buf.reg_tbl3[r].mask, reg_buf.reg_tbl3[r].value);
-+ } else {
-+ reg_write(ispbase, reg_buf.reg_tbl3[r].offset,
-+ reg_buf.reg_tbl3[r].value);
-+ }
-+ if (reg_buf.reg_tbl3[r].delay_ms) {
-+ usleep_range(1000 * reg_buf.reg_tbl3[r].delay_ms,
-+ 1000 * reg_buf.reg_tbl3[r].delay_ms + 100);
-+ }
-+ }
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SMPL_PACK:
-+ size = reg_param->reg_info.length;
-+ rdma_info = &reg_buf.rdma_cmd[0];
-+ while (size) {
-+ if (rdma_info->tag == RDMA_WR_ONE) {
-+ reg_write(ispbase, rdma_info->offset, rdma_info->param);
-+ rdma_info++;
-+ size--;
-+ } else if (rdma_info->tag == RDMA_WR_SRL) {
-+ offset = rdma_info->offset;
-+ len = rdma_info->param;
-+ rdma_info++;
-+ size--;
-+ while (size && (len > 0)) {
-+ reg_write(ispbase, offset, rdma_info->param);
-+ offset += 4;
-+ len--;
-+ if (len > 0) {
-+ reg_write(ispbase, offset, rdma_info->value);
-+ len--;
-+ }
-+ offset += 4;
-+ rdma_info++;
-+ size--;
-+ }
-+ } else if (rdma_info->tag == RDMA_END) {
-+ break;
-+ } else {
-+ rdma_info++;
-+ size--;
-+ }
-+ }
-+ break;
-+
-+ case STF_ISP_REG_METHOD_SOFT_RDMA:
-+ /*
-+ * Simulate the hardware RDMA behavior to debug and verify
-+ * the RDMA chain.
-+ */
-+ ret = stf_isp_soft_rdma(isp_dev, reg_param->reg_info.offset);
-+ break;
-+
-+ default:
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+static int stf_isp_shadow_trigger(struct stf_isp_dev *isp_dev)
-+{
-+ struct stf_vin_dev *vin = isp_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+
-+ ispbase = stf_isp_get_ispbase(vin);
-+
-+ // shadow update
-+ reg_set_bit(ispbase, ISP_REG_CSIINTS_ADDR, (BIT(17) | BIT(16)), 0x30000);
-+ reg_set_bit(ispbase, ISP_REG_IESHD_ADDR, (BIT(1) | BIT(0)), 0x3);
-+ return 0;
-+}
-+
-+void dump_isp_reg(void __iomem *ispbase)
-+{
-+ int j;
-+ u32 addr, val;
-+
-+ st_debug(ST_ISP, "DUMP ISP register:\n -- isp_reg_init_settings --\n");
-+ for (j = 0; j < isp_reg_init_settings->regval_num; j++) {
-+ addr = isp_reg_init_settings->regval[j].addr;
-+ val = ioread32(ispbase + addr);
-+ st_debug(ST_ISP, "{0x%08x, 0x%08x}\n", addr, val);
-+ }
-+
-+ st_debug(ST_ISP, " --- isp_format_settings ---\n");
-+ for (j = 0; j < isp_format_settings->regval_num; j++) {
-+ addr = isp_format_settings->regval[j].addr;
-+ val = ioread32(ispbase + addr);
-+ st_debug(ST_ISP, "{0x%08x, 0x%08x}\n", addr, val);
-+ }
-+
-+ val = ioread32(ispbase + ISP_REG_Y_PLANE_START_ADDR);
-+ st_debug(ST_ISP, "-- ISP_REG_Y_PLANE_START_ADDR --\n {0x%08x, 0x%08x}\n",
-+ ISP_REG_Y_PLANE_START_ADDR, val);
-+ val = ioread32(ispbase + ISP_REG_UV_PLANE_START_ADDR);
-+ st_debug(ST_ISP, "-- ISP_REG_UV_PLANE_START_ADDR --\n {0x%08x, 0x%08x}\n",
-+ ISP_REG_UV_PLANE_START_ADDR, val);
-+ val = ioread32(ispbase + ISP_REG_DUMP_CFG_0);
-+ st_debug(ST_ISP, "-- ISP_REG_DUMP_CFG_0 --\n {0x%08x, 0x%08x}\n",
-+ ISP_REG_DUMP_CFG_0, val);
-+ val = ioread32(ispbase + ISP_REG_DUMP_CFG_1);
-+ st_debug(ST_ISP, " --- ISP_REG_DUMP_CFG_1 ---\n {0x%08x, 0x%08x}\n",
-+ ISP_REG_DUMP_CFG_1, val);
-+
-+ st_debug(ST_ISP, " --- isp_reg_start_settings ---\n");
-+ for (j = 0; j < isp_reg_start_settings->regval_num; j++) {
-+ addr = isp_reg_start_settings->regval[j].addr;
-+ val = ioread32(ispbase + addr);
-+ st_debug(ST_ISP, "{0x%08x, 0x%08x}\n", addr, val);
-+ }
-+}
-+
-+struct isp_hw_ops isp_ops = {
-+ .isp_clk_enable = stf_isp_clk_enable,
-+ .isp_clk_disable = stf_isp_clk_disable,
-+ .isp_reset = stf_isp_reset,
-+ .isp_config_set = stf_isp_config_set,
-+ .isp_set_format = stf_isp_set_format,
-+ .isp_stream_set = stf_isp_stream_set,
-+ .isp_reg_read = stf_isp_reg_read,
-+ .isp_reg_write = stf_isp_reg_write,
-+ .isp_shadow_trigger = stf_isp_shadow_trigger,
-+};
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_isp_ioctl.h
-@@ -0,0 +1,133 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_ISP_IOCTL_H
-+#define STF_ISP_IOCTL_H
-+
-+
-+#include <media/v4l2-ctrls.h>
-+
-+
-+#define FILENAME_MAX_LEN 30
-+
-+#define ISP_IOC ('V')
-+#define STF_ISP_REG_BUF_SIZE (768)
-+#define STF_ISP_REG_TBL_BUF_SIZE (STF_ISP_REG_BUF_SIZE / 2)
-+#define STF_ISP_REG_TBL_2_BUF_SIZE (STF_ISP_REG_BUF_SIZE / 3)
-+#define STF_ISP_REG_TBL_3_BUF_SIZE (STF_ISP_REG_BUF_SIZE / 4)
-+#define STF_ISP_REG_SMPL_PACK_BUF_SIZE (STF_ISP_REG_BUF_SIZE / 2)
-+#define RDMA_WR_ONE (0xA0)
-+#define RDMA_WR_SRL (0xA1)
-+#define RDMA_LINK (0xA2)
-+#define RDMA_SINT (0xA3)
-+#define RDMA_END (0xAF)
-+#define ENABLE_SS0_SS1
-+
-+enum _STF_ISP_IOCTL {
-+ STF_ISP_IOCTL_LOAD_FW = BASE_VIDIOC_PRIVATE + 1,
-+ STF_ISP_IOCTL_DMABUF_ALLOC,
-+ STF_ISP_IOCTL_DMABUF_FREE,
-+ STF_ISP_IOCTL_GET_HW_VER,
-+ STF_ISP_IOCTL_REG,
-+ STF_ISP_IOCTL_SHADOW_LOCK,
-+ STF_ISP_IOCTL_SHADOW_UNLOCK,
-+ STF_ISP_IOCTL_SHADOW_UNLOCK_N_TRIGGER,
-+ STF_ISP_IOCTL_SET_USER_CONFIG_ISP,
-+ STF_ISP_IOCTL_MAX
-+};
-+
-+enum _STF_ISP_REG_METHOD {
-+ STF_ISP_REG_METHOD_ONE_REG = 0,
-+ STF_ISP_REG_METHOD_SERIES,
-+ STF_ISP_REG_METHOD_MODULE,
-+ STF_ISP_REG_METHOD_TABLE,
-+ STF_ISP_REG_METHOD_TABLE_2,
-+ STF_ISP_REG_METHOD_TABLE_3,
-+ STF_ISP_REG_METHOD_SMPL_PACK,
-+ STF_ISP_REG_METHOD_SOFT_RDMA,
-+ STF_ISP_REG_METHOD_MAX
-+};
-+
-+
-+struct stfisp_fw_info {
-+ char __user filename[FILENAME_MAX_LEN];
-+};
-+
-+struct dmabuf_create {
-+ __u32 fd;
-+ __u32 size;
-+ __u32 paddr;
-+};
-+
-+struct isp_rdma_info {
-+ u32 param;
-+ union {
-+ u32 value;
-+ struct {
-+ u32 offset : 24;
-+ u32 tag : 8;
-+ };
-+ };
-+};
-+
-+struct isp_reg_info {
-+ /** @brief [in] access method of register */
-+ u8 method;
-+ /** @brief [in] offset indicating which register will be read or written */
-+ u32 offset;
-+ /** @brief [in] length indicating how many registers will be read or written */
-+ u32 length;
-+};
-+
-+union reg_buf {
-+ u32 buffer[STF_ISP_REG_BUF_SIZE];
-+ struct {
-+ u32 offset;
-+ u32 value;
-+ } reg_tbl[STF_ISP_REG_TBL_BUF_SIZE];
-+ struct {
-+ u32 offset;
-+ u32 value;
-+ u32 mask;
-+ } reg_tbl2[STF_ISP_REG_TBL_2_BUF_SIZE];
-+ struct {
-+ u32 offset;
-+ u32 value;
-+ u32 mask;
-+ u32 delay_ms;
-+ } reg_tbl3[STF_ISP_REG_TBL_3_BUF_SIZE];
-+ struct isp_rdma_info rdma_cmd[STF_ISP_REG_SMPL_PACK_BUF_SIZE];
-+};
-+
-+struct isp_reg_param {
-+ /** @brief [in, out] register read/write information */
-+ struct isp_reg_info reg_info;
-+ /** @brief [in, out] buffer */
-+ union reg_buf *reg_buf;
-+};
-+
-+
-+#define VIDIOC_STFISP_LOAD_FW \
-+ _IOW(ISP_IOC, STF_ISP_IOCTL_LOAD_FW, struct stfisp_fw_info)
-+#define VIDIOC_STF_DMABUF_ALLOC \
-+ _IOWR(ISP_IOC, STF_ISP_IOCTL_DMABUF_ALLOC, struct dmabuf_create)
-+#define VIDIOC_STF_DMABUF_FREE \
-+ _IOWR(ISP_IOC, STF_ISP_IOCTL_DMABUF_FREE, struct dmabuf_create)
-+#define VIDIOC_STFISP_GET_REG \
-+ _IOWR(ISP_IOC, STF_ISP_IOCTL_REG, struct isp_reg_param)
-+#define VIDIOC_STFISP_SET_REG \
-+ _IOW(ISP_IOC, STF_ISP_IOCTL_REG, struct isp_reg_param)
-+#define VIDIOC_STFISP_SHADOW_LOCK \
-+ _IO(ISP_IOC, STF_ISP_IOCTL_SHADOW_LOCK)
-+#define VIDIOC_STFISP_SHADOW_UNLOCK \
-+ _IO(ISP_IOC, STF_ISP_IOCTL_SHADOW_UNLOCK)
-+#define VIDIOC_STFISP_SHADOW_UNLOCK_N_TRIGGER \
-+ _IO(ISP_IOC, STF_ISP_IOCTL_SHADOW_UNLOCK_N_TRIGGER)
-+#define VIDIOC_STFISP_SET_USER_CONFIG_ISP \
-+ _IO(ISP_IOC, STF_ISP_IOCTL_SET_USER_CONFIG_ISP)
-+
-+
-+#endif /* STF_ISP_IOCTL_H */
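A minimal user-space sketch of the register-read ioctl defined above. It assumes a user-space copy of this header (with its u32/u8 types mapped to __u32/__u8) and that the ISP is exposed through a v4l-subdev node; the device path, register offset and length below are examples only, not a documented interface.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "stf_isp_ioctl.h"	/* assumed user-space copy of the header above */

int main(void)
{
	union reg_buf buf;
	struct isp_reg_param param;
	unsigned int i;
	int fd = open("/dev/v4l-subdev1", O_RDWR);	/* example node only */

	if (fd < 0)
		return 1;

	memset(&param, 0, sizeof(param));
	param.reg_info.method = STF_ISP_REG_METHOD_SERIES;
	param.reg_info.offset = 0x0E40;	/* R2Y coefficient block */
	param.reg_info.length = 9;
	param.reg_buf = &buf;

	if (ioctl(fd, VIDIOC_STFISP_GET_REG, &param) == 0)
		for (i = 0; i < param.reg_info.length; i++)
			printf("0x%08X = 0x%08X\n", 0x0E40 + i * 4, buf.buffer[i]);

	close(fd);
	return 0;
}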
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_video.c
-@@ -0,0 +1,1552 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include "stf_video.h"
-+#include <media/media-entity.h>
-+#include <media/v4l2-event.h>
-+#include <media/v4l2-mc.h>
-+#include <media/videobuf2-dma-sg.h>
-+#include <media/videobuf2-vmalloc.h>
-+#include <media/videobuf2-dma-contig.h>
-+
-+static const struct stfcamss_format_info formats_pix_st7110_wr[] = {
-+ { MEDIA_BUS_FMT_AYUV8_1X32, V4L2_PIX_FMT_AYUV32, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 32 } },
-+ { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-+ { MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_PIX_FMT_RGB565, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-+ { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
-+ { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
-+ { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
-+ { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 8 } },
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+};
-+
-+static const struct stfcamss_format_info formats_raw_st7110_isp[] = {
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+};
-+
-+static const struct stfcamss_format_info formats_pix_st7110_isp[] = {
-+ // { MEDIA_BUS_FMT_YUYV12_2X12, V4L2_PIX_FMT_NV12M, 2,
-+ // { { 1, 1 }, { 1, 1 } }, { { 1, 1 }, { 1, 1 } }, { 8 , 4 } },
-+ { MEDIA_BUS_FMT_Y12_1X12, V4L2_PIX_FMT_NV12, 1,
-+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
-+ { MEDIA_BUS_FMT_Y12_1X12, V4L2_PIX_FMT_NV21, 1,
-+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
-+};
-+
-+static const struct stfcamss_format_info formats_st7110_isp_iti[] = {
-+ // raw format
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 10 } },
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 1,
-+ { { 1, 1 } }, { { 1, 1 } }, { 12 } },
-+
-+ // YUV420
-+ { MEDIA_BUS_FMT_Y12_1X12, V4L2_PIX_FMT_NV12, 1,
-+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
-+ { MEDIA_BUS_FMT_Y12_1X12, V4L2_PIX_FMT_NV21, 1,
-+ { { 1, 1 } }, { { 2, 3 } }, { 8 } },
-+
-+ // YUV444
-+ { MEDIA_BUS_FMT_YUV8_1X24, V4L2_PIX_FMT_NV24, 1,
-+ { { 1, 1 } }, { { 1, 3 } }, { 8 } },
-+ { MEDIA_BUS_FMT_VUY8_1X24, V4L2_PIX_FMT_NV42, 1,
-+ { { 1, 1 } }, { { 1, 3 } }, { 8 } },
-+};
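Each row in these tables is a struct stfcamss_format_info: media-bus code, V4L2 fourcc, plane count, per-plane horizontal and vertical subsampling fractions, and per-plane bits per pixel, as consumed by __video_try_fmt() below. The sketch of the layout is inferred from that usage (the array sizes here are illustrative, not the driver's definition). For example, the NV12 row { MEDIA_BUS_FMT_Y12_1X12, V4L2_PIX_FMT_NV12, 1, { { 1, 1 } }, { { 2, 3 } }, { 8 } } describes one plane at full width, 3/2 lines of storage per image line (luma plus half-height chroma), at 8 bits per pixel.

/* Field layout inferred from __video_try_fmt(); array sizes are illustrative. */
struct stfcamss_format_info {
	u32 code;			/* media-bus code on the connected pad     */
	u32 pixelformat;		/* V4L2 fourcc exposed to user space       */
	u8 planes;			/* number of memory planes                 */
	struct v4l2_fract hsub[3];	/* per-plane horizontal subsampling        */
	struct v4l2_fract vsub[3];	/* per-plane vertical subsampling          */
	u8 bpp[3];			/* per-plane bits per pixel                */
};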
-+
-+static int video_find_format(u32 code, u32 pixelformat,
-+ const struct stfcamss_format_info *formats,
-+ unsigned int nformats)
-+{
-+ int i;
-+
-+ for (i = 0; i < nformats; i++) {
-+ if (formats[i].code == code &&
-+ formats[i].pixelformat == pixelformat)
-+ return i;
-+ }
-+
-+ for (i = 0; i < nformats; i++)
-+ if (formats[i].code == code)
-+ return i;
-+
-+ for (i = 0; i < nformats; i++)
-+ if (formats[i].pixelformat == pixelformat)
-+ return i;
-+
-+ return -EINVAL;
-+}
-+
-+static int __video_try_fmt(struct stfcamss_video *video,
-+ struct v4l2_format *f, int is_mp)
-+{
-+ struct v4l2_pix_format *pix;
-+ struct v4l2_pix_format_mplane *pix_mp;
-+ const struct stfcamss_format_info *fi;
-+ u32 width, height;
-+ u32 bpl;
-+ int i, j;
-+
-+ st_debug(ST_VIDEO, "%s, fmt.type = 0x%x\n", __func__, f->type);
-+ pix = &f->fmt.pix;
-+ pix_mp = &f->fmt.pix_mp;
-+
-+ if (is_mp) {
-+ for (i = 0; i < video->nformats; i++)
-+ if (pix_mp->pixelformat
-+ == video->formats[i].pixelformat)
-+ break;
-+
-+ if (i == video->nformats)
-+ i = 0; /* default format */
-+
-+ fi = &video->formats[i];
-+ width = pix_mp->width;
-+ height = pix_mp->height;
-+
-+ memset(pix_mp, 0, sizeof(*pix_mp));
-+
-+ pix_mp->pixelformat = fi->pixelformat;
-+ pix_mp->width = clamp_t(u32, width, STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ pix_mp->height = clamp_t(u32, height, STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+ pix_mp->num_planes = fi->planes;
-+ for (j = 0; j < pix_mp->num_planes; j++) {
-+ bpl = pix_mp->width / fi->hsub[j].numerator *
-+ fi->hsub[j].denominator * fi->bpp[j] / 8;
-+ bpl = ALIGN(bpl, video->bpl_alignment);
-+ pix_mp->plane_fmt[j].bytesperline = bpl;
-+ pix_mp->plane_fmt[j].sizeimage = pix_mp->height /
-+ fi->vsub[j].numerator
-+ * fi->vsub[j].denominator * bpl;
-+ }
-+
-+ pix_mp->field = V4L2_FIELD_NONE;
-+ pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
-+ pix_mp->flags = 0;
-+ pix_mp->ycbcr_enc =
-+ V4L2_MAP_YCBCR_ENC_DEFAULT(pix_mp->colorspace);
-+ pix_mp->quantization =
-+ V4L2_MAP_QUANTIZATION_DEFAULT(true,
-+ pix_mp->colorspace, pix_mp->ycbcr_enc);
-+ pix_mp->xfer_func =
-+ V4L2_MAP_XFER_FUNC_DEFAULT(pix_mp->colorspace);
-+
-+ st_info(ST_VIDEO, "w, h = %d, %d, bpp = %d\n", pix_mp->width,
-+ pix_mp->height, fi->bpp[0]);
-+ st_info(ST_VIDEO, "i = %d, p = %d, s = 0x%x\n", i,
-+ pix_mp->num_planes, pix_mp->plane_fmt[0].sizeimage);
-+
-+ } else {
-+ for (i = 0; i < video->nformats; i++)
-+ if (pix->pixelformat == video->formats[i].pixelformat)
-+ break;
-+
-+ if (i == video->nformats)
-+ i = 0; /* default format */
-+
-+ fi = &video->formats[i];
-+ width = pix->width;
-+ height = pix->height;
-+
-+ memset(pix, 0, sizeof(*pix));
-+
-+ pix->pixelformat = fi->pixelformat;
-+ pix->width = clamp_t(u32, width, STFCAMSS_FRAME_MIN_WIDTH,
-+ STFCAMSS_FRAME_MAX_WIDTH);
-+ pix->height = clamp_t(u32, height, STFCAMSS_FRAME_MIN_HEIGHT,
-+ STFCAMSS_FRAME_MAX_HEIGHT);
-+ bpl = pix->width / fi->hsub[0].numerator *
-+ fi->hsub[0].denominator * fi->bpp[0] / 8;
-+ bpl = ALIGN(bpl, video->bpl_alignment);
-+ pix->bytesperline = bpl;
-+ pix->sizeimage = pix->height /
-+ fi->vsub[0].numerator
-+ * fi->vsub[0].denominator * bpl;
-+
-+ pix->field = V4L2_FIELD_NONE;
-+ pix->colorspace = V4L2_COLORSPACE_SRGB;
-+ pix->flags = 0;
-+ pix->ycbcr_enc =
-+ V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
-+ pix->quantization =
-+ V4L2_MAP_QUANTIZATION_DEFAULT(true,
-+ pix->colorspace, pix->ycbcr_enc);
-+ pix->xfer_func =
-+ V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
-+
-+ st_info(ST_VIDEO, "w, h = %d, %d, bpp = %d\n", pix->width,
-+ pix->height, fi->bpp[0]);
-+ st_info(ST_VIDEO, "i = %d, s = 0x%x\n", i, pix->sizeimage);
-+ }
-+ return 0;
-+}
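To make the plane-size computation concrete, the standalone snippet below reproduces it for the single-plane NV12 entry (hsub 1/1, vsub 2/3, 8 bpp) at 1920x1080. It is an illustration of the arithmetic above, not driver code.

#include <stdio.h>

int main(void)
{
	unsigned int width = 1920, height = 1080;
	unsigned int bpp = 8;				/* fi->bpp[0]           */
	unsigned int hsub_num = 1, hsub_den = 1;	/* fi->hsub[0]          */
	unsigned int vsub_num = 2, vsub_den = 3;	/* fi->vsub[0]          */
	unsigned int align = 8;				/* video->bpl_alignment */
	unsigned int bpl, sizeimage;

	bpl = width / hsub_num * hsub_den * bpp / 8;
	bpl = (bpl + align - 1) / align * align;	/* ALIGN(bpl, align)    */
	sizeimage = height / vsub_num * vsub_den * bpl;

	printf("bytesperline = %u\n", bpl);		/* 1920                 */
	printf("sizeimage    = %u\n", sizeimage);	/* 3110400 = 1.5 * WxH  */
	return 0;
}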
-+
-+static int stf_video_init_format(struct stfcamss_video *video, int is_mp)
-+{
-+ int ret;
-+ struct v4l2_format format = {
-+ .type = video->type,
-+ .fmt.pix = {
-+ .width = 1920,
-+ .height = 1080,
-+ .pixelformat = V4L2_PIX_FMT_RGB565,
-+ },
-+ };
-+
-+ ret = __video_try_fmt(video, &format, is_mp);
-+
-+ if (ret < 0)
-+ return ret;
-+
-+ video->active_fmt = format;
-+
-+ return 0;
-+}
-+
-+static int video_queue_setup(struct vb2_queue *q,
-+ unsigned int *num_buffers, unsigned int *num_planes,
-+ unsigned int sizes[], struct device *alloc_devs[])
-+{
-+ struct stfcamss_video *video = vb2_get_drv_priv(q);
-+ const struct v4l2_pix_format *format =
-+ &video->active_fmt.fmt.pix;
-+ const struct v4l2_pix_format_mplane *format_mp =
-+ &video->active_fmt.fmt.pix_mp;
-+ unsigned int i;
-+
-+ st_debug(ST_VIDEO, "%s, planes = %d\n", __func__, *num_planes);
-+
-+ if (video->is_mp) {
-+ if (*num_planes) {
-+ if (*num_planes != format_mp->num_planes)
-+ return -EINVAL;
-+
-+ for (i = 0; i < *num_planes; i++)
-+ if (sizes[i] <
-+ format_mp->plane_fmt[i].sizeimage)
-+ return -EINVAL;
-+
-+ return 0;
-+ }
-+
-+ *num_planes = format_mp->num_planes;
-+
-+ for (i = 0; i < *num_planes; i++)
-+ sizes[i] = format_mp->plane_fmt[i].sizeimage;
-+ } else {
-+ if (*num_planes) {
-+ if (*num_planes != 1)
-+ return -EINVAL;
-+
-+ if (sizes[0] < format->sizeimage)
-+ return -EINVAL;
-+ }
-+
-+ *num_planes = 1;
-+ sizes[0] = format->sizeimage;
-+ if (!sizes[0])
-+ st_err(ST_VIDEO, "%s: error size is zero!!!\n", __func__);
-+ }
-+ if ((stf_vin_map_isp_pad(video->id, STF_ISP_PAD_SRC)
-+ == STF_ISP_PAD_SRC_SCD_Y) &&
-+ sizes[0] < ISP_SCD_Y_BUFFER_SIZE) {
-+ sizes[0] = ISP_SCD_Y_BUFFER_SIZE;
-+ }
-+
-+ st_info(ST_VIDEO, "%s, planes = %d, size = %d\n",
-+ __func__, *num_planes, sizes[0]);
-+ return 0;
-+}
-+
-+static int video_buf_init(struct vb2_buffer *vb)
-+{
-+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
-+ struct stfcamss_buffer *buffer =
-+ container_of(vbuf, struct stfcamss_buffer, vb);
-+ const struct v4l2_pix_format *fmt = &video->active_fmt.fmt.pix;
-+ const struct v4l2_pix_format_mplane *fmt_mp =
-+ &video->active_fmt.fmt.pix_mp;
-+ dma_addr_t *paddr;
-+ unsigned int i;
-+
-+ buffer->sizeimage = 0;
-+
-+ if (video->is_mp) {
-+ for (i = 0; i < fmt_mp->num_planes; i++) {
-+ paddr = vb2_plane_cookie(vb, i);
-+ buffer->addr[i] = *paddr;
-+ buffer->sizeimage += vb2_plane_size(vb, i);
-+ }
-+
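-+ /*
-+ * Semi-planar YUV carried in a single plane: derive the chroma
-+ * address from the end of the luma plane.
-+ */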
-+ if (fmt_mp->num_planes == 1
-+ && (fmt_mp->pixelformat == V4L2_PIX_FMT_NV12
-+ || fmt_mp->pixelformat == V4L2_PIX_FMT_NV21
-+ || fmt_mp->pixelformat == V4L2_PIX_FMT_NV16
-+ || fmt_mp->pixelformat == V4L2_PIX_FMT_NV61))
-+ buffer->addr[1] = buffer->addr[0] +
-+ fmt_mp->plane_fmt[0].bytesperline *
-+ fmt_mp->height;
-+ } else {
-+ paddr = vb2_plane_cookie(vb, 0);
-+ buffer->sizeimage = vb2_plane_size(vb, 0);
-+ buffer->addr[0] = *paddr;
-+ if (fmt->pixelformat == V4L2_PIX_FMT_NV12
-+ || fmt->pixelformat == V4L2_PIX_FMT_NV21
-+ || fmt->pixelformat == V4L2_PIX_FMT_NV16
-+ || fmt->pixelformat == V4L2_PIX_FMT_NV61)
-+ buffer->addr[1] = buffer->addr[0] +
-+ fmt->bytesperline *
-+ fmt->height;
-+ }
-+
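-+ /*
-+ * The SCD_Y buffer holds two regions split at ISP_YHIST_BUFFER_SIZE;
-+ * keep a CPU mapping so the statistics can be read by the driver.
-+ */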
-+ if (stf_vin_map_isp_pad(video->id, STF_ISP_PAD_SRC)
-+ == STF_ISP_PAD_SRC_SCD_Y) {
-+ buffer->addr[1] = buffer->addr[0] + ISP_YHIST_BUFFER_SIZE;
-+ buffer->vaddr_sc = vb2_plane_vaddr(vb, 0);
-+ }
-+
-+ return 0;
-+}
-+
-+static int video_buf_prepare(struct vb2_buffer *vb)
-+{
-+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
-+ const struct v4l2_pix_format *fmt = &video->active_fmt.fmt.pix;
-+ const struct v4l2_pix_format_mplane *fmt_mp =
-+ &video->active_fmt.fmt.pix_mp;
-+ unsigned int i;
-+
-+ if (video->is_mp) {
-+ for (i = 0; i < fmt_mp->num_planes; i++) {
-+ if (fmt_mp->plane_fmt[i].sizeimage
-+ > vb2_plane_size(vb, i))
-+ return -EINVAL;
-+
-+ vb2_set_plane_payload(vb, i,
-+ fmt_mp->plane_fmt[i].sizeimage);
-+ }
-+ } else {
-+ if (fmt->sizeimage > vb2_plane_size(vb, 0)) {
-+ st_err(ST_VIDEO, "sizeimage = %d, plane size = %d\n",
-+ fmt->sizeimage, (unsigned int)vb2_plane_size(vb, 0));
-+ return -EINVAL;
-+ }
-+ vb2_set_plane_payload(vb, 0, fmt->sizeimage);
-+ }
-+
-+ vbuf->field = V4L2_FIELD_NONE;
-+
-+ return 0;
-+}
-+
-+static void video_buf_queue(struct vb2_buffer *vb)
-+{
-+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-+ struct stfcamss_video *video = vb2_get_drv_priv(vb->vb2_queue);
-+ struct stfcamss_buffer *buffer =
-+ container_of(vbuf, struct stfcamss_buffer, vb);
-+
-+ video->ops->queue_buffer(video, buffer);
-+}
-+
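-+/*
-+ * Convert a media bus format to a multi-planar pixel format, deriving
-+ * bytesperline and sizeimage for each plane from its subsampling factors
-+ * and bits per pixel.
-+ */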
-+static int video_mbus_to_pix_mp(const struct v4l2_mbus_framefmt *mbus,
-+ struct v4l2_pix_format_mplane *pix,
-+ const struct stfcamss_format_info *f,
-+ unsigned int alignment)
-+{
-+ unsigned int i;
-+ u32 bytesperline;
-+
-+ memset(pix, 0, sizeof(*pix));
-+ v4l2_fill_pix_format_mplane(pix, mbus);
-+ pix->pixelformat = f->pixelformat;
-+ pix->num_planes = f->planes;
-+ for (i = 0; i < pix->num_planes; i++) {
-+ bytesperline = pix->width / f->hsub[i].numerator *
-+ f->hsub[i].denominator * f->bpp[i] / 8;
-+ bytesperline = ALIGN(bytesperline, alignment);
-+ pix->plane_fmt[i].bytesperline = bytesperline;
-+ pix->plane_fmt[i].sizeimage = pix->height /
-+ f->vsub[i].numerator * f->vsub[i].denominator *
-+ bytesperline;
-+ }
-+
-+ return 0;
-+}
-+
-+static int video_mbus_to_pix(const struct v4l2_mbus_framefmt *mbus,
-+ struct v4l2_pix_format *pix,
-+ const struct stfcamss_format_info *f,
-+ unsigned int alignment)
-+{
-+ u32 bytesperline;
-+
-+ memset(pix, 0, sizeof(*pix));
-+ v4l2_fill_pix_format(pix, mbus);
-+ pix->pixelformat = f->pixelformat;
-+ bytesperline = pix->width / f->hsub[0].numerator *
-+ f->hsub[0].denominator * f->bpp[0] / 8;
-+ bytesperline = ALIGN(bytesperline, alignment);
-+ pix->bytesperline = bytesperline;
-+ pix->sizeimage = pix->height /
-+ f->vsub[0].numerator * f->vsub[0].denominator *
-+ bytesperline;
-+ return 0;
-+}
-+
-+static struct v4l2_subdev *video_remote_subdev(
-+ struct stfcamss_video *video, u32 *pad)
-+{
-+ struct media_pad *remote;
-+
-+ remote = media_pad_remote_pad_first(&video->pad);
-+
-+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
-+ return NULL;
-+
-+ if (pad)
-+ *pad = remote->index;
-+
-+ return media_entity_to_v4l2_subdev(remote->entity);
-+}
-+
-+static int video_get_subdev_format(struct stfcamss_video *video,
-+ struct v4l2_format *format)
-+{
-+ struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
-+ struct v4l2_pix_format_mplane *pix_mp =
-+ &video->active_fmt.fmt.pix_mp;
-+ struct v4l2_subdev_format fmt;
-+ struct v4l2_subdev *subdev;
-+ u32 pixelformat;
-+ u32 pad;
-+ int ret;
-+
-+ subdev = video_remote_subdev(video, &pad);
-+ if (subdev == NULL)
-+ return -EPIPE;
-+
-+ fmt.pad = pad;
-+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-+
-+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
-+ if (ret)
-+ return ret;
-+
-+ if (video->is_mp)
-+ pixelformat = pix_mp->pixelformat;
-+ else
-+ pixelformat = pix->pixelformat;
-+ ret = video_find_format(fmt.format.code, pixelformat,
-+ video->formats, video->nformats);
-+ if (ret < 0)
-+ return ret;
-+
-+ format->type = video->type;
-+
-+ if (video->is_mp)
-+ return video_mbus_to_pix_mp(&fmt.format, &format->fmt.pix_mp,
-+ &video->formats[ret], video->bpl_alignment);
-+ else
-+ return video_mbus_to_pix(&fmt.format, &format->fmt.pix,
-+ &video->formats[ret], video->bpl_alignment);
-+}
-+
-+static int video_check_format(struct stfcamss_video *video)
-+{
-+ struct v4l2_pix_format *pix = &video->active_fmt.fmt.pix;
-+ struct v4l2_pix_format_mplane *pix_mp =
-+ &video->active_fmt.fmt.pix_mp;
-+ struct v4l2_format format;
-+ struct v4l2_pix_format *sd_pix = &format.fmt.pix;
-+ struct v4l2_pix_format_mplane *sd_pix_mp = &format.fmt.pix_mp;
-+ int ret;
-+
-+ if (video->is_mp) {
-+ sd_pix_mp->pixelformat = pix_mp->pixelformat;
-+ ret = video_get_subdev_format(video, &format);
-+ if (ret < 0)
-+ return ret;
-+
-+ if (pix_mp->pixelformat != sd_pix_mp->pixelformat ||
-+ pix_mp->height > sd_pix_mp->height ||
-+ pix_mp->width > sd_pix_mp->width ||
-+ pix_mp->num_planes != sd_pix_mp->num_planes ||
-+ pix_mp->field != format.fmt.pix_mp.field) {
-+ st_err(ST_VIDEO,
-+ "%s, not match:\n"
-+ "0x%x 0x%x\n0x%x 0x%x\n0x%x 0x%x\n",
-+ __func__,
-+ pix_mp->pixelformat, sd_pix_mp->pixelformat,
-+ pix_mp->height, sd_pix_mp->height,
-+ pix_mp->field, format.fmt.pix_mp.field);
-+ return -EPIPE;
-+ }
-+
-+ } else {
-+ sd_pix->pixelformat = pix->pixelformat;
-+ ret = video_get_subdev_format(video, &format);
-+ if (ret < 0)
-+ return ret;
-+
-+ if (pix->pixelformat != sd_pix->pixelformat ||
-+ pix->height > sd_pix->height ||
-+ pix->width > sd_pix->width ||
-+ pix->field != format.fmt.pix.field) {
-+ st_err(ST_VIDEO,
-+ "%s, not match:\n"
-+ "0x%x 0x%x\n0x%x 0x%x\n0x%x 0x%x\n",
-+ __func__,
-+ pix->pixelformat, sd_pix->pixelformat,
-+ pix->height, sd_pix->height,
-+ pix->field, format.fmt.pix.field);
-+ return -EPIPE;
-+ }
-+ }
-+ return 0;
-+}
-+
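-+/*
-+ * Start streaming: start the media pipeline, validate the active format
-+ * against the connected subdevice, then walk upstream from the video node
-+ * and call s_stream(1) on every subdevice in the chain.
-+ */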
-+static int video_start_streaming(struct vb2_queue *q, unsigned int count)
-+{
-+ struct stfcamss_video *video = vb2_get_drv_priv(q);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity;
-+ struct media_pad *pad;
-+ struct v4l2_subdev *subdev;
-+ int ret;
-+
-+ ret = video_device_pipeline_start(vdev, &video->stfcamss->pipe);
-+ if (ret < 0) {
-+ st_err(ST_VIDEO,
-+ "Failed to video_device_pipeline_start: %d\n", ret);
-+ return ret;
-+ }
-+
-+ ret = video_check_format(video);
-+ if (ret < 0)
-+ goto error;
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
-+ if (ret < 0 && ret != -ENOIOCTLCMD)
-+ goto error;
-+ }
-+ return 0;
-+
-+error:
-+ video_device_pipeline_stop(vdev);
-+ video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
-+ return ret;
-+}
-+
-+static void video_stop_streaming(struct vb2_queue *q)
-+{
-+ struct stfcamss_video *video = vb2_get_drv_priv(q);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity;
-+ struct media_pad *pad;
-+ struct v4l2_subdev *subdev;
-+
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ v4l2_subdev_call(subdev, video, s_stream, 0);
-+ }
-+
-+ video_device_pipeline_stop(vdev);
-+ video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
-+}
-+
-+static const struct vb2_ops stf_video_vb2_q_ops = {
-+ .queue_setup = video_queue_setup,
-+ .wait_prepare = vb2_ops_wait_prepare,
-+ .wait_finish = vb2_ops_wait_finish,
-+ .buf_init = video_buf_init,
-+ .buf_prepare = video_buf_prepare,
-+ .buf_queue = video_buf_queue,
-+ .start_streaming = video_start_streaming,
-+ .stop_streaming = video_stop_streaming,
-+};
-+
-+/* -----------------------------------------------------
-+ * V4L2 ioctls
-+ */
-+
-+static int getcrop_pad_id(int video_id)
-+{
-+ return stf_vin_map_isp_pad(video_id, STF_ISP_PAD_SRC);
-+}
-+
-+static int video_querycap(struct file *file, void *fh,
-+ struct v4l2_capability *cap)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+
-+ strscpy(cap->driver, "stf camss", sizeof(cap->driver));
-+ strscpy(cap->card, "Starfive Camera Subsystem", sizeof(cap->card));
-+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
-+ dev_name(video->stfcamss->dev));
-+ return 0;
-+}
-+
-+static int video_get_unique_pixelformat_by_index(struct stfcamss_video *video,
-+ int ndx)
-+{
-+ int i, j, k;
-+
-+ /* find index "i" of "k"th unique pixelformat in formats array */
-+ k = -1;
-+ for (i = 0; i < video->nformats; i++) {
-+ for (j = 0; j < i; j++) {
-+ if (video->formats[i].pixelformat ==
-+ video->formats[j].pixelformat)
-+ break;
-+ }
-+
-+ if (j == i)
-+ k++;
-+
-+ if (k == ndx)
-+ return i;
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+static int video_get_pixelformat_by_mbus_code(struct stfcamss_video *video,
-+ u32 mcode)
-+{
-+ int i;
-+
-+ for (i = 0; i < video->nformats; i++) {
-+ if (video->formats[i].code == mcode)
-+ return i;
-+ }
-+
-+ return -EINVAL;
-+}
-+
-+static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ int i;
-+
-+ st_debug(ST_VIDEO, "%s:\n0x%x 0x%x\n 0x%x, 0x%x\n0x%x\n",
-+ __func__,
-+ f->type, video->type,
-+ f->index, video->nformats,
-+ f->mbus_code);
-+
-+ if (f->type != video->type)
-+ return -EINVAL;
-+ if (f->index >= video->nformats)
-+ return -EINVAL;
-+
-+ if (f->mbus_code) {
-+ /* Each entry in formats[] table has unique mbus_code */
-+ if (f->index > 0)
-+ return -EINVAL;
-+
-+ i = video_get_pixelformat_by_mbus_code(video, f->mbus_code);
-+ } else {
-+ i = video_get_unique_pixelformat_by_index(video, f->index);
-+ }
-+
-+ if (i < 0)
-+ return -EINVAL;
-+
-+ f->pixelformat = video->formats[i].pixelformat;
-+
-+ return 0;
-+}
-+
-+static int video_enum_framesizes(struct file *file, void *fh,
-+ struct v4l2_frmsizeenum *fsize)
-+{
-+ struct v4l2_subdev_frame_size_enum fse = {0};
-+ struct v4l2_subdev_mbus_code_enum code = {0};
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity = &vdev->entity;
-+ struct media_entity *sensor;
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad;
-+ bool support_selection = false;
-+ int i;
-+ int ret;
-+
-+ for (i = 0; i < video->nformats; i++) {
-+ if (video->formats[i].pixelformat == fsize->pixel_format)
-+ break;
-+ }
-+
-+ if (i == video->nformats)
-+ return -EINVAL;
-+
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ if (v4l2_subdev_has_op(subdev, pad, set_selection)) {
-+ support_selection = true;
-+ break;
-+ }
-+ }
-+
-+ if (support_selection) {
-+ if (fsize->index)
-+ return -EINVAL;
-+ fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
-+ fsize->stepwise.min_width = STFCAMSS_FRAME_MIN_WIDTH;
-+ fsize->stepwise.max_width = STFCAMSS_FRAME_MAX_WIDTH;
-+ fsize->stepwise.min_height = STFCAMSS_FRAME_MIN_HEIGHT;
-+ fsize->stepwise.max_height = STFCAMSS_FRAME_MAX_HEIGHT;
-+ fsize->stepwise.step_width = 1;
-+ fsize->stepwise.step_height = 1;
-+ } else {
-+ entity = &vdev->entity;
-+ sensor = stfcamss_find_sensor(entity);
-+ if (!sensor)
-+ return -ENOTTY;
-+
-+ subdev = media_entity_to_v4l2_subdev(sensor);
-+ code.index = 0;
-+ code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code);
-+ if (ret < 0)
-+ return -EINVAL;
-+ fse.index = fsize->index;
-+ fse.code = code.code;
-+ fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-+ ret = v4l2_subdev_call(subdev, pad, enum_frame_size, NULL, &fse);
-+ if (ret < 0)
-+ return -EINVAL;
-+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
-+ fsize->discrete.width = fse.min_width;
-+ fsize->discrete.height = fse.min_height;
-+ }
-+
-+ return 0;
-+}
-+
-+static int video_enum_frameintervals(struct file *file, void *fh,
-+ struct v4l2_frmivalenum *fival)
-+{
-+ int ret = 0;
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity = &vdev->entity;
-+ struct media_entity *sensor;
-+ struct v4l2_subdev *subdev;
-+ struct v4l2_subdev_mbus_code_enum code = {0};
-+ struct v4l2_subdev_frame_interval_enum fie = {0};
-+
-+ sensor = stfcamss_find_sensor(entity);
-+ if (!sensor)
-+ return -ENOTTY;
-+ fie.index = fival->index;
-+ fie.width = fival->width;
-+ fie.height = fival->height;
-+ fie.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-+ subdev = media_entity_to_v4l2_subdev(sensor);
-+
-+ code.index = 0;
-+ code.which = V4L2_SUBDEV_FORMAT_ACTIVE;
-+
-+ /* Don't care about the code, just find by pixelformat */
-+ ret = video_find_format(0, fival->pixel_format,
-+ video->formats, video->nformats);
-+ if (ret < 0)
-+ return -EINVAL;
-+
-+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code);
-+ if (ret < 0)
-+ return -EINVAL;
-+
-+ fie.code = code.code;
-+ ret = v4l2_subdev_call(subdev, pad, enum_frame_interval, NULL, &fie);
-+ if (ret < 0)
-+ return ret;
-+
-+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
-+ fival->discrete = fie.interval;
-+
-+ return 0;
-+}
-+
-+static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+
-+ st_debug(ST_VIDEO, "%s, fmt.type = 0x%x\n", __func__, f->type);
-+ st_debug(ST_VIDEO, "%s, active_fmt.type = 0x%x,0x%x\n",
-+ __func__, video->active_fmt.type,
-+ video->active_fmt.fmt.pix.pixelformat);
-+ *f = video->active_fmt;
-+ return 0;
-+}
-+
-+static int video_g_fmt_mp(struct file *file, void *fh, struct v4l2_format *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+
-+ st_debug(ST_VIDEO, "%s, fmt.type = 0x%x\n", __func__, f->type);
-+ st_debug(ST_VIDEO, "%s, active_fmt.type = 0x%x\n",
-+ __func__, video->active_fmt.type);
-+ *f = video->active_fmt;
-+ return 0;
-+}
-+
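-+/*
-+ * Propagate the format through the pipeline: set it on every linked pad of
-+ * the entity, read back the resulting source format and recurse into the
-+ * connected downstream entity.
-+ */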
-+static int video_entity_s_fmt(struct stfcamss_video *video,
-+ struct media_entity *entity,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad;
-+ struct v4l2_mbus_framefmt *mf = &fmt->format;
-+ struct v4l2_subdev_format fmt_src = {
-+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
-+ };
-+ u32 width, height, code;
-+ int ret = 0, index = 0;
-+
-+ code = mf->code;
-+ width = mf->width;
-+ height = mf->height;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+ while (1) {
-+ if (index >= entity->num_pads)
-+ break;
-+ pad = &entity->pads[index];
-+ pad = media_pad_remote_pad_first(pad);
-+ if (pad && is_media_entity_v4l2_subdev(pad->entity)) {
-+ fmt->pad = index;
-+ ret = v4l2_subdev_call(subdev, pad, set_fmt, state, fmt);
-+ if (mf->code != code ||
-+ mf->width != width || mf->height != height) {
-+ st_warn(ST_VIDEO,
-+ "\"%s\":%d pad fmt has been changed to 0x%x %ux%u\n",
-+ subdev->name, fmt->pad, mf->code,
-+ mf->width, mf->height);
-+ }
-+ if (index) {
-+ fmt_src.pad = index;
-+ ret = v4l2_subdev_call(subdev, pad, get_fmt, state, &fmt_src);
-+ if (ret)
-+ return ret;
-+
-+ fmt->format.code = fmt_src.format.code;
-+ ret = video_entity_s_fmt(video, pad->entity, state, fmt);
-+ }
-+ }
-+
-+ if (ret < 0 && ret != -ENOIOCTLCMD)
-+ break;
-+ index++;
-+ }
-+ return ret;
-+}
-+
-+static int video_pipeline_s_fmt(struct stfcamss_video *video,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_format *f)
-+{
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity = &vdev->entity;
-+ struct v4l2_subdev *subdev;
-+ int ret, index;
-+ struct v4l2_subdev_format fmt = {
-+ .pad = 0,
-+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .reserved = {getcrop_pad_id(video->id)}
-+ };
-+ struct v4l2_mbus_framefmt *mf = &fmt.format;
-+ struct v4l2_pix_format *pix = &f->fmt.pix;
-+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
-+ struct media_entity *sensor;
-+ u32 width, height;
-+ struct media_pad *pad;
-+
-+ /* pix to mbus format */
-+ if (video->is_mp) {
-+ index = video_find_format(mf->code,
-+ pix_mp->pixelformat,
-+ video->formats, video->nformats);
-+ if (index < 0)
-+ return index;
-+ v4l2_fill_mbus_format_mplane(mf, pix_mp);
-+ mf->code = video->formats[index].code;
-+ } else {
-+ index = video_find_format(mf->code,
-+ pix->pixelformat,
-+ video->formats, video->nformats);
-+ if (index < 0)
-+ return index;
-+ v4l2_fill_mbus_format(mf, pix, video->formats[index].code);
-+ }
-+
-+ width = mf->width;
-+ height = mf->height;
-+
-+ sensor = stfcamss_find_sensor(entity);
-+ if (!sensor) {
-+ st_err(ST_VIDEO, "Can't find sensor\n");
-+ return -ENOTTY;
-+ }
-+
-+ subdev = media_entity_to_v4l2_subdev(sensor);
-+ ret = v4l2_subdev_call(subdev, pad, get_fmt, state, &fmt);
-+ if (ret)
-+ return ret;
-+
-+ /*
-+ * Starting from sensor subdevice, walk within
-+ * pipeline and set format on each subdevice
-+ */
-+ pad = media_pad_remote_pad_first(&sensor->pads[0]);
-+ ret = video_entity_s_fmt(video, pad->entity, state, &fmt);
-+ if (ret < 0 && ret != -ENOIOCTLCMD)
-+ return ret;
-+
-+ index = video_find_format(mf->code,
-+ video->formats[index].pixelformat,
-+ video->formats, video->nformats);
-+ st_debug(ST_VIDEO, "%s, code=%x, index=%d\n",
-+ __func__, mf->code, index);
-+
-+ if (index < 0)
-+ return index;
-+
-+ if (video->is_mp)
-+ video_mbus_to_pix_mp(mf, pix_mp,
-+ &video->formats[index], video->bpl_alignment);
-+ else
-+ video_mbus_to_pix(mf, pix,
-+ &video->formats[index], video->bpl_alignment);
-+
-+ ret = __video_try_fmt(video, f, video->is_mp);
-+ if (ret < 0)
-+ return ret;
-+
-+ return 0;
-+}
-+
-+static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ int ret;
-+
-+ st_debug(ST_VIDEO, "%s, fmt.type = 0x%x, v4l2fmt=%x\n",
-+ __func__, f->type, f->fmt.pix.pixelformat);
-+
-+ if (vb2_is_busy(&video->vb2_q))
-+ return -EBUSY;
-+
-+ ret = __video_try_fmt(video, f, false);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = video_pipeline_s_fmt(video, NULL, f);
-+
-+ st_debug(ST_VIDEO, "%s, pixelformat=0x%x, ret=%d\n",
-+ __func__, f->fmt.pix.pixelformat, ret);
-+ if (ret < 0)
-+ return ret;
-+
-+ video->active_fmt = *f;
-+
-+ return 0;
-+}
-+
-+static int video_s_fmt_mp(struct file *file, void *fh, struct v4l2_format *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ int ret;
-+
-+ st_debug(ST_VIDEO, "%s, fmt.type = 0x%x\n", __func__, f->type);
-+ if (vb2_is_busy(&video->vb2_q))
-+ return -EBUSY;
-+
-+ ret = __video_try_fmt(video, f, true);
-+ if (ret < 0)
-+ return ret;
-+
-+ ret = video_pipeline_s_fmt(video, NULL, f);
-+ if (ret < 0)
-+ return ret;
-+
-+ video->active_fmt = *f;
-+
-+ return 0;
-+}
-+
-+static int video_try_fmt(struct file *file,
-+ void *fh, struct v4l2_format *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+
-+ return __video_try_fmt(video, f, false);
-+}
-+
-+static int video_try_fmt_mp(struct file *file,
-+ void *fh, struct v4l2_format *f)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+
-+ return __video_try_fmt(video, f, true);
-+}
-+
-+static int video_enum_input(struct file *file, void *fh,
-+ struct v4l2_input *input)
-+{
-+ if (input->index > 0)
-+ return -EINVAL;
-+
-+ strscpy(input->name, "camera", sizeof(input->name));
-+ input->type = V4L2_INPUT_TYPE_CAMERA;
-+
-+ return 0;
-+}
-+
-+static int video_g_input(struct file *file, void *fh, unsigned int *input)
-+{
-+ *input = 0;
-+
-+ return 0;
-+}
-+
-+static int video_s_input(struct file *file, void *fh, unsigned int input)
-+{
-+ return input == 0 ? 0 : -EINVAL;
-+}
-+
-+static int video_g_parm(struct file *file, void *priv,
-+ struct v4l2_streamparm *p)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity;
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad;
-+ int ret = -ENOTTY, is_support = 0;
-+
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ ret = v4l2_g_parm_cap(vdev, subdev, p);
-+ if (ret < 0 && ret != -ENOIOCTLCMD)
-+ break;
-+ if (!ret)
-+ is_support = 1;
-+ }
-+
-+ return is_support ? 0 : ret;
-+}
-+
-+static int video_s_parm(struct file *file, void *priv,
-+ struct v4l2_streamparm *p)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity;
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad;
-+ struct v4l2_streamparm tmp_p;
-+ int ret = -ENOTTY, is_support = 0;
-+
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ tmp_p = *p;
-+ ret = v4l2_s_parm_cap(vdev, subdev, &tmp_p);
-+ if (ret < 0 && ret != -ENOIOCTLCMD)
-+ break;
-+ if (!ret) {
-+ is_support = 1;
-+ *p = tmp_p;
-+ }
-+ }
-+
-+ return is_support ? 0 : ret;
-+}
-+
-+/* Crop ioctls */
-+int video_g_pixelaspect(struct file *file, void *fh,
-+ int buf_type, struct v4l2_fract *aspect)
-+{
-+ return 0;
-+}
-+
-+int video_g_selection(struct file *file, void *fh,
-+ struct v4l2_selection *s)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity;
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad;
-+ struct v4l2_subdev_selection sel = {
-+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .pad = getcrop_pad_id(video->id),
-+ .target = s->target,
-+ .r = s->r,
-+ .flags = s->flags,
-+ };
-+ int ret = -ENOTTY;
-+
-+ st_debug(ST_VIDEO, "%s, target = 0x%x, 0x%x\n",
-+ __func__, sel.target, s->target);
-+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE
-+ && s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-+ return -EINVAL;
-+
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sel);
-+ if (!ret) {
-+ s->r = sel.r;
-+ s->flags = sel.flags;
-+ break;
-+ }
-+ if (ret != -ENOIOCTLCMD)
-+ break;
-+ }
-+
-+ return ret;
-+}
-+
-+int video_s_selection(struct file *file, void *fh,
-+ struct v4l2_selection *s)
-+{
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct video_device *vdev = &video->vdev;
-+ struct media_entity *entity;
-+ struct v4l2_subdev *subdev;
-+ struct media_pad *pad;
-+ struct v4l2_subdev_selection sel = {
-+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .pad = getcrop_pad_id(video->id),
-+ .target = s->target,
-+ .r = s->r,
-+ .flags = s->flags,
-+ };
-+ struct v4l2_pix_format *format = &video->active_fmt.fmt.pix;
-+ struct v4l2_pix_format_mplane *format_mp =
-+ &video->active_fmt.fmt.pix_mp;
-+ int ret = -ENOTTY;
-+
-+ st_debug(ST_VIDEO, "%s, target = 0x%x, 0x%x\n",
-+ __func__, sel.target, s->target);
-+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE
-+ && s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
-+ return -EINVAL;
-+
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sel);
-+ if (!ret) {
-+ s->r = sel.r;
-+ s->flags = sel.flags;
-+ format->width = s->r.width;
-+ format->height = s->r.height;
-+ format_mp->width = s->r.width;
-+ format_mp->height = s->r.height;
-+ ret = __video_try_fmt(video, &video->active_fmt,
-+ video->is_mp);
-+ if (ret < 0)
-+ return ret;
-+ break;
-+ }
-+ if (ret != -ENOIOCTLCMD)
-+ break;
-+ }
-+
-+ st_debug(ST_VIDEO, "%s: ret = %d\n", __func__, ret);
-+
-+ return ret;
-+}
-+
-+static const struct v4l2_ioctl_ops stf_vid_ioctl_ops = {
-+ .vidioc_querycap = video_querycap,
-+ .vidioc_enum_fmt_vid_cap = video_enum_fmt,
-+ .vidioc_enum_framesizes = video_enum_framesizes,
-+ .vidioc_enum_frameintervals = video_enum_frameintervals,
-+ .vidioc_g_fmt_vid_cap = video_g_fmt,
-+ .vidioc_s_fmt_vid_cap = video_s_fmt,
-+ .vidioc_try_fmt_vid_cap = video_try_fmt,
-+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
-+ .vidioc_querybuf = vb2_ioctl_querybuf,
-+ .vidioc_qbuf = vb2_ioctl_qbuf,
-+ .vidioc_expbuf = vb2_ioctl_expbuf,
-+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
-+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
-+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
-+ .vidioc_streamon = vb2_ioctl_streamon,
-+ .vidioc_streamoff = vb2_ioctl_streamoff,
-+ .vidioc_enum_input = video_enum_input,
-+ .vidioc_g_input = video_g_input,
-+ .vidioc_s_input = video_s_input,
-+ .vidioc_g_parm = video_g_parm,
-+ .vidioc_s_parm = video_s_parm,
-+ .vidioc_s_selection = video_s_selection,
-+ .vidioc_g_selection = video_g_selection,
-+};
-+
-+static const struct v4l2_ioctl_ops stf_vid_ioctl_ops_mp = {
-+ .vidioc_querycap = video_querycap,
-+ .vidioc_enum_fmt_vid_cap = video_enum_fmt,
-+ .vidioc_enum_framesizes = video_enum_framesizes,
-+ .vidioc_enum_frameintervals = video_enum_frameintervals,
-+ .vidioc_g_fmt_vid_cap_mplane = video_g_fmt_mp,
-+ .vidioc_s_fmt_vid_cap_mplane = video_s_fmt_mp,
-+ .vidioc_try_fmt_vid_cap_mplane = video_try_fmt_mp,
-+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
-+ .vidioc_querybuf = vb2_ioctl_querybuf,
-+ .vidioc_qbuf = vb2_ioctl_qbuf,
-+ .vidioc_expbuf = vb2_ioctl_expbuf,
-+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
-+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
-+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
-+ .vidioc_streamon = vb2_ioctl_streamon,
-+ .vidioc_streamoff = vb2_ioctl_streamoff,
-+ .vidioc_enum_input = video_enum_input,
-+ .vidioc_g_input = video_g_input,
-+ .vidioc_s_input = video_s_input,
-+ .vidioc_g_parm = video_g_parm,
-+ .vidioc_s_parm = video_s_parm,
-+ .vidioc_s_selection = video_s_selection,
-+ .vidioc_g_selection = video_g_selection,
-+};
-+
-+static const struct v4l2_ioctl_ops stf_vid_ioctl_ops_out = {
-+ .vidioc_querycap = video_querycap,
-+ .vidioc_enum_fmt_vid_out = video_enum_fmt,
-+ .vidioc_enum_framesizes = video_enum_framesizes,
-+ .vidioc_enum_frameintervals = video_enum_frameintervals,
-+ .vidioc_g_fmt_vid_out = video_g_fmt,
-+ .vidioc_s_fmt_vid_out = video_s_fmt,
-+ .vidioc_try_fmt_vid_out = video_try_fmt,
-+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
-+ .vidioc_querybuf = vb2_ioctl_querybuf,
-+ .vidioc_qbuf = vb2_ioctl_qbuf,
-+ .vidioc_expbuf = vb2_ioctl_expbuf,
-+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
-+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
-+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
-+ .vidioc_streamon = vb2_ioctl_streamon,
-+ .vidioc_streamoff = vb2_ioctl_streamoff,
-+};
-+
-+static int video_open(struct file *file)
-+{
-+ struct video_device *vdev = video_devdata(file);
-+ struct stfcamss_video *video = video_drvdata(file);
-+ struct v4l2_fh *vfh;
-+ int ret;
-+
-+ mutex_lock(&video->lock);
-+
-+ vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
-+ if (vfh == NULL) {
-+ ret = -ENOMEM;
-+ goto error_alloc;
-+ }
-+
-+ v4l2_fh_init(vfh, vdev);
-+ v4l2_fh_add(vfh);
-+
-+ file->private_data = vfh;
-+
-+ if (!video->pm_count) {
-+ ret = v4l2_pipeline_pm_get(&vdev->entity);
-+ if (ret < 0) {
-+ st_err(ST_VIDEO,
-+ "Failed to power up pipeline: %d\n", ret);
-+ goto error_pm_use;
-+ }
-+ }
-+
-+ video->pm_count++;
-+
-+ mutex_unlock(&video->lock);
-+
-+ return 0;
-+
-+error_pm_use:
-+ v4l2_fh_release(file);
-+error_alloc:
-+ mutex_unlock(&video->lock);
-+ return ret;
-+}
-+
-+static int video_release(struct file *file)
-+{
-+ struct video_device *vdev = video_devdata(file);
-+ struct stfcamss_video *video = video_drvdata(file);
-+
-+ vb2_fop_release(file);
-+
-+ video->pm_count--;
-+
-+ if (!video->pm_count)
-+ v4l2_pipeline_pm_put(&vdev->entity);
-+
-+ file->private_data = NULL;
-+
-+ return 0;
-+}
-+
-+static const struct v4l2_file_operations stf_vid_fops = {
-+ .owner = THIS_MODULE,
-+ .unlocked_ioctl = video_ioctl2,
-+ .open = video_open,
-+ .release = video_release,
-+ .poll = vb2_fop_poll,
-+ .mmap = vb2_fop_mmap,
-+ .read = vb2_fop_read,
-+};
-+
-+static void stf_video_release(struct video_device *vdev)
-+{
-+ struct stfcamss_video *video = video_get_drvdata(vdev);
-+
-+ media_entity_cleanup(&vdev->entity);
-+
-+ mutex_destroy(&video->q_lock);
-+ mutex_destroy(&video->lock);
-+}
-+
-+int stf_video_register(struct stfcamss_video *video,
-+ struct v4l2_device *v4l2_dev,
-+ const char *name, int is_mp)
-+{
-+ struct video_device *vdev;
-+ struct vb2_queue *q;
-+ struct media_pad *pad = &video->pad;
-+ int ret;
-+ enum isp_pad_id isp_pad;
-+
-+ vdev = &video->vdev;
-+
-+ mutex_init(&video->q_lock);
-+
-+ q = &video->vb2_q;
-+ q->drv_priv = video;
-+ q->mem_ops = &vb2_dma_contig_memops;
-+ q->ops = &stf_video_vb2_q_ops;
-+ q->type = video->type;
-+ q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ;
-+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-+ q->buf_struct_size = sizeof(struct stfcamss_buffer);
-+ q->dev = video->stfcamss->dev;
-+ q->lock = &video->q_lock;
-+ q->min_buffers_needed = STFCAMSS_MIN_BUFFERS;
-+ ret = vb2_queue_init(q);
-+ if (ret < 0) {
-+ st_err(ST_VIDEO,
-+ "Failed to init vb2 queue: %d\n", ret);
-+ goto err_vb2_init;
-+ }
-+
-+ pad->flags = MEDIA_PAD_FL_SINK;
-+ ret = media_entity_pads_init(&vdev->entity, 1, pad);
-+ if (ret < 0) {
-+ st_err(ST_VIDEO,
-+ "Failed to init video entity: %d\n",
-+ ret);
-+ goto err_vb2_init;
-+ }
-+
-+ mutex_init(&video->lock);
-+
-+ isp_pad = stf_vin_map_isp_pad(video->id, STF_ISP_PAD_SRC);
-+ if (video->id == VIN_LINE_WR) {
-+ video->formats = formats_pix_st7110_wr;
-+ video->nformats = ARRAY_SIZE(formats_pix_st7110_wr);
-+ video->bpl_alignment = STFCAMSS_FRAME_WIDTH_ALIGN_8;
-+ } else if (isp_pad == STF_ISP_PAD_SRC
-+ || isp_pad == STF_ISP_PAD_SRC_SS0
-+ || isp_pad == STF_ISP_PAD_SRC_SS1) {
-+ video->formats = formats_pix_st7110_isp;
-+ video->nformats = ARRAY_SIZE(formats_pix_st7110_isp);
-+ video->bpl_alignment = STFCAMSS_FRAME_WIDTH_ALIGN_8;
-+ } else if (isp_pad == STF_ISP_PAD_SRC_ITIW
-+ || isp_pad == STF_ISP_PAD_SRC_ITIR) {
-+ video->formats = formats_st7110_isp_iti;
-+ video->nformats = ARRAY_SIZE(formats_st7110_isp_iti);
-+ video->bpl_alignment = STFCAMSS_FRAME_WIDTH_ALIGN_8;
-+ } else { // raw/scdump/yhist
-+ video->formats = formats_raw_st7110_isp;
-+ video->nformats = ARRAY_SIZE(formats_raw_st7110_isp);
-+ video->bpl_alignment = STFCAMSS_FRAME_WIDTH_ALIGN_128;
-+ }
-+ video->is_mp = is_mp;
-+
-+ ret = stf_video_init_format(video, is_mp);
-+ if (ret < 0) {
-+ st_err(ST_VIDEO, "Failed to init format: %d\n", ret);
-+ goto err_vid_init_format;
-+ }
-+
-+ vdev->fops = &stf_vid_fops;
-+ if (isp_pad == STF_ISP_PAD_SRC_ITIR) {
-+ vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT;
-+ vdev->vfl_dir = VFL_DIR_TX;
-+ } else {
-+ vdev->device_caps = is_mp ? V4L2_CAP_VIDEO_CAPTURE_MPLANE :
-+ V4L2_CAP_VIDEO_CAPTURE;
-+ vdev->vfl_dir = VFL_DIR_RX;
-+ }
-+ vdev->device_caps |= V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_IO_MC;
-+ if (video->type == V4L2_CAP_VIDEO_OUTPUT)
-+ vdev->ioctl_ops = &stf_vid_ioctl_ops_out;
-+ else
-+ vdev->ioctl_ops = is_mp ? &stf_vid_ioctl_ops_mp : &stf_vid_ioctl_ops;
-+ vdev->release = stf_video_release;
-+ vdev->v4l2_dev = v4l2_dev;
-+ vdev->queue = &video->vb2_q;
-+ vdev->lock = &video->lock;
-+ strscpy(vdev->name, name, sizeof(vdev->name));
-+
-+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, video->id);
-+ if (ret < 0) {
-+ st_err(ST_VIDEO,
-+ "Failed to register video device: %d\n",
-+ ret);
-+ goto err_vid_reg;
-+ }
-+
-+ video_set_drvdata(vdev, video);
-+ return 0;
-+
-+err_vid_reg:
-+err_vid_init_format:
-+ media_entity_cleanup(&vdev->entity);
-+ mutex_destroy(&video->lock);
-+err_vb2_init:
-+ mutex_destroy(&video->q_lock);
-+ return ret;
-+}
-+
-+void stf_video_unregister(struct stfcamss_video *video)
-+{
-+ vb2_video_unregister_device(&video->vdev);
-+}
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_video.h
-@@ -0,0 +1,83 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_VIDEO_H
-+#define STF_VIDEO_H
-+
-+#include <linux/mutex.h>
-+#include <media/videobuf2-v4l2.h>
-+#include <linux/videodev2.h>
-+#include <media/v4l2-dev.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-fh.h>
-+#include <media/v4l2-ioctl.h>
-+
-+#define STFCAMSS_FRAME_MIN_WIDTH 64
-+#define STFCAMSS_FRAME_MAX_WIDTH 1920
-+#define STFCAMSS_FRAME_MIN_HEIGHT 64
-+#define STFCAMSS_FRAME_MAX_HEIGHT 1080
-+#define STFCAMSS_FRAME_WIDTH_ALIGN_8 8
-+#define STFCAMSS_FRAME_WIDTH_ALIGN_128 128
-+#define STFCAMSS_MIN_BUFFERS 2
-+
-+#define STFCAMSS_MAX_ENTITY_NAME_LEN 27
-+
-+struct stfcamss_buffer {
-+ struct vb2_v4l2_buffer vb;
-+ dma_addr_t addr[3];
-+ void *vaddr_sc; /* Use for isp sc data */
-+ struct list_head queue;
-+ int sizeimage;
-+};
-+
-+struct stfcamss_video;
-+
-+struct stfcamss_video_ops {
-+ int (*queue_buffer)(struct stfcamss_video *vid,
-+ struct stfcamss_buffer *buf);
-+ int (*flush_buffers)(struct stfcamss_video *vid,
-+ enum vb2_buffer_state state);
-+};
-+
-+struct fract {
-+ u8 numerator;
-+ u8 denominator;
-+};
-+
-+struct stfcamss_format_info {
-+ u32 code;
-+ u32 pixelformat;
-+ u8 planes;
-+ struct fract hsub[3];
-+ struct fract vsub[3];
-+ u8 bpp[3];
-+};
-+
-+struct stfcamss_video {
-+ struct stfcamss *stfcamss;
-+ u8 id;
-+ struct vb2_queue vb2_q;
-+ struct video_device vdev;
-+ struct media_pad pad;
-+ struct media_pipeline pipe;
-+ struct v4l2_format active_fmt;
-+ enum v4l2_buf_type type;
-+ const struct stfcamss_video_ops *ops;
-+ struct mutex lock;
-+ struct mutex q_lock;
-+ unsigned int bpl_alignment;
-+ const struct stfcamss_format_info *formats;
-+ unsigned int nformats;
-+ unsigned int is_mp;
-+ unsigned int pm_count;
-+};
-+
-+int stf_video_register(struct stfcamss_video *video,
-+ struct v4l2_device *v4l2_dev, const char *name, int is_mp);
-+
-+void stf_video_unregister(struct stfcamss_video *video);
-+
-+#endif /* STF_VIDEO_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_vin.c
-@@ -0,0 +1,1515 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/interrupt.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/pm_runtime.h>
-+#include <media/v4l2-event.h>
-+
-+#include "stfcamss.h"
-+
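-+/*
-+ * A vin_line lives in the line[] array of stf_vin2_dev; step back by the
-+ * line id to the start of the array and use container_of() to reach the
-+ * parent device.
-+ */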
-+#define vin_line_array(ptr_line) \
-+ ((const struct vin_line (*)[]) &(ptr_line[-(ptr_line->id)]))
-+
-+#define line_to_vin2_dev(ptr_line) \
-+ container_of(vin_line_array(ptr_line), struct stf_vin2_dev, line)
-+
-+#define VIN_FRAME_DROP_MAX_VAL 90
-+#define VIN_FRAME_DROP_MIN_VAL 4
-+#define VIN_FRAME_PER_SEC_MAX_VAL 90
-+
-+/* The ISP control loop needs about 1 second of frames to stabilize. */
-+#define VIN_FRAME_DROP_SEC_FOR_ISP_CTRL 1
-+
-+
-+// #define VIN_TWO_BUFFER
-+
-+static const struct vin2_format vin2_formats_st7110[] = {
-+ { MEDIA_BUS_FMT_YUYV8_2X8, 16},
-+ { MEDIA_BUS_FMT_RGB565_2X8_LE, 16},
-+ { MEDIA_BUS_FMT_SRGGB8_1X8, 8},
-+ { MEDIA_BUS_FMT_SGRBG8_1X8, 8},
-+ { MEDIA_BUS_FMT_SGBRG8_1X8, 8},
-+ { MEDIA_BUS_FMT_SBGGR8_1X8, 8},
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12},
-+ { MEDIA_BUS_FMT_Y12_1X12, 8},
-+ { MEDIA_BUS_FMT_YUV8_1X24, 8},
-+ { MEDIA_BUS_FMT_AYUV8_1X32, 32},
-+};
-+
-+static const struct vin2_format isp_formats_st7110_raw[] = {
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12},
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12},
-+};
-+
-+static const struct vin2_format isp_formats_st7110_uo[] = {
-+ { MEDIA_BUS_FMT_Y12_1X12, 8},
-+};
-+
-+static const struct vin2_format isp_formats_st7110_iti[] = {
-+ { MEDIA_BUS_FMT_SRGGB10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGRBG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SGBRG10_1X10, 10},
-+ { MEDIA_BUS_FMT_SBGGR10_1X10, 10},
-+ { MEDIA_BUS_FMT_SRGGB12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGRBG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SGBRG12_1X12, 12},
-+ { MEDIA_BUS_FMT_SBGGR12_1X12, 12},
-+ { MEDIA_BUS_FMT_Y12_1X12, 8},
-+ { MEDIA_BUS_FMT_YUV8_1X24, 8},
-+};
-+
-+static const struct vin2_format_table vin2_formats_table[] = {
-+ /* VIN_LINE_WR */
-+ { vin2_formats_st7110, ARRAY_SIZE(vin2_formats_st7110) },
-+ /* VIN_LINE_ISP */
-+ { isp_formats_st7110_uo, ARRAY_SIZE(isp_formats_st7110_uo) },
-+ /* VIN_LINE_ISP_SS0 */
-+ { isp_formats_st7110_uo, ARRAY_SIZE(isp_formats_st7110_uo) },
-+ /* VIN_LINE_ISP_SS1 */
-+ { isp_formats_st7110_uo, ARRAY_SIZE(isp_formats_st7110_uo) },
-+ /* VIN_LINE_ISP_ITIW */
-+ { isp_formats_st7110_iti, ARRAY_SIZE(isp_formats_st7110_iti) },
-+ /* VIN_LINE_ISP_ITIR */
-+ { isp_formats_st7110_iti, ARRAY_SIZE(isp_formats_st7110_iti) },
-+ /* VIN_LINE_ISP_RAW */
-+ { isp_formats_st7110_raw, ARRAY_SIZE(isp_formats_st7110_raw) },
-+ /* VIN_LINE_ISP_SCD_Y */
-+ { isp_formats_st7110_raw, ARRAY_SIZE(isp_formats_st7110_raw) },
-+};
-+
-+static void vin_buffer_done(struct vin_line *line, struct vin_params *params);
-+static void vin_change_buffer(struct vin_line *line);
-+static struct stfcamss_buffer *vin_buf_get_pending(struct vin_output *output);
-+static void vin_output_init_addrs(struct vin_line *line);
-+static void vin_init_outputs(struct vin_line *line);
-+static struct v4l2_mbus_framefmt *
-+__vin_get_format(struct vin_line *line,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ enum v4l2_subdev_format_whence which);
-+
-+static char *get_line_subdevname(int line_id)
-+{
-+ char *name = NULL;
-+
-+ switch (line_id) {
-+ case VIN_LINE_WR:
-+ name = "wr";
-+ break;
-+ case VIN_LINE_ISP:
-+ name = "isp0";
-+ break;
-+ case VIN_LINE_ISP_SS0:
-+ name = "isp0_ss0";
-+ break;
-+ case VIN_LINE_ISP_SS1:
-+ name = "isp0_ss1";
-+ break;
-+ case VIN_LINE_ISP_ITIW:
-+ name = "isp0_itiw";
-+ break;
-+ case VIN_LINE_ISP_ITIR:
-+ name = "isp0_itir";
-+ break;
-+ case VIN_LINE_ISP_RAW:
-+ name = "isp0_raw";
-+ break;
-+ case VIN_LINE_ISP_SCD_Y:
-+ name = "isp0_scd_y";
-+ break;
-+ default:
-+ name = "unknown";
-+ break;
-+ }
-+ return name;
-+}
-+
-+static enum isp_line_id stf_vin_map_isp_line(enum vin_line_id line)
-+{
-+ enum isp_line_id line_id;
-+
-+ if ((line > VIN_LINE_WR) && (line < VIN_LINE_MAX)) {
-+ line_id = line % STF_ISP_LINE_MAX;
-+ if (line_id == 0)
-+ line_id = STF_ISP_LINE_SRC_SCD_Y;
-+ } else {
-+ line_id = STF_ISP_LINE_INVALID;
-+ }
-+
-+ return line_id;
-+}
-+
-+enum isp_pad_id stf_vin_map_isp_pad(enum vin_line_id line, enum isp_pad_id def)
-+{
-+ enum isp_pad_id pad_id;
-+
-+ if (line == VIN_LINE_WR)
-+ pad_id = STF_ISP_PAD_SINK;
-+ else if ((line > VIN_LINE_WR) && (line < VIN_LINE_MAX))
-+ pad_id = stf_vin_map_isp_line(line);
-+ else
-+ pad_id = def;
-+
-+ return pad_id;
-+}
-+
-+int stf_vin_subdev_init(struct stfcamss *stfcamss)
-+{
-+ struct stf_vin_dev *vin;
-+ struct device *dev = stfcamss->dev;
-+ struct stf_vin2_dev *vin_dev = stfcamss->vin_dev;
-+ int i, ret = 0;
-+
-+ vin_dev->stfcamss = stfcamss;
-+ vin_dev->hw_ops = &vin_ops;
-+ vin_dev->hw_ops->isr_buffer_done = vin_buffer_done;
-+ vin_dev->hw_ops->isr_change_buffer = vin_change_buffer;
-+
-+ vin = stfcamss->vin;
-+ atomic_set(&vin_dev->ref_count, 0);
-+
-+ ret = devm_request_irq(dev,
-+ vin->irq, vin_dev->hw_ops->vin_wr_irq_handler,
-+ 0, "vin_axiwr_irq", vin_dev);
-+ if (ret) {
-+ st_err(ST_VIN, "failed to request irq\n");
-+ goto out;
-+ }
-+
-+ ret = devm_request_irq(dev,
-+ vin->isp_irq, vin_dev->hw_ops->vin_isp_irq_handler,
-+ 0, "vin_isp_irq", vin_dev);
-+ if (ret) {
-+ st_err(ST_VIN, "failed to request isp irq\n");
-+ goto out;
-+ }
-+
-+ st_info(ST_CAMSS, "%s, %d!\n", __func__, __LINE__);
-+#ifdef ISP_USE_CSI_AND_SC_DONE_INTERRUPT
-+ ret = devm_request_irq(dev,
-+ vin->isp_csi_irq, vin_dev->hw_ops->vin_isp_csi_irq_handler,
-+ 0, "vin_isp_csi_irq", vin_dev);
-+ if (ret) {
-+ st_err(ST_VIN, "failed to request isp raw irq\n");
-+ goto out;
-+ }
-+
-+ ret = devm_request_irq(dev,
-+ vin->isp_scd_irq, vin_dev->hw_ops->vin_isp_scd_irq_handler,
-+ 0, "vin_isp_scd_irq", vin_dev);
-+ if (ret) {
-+ st_err(ST_VIN, "failed to request isp scd irq\n");
-+ goto out;
-+ }
-+#endif
-+
-+ ret = devm_request_irq(dev,
-+ vin->isp_irq_csiline, vin_dev->hw_ops->vin_isp_irq_csiline_handler,
-+ 0, "vin_isp_irq_csiline", vin_dev);
-+ if (ret) {
-+ st_err(ST_VIN, "failed to request isp irq csiline\n");
-+ goto out;
-+ }
-+
-+ mutex_init(&vin_dev->power_lock);
-+ vin_dev->power_count = 0;
-+
-+ for (i = 0; i < STF_DUMMY_MODULE_NUMS; i++) {
-+ struct dummy_buffer *dummy_buffer = &vin_dev->dummy_buffer[i];
-+
-+ mutex_init(&dummy_buffer->stream_lock);
-+ dummy_buffer->nums = i == 0 ? VIN_DUMMY_BUFFER_NUMS : ISP_DUMMY_BUFFER_NUMS;
-+ dummy_buffer->stream_count = 0;
-+ dummy_buffer->buffer = devm_kcalloc(dev, dummy_buffer->nums,
-+ sizeof(struct vin_dummy_buffer), GFP_KERNEL);
-+ atomic_set(&dummy_buffer->frame_skip, 0);
-+ }
-+
-+ for (i = VIN_LINE_WR; i < STF_ISP_LINE_MAX + 1; i++) {
-+ struct vin_line *l = &vin_dev->line[i];
-+ int is_mp;
-+
-+ /* Multi-planar would suit the ISP lines, but force single-plane for now */
-+ is_mp = false;
-+ if (stf_vin_map_isp_line(i) == STF_ISP_LINE_SRC_ITIR)
-+ l->video_out.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
-+ else
-+ l->video_out.type = is_mp ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
-+ V4L2_BUF_TYPE_VIDEO_CAPTURE;
-+ l->video_out.stfcamss = stfcamss;
-+ l->id = i;
-+ l->sdev_type = VIN_DEV_TYPE;
-+ l->formats = vin2_formats_table[i].fmts;
-+ l->nformats = vin2_formats_table[i].nfmts;
-+ spin_lock_init(&l->output_lock);
-+
-+ mutex_init(&l->stream_lock);
-+ l->stream_count = 0;
-+ mutex_init(&l->power_lock);
-+ l->power_count = 0;
-+ }
-+
-+ return 0;
-+out:
-+ return ret;
-+}
-+
-+static int vin_set_power(struct v4l2_subdev *sd, int on)
-+{
-+ struct vin_line *line = v4l2_get_subdevdata(sd);
-+ struct stf_vin2_dev *vin_dev = line_to_vin2_dev(line);
-+ struct stfcamss *stfcamss = vin_dev->stfcamss;
-+
-+ mutex_lock(&line->power_lock);
-+ if (on) {
-+ if (line->power_count == 0)
-+ vin_init_outputs(line);
-+ line->power_count++;
-+ } else {
-+ if (line->power_count == 0) {
-+ st_err(ST_VIN,
-+ "line power off on power_count == 0\n");
-+ goto exit_line;
-+ }
-+ line->power_count--;
-+ }
-+exit_line:
-+ mutex_unlock(&line->power_lock);
-+
-+ mutex_lock(&vin_dev->power_lock);
-+ if (on) {
-+ if (vin_dev->power_count == 0) {
-+ pm_runtime_get_sync(stfcamss->dev);
-+ vin_dev->hw_ops->vin_clk_enable(vin_dev);
-+ vin_dev->hw_ops->vin_config_set(vin_dev);
-+ }
-+ vin_dev->power_count++;
-+ } else {
-+ if (vin_dev->power_count == 0) {
-+ st_err(ST_VIN,
-+ "vin_dev power off on power_count == 0\n");
-+ goto exit;
-+ }
-+ if (vin_dev->power_count == 1) {
-+ vin_dev->hw_ops->vin_clk_disable(vin_dev);
-+ pm_runtime_put_sync(stfcamss->dev);
-+ }
-+ vin_dev->power_count--;
-+ }
-+exit:
-+
-+ mutex_unlock(&vin_dev->power_lock);
-+
-+ return 0;
-+}
-+
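-+/*
-+ * Frames to drop at stream-on: the sensor's own skip count plus roughly one
-+ * second worth of frames so the ISP control loop can settle, clamped to
-+ * [VIN_FRAME_DROP_MIN_VAL, VIN_FRAME_DROP_MAX_VAL].
-+ */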
-+static unsigned int get_frame_skip(struct vin_line *line)
-+{
-+ unsigned int frame_skip = 0;
-+ unsigned int isp_ctrl_skip_frames = 0;
-+ struct media_entity *sensor;
-+ struct v4l2_subdev_frame_interval fi;
-+
-+ sensor = stfcamss_find_sensor(&line->subdev.entity);
-+ if (sensor) {
-+ int fps = 0;
-+ struct v4l2_subdev *subdev =
-+ media_entity_to_v4l2_subdev(sensor);
-+
-+ if (!v4l2_subdev_call(subdev, video, g_frame_interval, &fi))
-+ fps = fi.interval.denominator;
-+
-+ if (fps > 0 && fps <= VIN_FRAME_PER_SEC_MAX_VAL)
-+ isp_ctrl_skip_frames = fps * VIN_FRAME_DROP_SEC_FOR_ISP_CTRL;
-+ if (!fps)
-+ st_debug(ST_VIN, "%s: failed to get sensor fps\n", __func__);
-+
-+ if (isp_ctrl_skip_frames <= VIN_FRAME_DROP_MIN_VAL)
-+ isp_ctrl_skip_frames = VIN_FRAME_DROP_MIN_VAL;
-+
-+ v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
-+
-+ frame_skip += isp_ctrl_skip_frames;
-+
-+ if (frame_skip > VIN_FRAME_DROP_MAX_VAL)
-+ frame_skip = VIN_FRAME_DROP_MAX_VAL;
-+ st_debug(ST_VIN, "%s, frame_skip %d\n", __func__, frame_skip);
-+ }
-+
-+ return frame_skip;
-+}
-+
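-+/*
-+ * Flush the SiFive L2 cache range of every pending buffer so memory is
-+ * coherent with the DMA engine before capture starts.
-+ */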
-+static void vin_buf_l2cache_flush(struct vin_output *output)
-+{
-+ struct stfcamss_buffer *buffer = NULL;
-+
-+ if (!list_empty(&output->pending_bufs)) {
-+ list_for_each_entry(buffer, &output->pending_bufs, queue) {
-+ sifive_l2_flush64_range(buffer->addr[0], buffer->sizeimage);
-+ }
-+ }
-+}
-+
-+static int vin_enable_output(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&line->output_lock, flags);
-+
-+ output->state = VIN_OUTPUT_IDLE;
-+
-+ vin_buf_l2cache_flush(output);
-+
-+ output->buf[0] = vin_buf_get_pending(output);
-+#ifdef VIN_TWO_BUFFER
-+ if (line->id == VIN_LINE_WR)
-+ output->buf[1] = vin_buf_get_pending(output);
-+#endif
-+ if (!output->buf[0] && output->buf[1]) {
-+ output->buf[0] = output->buf[1];
-+ output->buf[1] = NULL;
-+ }
-+
-+ if (output->buf[0])
-+ output->state = VIN_OUTPUT_SINGLE;
-+
-+#ifdef VIN_TWO_BUFFER
-+ if (output->buf[1] && line->id == VIN_LINE_WR)
-+ output->state = VIN_OUTPUT_CONTINUOUS;
-+#endif
-+ output->sequence = 0;
-+
-+ vin_output_init_addrs(line);
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+ return 0;
-+}
-+
-+static int vin_disable_output(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&line->output_lock, flags);
-+
-+ output->state = VIN_OUTPUT_OFF;
-+
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+ return 0;
-+}
-+
-+static u32 line_to_dummy_module(struct vin_line *line)
-+{
-+ u32 dummy_module = 0;
-+
-+ switch (line->id) {
-+ case VIN_LINE_WR:
-+ dummy_module = STF_DUMMY_VIN;
-+ break;
-+ case VIN_LINE_ISP:
-+ case VIN_LINE_ISP_SS0:
-+ case VIN_LINE_ISP_SS1:
-+ case VIN_LINE_ISP_ITIW:
-+ case VIN_LINE_ISP_ITIR:
-+ case VIN_LINE_ISP_RAW:
-+ case VIN_LINE_ISP_SCD_Y:
-+ dummy_module = STF_DUMMY_ISP;
-+ break;
-+ default:
-+ dummy_module = STF_DUMMY_VIN;
-+ break;
-+ }
-+
-+ return dummy_module;
-+}
-+
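-+/*
-+ * Allocate the dummy buffers that back each hardware write channel when no
-+ * user buffer is queued, sized per pad according to its output layout.
-+ */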
-+static int vin_alloc_dummy_buffer(struct stf_vin2_dev *vin_dev,
-+ struct v4l2_mbus_framefmt *fmt, int dummy_module)
-+{
-+ struct device *dev = vin_dev->stfcamss->dev;
-+ struct dummy_buffer *dummy_buffer = &vin_dev->dummy_buffer[dummy_module];
-+ struct vin_dummy_buffer *buffer = NULL;
-+ int ret = 0, i;
-+ u32 aligns;
-+
-+ for (i = 0; i < dummy_buffer->nums; i++) {
-+ buffer = &vin_dev->dummy_buffer[dummy_module].buffer[i];
-+ buffer->width = fmt->width;
-+ buffer->height = fmt->height;
-+ buffer->mcode = fmt->code;
-+ if (i == STF_VIN_PAD_SINK) {
-+ aligns = ALIGN(fmt->width * 4, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+ buffer->buffer_size = PAGE_ALIGN(aligns * fmt->height);
-+ } else if (i == STF_ISP_PAD_SRC
-+ || i == STF_ISP_PAD_SRC_SS0
-+ || i == STF_ISP_PAD_SRC_SS1) {
-+ aligns = ALIGN(fmt->width, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+ buffer->buffer_size = PAGE_ALIGN(aligns * fmt->height * 3 / 2);
-+ } else if (i == STF_ISP_PAD_SRC_ITIW
-+ || i == STF_ISP_PAD_SRC_ITIR) {
-+ aligns = ALIGN(fmt->width, STFCAMSS_FRAME_WIDTH_ALIGN_8);
-+ buffer->buffer_size = PAGE_ALIGN(aligns * fmt->height * 3);
-+ } else if (i == STF_ISP_PAD_SRC_RAW) {
-+ aligns = ALIGN(fmt->width * ISP_RAW_DATA_BITS / 8,
-+ STFCAMSS_FRAME_WIDTH_ALIGN_128);
-+ buffer->buffer_size = PAGE_ALIGN(aligns * fmt->height);
-+ } else if (i == STF_ISP_PAD_SRC_SCD_Y)
-+ buffer->buffer_size = PAGE_ALIGN(ISP_SCD_Y_BUFFER_SIZE);
-+ else
-+ continue;
-+
-+ buffer->vaddr = dma_alloc_coherent(dev, buffer->buffer_size,
-+ &buffer->paddr[0], GFP_DMA | GFP_KERNEL);
-+
-+ if (buffer->vaddr) {
-+ if (i == STF_ISP_PAD_SRC
-+ || i == STF_ISP_PAD_SRC_SS0
-+ || i == STF_ISP_PAD_SRC_SS1
-+ || i == STF_ISP_PAD_SRC_ITIW
-+ || i == STF_ISP_PAD_SRC_ITIR)
-+ buffer->paddr[1] = (dma_addr_t)(buffer->paddr[0] +
-+ aligns * fmt->height);
-+ else if (i == STF_ISP_PAD_SRC_SCD_Y)
-+ buffer->paddr[1] = (dma_addr_t)(buffer->paddr[0] +
-+ ISP_YHIST_BUFFER_SIZE);
-+ else
-+ st_debug(ST_VIN, "single plane\n");
-+ }
-+ {
-+ char szPadName[][32] = {
-+ "VIN_PAD_SINK",
-+ "ISP_PAD_SRC",
-+ "ISP_PAD_SRC_SS0",
-+ "ISP_PAD_SRC_SS1",
-+ "ISP_PAD_SRC_ITIW",
-+ "ISP_PAD_SRC_ITIR",
-+ "ISP_PAD_SRC_RAW",
-+ "ISP_PAD_SRC_SCD_Y",
-+ "Unknown Pad"
-+ };
-+
-+ st_debug(ST_VIN, "%s: i = %d(%s) addr[0] = %llx, addr[1] = %llx, size = %u bytes\n",
-+ __func__,
-+ i,
-+ szPadName[i],
-+ buffer->paddr[0],
-+ buffer->paddr[1],
-+ buffer->buffer_size
-+ );
-+ }
-+ }
-+
-+ return ret;
-+}
-+
-+static void vin_free_dummy_buffer(struct stf_vin2_dev *vin_dev, int dummy_module)
-+{
-+ struct device *dev = vin_dev->stfcamss->dev;
-+ struct dummy_buffer *dummy_buffer = &vin_dev->dummy_buffer[dummy_module];
-+ struct vin_dummy_buffer *buffer = NULL;
-+ int i;
-+
-+ for (i = 0; i < dummy_buffer->nums; i++) {
-+ buffer = &dummy_buffer->buffer[i];
-+ if (buffer->vaddr)
-+ dma_free_coherent(dev, buffer->buffer_size,
-+ buffer->vaddr, buffer->paddr[0]);
-+ memset(buffer, 0, sizeof(struct vin_dummy_buffer));
-+ }
-+}
-+
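-+/*
-+ * Program the write address registers with dummy buffer addresses: all
-+ * outputs for the sink pad, or just the given ISP source pad.
-+ */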
-+static void vin_set_dummy_buffer(struct vin_line *line, u32 pad)
-+{
-+ struct stf_vin2_dev *vin_dev = line_to_vin2_dev(line);
-+ int dummy_module = line_to_dummy_module(line);
-+ struct dummy_buffer *dummy_buffer = &vin_dev->dummy_buffer[dummy_module];
-+ struct vin_dummy_buffer *buffer = NULL;
-+
-+ switch (pad) {
-+ case STF_VIN_PAD_SINK:
-+ if (line->id == VIN_LINE_WR) {
-+ buffer = &dummy_buffer->buffer[STF_VIN_PAD_SINK];
-+ vin_dev->hw_ops->vin_wr_set_ping_addr(vin_dev, buffer->paddr[0]);
-+ vin_dev->hw_ops->vin_wr_set_pong_addr(vin_dev, buffer->paddr[0]);
-+ } else {
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC];
-+ vin_dev->hw_ops->vin_isp_set_yuv_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_SS0];
-+ vin_dev->hw_ops->vin_isp_set_ss0_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_SS1];
-+ vin_dev->hw_ops->vin_isp_set_ss1_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_ITIW];
-+ vin_dev->hw_ops->vin_isp_set_itiw_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_ITIR];
-+ vin_dev->hw_ops->vin_isp_set_itir_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_RAW];
-+ vin_dev->hw_ops->vin_isp_set_raw_addr(vin_dev, buffer->paddr[0]);
-+
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_SCD_Y];
-+ vin_dev->hw_ops->vin_isp_set_scd_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1], AWB_TYPE);
-+ }
-+ break;
-+ case STF_ISP_PAD_SRC:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC];
-+ vin_dev->hw_ops->vin_isp_set_yuv_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+ break;
-+ case STF_ISP_PAD_SRC_SS0:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_SS0];
-+ vin_dev->hw_ops->vin_isp_set_ss0_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+ break;
-+ case STF_ISP_PAD_SRC_SS1:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_SS1];
-+ vin_dev->hw_ops->vin_isp_set_ss1_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+ break;
-+ case STF_ISP_PAD_SRC_ITIW:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_ITIW];
-+ vin_dev->hw_ops->vin_isp_set_itiw_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+ break;
-+ case STF_ISP_PAD_SRC_ITIR:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_ITIR];
-+ vin_dev->hw_ops->vin_isp_set_itir_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1]);
-+ break;
-+ case STF_ISP_PAD_SRC_RAW:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_RAW];
-+ vin_dev->hw_ops->vin_isp_set_raw_addr(vin_dev, buffer->paddr[0]);
-+ break;
-+ case STF_ISP_PAD_SRC_SCD_Y:
-+ buffer = &dummy_buffer->buffer[STF_ISP_PAD_SRC_SCD_Y];
-+ vin_dev->hw_ops->vin_isp_set_scd_addr(vin_dev,
-+ buffer->paddr[0], buffer->paddr[1], AWB_TYPE);
-+ break;
-+ default:
-+ break;
-+ }
-+}
-+
-+static int vin_set_stream(struct v4l2_subdev *sd, int enable)
-+{
-+ struct vin_line *line = v4l2_get_subdevdata(sd);
-+ struct stf_vin2_dev *vin_dev = line_to_vin2_dev(line);
-+ int dummy_module = line_to_dummy_module(line);
-+ struct dummy_buffer *dummy_buffer = &vin_dev->dummy_buffer[dummy_module];
-+ struct v4l2_mbus_framefmt *fmt;
-+
-+ st_debug(ST_VIN, "%s, %d\n", __func__, __LINE__);
-+ fmt = __vin_get_format(line, NULL, STF_VIN_PAD_SINK, V4L2_SUBDEV_FORMAT_ACTIVE);
-+ mutex_lock(&dummy_buffer->stream_lock);
-+ if (enable) {
-+ if (dummy_buffer->stream_count == 0) {
-+ vin_alloc_dummy_buffer(vin_dev, fmt, dummy_module);
-+ vin_set_dummy_buffer(line, STF_VIN_PAD_SINK);
-+ atomic_set(&dummy_buffer->frame_skip, get_frame_skip(line));
-+ }
-+ dummy_buffer->stream_count++;
-+ } else {
-+ if (dummy_buffer->stream_count == 1) {
-+ vin_free_dummy_buffer(vin_dev, dummy_module);
-+ /* point the write channels back at the now zeroed dummy buffers */
-+ vin_set_dummy_buffer(line, STF_VIN_PAD_SINK);
-+ } else {
-+ vin_set_dummy_buffer(line,
-+ stf_vin_map_isp_pad(line->id, STF_ISP_PAD_SINK));
-+ }
-+
-+ dummy_buffer->stream_count--;
-+ }
-+ mutex_unlock(&dummy_buffer->stream_lock);
-+
-+ mutex_lock(&line->stream_lock);
-+ if (enable) {
-+ if (line->stream_count == 0) {
-+ if (line->id == VIN_LINE_WR) {
-+ vin_dev->hw_ops->vin_wr_irq_enable(vin_dev, 1);
-+ vin_dev->hw_ops->vin_wr_stream_set(vin_dev, 1);
-+ }
-+ }
-+ line->stream_count++;
-+ } else {
-+ if (line->stream_count == 1) {
-+ if (line->id == VIN_LINE_WR) {
-+ vin_dev->hw_ops->vin_wr_irq_enable(vin_dev, 0);
-+ vin_dev->hw_ops->vin_wr_stream_set(vin_dev, 0);
-+ }
-+ }
-+ line->stream_count--;
-+ }
-+ mutex_unlock(&line->stream_lock);
-+
-+ if (enable)
-+ vin_enable_output(line);
-+ else
-+ vin_disable_output(line);
-+
-+ return 0;
-+}
-+
-+static struct v4l2_mbus_framefmt *
-+__vin_get_format(struct vin_line *line,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ if (which == V4L2_SUBDEV_FORMAT_TRY)
-+ return v4l2_subdev_get_try_format(&line->subdev, state, pad);
-+ return &line->fmt[pad];
-+}
-+
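-+/*
-+ * Clamp the requested format to the supported mbus codes and frame size
-+ * limits on the sink pad; the source pad simply mirrors the sink format.
-+ */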
-+static void vin_try_format(struct vin_line *line,
-+ struct v4l2_subdev_state *state,
-+ unsigned int pad,
-+ struct v4l2_mbus_framefmt *fmt,
-+ enum v4l2_subdev_format_whence which)
-+{
-+ unsigned int i;
-+
-+ switch (pad) {
-+ case STF_VIN_PAD_SINK:
-+ /* Set format on sink pad */
-+
-+ for (i = 0; i < line->nformats; i++)
-+ if (fmt->code == line->formats[i].code)
-+ break;
-+
-+ /* If not found, fall back to the first supported code */
-+ if (i >= line->nformats)
-+ fmt->code = line->formats[0].code;
-+
-+ fmt->width = clamp_t(u32,
-+ fmt->width, STFCAMSS_FRAME_MIN_WIDTH, STFCAMSS_FRAME_MAX_WIDTH);
-+ fmt->height = clamp_t(u32,
-+ fmt->height, STFCAMSS_FRAME_MIN_HEIGHT, STFCAMSS_FRAME_MAX_HEIGHT);
-+
-+ fmt->field = V4L2_FIELD_NONE;
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+ fmt->flags = 0;
-+
-+ break;
-+
-+ case STF_VIN_PAD_SRC:
-+ /* Set and return the same format as on the sink pad */
-+ *fmt = *__vin_get_format(line, state, STF_VIN_PAD_SINK, which);
-+ break;
-+ }
-+
-+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
-+}
-+
-+static int vin_enum_mbus_code(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_mbus_code_enum *code)
-+{
-+ struct vin_line *line = v4l2_get_subdevdata(sd);
-+
-+ if (code->index >= line->nformats)
-+ return -EINVAL;
-+ if (code->pad == STF_VIN_PAD_SINK) {
-+ code->code = line->formats[code->index].code;
-+ } else {
-+ struct v4l2_mbus_framefmt *sink_fmt;
-+
-+ sink_fmt = __vin_get_format(line, state, STF_VIN_PAD_SINK,
-+ code->which);
-+
-+ code->code = sink_fmt->code;
-+ if (!code->code)
-+ return -EINVAL;
-+ }
-+ code->flags = 0;
-+
-+ return 0;
-+}
-+
-+static int vin_enum_frame_size(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_frame_size_enum *fse)
-+{
-+ struct vin_line *line = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt format;
-+
-+ if (fse->index != 0)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = 1;
-+ format.height = 1;
-+ vin_try_format(line, state, fse->pad, &format, fse->which);
-+ fse->min_width = format.width;
-+ fse->min_height = format.height;
-+
-+ if (format.code != fse->code)
-+ return -EINVAL;
-+
-+ format.code = fse->code;
-+ format.width = -1;
-+ format.height = -1;
-+ vin_try_format(line, state, fse->pad, &format, fse->which);
-+ fse->max_width = format.width;
-+ fse->max_height = format.height;
-+
-+ return 0;
-+}
-+
-+static int vin_get_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct vin_line *line = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ format = __vin_get_format(line, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ fmt->format = *format;
-+
-+ return 0;
-+}
-+
-+static int vin_set_format(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_state *state,
-+ struct v4l2_subdev_format *fmt)
-+{
-+ struct vin_line *line = v4l2_get_subdevdata(sd);
-+ struct v4l2_mbus_framefmt *format;
-+
-+ st_debug(ST_VIDEO, "%s, pad %d, fmt code %x\n",
-+ __func__, fmt->pad, fmt->format.code);
-+
-+ format = __vin_get_format(line, state, fmt->pad, fmt->which);
-+ if (format == NULL)
-+ return -EINVAL;
-+
-+ mutex_lock(&line->stream_lock);
-+ if (line->stream_count) {
-+ fmt->format = *format;
-+ mutex_unlock(&line->stream_lock);
-+ goto out;
-+ } else {
-+ vin_try_format(line, state, fmt->pad, &fmt->format, fmt->which);
-+ *format = fmt->format;
-+ }
-+ mutex_unlock(&line->stream_lock);
-+
-+ if (fmt->pad == STF_VIN_PAD_SINK) {
-+ /* Propagate the format from sink to source */
-+ format = __vin_get_format(line, state, STF_VIN_PAD_SRC,
-+ fmt->which);
-+
-+ *format = fmt->format;
-+ vin_try_format(line, state, STF_VIN_PAD_SRC, format,
-+ fmt->which);
-+ }
-+
-+out:
-+ return 0;
-+}
-+
-+static int vin_init_formats(struct v4l2_subdev *sd,
-+ struct v4l2_subdev_fh *fh)
-+{
-+ struct v4l2_subdev_format format = {
-+ .pad = STF_VIN_PAD_SINK,
-+ .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
-+ V4L2_SUBDEV_FORMAT_ACTIVE,
-+ .format = {
-+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
-+ .width = 1920,
-+ .height = 1080
-+ }
-+ };
-+
-+ return vin_set_format(sd, fh ? fh->state : NULL, &format);
-+}
-+
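-+/*
-+ * Program the initial DMA addresses for this line from the first queued
-+ * buffer(s). The write channel uses ping/pong addresses (pong falls back to
-+ * ping when only one buffer is queued); the ISP lines use the Y/UV, raw or
-+ * statistics addresses of the first buffer.
-+ */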
-+static void vin_output_init_addrs(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+ struct stf_vin2_dev *vin_dev = line_to_vin2_dev(line);
-+ dma_addr_t ping_addr;
-+ dma_addr_t pong_addr;
-+ dma_addr_t y_addr, uv_addr;
-+
-+ output->active_buf = 0;
-+
-+ if (output->buf[0]) {
-+ ping_addr = output->buf[0]->addr[0];
-+ y_addr = output->buf[0]->addr[0];
-+ uv_addr = output->buf[0]->addr[1];
-+ } else
-+ return;
-+
-+ if (output->buf[1])
-+ pong_addr = output->buf[1]->addr[0];
-+ else
-+ pong_addr = ping_addr;
-+
-+ switch (stf_vin_map_isp_line(line->id)) {
-+ case STF_ISP_LINE_SRC:
-+ vin_dev->hw_ops->vin_isp_set_yuv_addr(vin_dev,
-+ y_addr, uv_addr);
-+ break;
-+ case STF_ISP_LINE_SRC_SS0:
-+ vin_dev->hw_ops->vin_isp_set_ss0_addr(vin_dev,
-+ y_addr, uv_addr);
-+ break;
-+ case STF_ISP_LINE_SRC_SS1:
-+ vin_dev->hw_ops->vin_isp_set_ss1_addr(vin_dev,
-+ y_addr, uv_addr);
-+ break;
-+ case STF_ISP_LINE_SRC_ITIW:
-+ vin_dev->hw_ops->vin_isp_set_itiw_addr(vin_dev,
-+ y_addr, uv_addr);
-+ break;
-+ case STF_ISP_LINE_SRC_ITIR:
-+ vin_dev->hw_ops->vin_isp_set_itir_addr(vin_dev,
-+ y_addr, uv_addr);
-+ break;
-+ case STF_ISP_LINE_SRC_RAW:
-+ vin_dev->hw_ops->vin_isp_set_raw_addr(vin_dev, y_addr);
-+ break;
-+ case STF_ISP_LINE_SRC_SCD_Y:
-+ output->frame_skip = ISP_AWB_OECF_SKIP_FRAME;
-+ vin_dev->hw_ops->vin_isp_set_scd_addr(vin_dev,
-+ y_addr, uv_addr, AWB_TYPE);
-+ break;
-+ default:
-+ if (line->id == VIN_LINE_WR) {
-+ vin_dev->hw_ops->vin_wr_set_ping_addr(vin_dev, ping_addr);
-+#ifdef VIN_TWO_BUFFER
-+ vin_dev->hw_ops->vin_wr_set_pong_addr(vin_dev, pong_addr);
-+#else
-+ vin_dev->hw_ops->vin_wr_set_pong_addr(vin_dev, ping_addr);
-+#endif
-+ }
-+ break;
-+ }
-+}
-+
-+static void vin_init_outputs(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+
-+ output->state = VIN_OUTPUT_OFF;
-+ output->buf[0] = NULL;
-+ output->buf[1] = NULL;
-+ output->active_buf = 0;
-+ INIT_LIST_HEAD(&output->pending_bufs);
-+ INIT_LIST_HEAD(&output->ready_bufs);
-+}
-+
-+static void vin_buf_add_ready(struct vin_output *output,
-+ struct stfcamss_buffer *buffer)
-+{
-+ INIT_LIST_HEAD(&buffer->queue);
-+ list_add_tail(&buffer->queue, &output->ready_bufs);
-+}
-+
-+static struct stfcamss_buffer *vin_buf_get_ready(struct vin_output *output)
-+{
-+ struct stfcamss_buffer *buffer = NULL;
-+
-+ if (!list_empty(&output->ready_bufs)) {
-+ buffer = list_first_entry(&output->ready_bufs,
-+ struct stfcamss_buffer,
-+ queue);
-+ list_del(&buffer->queue);
-+ }
-+
-+ return buffer;
-+}
-+
-+static void vin_buf_add_pending(struct vin_output *output,
-+ struct stfcamss_buffer *buffer)
-+{
-+ INIT_LIST_HEAD(&buffer->queue);
-+ list_add_tail(&buffer->queue, &output->pending_bufs);
-+}
-+
-+static struct stfcamss_buffer *vin_buf_get_pending(struct vin_output *output)
-+{
-+ struct stfcamss_buffer *buffer = NULL;
-+
-+ if (!list_empty(&output->pending_bufs)) {
-+ buffer = list_first_entry(&output->pending_bufs,
-+ struct stfcamss_buffer,
-+ queue);
-+ list_del(&buffer->queue);
-+ }
-+
-+ return buffer;
-+}
-+
-+#ifdef UNUSED_CODE
-+static void vin_output_checkpending(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+
-+ if (output->state == VIN_OUTPUT_STOPPING) {
-+ /* Release last buffer when hw is idle */
-+ if (output->last_buffer) {
-+ // vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
-+ // VB2_BUF_STATE_DONE);
-+ vin_buf_add_pending(output, output->last_buffer);
-+ output->last_buffer = NULL;
-+ }
-+ output->state = VIN_OUTPUT_IDLE;
-+
-+ /*
-+ * Buffers received while stopping were queued on the DMA
-+ * pending queue; start the next capture from them here.
-+ */
-+ output->buf[0] = vin_buf_get_pending(output);
-+#ifdef VIN_TWO_BUFFER
-+ if (line->id == VIN_LINE_WR)
-+ output->buf[1] = vin_buf_get_pending(output);
-+#endif
-+
-+ if (!output->buf[0] && output->buf[1]) {
-+ output->buf[0] = output->buf[1];
-+ output->buf[1] = NULL;
-+ }
-+
-+ if (output->buf[0])
-+ output->state = VIN_OUTPUT_SINGLE;
-+
-+#ifdef VIN_TWO_BUFFER
-+ if (output->buf[1] && line->id == VIN_LINE_WR)
-+ output->state = VIN_OUTPUT_CONTINUOUS;
-+#endif
-+ vin_output_init_addrs(line);
-+ }
-+}
-+#endif
-+
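-+/*
-+ * No pending buffer is available to replace the active one: step the output
-+ * state machine from CONTINUOUS down to SINGLE, or from SINGLE to STOPPING.
-+ */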
-+static void vin_buf_update_on_last(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+
-+ switch (output->state) {
-+ case VIN_OUTPUT_CONTINUOUS:
-+ output->state = VIN_OUTPUT_SINGLE;
-+ output->active_buf = !output->active_buf;
-+ break;
-+ case VIN_OUTPUT_SINGLE:
-+ output->state = VIN_OUTPUT_STOPPING;
-+ break;
-+ default:
-+ st_err_ratelimited(ST_VIN,
-+ "Last buff in wrong state! %d\n",
-+ output->state);
-+ break;
-+ }
-+}
-+
-+static void vin_buf_update_on_next(struct vin_line *line)
-+{
-+ struct vin_output *output = &line->output;
-+
-+ switch (output->state) {
-+ case VIN_OUTPUT_CONTINUOUS:
-+ output->active_buf = !output->active_buf;
-+ break;
-+ case VIN_OUTPUT_SINGLE:
-+ default:
-+#ifdef VIN_TWO_BUFFER
-+ if (line->id == VIN_LINE_WR)
-+ st_err_ratelimited(ST_VIN,
-+ "Next buf in wrong state! %d\n",
-+ output->state);
-+#endif
-+ break;
-+ }
-+}
-+
-+static void vin_buf_update_on_new(struct vin_line *line,
-+ struct vin_output *output,
-+ struct stfcamss_buffer *new_buf)
-+{
-+#ifdef VIN_TWO_BUFFER
-+ struct stf_vin2_dev *vin_dev = line_to_vin2_dev(line);
-+ int inactive_idx;
-+#endif
-+
-+ switch (output->state) {
-+ case VIN_OUTPUT_SINGLE:
-+#ifdef VIN_TWO_BUFFER
-+ inactive_idx = !output->active_buf;
-+
-+ if (!output->buf[inactive_idx] && line->id == VIN_LINE_WR) {
-+ output->buf[inactive_idx] = new_buf;
-+ if (inactive_idx)
-+ vin_dev->hw_ops->vin_wr_set_pong_addr(vin_dev,
-+ output->buf[1]->addr[0]);
-+ else
-+ vin_dev->hw_ops->vin_wr_set_ping_addr(vin_dev,
-+ output->buf[0]->addr[0]);
-+ output->state = VIN_OUTPUT_CONTINUOUS;
-+
-+ } else {
-+ vin_buf_add_pending(output, new_buf);
-+ if (line->id == VIN_LINE_WR)
-+ st_warn(ST_VIN, "Inactive buffer is busy\n");
-+ }
-+#else
-+ vin_buf_add_pending(output, new_buf);
-+#endif
-+ break;
-+ case VIN_OUTPUT_IDLE:
-+ st_warn(ST_VIN, "Output idle buffer set!\n");
-+ if (!output->buf[0]) {
-+ output->buf[0] = new_buf;
-+ vin_output_init_addrs(line);
-+ output->state = VIN_OUTPUT_SINGLE;
-+ } else {
-+ vin_buf_add_pending(output, new_buf);
-+ st_warn(ST_VIN, "Output idle with buffer set!\n");
-+ }
-+ break;
-+ case VIN_OUTPUT_STOPPING:
-+ if (output->last_buffer) {
-+ output->buf[output->active_buf] = output->last_buffer;
-+ output->last_buffer = NULL;
-+ } else
-+ st_err(ST_VIN, "stop state lost lastbuffer!\n");
-+ output->state = VIN_OUTPUT_SINGLE;
-+ // vin_output_checkpending(line);
-+ vin_buf_add_pending(output, new_buf);
-+ break;
-+ case VIN_OUTPUT_CONTINUOUS:
-+ default:
-+ vin_buf_add_pending(output, new_buf);
-+ break;
-+ }
-+}
-+
-+static void vin_buf_flush(struct vin_output *output,
-+ enum vb2_buffer_state state)
-+{
-+ struct stfcamss_buffer *buf;
-+ struct stfcamss_buffer *t;
-+
-+ list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
-+ vb2_buffer_done(&buf->vb.vb2_buf, state);
-+ list_del(&buf->queue);
-+ }
-+ list_for_each_entry_safe(buf, t, &output->ready_bufs, queue) {
-+ vb2_buffer_done(&buf->vb.vb2_buf, state);
-+ list_del(&buf->queue);
-+ }
-+}
-+
-+static void vin_buffer_done(struct vin_line *line, struct vin_params *params)
-+{
-+ struct stfcamss_buffer *ready_buf;
-+ struct vin_output *output = &line->output;
-+ unsigned long flags;
-+ u64 ts = ktime_get_ns();
-+ struct v4l2_event event = {
-+ .type = V4L2_EVENT_FRAME_SYNC,
-+ };
-+
-+ if (output->state == VIN_OUTPUT_OFF
-+ || output->state == VIN_OUTPUT_RESERVED)
-+ return;
-+
-+ spin_lock_irqsave(&line->output_lock, flags);
-+
-+ while ((ready_buf = vin_buf_get_ready(output))) {
-+ if (line->id >= VIN_LINE_ISP && line->id <= VIN_LINE_ISP_SS1) {
-+ event.u.frame_sync.frame_sequence = output->sequence;
-+ v4l2_event_queue(line->subdev.devnode, &event);
-+ }
-+
-+ ready_buf->vb.vb2_buf.timestamp = ts;
-+ ready_buf->vb.sequence = output->sequence++;
-+
-+ /* Buffers mapped with mmap (e.g. by stf_isp_ctrl) are not
-+ * cache coherent by default, so flush the L2 cache range to
-+ * make sure the CPU sees the data written by the hardware.
-+ */
-+ if (ready_buf->vb.vb2_buf.memory == VB2_MEMORY_MMAP)
-+ sifive_l2_flush64_range(ready_buf->addr[0], ready_buf->sizeimage);
-+
-+ vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
-+ }
-+
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+}
-+
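-+/*
-+ * Frame-done handling: take the completed buffer out of the active slot,
-+ * program the next pending buffer (or reuse the same address when none is
-+ * queued) and move the finished buffer to the ready list for
-+ * vin_buffer_done().
-+ */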
-+static void vin_change_buffer(struct vin_line *line)
-+{
-+ struct stfcamss_buffer *ready_buf;
-+ struct vin_output *output = &line->output;
-+ struct stf_vin2_dev *vin_dev = line_to_vin2_dev(line);
-+ dma_addr_t *new_addr;
-+ unsigned long flags;
-+ u32 active_index;
-+ int scd_type;
-+
-+ if (output->state == VIN_OUTPUT_OFF
-+ || output->state == VIN_OUTPUT_STOPPING
-+ || output->state == VIN_OUTPUT_RESERVED
-+ || output->state == VIN_OUTPUT_IDLE)
-+ return;
-+
-+ spin_lock_irqsave(&line->output_lock, flags);
-+
-+ active_index = output->active_buf;
-+
-+ ready_buf = output->buf[active_index];
-+ if (!ready_buf) {
-+ st_err_ratelimited(ST_VIN,
-+ "Missing ready buf %d %d!\n",
-+ active_index, output->state);
-+ active_index = !active_index;
-+ ready_buf = output->buf[active_index];
-+ if (!ready_buf) {
-+ st_err_ratelimited(ST_VIN,
-+ "Missing ready buf 2 %d %d!\n",
-+ active_index, output->state);
-+ goto out_unlock;
-+ }
-+ }
-+
-+ /* Get next buffer */
-+ output->buf[active_index] = vin_buf_get_pending(output);
-+ if (!output->buf[active_index]) {
-+ /* No next buffer - set same address */
-+ new_addr = ready_buf->addr;
-+ vin_buf_update_on_last(line);
-+ } else {
-+ new_addr = output->buf[active_index]->addr;
-+ vin_buf_update_on_next(line);
-+ }
-+
-+ if (output->state == VIN_OUTPUT_STOPPING)
-+ output->last_buffer = ready_buf;
-+ else {
-+ switch (stf_vin_map_isp_line(line->id)) {
-+ case STF_ISP_LINE_SRC:
-+ vin_dev->hw_ops->vin_isp_set_yuv_addr(vin_dev,
-+ new_addr[0], new_addr[1]);
-+ break;
-+ case STF_ISP_LINE_SRC_SS0:
-+ vin_dev->hw_ops->vin_isp_set_ss0_addr(vin_dev,
-+ new_addr[0], new_addr[1]);
-+ break;
-+ case STF_ISP_LINE_SRC_SS1:
-+ vin_dev->hw_ops->vin_isp_set_ss1_addr(vin_dev,
-+ new_addr[0], new_addr[1]);
-+ break;
-+ case STF_ISP_LINE_SRC_ITIW:
-+ vin_dev->hw_ops->vin_isp_set_itiw_addr(vin_dev,
-+ new_addr[0], new_addr[1]);
-+ break;
-+ case STF_ISP_LINE_SRC_ITIR:
-+ vin_dev->hw_ops->vin_isp_set_itir_addr(vin_dev,
-+ new_addr[0], new_addr[1]);
-+ break;
-+ case STF_ISP_LINE_SRC_RAW:
-+ vin_dev->hw_ops->vin_isp_set_raw_addr(vin_dev, new_addr[0]);
-+ break;
-+ case STF_ISP_LINE_SRC_SCD_Y:
-+ scd_type = vin_dev->hw_ops->vin_isp_get_scd_type(vin_dev);
-+ ready_buf->vb.flags &= ~(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME);
-+ if (scd_type == AWB_TYPE)
-+ ready_buf->vb.flags |= V4L2_BUF_FLAG_PFRAME;
-+ else
-+ ready_buf->vb.flags |= V4L2_BUF_FLAG_BFRAME;
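-+ /*
-+ * Keep the current statistics type for ISP_AWB_OECF_SKIP_FRAME
-+ * frames, then toggle between AWB and OECF collection.
-+ */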
-+ if (!output->frame_skip) {
-+ output->frame_skip = ISP_AWB_OECF_SKIP_FRAME;
-+ scd_type = scd_type == AWB_TYPE ? OECF_TYPE : AWB_TYPE;
-+ } else {
-+ output->frame_skip--;
-+ scd_type = scd_type == AWB_TYPE ? AWB_TYPE : OECF_TYPE;
-+ }
-+ vin_dev->hw_ops->vin_isp_set_scd_addr(vin_dev,
-+ new_addr[0], new_addr[1], scd_type);
-+ break;
-+ default:
-+ if (line->id == VIN_LINE_WR) {
-+#ifdef VIN_TWO_BUFFER
-+ if (active_index)
-+ vin_dev->hw_ops->vin_wr_set_pong_addr(vin_dev,
-+ new_addr[0]);
-+ else
-+ vin_dev->hw_ops->vin_wr_set_ping_addr(vin_dev,
-+ new_addr[0]);
-+#else
-+ vin_dev->hw_ops->vin_wr_set_ping_addr(vin_dev,
-+ new_addr[0]);
-+ vin_dev->hw_ops->vin_wr_set_pong_addr(vin_dev,
-+ new_addr[0]);
-+#endif
-+ }
-+ break;
-+ }
-+
-+ vin_buf_add_ready(output, ready_buf);
-+ }
-+
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+ return;
-+
-+out_unlock:
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+}
-+
-+static int vin_queue_buffer(struct stfcamss_video *vid,
-+ struct stfcamss_buffer *buf)
-+{
-+ struct vin_line *line = container_of(vid, struct vin_line, video_out);
-+ struct vin_output *output;
-+ unsigned long flags;
-+
-+ output = &line->output;
-+
-+ spin_lock_irqsave(&line->output_lock, flags);
-+
-+ vin_buf_update_on_new(line, output, buf);
-+
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+
-+ return 0;
-+}
-+
-+static int vin_flush_buffers(struct stfcamss_video *vid,
-+ enum vb2_buffer_state state)
-+{
-+ struct vin_line *line = container_of(vid, struct vin_line, video_out);
-+ struct vin_output *output = &line->output;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&line->output_lock, flags);
-+
-+ vin_buf_flush(output, state);
-+ if (output->buf[0])
-+ vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
-+
-+ if (output->buf[1])
-+ vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
-+
-+ if (output->last_buffer) {
-+ vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
-+ output->last_buffer = NULL;
-+ }
-+ output->buf[0] = output->buf[1] = NULL;
-+
-+ spin_unlock_irqrestore(&line->output_lock, flags);
-+ return 0;
-+}
-+
-+static int vin_link_setup(struct media_entity *entity,
-+ const struct media_pad *local,
-+ const struct media_pad *remote, u32 flags)
-+{
-+ if (flags & MEDIA_LNK_FL_ENABLED)
-+ if (media_pad_remote_pad_first(local))
-+ return -EBUSY;
-+ return 0;
-+}
-+
-+static int stf_vin_subscribe_event(struct v4l2_subdev *sd,
-+ struct v4l2_fh *fh,
-+ struct v4l2_event_subscription *sub)
-+{
-+ switch (sub->type) {
-+ case V4L2_EVENT_FRAME_SYNC:
-+ return v4l2_event_subscribe(fh, sub, 0, NULL);
-+ default:
-+ st_debug(ST_VIN, "unsupported event subscription\n");
-+ return -EINVAL;
-+ }
-+}
-+
-+static const struct v4l2_subdev_core_ops vin_core_ops = {
-+ .s_power = vin_set_power,
-+ .subscribe_event = stf_vin_subscribe_event,
-+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
-+};
-+
-+static const struct v4l2_subdev_video_ops vin_video_ops = {
-+ .s_stream = vin_set_stream,
-+};
-+
-+static const struct v4l2_subdev_pad_ops vin_pad_ops = {
-+ .enum_mbus_code = vin_enum_mbus_code,
-+ .enum_frame_size = vin_enum_frame_size,
-+ .get_fmt = vin_get_format,
-+ .set_fmt = vin_set_format,
-+};
-+
-+static const struct v4l2_subdev_ops vin_v4l2_ops = {
-+ .core = &vin_core_ops,
-+ .video = &vin_video_ops,
-+ .pad = &vin_pad_ops,
-+};
-+
-+static const struct v4l2_subdev_internal_ops vin_v4l2_internal_ops = {
-+ .open = vin_init_formats,
-+};
-+
-+static const struct stfcamss_video_ops stfcamss_vin_video_ops = {
-+ .queue_buffer = vin_queue_buffer,
-+ .flush_buffers = vin_flush_buffers,
-+};
-+
-+static const struct media_entity_operations vin_media_ops = {
-+ .link_setup = vin_link_setup,
-+ .link_validate = v4l2_subdev_link_validate,
-+};
-+
-+int stf_vin_register(struct stf_vin2_dev *vin_dev, struct v4l2_device *v4l2_dev)
-+{
-+ struct v4l2_subdev *sd;
-+ struct stfcamss_video *video_out;
-+ struct media_pad *pads;
-+ int ret;
-+ int i;
-+
-+ for (i = 0; i < STF_ISP_LINE_MAX + 1; i++) {
-+ char name[32];
-+ char *sub_name = get_line_subdevname(i);
-+ int is_mp;
-+
-+#ifdef STF_CAMSS_SKIP_ITI
-+ if ((stf_vin_map_isp_line(i) == STF_ISP_LINE_SRC_ITIW) ||
-+ (stf_vin_map_isp_line(i) == STF_ISP_LINE_SRC_ITIR))
-+ continue;
-+#endif
-+ is_mp = (stf_vin_map_isp_line(i) == STF_ISP_LINE_SRC) ? true : false;
-+ is_mp = false; /* multi-planar video nodes are currently disabled */
-+ sd = &vin_dev->line[i].subdev;
-+ pads = vin_dev->line[i].pads;
-+ video_out = &vin_dev->line[i].video_out;
-+ video_out->id = i;
-+
-+ v4l2_subdev_init(sd, &vin_v4l2_ops);
-+ sd->internal_ops = &vin_v4l2_internal_ops;
-+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-+ snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
-+ STF_VIN_NAME, 0, sub_name);
-+ v4l2_set_subdevdata(sd, &vin_dev->line[i]);
-+
-+ ret = vin_init_formats(sd, NULL);
-+ if (ret < 0) {
-+ st_err(ST_VIN, "Failed to init format: %d\n", ret);
-+ goto err_init;
-+ }
-+
-+ pads[STF_VIN_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
-+ pads[STF_VIN_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
-+
-+ sd->entity.function =
-+ MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
-+ sd->entity.ops = &vin_media_ops;
-+ ret = media_entity_pads_init(&sd->entity,
-+ STF_VIN_PADS_NUM, pads);
-+ if (ret < 0) {
-+ st_err(ST_VIN, "Failed to init media entity: %d\n", ret);
-+ goto err_init;
-+ }
-+
-+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
-+ if (ret < 0) {
-+ st_err(ST_VIN, "Failed to register subdev: %d\n", ret);
-+ goto err_reg_subdev;
-+ }
-+
-+ video_out->ops = &stfcamss_vin_video_ops;
-+ video_out->bpl_alignment = 16 * 8;
-+
-+ snprintf(name, ARRAY_SIZE(name), "%s_%s%d",
-+ sd->name, "video", i);
-+ ret = stf_video_register(video_out, v4l2_dev, name, is_mp);
-+ if (ret < 0) {
-+ st_err(ST_VIN, "Failed to register video node: %d\n",
-+ ret);
-+ goto err_vid_reg;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &sd->entity, STF_VIN_PAD_SRC,
-+ &video_out->vdev.entity, 0,
-+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
-+ if (ret < 0) {
-+ st_err(ST_VIN, "Failed to link %s->%s entities: %d\n",
-+ sd->entity.name, video_out->vdev.entity.name,
-+ ret);
-+ goto err_create_link;
-+ }
-+ }
-+
-+ return 0;
-+
-+err_create_link:
-+ stf_video_unregister(video_out);
-+err_vid_reg:
-+ v4l2_device_unregister_subdev(sd);
-+err_reg_subdev:
-+ media_entity_cleanup(&sd->entity);
-+err_init:
-+ for (i--; i >= 0; i--) {
-+ sd = &vin_dev->line[i].subdev;
-+ video_out = &vin_dev->line[i].video_out;
-+
-+ stf_video_unregister(video_out);
-+ v4l2_device_unregister_subdev(sd);
-+ media_entity_cleanup(&sd->entity);
-+ }
-+ return ret;
-+}
-+
-+int stf_vin_unregister(struct stf_vin2_dev *vin_dev)
-+{
-+ struct v4l2_subdev *sd;
-+ struct stfcamss_video *video_out;
-+ int i;
-+
-+ mutex_destroy(&vin_dev->power_lock);
-+ for (i = 0; i < STF_DUMMY_MODULE_NUMS; i++)
-+ mutex_destroy(&vin_dev->dummy_buffer[i].stream_lock);
-+
-+ for (i = 0; i < STF_ISP_LINE_MAX + 1; i++) {
-+ sd = &vin_dev->line[i].subdev;
-+ video_out = &vin_dev->line[i].video_out;
-+
-+ stf_video_unregister(video_out);
-+ v4l2_device_unregister_subdev(sd);
-+ media_entity_cleanup(&sd->entity);
-+ mutex_destroy(&vin_dev->line[i].stream_lock);
-+ mutex_destroy(&vin_dev->line[i].power_lock);
-+ }
-+ return 0;
-+}
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_vin.h
-@@ -0,0 +1,182 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STF_VIN_H
-+#define STF_VIN_H
-+
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-subdev.h>
-+#include <linux/spinlock_types.h>
-+#include <video/stf-vin.h>
-+#include <linux/platform_device.h>
-+
-+#include "stf_video.h"
-+
-+#define STF_VIN_NAME "stf_vin"
-+
-+#define STF_VIN_PAD_SINK 0
-+#define STF_VIN_PAD_SRC 1
-+#define STF_VIN_PADS_NUM 2
-+
-+struct vin2_format {
-+ u32 code;
-+ u8 bpp;
-+};
-+
-+struct vin2_format_table {
-+ const struct vin2_format *fmts;
-+ int nfmts;
-+};
-+
-+enum vin_output_state {
-+ VIN_OUTPUT_OFF,
-+ VIN_OUTPUT_RESERVED,
-+ VIN_OUTPUT_SINGLE,
-+ VIN_OUTPUT_CONTINUOUS,
-+ VIN_OUTPUT_IDLE,
-+ VIN_OUTPUT_STOPPING
-+};
-+
-+struct vin_output {
-+ int active_buf;
-+ struct stfcamss_buffer *buf[2];
-+ struct stfcamss_buffer *last_buffer;
-+ struct list_head pending_bufs;
-+ struct list_head ready_bufs;
-+ enum vin_output_state state;
-+ unsigned int sequence;
-+ unsigned int frame_skip;
-+};
-+
-+/* The vin output lines include all isp controller lines,
-+ * and one vin_wr output line.
-+ */
-+enum vin_line_id {
-+ VIN_LINE_NONE = -1,
-+ VIN_LINE_WR = 0,
-+ VIN_LINE_ISP = 1,
-+ VIN_LINE_ISP_SS0 = 2,
-+ VIN_LINE_ISP_SS1 = 3,
-+ VIN_LINE_ISP_ITIW = 4,
-+ VIN_LINE_ISP_ITIR = 5,
-+ VIN_LINE_ISP_RAW = 6,
-+ VIN_LINE_ISP_SCD_Y = 7,
-+ VIN_LINE_MAX = 8,
-+};
-+
-+enum subdev_type;
-+
-+struct vin_line {
-+ enum subdev_type sdev_type; /* must be the first member */
-+ enum vin_line_id id;
-+ struct v4l2_subdev subdev;
-+ struct media_pad pads[STF_VIN_PADS_NUM];
-+ struct v4l2_mbus_framefmt fmt[STF_VIN_PADS_NUM];
-+ struct stfcamss_video video_out;
-+ struct mutex stream_lock;
-+ int stream_count;
-+ struct mutex power_lock;
-+ int power_count;
-+ struct vin_output output;
-+ spinlock_t output_lock;
-+ const struct vin2_format *formats;
-+ unsigned int nformats;
-+#ifdef CONFIG_PM_SLEEP
-+ int pm_stream_count;
-+ int pm_power_count;
-+#endif
-+};
-+
-+struct stf_vin2_dev;
-+
-+struct vin_hw_ops {
-+ int (*vin_clk_enable)(struct stf_vin2_dev *vin_dev);
-+ int (*vin_clk_disable)(struct stf_vin2_dev *vin_dev);
-+ int (*vin_config_set)(struct stf_vin2_dev *vin_dev);
-+ int (*vin_wr_stream_set)(struct stf_vin2_dev *vin_dev, int on);
-+ void (*vin_wr_irq_enable)(struct stf_vin2_dev *vin_dev, int enable);
-+ void (*vin_power_on)(struct stf_vin2_dev *vin_dev, int on);
-+ void (*wr_rd_set_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t wr_addr, dma_addr_t rd_addr);
-+ void (*vin_wr_set_ping_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t addr);
-+ void (*vin_wr_set_pong_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t addr);
-+ void (*vin_wr_get_ping_pong_status)(struct stf_vin2_dev *vin_dev);
-+ void (*vin_isp_set_yuv_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr);
-+ void (*vin_isp_set_raw_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t raw_addr);
-+ void (*vin_isp_set_ss0_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr);
-+ void (*vin_isp_set_ss1_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr);
-+ void (*vin_isp_set_itiw_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr);
-+ void (*vin_isp_set_itir_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr);
-+ void (*vin_isp_set_scd_addr)(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t yhist_addr,
-+ dma_addr_t scd_addr, int scd_type);
-+ int (*vin_isp_get_scd_type)(struct stf_vin2_dev *vin_dev);
-+ irqreturn_t (*vin_wr_irq_handler)(int irq, void *priv);
-+ irqreturn_t (*vin_isp_irq_handler)(int irq, void *priv);
-+ irqreturn_t (*vin_isp_csi_irq_handler)(int irq, void *priv);
-+ irqreturn_t (*vin_isp_scd_irq_handler)(int irq, void *priv);
-+ irqreturn_t (*vin_isp_irq_csiline_handler)(int irq, void *priv);
-+ void (*isr_buffer_done)(struct vin_line *line,
-+ struct vin_params *params);
-+ void (*isr_change_buffer)(struct vin_line *line);
-+};
-+
-+#define ISP_DUMMY_BUFFER_NUMS STF_ISP_PAD_MAX
-+#define VIN_DUMMY_BUFFER_NUMS 1
-+
-+enum {
-+ STF_DUMMY_VIN,
-+ STF_DUMMY_ISP,
-+ STF_DUMMY_MODULE_NUMS,
-+};
-+
-+struct vin_dummy_buffer {
-+ dma_addr_t paddr[3];
-+ void *vaddr;
-+ u32 buffer_size;
-+ u32 width;
-+ u32 height;
-+ u32 mcode;
-+};
-+
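-+/*
-+ * Per-module scratch buffers programmed while a module starts or stops
-+ * streaming, so the DMA write targets always point at valid memory.
-+ */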
-+struct dummy_buffer {
-+ struct vin_dummy_buffer *buffer;
-+ u32 nums;
-+ struct mutex stream_lock;
-+ int stream_count;
-+ atomic_t frame_skip;
-+};
-+
-+struct stf_vin2_dev {
-+ struct stfcamss *stfcamss;
-+ struct vin_line line[VIN_LINE_MAX];
-+ struct dummy_buffer dummy_buffer[STF_DUMMY_MODULE_NUMS];
-+ struct vin_hw_ops *hw_ops;
-+ atomic_t ref_count;
-+ struct mutex power_lock;
-+ int power_count;
-+};
-+
-+extern void sifive_l2_flush64_range(unsigned long start, unsigned long len);
-+extern int stf_vin_subdev_init(struct stfcamss *stfcamss);
-+extern int stf_vin_register(struct stf_vin2_dev *vin_dev,
-+ struct v4l2_device *v4l2_dev);
-+extern int stf_vin_unregister(struct stf_vin2_dev *vin_dev);
-+
-+extern struct vin_hw_ops vin_ops;
-+extern void dump_vin_reg(void __iomem *regbase);
-+extern enum isp_pad_id stf_vin_map_isp_pad(enum vin_line_id line,
-+ enum isp_pad_id def);
-+
-+#endif /* STF_VIN_H */
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stf_vin_hw_ops.c
-@@ -0,0 +1,433 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include "stfcamss.h"
-+#include <linux/of_graph.h>
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-fwnode.h>
-+#include <media/v4l2-subdev.h>
-+
-+static void vin_intr_clear(void __iomem *sysctrl_base)
-+{
-+ reg_set_bit(sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_INTR_CLEAN,
-+ 0x1);
-+ reg_set_bit(sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_INTR_CLEAN,
-+ 0x0);
-+}
-+
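-+/*
-+ * AXI write-channel frame interrupt: once the configured number of frames
-+ * has been skipped, rotate the capture buffer and complete the finished
-+ * frame, then clear the interrupt.
-+ */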
-+static irqreturn_t stf_vin_wr_irq_handler(int irq, void *priv)
-+{
-+ static struct vin_params params;
-+ struct stf_vin2_dev *vin_dev = priv;
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ struct dummy_buffer *dummy_buffer =
-+ &vin_dev->dummy_buffer[STF_DUMMY_VIN];
-+
-+ if (atomic_dec_if_positive(&dummy_buffer->frame_skip) < 0) {
-+ vin_dev->hw_ops->isr_change_buffer(&vin_dev->line[VIN_LINE_WR]);
-+ vin_dev->hw_ops->isr_buffer_done(&vin_dev->line[VIN_LINE_WR], ¶ms);
-+ }
-+
-+ vin_intr_clear(vin->sysctrl_base);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static void __iomem *stf_vin_get_ispbase(struct stf_vin_dev *vin)
-+{
-+ void __iomem *base = vin->isp_base;
-+
-+ return base;
-+}
-+
-+static irqreturn_t stf_vin_isp_irq_handler(int irq, void *priv)
-+{
-+ static struct vin_params params;
-+ struct stf_vin2_dev *vin_dev = priv;
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+ u32 int_status, value;
-+
-+ ispbase = stf_vin_get_ispbase(vin);
-+
-+ int_status = reg_read(ispbase, ISP_REG_ISP_CTRL_0);
-+
-+ if (int_status & BIT(24)) {
-+ if ((int_status & BIT(11)))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_SS0], ¶ms);
-+
-+ if ((int_status & BIT(12)))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_SS1], ¶ms);
-+
-+ if ((int_status & BIT(20)))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP], ¶ms);
-+
-+ value = reg_read(ispbase, ISP_REG_ITIDPSR);
-+ if ((value & BIT(17)))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_ITIW], ¶ms);
-+ if ((value & BIT(16)))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_ITIR], ¶ms);
-+
-+#ifndef ISP_USE_CSI_AND_SC_DONE_INTERRUPT
-+ if (int_status & BIT(25))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_RAW], ¶ms);
-+
-+ if (int_status & BIT(26))
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_SCD_Y], ¶ms);
-+
-+ /* clear interrupt */
-+ reg_write(ispbase, ISP_REG_ISP_CTRL_0, (int_status & ~EN_INT_ALL)
-+ | EN_INT_ISP_DONE | EN_INT_CSI_DONE | EN_INT_SC_DONE);
-+#else
-+ /* clear interrupt */
-+ reg_write(ispbase, ISP_REG_ISP_CTRL_0,
-+ (int_status & ~EN_INT_ALL) | EN_INT_ISP_DONE);
-+#endif
-+ } else
-+ st_debug(ST_VIN, "%s, Unknown interrupt!!!\n", __func__);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static irqreturn_t stf_vin_isp_csi_irq_handler(int irq, void *priv)
-+{
-+ static struct vin_params params;
-+ struct stf_vin2_dev *vin_dev = priv;
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+ u32 int_status;
-+
-+ ispbase = stf_vin_get_ispbase(vin);
-+
-+ int_status = reg_read(ispbase, ISP_REG_ISP_CTRL_0);
-+
-+ if (int_status & BIT(25)) {
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_RAW], ¶ms);
-+
-+ /* clear interrupt */
-+ reg_write(ispbase, ISP_REG_ISP_CTRL_0,
-+ (int_status & ~EN_INT_ALL) | EN_INT_CSI_DONE);
-+ } else
-+ st_debug(ST_VIN, "%s, Unknown interrupt!!!\n", __func__);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static irqreturn_t stf_vin_isp_scd_irq_handler(int irq, void *priv)
-+{
-+ static struct vin_params params;
-+ struct stf_vin2_dev *vin_dev = priv;
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase;
-+ u32 int_status;
-+
-+ ispbase = stf_vin_get_ispbase(vin);
-+
-+ int_status = reg_read(ispbase, ISP_REG_ISP_CTRL_0);
-+
-+ if (int_status & BIT(26)) {
-+ vin_dev->hw_ops->isr_buffer_done(
-+ &vin_dev->line[VIN_LINE_ISP_SCD_Y], ¶ms);
-+
-+ /* clear interrupt */
-+ reg_write(ispbase, ISP_REG_ISP_CTRL_0, (int_status & ~EN_INT_ALL) | EN_INT_SC_DONE);
-+ } else
-+ st_debug(ST_VIN, "%s, Unknown interrupt!!!\n", __func__);
-+
-+ return IRQ_HANDLED;
-+}
-+
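-+/*
-+ * CSI line interrupt: if the ISP shadow registers are not locked, switch the
-+ * capture buffers of every enabled ISP output and trigger a shadow register
-+ * update before clearing the interrupt.
-+ */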
-+static irqreturn_t stf_vin_isp_irq_csiline_handler(int irq, void *priv)
-+{
-+ struct stf_vin2_dev *vin_dev = priv;
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ struct stf_isp_dev *isp_dev;
-+ void __iomem *ispbase;
-+ u32 int_status, value;
-+
-+ ispbase = stf_vin_get_ispbase(vin);
-+
-+ isp_dev = vin_dev->stfcamss->isp_dev;
-+
-+ int_status = reg_read(ispbase, ISP_REG_ISP_CTRL_0);
-+ if (int_status & BIT(27)) {
-+ struct dummy_buffer *dummy_buffer =
-+ &vin_dev->dummy_buffer[STF_DUMMY_ISP];
-+
-+ if (!atomic_read(&isp_dev->shadow_count)) {
-+ if (atomic_dec_if_positive(&dummy_buffer->frame_skip) < 0) {
-+ if ((int_status & BIT(11)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP_SS0]);
-+ if ((int_status & BIT(12)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP_SS1]);
-+ if ((int_status & BIT(20)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP]);
-+
-+ value = reg_read(ispbase, ISP_REG_ITIDPSR);
-+ if ((value & BIT(17)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP_ITIW]);
-+ if ((value & BIT(16)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP_ITIR]);
-+
-+ value = reg_read(ispbase, ISP_REG_CSI_MODULE_CFG);
-+ if ((value & BIT(19)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP_RAW]);
-+ if ((value & BIT(17)))
-+ vin_dev->hw_ops->isr_change_buffer(
-+ &vin_dev->line[VIN_LINE_ISP_SCD_Y]);
-+ }
-+
-+ /* trigger shadow register update */
-+ reg_set_bit(ispbase, ISP_REG_CSIINTS_ADDR, 0x30000, 0x30000);
-+ reg_set_bit(ispbase, ISP_REG_IESHD_ADDR, BIT(1) | BIT(0), 0x3);
-+ } else {
-+ st_warn(ST_VIN, "isp shadow_lock locked. skip this frame\n");
-+ }
-+
-+ /* clear interrupt */
-+ reg_write(ispbase, ISP_REG_ISP_CTRL_0,
-+ (int_status & ~EN_INT_ALL) | EN_INT_LINE_INT);
-+ } else
-+ st_debug(ST_VIN, "%s, Unknown interrupt!!!\n", __func__);
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int stf_vin_clk_enable(struct stf_vin2_dev *vin_dev)
-+{
-+ struct stfcamss *stfcamss = vin_dev->stfcamss;
-+
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_PCLK].clk);
-+ clk_set_rate(stfcamss->sys_clk[STFCLK_APB_FUNC].clk, 49500000);
-+ clk_set_rate(stfcamss->sys_clk[STFCLK_SYS_CLK].clk, 297000000);
-+
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_PCLK].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_SYS_CLK].rstc);
-+
-+ return 0;
-+}
-+
-+static int stf_vin_clk_disable(struct stf_vin2_dev *vin_dev)
-+{
-+ struct stfcamss *stfcamss = vin_dev->stfcamss;
-+
-+ reset_control_assert(stfcamss->sys_rst[STFRST_PCLK].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_SYS_CLK].rstc);
-+
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_PCLK].clk);
-+
-+ return 0;
-+}
-+
-+static int stf_vin_config_set(struct stf_vin2_dev *vin_dev)
-+{
-+ return 0;
-+}
-+
-+static int stf_vin_wr_stream_set(struct stf_vin2_dev *vin_dev, int on)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+
-+ /* keep the axiwr channel always enabled */
-+ if (on)
-+ reg_set(vin->sysctrl_base, SYSCONSAIF_SYSCFG_20, U0_VIN_CNFG_AXIWR0_EN);
-+
-+ return 0;
-+}
-+
-+static void stf_vin_wr_irq_enable(struct stf_vin2_dev *vin_dev,
-+ int enable)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ unsigned int value = 0;
-+
-+ if (enable) {
-+ value = ~(0x1 << 1);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_MASK,
-+ value);
-+ } else {
-+ /* clear vin interrupt */
-+ value = 0x1 << 1;
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_INTR_CLEAN,
-+ 0x1);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_INTR_CLEAN,
-+ 0x0);
-+ reg_set_bit(vin->sysctrl_base, SYSCONSAIF_SYSCFG_28,
-+ U0_VIN_CNFG_AXIWR0_MASK,
-+ value);
-+ }
-+}
-+
-+static void stf_vin_wr_rd_set_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t wr_addr, dma_addr_t rd_addr)
-+{
-+#ifdef UNUSED_CODE
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+
-+ /* set the start address*/
-+ reg_write(vin->sysctrl_base,
-+ SYSCTRL_VIN_WR_START_ADDR, (long)wr_addr);
-+ reg_write(vin->sysctrl_base,
-+ SYSCTRL_VIN_RD_END_ADDR, (long)rd_addr);
-+#endif
-+}
-+
-+void stf_vin_wr_set_ping_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+
-+ /* set the start address */
-+ reg_write(vin->sysctrl_base, SYSCONSAIF_SYSCFG_24, (long)addr);
-+}
-+
-+void stf_vin_wr_set_pong_addr(struct stf_vin2_dev *vin_dev, dma_addr_t addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+
-+ /* set the start address */
-+ reg_write(vin->sysctrl_base, SYSCONSAIF_SYSCFG_32, (long)addr);
-+}
-+
-+void stf_vin_isp_set_yuv_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_write(ispbase, ISP_REG_Y_PLANE_START_ADDR, y_addr);
-+ reg_write(ispbase, ISP_REG_UV_PLANE_START_ADDR, uv_addr);
-+ // reg_set_bit(ispbase, ISP_REG_ISP_CTRL_0, BIT(0), 1);
-+}
-+
-+void stf_vin_isp_set_raw_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t raw_addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_write(ispbase, ISP_REG_DUMP_CFG_0, raw_addr);
-+}
-+
-+void stf_vin_isp_set_ss0_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_write(ispbase, ISP_REG_SS0AY, y_addr);
-+ reg_write(ispbase, ISP_REG_SS0AUV, uv_addr);
-+}
-+
-+void stf_vin_isp_set_ss1_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_write(ispbase, ISP_REG_SS1AY, y_addr);
-+ reg_write(ispbase, ISP_REG_SS1AUV, uv_addr);
-+}
-+
-+void stf_vin_isp_set_itiw_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_write(ispbase, ISP_REG_ITIDWYSAR, y_addr);
-+ reg_write(ispbase, ISP_REG_ITIDWUSAR, uv_addr);
-+}
-+
-+void stf_vin_isp_set_itir_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t y_addr, dma_addr_t uv_addr)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_write(ispbase, ISP_REG_ITIDRYSAR, y_addr);
-+ reg_write(ispbase, ISP_REG_ITIDRUSAR, uv_addr);
-+}
-+
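-+/* Read the current statistics collection type from SC_CFG_1 bits [31:30]. */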
-+int stf_vin_isp_get_scd_type(struct stf_vin2_dev *vin_dev)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ return (reg_read(ispbase, ISP_REG_SC_CFG_1) & (0x3 << 30)) >> 30;
-+}
-+
-+void stf_vin_isp_set_scd_addr(struct stf_vin2_dev *vin_dev,
-+ dma_addr_t yhist_addr, dma_addr_t scd_addr, int scd_type)
-+{
-+ struct stf_vin_dev *vin = vin_dev->stfcamss->vin;
-+ void __iomem *ispbase = stf_vin_get_ispbase(vin);
-+
-+ reg_set_bit(ispbase, ISP_REG_SC_CFG_1, 0x3 << 30, scd_type << 30);
-+ reg_write(ispbase, ISP_REG_SCD_CFG_0, scd_addr);
-+ reg_write(ispbase, ISP_REG_YHIST_CFG_4, yhist_addr);
-+}
-+
-+void dump_vin_reg(void __iomem *regbase)
-+{
-+ st_debug(ST_VIN, "DUMP VIN register:\n");
-+ print_reg(ST_VIN, regbase, 0x00);
-+ print_reg(ST_VIN, regbase, 0x04);
-+ print_reg(ST_VIN, regbase, 0x08);
-+ print_reg(ST_VIN, regbase, 0x0c);
-+ print_reg(ST_VIN, regbase, 0x10);
-+ print_reg(ST_VIN, regbase, 0x14);
-+ print_reg(ST_VIN, regbase, 0x18);
-+ print_reg(ST_VIN, regbase, 0x1c);
-+ print_reg(ST_VIN, regbase, 0x20);
-+ print_reg(ST_VIN, regbase, 0x24);
-+ print_reg(ST_VIN, regbase, 0x28);
-+}
-+
-+struct vin_hw_ops vin_ops = {
-+ .vin_clk_enable = stf_vin_clk_enable,
-+ .vin_clk_disable = stf_vin_clk_disable,
-+ .vin_config_set = stf_vin_config_set,
-+ .vin_wr_stream_set = stf_vin_wr_stream_set,
-+ .vin_wr_irq_enable = stf_vin_wr_irq_enable,
-+ .wr_rd_set_addr = stf_vin_wr_rd_set_addr,
-+ .vin_wr_set_ping_addr = stf_vin_wr_set_ping_addr,
-+ .vin_wr_set_pong_addr = stf_vin_wr_set_pong_addr,
-+ .vin_isp_set_yuv_addr = stf_vin_isp_set_yuv_addr,
-+ .vin_isp_set_raw_addr = stf_vin_isp_set_raw_addr,
-+ .vin_isp_set_ss0_addr = stf_vin_isp_set_ss0_addr,
-+ .vin_isp_set_ss1_addr = stf_vin_isp_set_ss1_addr,
-+ .vin_isp_set_itiw_addr = stf_vin_isp_set_itiw_addr,
-+ .vin_isp_set_itir_addr = stf_vin_isp_set_itir_addr,
-+ .vin_isp_set_scd_addr = stf_vin_isp_set_scd_addr,
-+ .vin_isp_get_scd_type = stf_vin_isp_get_scd_type,
-+ .vin_wr_irq_handler = stf_vin_wr_irq_handler,
-+ .vin_isp_irq_handler = stf_vin_isp_irq_handler,
-+ .vin_isp_csi_irq_handler = stf_vin_isp_csi_irq_handler,
-+ .vin_isp_scd_irq_handler = stf_vin_isp_scd_irq_handler,
-+ .vin_isp_irq_csiline_handler = stf_vin_isp_irq_csiline_handler,
-+};
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stfcamss.c
-@@ -0,0 +1,1369 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#include <linux/completion.h>
-+#include <linux/delay.h>
-+#include <linux/dmaengine.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/of_reserved_mem.h>
-+#include <linux/of_graph.h>
-+#include <linux/of_address.h>
-+#include <linux/pinctrl/consumer.h>
-+#include <linux/platform_device.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/io.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/uaccess.h>
-+#include <linux/mfd/syscon.h>
-+
-+#include <linux/videodev2.h>
-+
-+#include <media/media-device.h>
-+#include <media/v4l2-async.h>
-+#include <media/v4l2-device.h>
-+#include <media/v4l2-mc.h>
-+#include <media/v4l2-fwnode.h>
-+#include <linux/debugfs.h>
-+
-+#include "stfcamss.h"
-+
-+#ifdef STF_DEBUG
-+unsigned int stdbg_level = ST_DEBUG;
-+unsigned int stdbg_mask = 0x7F;
-+#else
-+unsigned int stdbg_level = ST_ERR;
-+unsigned int stdbg_mask = 0x7F;
-+#endif
-+EXPORT_SYMBOL_GPL(stdbg_level);
-+EXPORT_SYMBOL_GPL(stdbg_mask);
-+
-+static const struct reg_name mem_reg_name[] = {
-+ {"csi2rx"},
-+ {"vclk"},
-+ {"vrst"},
-+ {"sctrl"},
-+ {"isp"},
-+ {"trst"},
-+ {"pmu"},
-+ {"syscrg"},
-+};
-+
-+static struct clk_bulk_data stfcamss_clocks[] = {
-+ { .id = "clk_apb_func" },
-+ { .id = "clk_pclk" },
-+ { .id = "clk_sys_clk" },
-+ { .id = "clk_wrapper_clk_c" },
-+ { .id = "clk_dvp_inv" },
-+ { .id = "clk_axiwr" },
-+ { .id = "clk_mipi_rx0_pxl" },
-+ { .id = "clk_pixel_clk_if0" },
-+ { .id = "clk_pixel_clk_if1" },
-+ { .id = "clk_pixel_clk_if2" },
-+ { .id = "clk_pixel_clk_if3" },
-+ { .id = "clk_m31dphy_cfgclk_in" },
-+ { .id = "clk_m31dphy_refclk_in" },
-+ { .id = "clk_m31dphy_txclkesc_lan0" },
-+ { .id = "clk_ispcore_2x" },
-+ { .id = "clk_isp_axi" },
-+};
-+
-+static struct reset_control_bulk_data stfcamss_resets[] = {
-+ { .id = "rst_wrapper_p" },
-+ { .id = "rst_wrapper_c" },
-+ { .id = "rst_pclk" },
-+ { .id = "rst_sys_clk" },
-+ { .id = "rst_axird" },
-+ { .id = "rst_axiwr" },
-+ { .id = "rst_pixel_clk_if0" },
-+ { .id = "rst_pixel_clk_if1" },
-+ { .id = "rst_pixel_clk_if2" },
-+ { .id = "rst_pixel_clk_if3" },
-+ { .id = "rst_m31dphy_hw" },
-+ { .id = "rst_m31dphy_b09_always_on" },
-+ { .id = "rst_isp_top_n" },
-+ { .id = "rst_isp_top_axi" },
-+};
-+
-+int stfcamss_get_mem_res(struct platform_device *pdev, struct stf_vin_dev *vin)
-+{
-+ struct device *dev = &pdev->dev;
-+ struct resource *res;
-+ char *name;
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(mem_reg_name); i++) {
-+ name = (char *)(&mem_reg_name[i]);
-+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-+
-+ if (!res)
-+ return -EINVAL;
-+
-+ if (!strcmp(name, "csi2rx")) {
-+ vin->csi2rx_base = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(vin->csi2rx_base))
-+ return PTR_ERR(vin->csi2rx_base);
-+ } else if (!strcmp(name, "vclk")) {
-+ vin->clkgen_base = ioremap(res->start, resource_size(res));
-+ if (!vin->clkgen_base)
-+ return -ENOMEM;
-+ } else if (!strcmp(name, "vrst")) {
-+ vin->rstgen_base = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(vin->rstgen_base))
-+ return PTR_ERR(vin->rstgen_base);
-+ } else if (!strcmp(name, "sctrl")) {
-+ vin->sysctrl_base = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(vin->sysctrl_base))
-+ return PTR_ERR(vin->sysctrl_base);
-+ } else if (!strcmp(name, "isp")) {
-+ vin->isp_base = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(vin->isp_base))
-+ return PTR_ERR(vin->isp_base);
-+ } else if (!strcmp(name, "trst")) {
-+ vin->vin_top_rstgen_base = devm_ioremap_resource(dev, res);
-+ if (IS_ERR(vin->vin_top_rstgen_base))
-+ return PTR_ERR(vin->vin_top_rstgen_base);
-+ } else if (!strcmp(name, "pmu")) {
-+ vin->pmu_test = ioremap(res->start, resource_size(res));
-+ if (!vin->pmu_test)
-+ return -ENOMEM;
-+ } else if (!strcmp(name, "syscrg")) {
-+ vin->sys_crg = ioremap(res->start, resource_size(res));
-+ if (!vin->sys_crg)
-+ return -ENOMEM;
-+ } else {
-+ st_err(ST_CAMSS, "Could not match resource name\n");
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+int vin_parse_dt(struct device *dev, struct stf_vin_dev *vin)
-+{
-+ int ret = 0;
-+ struct device_node *np = dev->of_node;
-+
-+ if (!np)
-+ return -EINVAL;
-+
-+ return ret;
-+}
-+
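-+/*
-+ * Walk upstream from the given entity through its first sink pad until a
-+ * camera sensor entity is found; returns NULL if the chain ends first.
-+ */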
-+struct media_entity *stfcamss_find_sensor(struct media_entity *entity)
-+{
-+ struct media_pad *pad;
-+
-+ while (1) {
-+ if (!entity->pads)
-+ return NULL;
-+
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ return NULL;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ return NULL;
-+
-+ entity = pad->entity;
-+
-+ if (entity->function == MEDIA_ENT_F_CAM_SENSOR)
-+ return entity;
-+ }
-+}
-+
-+static int stfcamss_of_parse_endpoint_node(struct device *dev,
-+ struct device_node *node,
-+ struct stfcamss_async_subdev *csd)
-+{
-+ struct v4l2_fwnode_endpoint vep = { { 0 } };
-+ struct v4l2_mbus_config_parallel *parallel_bus = &vep.bus.parallel;
-+ struct v4l2_mbus_config_mipi_csi2 *csi2_bus = &vep.bus.mipi_csi2;
-+ struct dvp_cfg *dvp = &csd->interface.dvp;
-+ struct csi2phy_cfg *csiphy = &csd->interface.csiphy;
-+
-+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
-+ st_debug(ST_CAMSS, "%s: vep.base.port = 0x%x, id = 0x%x\n",
-+ __func__, vep.base.port, vep.base.id);
-+
-+ csd->port = vep.base.port;
-+ switch (csd->port) {
-+ case DVP_SENSOR_PORT_NUMBER:
-+ st_debug(ST_CAMSS, "%s, flags = 0x%x\n", __func__,
-+ parallel_bus->flags);
-+ dvp->flags = parallel_bus->flags;
-+ dvp->bus_width = parallel_bus->bus_width;
-+ dvp->data_shift = parallel_bus->data_shift;
-+ break;
-+ case CSI2RX_SENSOR_PORT_NUMBER:
-+ st_debug(ST_CAMSS, "%s, CSI2 flags = 0x%x\n",
-+ __func__, csi2_bus->flags);
-+ csiphy->flags = csi2_bus->flags;
-+ memcpy(csiphy->data_lanes,
-+ csi2_bus->data_lanes, csi2_bus->num_data_lanes);
-+ csiphy->clock_lane = csi2_bus->clock_lane;
-+ csiphy->num_data_lanes = csi2_bus->num_data_lanes;
-+ memcpy(csiphy->lane_polarities,
-+ csi2_bus->lane_polarities,
-+ csi2_bus->num_data_lanes + 1);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static int stfcamss_of_parse_ports(struct stfcamss *stfcamss)
-+{
-+ struct device *dev = stfcamss->dev;
-+ struct device_node *node = NULL;
-+ struct device_node *remote = NULL;
-+ int ret, num_subdevs = 0;
-+
-+ for_each_endpoint_of_node(dev->of_node, node) {
-+ struct stfcamss_async_subdev *csd;
-+
-+ if (!of_device_is_available(node))
-+ continue;
-+
-+ remote = of_graph_get_remote_port_parent(node);
-+ if (!remote) {
-+ st_err(ST_CAMSS, "Cannot get remote parent\n");
-+ ret = -EINVAL;
-+ goto err_cleanup;
-+ }
-+
-+ csd = v4l2_async_nf_add_fwnode(&stfcamss->notifier,
-+ of_fwnode_handle(remote),
-+ struct stfcamss_async_subdev);
-+ of_node_put(remote);
-+ if (IS_ERR(csd)) {
-+ ret = PTR_ERR(csd);
-+ goto err_cleanup;
-+ }
-+
-+ ret = stfcamss_of_parse_endpoint_node(dev, node, csd);
-+ if (ret < 0)
-+ goto err_cleanup;
-+
-+ num_subdevs++;
-+ }
-+
-+ return num_subdevs;
-+
-+err_cleanup:
-+ of_node_put(node);
-+ return ret;
-+}
-+
-+static int stfcamss_init_subdevices(struct stfcamss *stfcamss)
-+{
-+ int ret;
-+
-+ ret = stf_dvp_subdev_init(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to init stf_dvp sub-device: %d\n",
-+ ret);
-+ return ret;
-+ }
-+
-+ ret = stf_csiphy_subdev_init(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to init stf_csiphy sub-device: %d\n",
-+ ret);
-+ return ret;
-+ }
-+
-+ ret = stf_csi_subdev_init(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to init stf_csi sub-device: %d\n",
-+ ret);
-+ return ret;
-+ }
-+
-+ ret = stf_isp_subdev_init(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to init stf_isp sub-device: %d\n",
-+ ret);
-+ return ret;
-+ }
-+
-+ ret = stf_vin_subdev_init(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to init stf_vin sub-device: %d\n",
-+ ret);
-+ return ret;
-+ }
-+ return ret;
-+}
-+
-+static int stfcamss_register_subdevices(struct stfcamss *stfcamss)
-+{
-+ int ret;
-+ struct stf_vin2_dev *vin_dev = stfcamss->vin_dev;
-+ struct stf_dvp_dev *dvp_dev = stfcamss->dvp_dev;
-+ struct stf_csiphy_dev *csiphy_dev = stfcamss->csiphy_dev;
-+ struct stf_csi_dev *csi_dev = stfcamss->csi_dev;
-+ struct stf_isp_dev *isp_dev = stfcamss->isp_dev;
-+
-+ ret = stf_dvp_register(dvp_dev, &stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to register stf dvp%d entity: %d\n",
-+ 0, ret);
-+ goto err_reg_dvp;
-+ }
-+
-+ ret = stf_csiphy_register(csiphy_dev, &stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to register stf csiphy%d entity: %d\n",
-+ 0, ret);
-+ goto err_reg_csiphy;
-+ }
-+
-+ ret = stf_csi_register(csi_dev, &stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to register stf csi%d entity: %d\n",
-+ 0, ret);
-+ goto err_reg_csi;
-+ }
-+
-+ ret = stf_isp_register(isp_dev, &stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to register stf isp%d entity: %d\n",
-+ 0, ret);
-+ goto err_reg_isp;
-+ }
-+
-+ ret = stf_vin_register(vin_dev, &stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to register vin entity: %d\n",
-+ ret);
-+ goto err_reg_vin;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &dvp_dev->subdev.entity,
-+ STF_DVP_PAD_SRC,
-+ &vin_dev->line[VIN_LINE_WR].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->vin entities: %d\n",
-+ dvp_dev->subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &csi_dev->subdev.entity,
-+ STF_CSI_PAD_SRC,
-+ &vin_dev->line[VIN_LINE_WR].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->vin entities: %d\n",
-+ csi_dev->subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &csiphy_dev->subdev.entity,
-+ STF_CSIPHY_PAD_SRC,
-+ &csi_dev->subdev.entity,
-+ STF_CSI_PAD_SINK,
-+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ csiphy_dev->subdev.entity.name,
-+ csi_dev->subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC,
-+ &vin_dev->line[VIN_LINE_ISP].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC_SS0,
-+ &vin_dev->line[VIN_LINE_ISP_SS0].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP_SS0]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC_SS1,
-+ &vin_dev->line[VIN_LINE_ISP_SS1].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP_SS1]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+#ifndef STF_CAMSS_SKIP_ITI
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC_ITIW,
-+ &vin_dev->line[VIN_LINE_ISP_ITIW].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP_ITIW]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC_ITIR,
-+ &vin_dev->line[VIN_LINE_ISP_ITIR].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP_ITIR]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+#endif
-+
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC_RAW,
-+ &vin_dev->line[VIN_LINE_ISP_RAW].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP_RAW]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SRC_SCD_Y,
-+ &vin_dev->line[VIN_LINE_ISP_SCD_Y].subdev.entity,
-+ STF_VIN_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ isp_dev->subdev.entity.name,
-+ vin_dev->line[VIN_LINE_ISP_SCD_Y]
-+ .subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &dvp_dev->subdev.entity,
-+ STF_DVP_PAD_SRC,
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ dvp_dev->subdev.entity.name,
-+ isp_dev->subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ ret = media_create_pad_link(
-+ &csi_dev->subdev.entity,
-+ STF_CSI_PAD_SRC,
-+ &isp_dev->subdev.entity,
-+ STF_ISP_PAD_SINK,
-+ 0);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ csi_dev->subdev.entity.name,
-+ isp_dev->subdev.entity.name,
-+ ret);
-+ goto err_link;
-+ }
-+
-+ return ret;
-+
-+err_link:
-+ stf_vin_unregister(stfcamss->vin_dev);
-+err_reg_vin:
-+ stf_isp_unregister(stfcamss->isp_dev);
-+err_reg_isp:
-+ stf_csi_unregister(stfcamss->csi_dev);
-+err_reg_csi:
-+ stf_csiphy_unregister(stfcamss->csiphy_dev);
-+err_reg_csiphy:
-+ stf_dvp_unregister(stfcamss->dvp_dev);
-+err_reg_dvp:
-+ return ret;
-+}
-+
-+static void stfcamss_unregister_subdevices(struct stfcamss *stfcamss)
-+{
-+ stf_dvp_unregister(stfcamss->dvp_dev);
-+ stf_csiphy_unregister(stfcamss->csiphy_dev);
-+ stf_csi_unregister(stfcamss->csi_dev);
-+ stf_isp_unregister(stfcamss->isp_dev);
-+ stf_vin_unregister(stfcamss->vin_dev);
-+}
-+
-+static int stfcamss_register_mediadevice_subdevnodes(
-+ struct v4l2_async_notifier *async,
-+ struct v4l2_subdev *sd)
-+{
-+ struct stfcamss *stfcamss =
-+ container_of(async, struct stfcamss, notifier);
-+ int ret;
-+
-+ if (sd->host_priv) {
-+ struct media_entity *sensor = &sd->entity;
-+ struct media_entity *input = sd->host_priv;
-+ unsigned int i;
-+
-+ for (i = 0; i < sensor->num_pads; i++) {
-+ if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
-+ break;
-+ }
-+ if (i == sensor->num_pads) {
-+ st_err(ST_CAMSS,
-+ "No source pad in external entity\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = media_create_pad_link(sensor, i,
-+ input, STF_PAD_SINK,
-+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ sensor->name, input->name, ret);
-+ return ret;
-+ }
-+ }
-+
-+ ret = v4l2_device_register_subdev_nodes(&stfcamss->v4l2_dev);
-+ if (ret < 0)
-+ return ret;
-+
-+ if (stfcamss->media_dev.devnode)
-+ return ret;
-+
-+ st_debug(ST_CAMSS, "stfcamss register media device\n");
-+ return media_device_register(&stfcamss->media_dev);
-+}
-+
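-+/*
-+ * A sensor subdev was bound: record its endpoint configuration on the DVP or
-+ * CSI-2 PHY interface it is attached to and register the media device nodes.
-+ */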
-+static int stfcamss_subdev_notifier_bound(struct v4l2_async_notifier *async,
-+ struct v4l2_subdev *subdev,
-+ struct v4l2_async_connection *asd)
-+{
-+ struct stfcamss *stfcamss =
-+ container_of(async, struct stfcamss, notifier);
-+ struct stfcamss_async_subdev *csd =
-+ container_of(asd, struct stfcamss_async_subdev, asd);
-+ enum port_num port = csd->port;
-+ struct stf_dvp_dev *dvp_dev = stfcamss->dvp_dev;
-+ struct stf_csiphy_dev *csiphy_dev = stfcamss->csiphy_dev;
-+
-+ switch (port) {
-+ case DVP_SENSOR_PORT_NUMBER:
-+ dvp_dev->dvp = &csd->interface.dvp;
-+ subdev->host_priv = &dvp_dev->subdev.entity;
-+ break;
-+ case CSI2RX_SENSOR_PORT_NUMBER:
-+ csiphy_dev->csiphy = &csd->interface.csiphy;
-+ subdev->host_priv = &csiphy_dev->subdev.entity;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ stfcamss_register_mediadevice_subdevnodes(async, subdev);
-+
-+ return 0;
-+}
-+
-+#ifdef UNUSED_CODE
-+static int stfcamss_subdev_notifier_complete(
-+ struct v4l2_async_notifier *async)
-+{
-+ struct stfcamss *stfcamss =
-+ container_of(async, struct stfcamss, notifier);
-+ struct v4l2_device *v4l2_dev = &stfcamss->v4l2_dev;
-+ struct v4l2_subdev *sd;
-+ int ret;
-+
-+ list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
-+ if (sd->host_priv) {
-+ struct media_entity *sensor = &sd->entity;
-+ struct media_entity *input = sd->host_priv;
-+ unsigned int i;
-+
-+ for (i = 0; i < sensor->num_pads; i++) {
-+ if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
-+ break;
-+ }
-+ if (i == sensor->num_pads) {
-+ st_err(ST_CAMSS,
-+ "No source pad in external entity\n");
-+ return -EINVAL;
-+ }
-+
-+ ret = media_create_pad_link(sensor, i,
-+ input, STF_PAD_SINK,
-+ MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to link %s->%s entities: %d\n",
-+ sensor->name, input->name, ret);
-+ return ret;
-+ }
-+ }
-+ }
-+
-+ ret = v4l2_device_register_subdev_nodes(&stfcamss->v4l2_dev);
-+ if (ret < 0)
-+ return ret;
-+
-+ return media_device_register(&stfcamss->media_dev);
-+}
-+#endif
-+
-+static const struct v4l2_async_notifier_operations
-+stfcamss_subdev_notifier_ops = {
-+ .bound = stfcamss_subdev_notifier_bound,
-+};
-+
-+static const struct media_device_ops stfcamss_media_ops = {
-+ .link_notify = v4l2_pipeline_link_notify,
-+};
-+
-+#ifdef CONFIG_DEBUG_FS
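-+/*
-+ * Debugfs register access: id_num selects which register block (VIN, ISP,
-+ * CSI, CSIPHY, DVP or clock generator) the "stf_vin" debugfs file operates
-+ * on. Reads dump the selected block to the kernel log, writes poke a single
-+ * register; both are skipped while the block is powered down or not
-+ * streaming.
-+ */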
-+enum module_id {
-+ VIN_MODULE = 0,
-+ ISP_MODULE,
-+ CSI_MODULE,
-+ CSIPHY_MODULE,
-+ DVP_MODULE,
-+ CLK_MODULE,
-+};
-+
-+static enum module_id id_num = ISP_MODULE;
-+
-+void dump_clk_reg(void __iomem *reg_base)
-+{
-+ int i;
-+
-+ st_info(ST_CAMSS, "DUMP Clk register:\n");
-+ for (i = 0; i <= CLK_C_ISP_CTRL; i += 4)
-+ print_reg(ST_CAMSS, reg_base, i);
-+}
-+
-+static ssize_t vin_debug_read(struct file *file, char __user *user_buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct device *dev = file->private_data;
-+ void __iomem *reg_base;
-+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
-+ struct stf_vin_dev *vin = stfcamss->vin;
-+ struct stf_vin2_dev *vin_dev = stfcamss->vin_dev;
-+ struct stf_isp_dev *isp_dev = stfcamss->isp_dev;
-+ struct stf_csi_dev *csi0_dev = stfcamss->csi_dev;
-+
-+ switch (id_num) {
-+ case VIN_MODULE:
-+ case CSIPHY_MODULE:
-+ case DVP_MODULE:
-+ mutex_lock(&vin_dev->power_lock);
-+ if (vin_dev->power_count > 0) {
-+ reg_base = vin->sysctrl_base;
-+ dump_vin_reg(reg_base);
-+ }
-+ mutex_unlock(&vin_dev->power_lock);
-+ break;
-+ case ISP_MODULE:
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count > 0) {
-+ reg_base = vin->isp_base;
-+ dump_isp_reg(reg_base);
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+ break;
-+ case CSI_MODULE:
-+ mutex_lock(&csi0_dev->stream_lock);
-+ if (csi0_dev->stream_count > 0) {
-+ reg_base = vin->csi2rx_base;
-+ dump_csi_reg(reg_base);
-+ }
-+ mutex_unlock(&csi0_dev->stream_lock);
-+ break;
-+ case CLK_MODULE:
-+ mutex_lock(&vin_dev->power_lock);
-+ if (vin_dev->power_count > 0) {
-+ reg_base = vin->clkgen_base;
-+ dump_clk_reg(reg_base);
-+ }
-+ mutex_unlock(&vin_dev->power_lock);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ return 0;
-+}
-+
-+static void set_reg_val(struct stfcamss *stfcamss, int id, u32 offset, u32 val)
-+{
-+ struct stf_vin_dev *vin = stfcamss->vin;
-+ struct stf_vin2_dev *vin_dev = stfcamss->vin_dev;
-+ struct stf_isp_dev *isp_dev = stfcamss->isp_dev;
-+ struct stf_csi_dev *csi_dev = stfcamss->csi_dev;
-+ void __iomem *reg_base;
-+
-+ switch (id) {
-+ case VIN_MODULE:
-+ case CSIPHY_MODULE:
-+ case DVP_MODULE:
-+ mutex_lock(&vin_dev->power_lock);
-+ if (vin_dev->power_count > 0) {
-+ reg_base = vin->sysctrl_base;
-+ print_reg(ST_VIN, reg_base, offset);
-+ reg_write(reg_base, offset, val);
-+ print_reg(ST_VIN, reg_base, offset);
-+ }
-+ mutex_unlock(&vin_dev->power_lock);
-+ break;
-+ case ISP_MODULE:
-+ mutex_lock(&isp_dev->stream_lock);
-+ if (isp_dev->stream_count > 0) {
-+ reg_base = vin->isp_base;
-+ print_reg(ST_ISP, reg_base, offset);
-+ reg_write(reg_base, offset, val);
-+ print_reg(ST_ISP, reg_base, offset);
-+ }
-+ mutex_unlock(&isp_dev->stream_lock);
-+ break;
-+ case CSI_MODULE:
-+ mutex_lock(&csi_dev->stream_lock);
-+ if (csi_dev->stream_count > 0) {
-+ reg_base = vin->csi2rx_base;
-+ print_reg(ST_CSI, reg_base, offset);
-+ reg_write(reg_base, offset, val);
-+ print_reg(ST_CSI, reg_base, offset);
-+ }
-+ mutex_unlock(&csi_dev->stream_lock);
-+ break;
-+ case CLK_MODULE:
-+ mutex_lock(&vin_dev->power_lock);
-+ if (vin_dev->power_count > 0) {
-+ reg_base = vin->clkgen_base;
-+ print_reg(ST_CAMSS, reg_base, offset);
-+ reg_write(reg_base, offset, val);
-+ print_reg(ST_CAMSS, reg_base, offset);
-+ }
-+ mutex_unlock(&vin_dev->power_lock);
-+ break;
-+ default:
-+ break;
-+
-+ }
-+}
-+
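-+/* Minimal number parser for the debugfs input: decimal or "0x"-prefixed hex. */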
-+static u32 atoi(const char *s)
-+{
-+ u32 ret = 0, d = 0;
-+ char ch;
-+ int hex = 0;
-+
-+ if ((*s == '0') && (*(s+1) == 'x')) {
-+ hex = 1;
-+ s += 2;
-+ }
-+
-+ while (1) {
-+ if (!hex) {
-+ d = (*s++) - '0';
-+ if (d > 9)
-+ break;
-+ ret *= 10;
-+ ret += d;
-+ } else {
-+ ch = tolower(*s++);
-+ if (isdigit(ch))
-+ d = ch - '0';
-+ else if (islower(ch))
-+ d = ch - 'a' + 10;
-+ else
-+ break;
-+ if (d > 15)
-+ break;
-+ ret *= 16;
-+ ret += d;
-+ }
-+ }
-+
-+ return ret;
-+}
-+
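-+/*
-+ * Write format: "<module id> <register offset> <value>" per line, numbers in
-+ * decimal or 0x-prefixed hex. For example, with the ISP streaming:
-+ *   echo "1 0xa08 0x1" > /sys/kernel/debug/stfcamss/stf_vin
-+ * Only the values from the last parsed line are applied.
-+ */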
-+static ssize_t vin_debug_write(struct file *file, const char __user *user_buf,
-+ size_t count, loff_t *ppos)
-+{
-+ struct device *dev = file->private_data;
-+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
-+ char *buf;
-+ char *line;
-+ char *p;
-+ static const char *delims = " \t\r";
-+ char *token;
-+	u32 offset = 0, val = 0;
-+
-+ buf = memdup_user_nul(user_buf, min_t(size_t, PAGE_SIZE, count));
-+ if (IS_ERR(buf))
-+ return PTR_ERR(buf);
-+ p = buf;
-+	st_debug(ST_CAMSS, "dup buf: %s, len: %zu, count: %zu\n", p, strlen(p), count);
-+ while (p && *p) {
-+ p = skip_spaces(p);
-+ line = strsep(&p, "\n");
-+ if (!*line || *line == '#')
-+ break;
-+ token = strsep(&line, delims);
-+ if (!token)
-+ goto out;
-+ id_num = atoi(token);
-+ token = strsep(&line, delims);
-+ if (!token)
-+ goto out;
-+ offset = atoi(token);
-+ token = strsep(&line, delims);
-+ if (!token)
-+ goto out;
-+ val = atoi(token);
-+ }
-+ set_reg_val(stfcamss, id_num, offset, val);
-+out:
-+ kfree(buf);
-+ st_info(ST_CAMSS, "id_num = %d, offset = 0x%x, 0x%x\n", id_num, offset, val);
-+ return count;
-+}
-+
-+static const struct file_operations vin_debug_fops = {
-+ .open = simple_open,
-+ .read = vin_debug_read,
-+ .write = vin_debug_write,
-+};
-+#endif /* CONFIG_DEBUG_FS */
-+
-+
-+static int stfcamss_probe(struct platform_device *pdev)
-+{
-+ struct stfcamss *stfcamss;
-+ struct stf_vin_dev *vin;
-+ struct device *dev = &pdev->dev;
-+ struct of_phandle_args args;
-+ int ret = 0, num_subdevs;
-+
-+ dev_info(dev, "stfcamss probe enter!\n");
-+
-+ stfcamss = devm_kzalloc(dev, sizeof(struct stfcamss), GFP_KERNEL);
-+ if (!stfcamss)
-+ return -ENOMEM;
-+
-+ stfcamss->dvp_dev = devm_kzalloc(dev,
-+ sizeof(*stfcamss->dvp_dev), GFP_KERNEL);
-+ if (!stfcamss->dvp_dev) {
-+ ret = -ENOMEM;
-+ goto err_cam;
-+ }
-+
-+ stfcamss->csiphy_dev = devm_kzalloc(dev,
-+ sizeof(*stfcamss->csiphy_dev),
-+ GFP_KERNEL);
-+ if (!stfcamss->csiphy_dev) {
-+ ret = -ENOMEM;
-+ goto err_cam;
-+ }
-+
-+ stfcamss->csi_dev = devm_kzalloc(dev,
-+ sizeof(*stfcamss->csi_dev),
-+ GFP_KERNEL);
-+ if (!stfcamss->csi_dev) {
-+ ret = -ENOMEM;
-+ goto err_cam;
-+ }
-+
-+ stfcamss->isp_dev = devm_kzalloc(dev,
-+ sizeof(*stfcamss->isp_dev),
-+ GFP_KERNEL);
-+ if (!stfcamss->isp_dev) {
-+ ret = -ENOMEM;
-+ goto err_cam;
-+ }
-+
-+ stfcamss->vin_dev = devm_kzalloc(dev,
-+ sizeof(*stfcamss->vin_dev),
-+ GFP_KERNEL);
-+ if (!stfcamss->vin_dev) {
-+ ret = -ENOMEM;
-+ goto err_cam;
-+ }
-+
-+ stfcamss->vin = devm_kzalloc(dev,
-+ sizeof(struct stf_vin_dev),
-+ GFP_KERNEL);
-+ if (!stfcamss->vin) {
-+ ret = -ENOMEM;
-+ goto err_cam;
-+ }
-+
-+ vin = stfcamss->vin;
-+
-+	vin->irq = platform_get_irq(pdev, 0);
-+	if (vin->irq <= 0) {
-+		st_err(ST_CAMSS, "Could not get irq\n");
-+		ret = vin->irq ? vin->irq : -ENXIO;
-+		goto err_cam;
-+	}
-+
-+	vin->isp_irq = platform_get_irq(pdev, 1);
-+	if (vin->isp_irq <= 0) {
-+		st_err(ST_CAMSS, "Could not get isp irq\n");
-+		ret = vin->isp_irq ? vin->isp_irq : -ENXIO;
-+		goto err_cam;
-+	}
-+
-+	vin->isp_csi_irq = platform_get_irq(pdev, 2);
-+	if (vin->isp_csi_irq <= 0) {
-+		st_err(ST_CAMSS, "Could not get isp csi irq\n");
-+		ret = vin->isp_csi_irq ? vin->isp_csi_irq : -ENXIO;
-+		goto err_cam;
-+	}
-+
-+	vin->isp_scd_irq = platform_get_irq(pdev, 3);
-+	if (vin->isp_scd_irq <= 0) {
-+		st_err(ST_CAMSS, "Could not get isp scd irq\n");
-+		ret = vin->isp_scd_irq ? vin->isp_scd_irq : -ENXIO;
-+		goto err_cam;
-+	}
-+
-+	vin->isp_irq_csiline = platform_get_irq(pdev, 4);
-+	if (vin->isp_irq_csiline <= 0) {
-+		st_err(ST_CAMSS, "Could not get isp irq csiline\n");
-+		ret = vin->isp_irq_csiline ? vin->isp_irq_csiline : -ENXIO;
-+		goto err_cam;
-+	}
-+
-+ pm_runtime_enable(dev);
-+
-+ stfcamss->nclks = ARRAY_SIZE(stfcamss_clocks);
-+ stfcamss->sys_clk = stfcamss_clocks;
-+
-+ ret = devm_clk_bulk_get(dev, stfcamss->nclks, stfcamss->sys_clk);
-+ if (ret) {
-+ st_err(ST_CAMSS, "Failed to get clk controls\n");
-+ return ret;
-+ }
-+
-+ stfcamss->nrsts = ARRAY_SIZE(stfcamss_resets);
-+ stfcamss->sys_rst = stfcamss_resets;
-+
-+ ret = devm_reset_control_bulk_get_shared(dev, stfcamss->nrsts,
-+ stfcamss->sys_rst);
-+ if (ret) {
-+ st_err(ST_CAMSS, "Failed to get reset controls\n");
-+ return ret;
-+ }
-+
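-+	/*
-+	 * "starfive,aon-syscon" takes one argument: the offset of the AON
-+	 * general-purpose register (aon_gp_reg) accessed through the syscon
-+	 * regmap.
-+	 */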
-+ ret = of_parse_phandle_with_fixed_args(dev->of_node,
-+ "starfive,aon-syscon", 1, 0, &args);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS, "Failed to parse starfive,aon-syscon\n");
-+ return -EINVAL;
-+ }
-+
-+ stfcamss->stf_aon_syscon = syscon_node_to_regmap(args.np);
-+ of_node_put(args.np);
-+ if (IS_ERR(stfcamss->stf_aon_syscon))
-+ return PTR_ERR(stfcamss->stf_aon_syscon);
-+
-+ stfcamss->aon_gp_reg = args.args[0];
-+
-+ ret = stfcamss_get_mem_res(pdev, vin);
-+ if (ret) {
-+ st_err(ST_CAMSS, "Could not map registers\n");
-+ goto err_cam;
-+ }
-+
-+ ret = vin_parse_dt(dev, vin);
-+ if (ret)
-+ goto err_cam;
-+
-+ vin->dev = dev;
-+ stfcamss->dev = dev;
-+ platform_set_drvdata(pdev, stfcamss);
-+
-+ v4l2_async_nf_init(&stfcamss->notifier, &stfcamss->v4l2_dev);
-+
-+ num_subdevs = stfcamss_of_parse_ports(stfcamss);
-+ if (num_subdevs < 0) {
-+ ret = num_subdevs;
-+ goto err_cam_noti;
-+ }
-+
-+ ret = stfcamss_init_subdevices(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS, "Failed to init subdevice: %d\n", ret);
-+ goto err_cam_noti;
-+ }
-+
-+ stfcamss->media_dev.dev = stfcamss->dev;
-+ strscpy(stfcamss->media_dev.model, "Starfive Camera Subsystem",
-+ sizeof(stfcamss->media_dev.model));
-+ strscpy(stfcamss->media_dev.serial, "0123456789ABCDEF",
-+ sizeof(stfcamss->media_dev.serial));
-+ snprintf(stfcamss->media_dev.bus_info, sizeof(stfcamss->media_dev.bus_info),
-+ "%s:%s", dev_bus_name(dev), pdev->name);
-+ stfcamss->media_dev.hw_revision = 0x01;
-+ stfcamss->media_dev.ops = &stfcamss_media_ops;
-+ media_device_init(&stfcamss->media_dev);
-+
-+ stfcamss->v4l2_dev.mdev = &stfcamss->media_dev;
-+
-+ ret = v4l2_device_register(stfcamss->dev, &stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS, "Failed to register V4L2 device: %d\n", ret);
-+ goto err_cam_noti_med;
-+ }
-+
-+ ret = stfcamss_register_subdevices(stfcamss);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS, "Failed to register subdevice: %d\n", ret);
-+ goto err_cam_noti_med_vreg;
-+ }
-+
-+ if (num_subdevs) {
-+ stfcamss->notifier.ops = &stfcamss_subdev_notifier_ops;
-+ ret = v4l2_async_nf_register(&stfcamss->notifier);
-+ if (ret) {
-+ st_err(ST_CAMSS,
-+ "Failed to register async subdev nodes: %d\n",
-+ ret);
-+ goto err_cam_noti_med_vreg_sub;
-+ }
-+ } else {
-+ ret = v4l2_device_register_subdev_nodes(&stfcamss->v4l2_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS,
-+ "Failed to register subdev nodes: %d\n",
-+ ret);
-+ goto err_cam_noti_med_vreg_sub;
-+ }
-+
-+ ret = media_device_register(&stfcamss->media_dev);
-+ if (ret < 0) {
-+ st_err(ST_CAMSS, "Failed to register media device: %d\n",
-+ ret);
-+ goto err_cam_noti_med_vreg_sub_medreg;
-+ }
-+ }
-+
-+#ifdef CONFIG_DEBUG_FS
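-+	/*
-+	 * /sys/kernel/debug/stfcamss/: "stf_vin" is the register dump/poke
-+	 * interface handled by vin_debug_fops above, while "dbg_level" and
-+	 * "dbg_mask" adjust the driver's log level and mask at runtime.
-+	 */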
-+ stfcamss->debugfs_entry = debugfs_create_dir("stfcamss", NULL);
-+ stfcamss->vin_debugfs = debugfs_create_file("stf_vin",
-+ 0644, stfcamss->debugfs_entry,
-+ (void *)dev, &vin_debug_fops);
-+ debugfs_create_u32("dbg_level",
-+ 0644, stfcamss->debugfs_entry,
-+ &stdbg_level);
-+ debugfs_create_u32("dbg_mask",
-+ 0644, stfcamss->debugfs_entry,
-+ &stdbg_mask);
-+#endif
-+ dev_info(dev, "stfcamss probe success!\n");
-+
-+ return 0;
-+
-+err_cam_noti_med_vreg_sub_medreg:
-+err_cam_noti_med_vreg_sub:
-+ stfcamss_unregister_subdevices(stfcamss);
-+err_cam_noti_med_vreg:
-+ v4l2_device_unregister(&stfcamss->v4l2_dev);
-+err_cam_noti_med:
-+ media_device_cleanup(&stfcamss->media_dev);
-+err_cam_noti:
-+ v4l2_async_nf_cleanup(&stfcamss->notifier);
-+err_cam:
-+ // kfree(stfcamss);
-+ return ret;
-+}
-+
-+static int stfcamss_remove(struct platform_device *pdev)
-+{
-+	struct stfcamss *stfcamss = platform_get_drvdata(pdev);
-+
-+#ifdef CONFIG_DEBUG_FS
-+	debugfs_remove(stfcamss->vin_debugfs);
-+	debugfs_remove_recursive(stfcamss->debugfs_entry);
-+	stfcamss->debugfs_entry = NULL;
-+#endif
-+
-+	stfcamss_unregister_subdevices(stfcamss);
-+	v4l2_device_unregister(&stfcamss->v4l2_dev);
-+	media_device_cleanup(&stfcamss->media_dev);
-+	pm_runtime_disable(&pdev->dev);
-+
-+	dev_info(&pdev->dev, "remove done\n");
-+
-+	return 0;
-+}
-+
-+static const struct of_device_id stfcamss_of_match[] = {
-+ { .compatible = "starfive,jh7110-vin" },
-+ { /* end node */ },
-+};
-+
-+MODULE_DEVICE_TABLE(of, stfcamss_of_match);
-+
-+#ifdef CONFIG_PM_SLEEP
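-+/*
-+ * System sleep: record each VIN line's power/stream counts, walk the media
-+ * pipeline from the video node towards the source to stop every streaming
-+ * subdev, then force runtime suspend. Resume restores the recorded state in
-+ * the opposite order.
-+ */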
-+static int stfcamss_suspend(struct device *dev)
-+{
-+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
-+ struct stf_vin2_dev *vin_dev = stfcamss->vin_dev;
-+ struct media_entity *entity;
-+ struct media_pad *pad;
-+ struct v4l2_subdev *subdev;
-+ struct stfcamss_video *video;
-+ struct video_device *vdev;
-+ int i = 0;
-+ int pm_power_count;
-+ int pm_stream_count;
-+
-+ for (i = 0; i < VIN_LINE_MAX; i++) {
-+ video = &vin_dev->line[i].video_out;
-+ vdev = &vin_dev->line[i].video_out.vdev;
-+ vin_dev->line[i].pm_power_count = vin_dev->line[i].power_count;
-+ vin_dev->line[i].pm_stream_count = vin_dev->line[i].stream_count;
-+ pm_power_count = vin_dev->line[i].pm_power_count;
-+ pm_stream_count = vin_dev->line[i].pm_stream_count;
-+
-+ if (pm_stream_count) {
-+ while (pm_stream_count--) {
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ v4l2_subdev_call(subdev, video, s_stream, 0);
-+ }
-+ }
-+ video_device_pipeline_stop(vdev);
-+ video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
-+ }
-+
-+ if (!pm_power_count)
-+ continue;
-+
-+ v4l2_pipeline_pm_put(&vdev->entity);
-+ }
-+
-+ return pm_runtime_force_suspend(dev);
-+}
-+
-+static int stfcamss_resume(struct device *dev)
-+{
-+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
-+ struct stf_vin2_dev *vin_dev = stfcamss->vin_dev;
-+ struct media_entity *entity;
-+ struct media_pad *pad;
-+ struct v4l2_subdev *subdev;
-+ struct stfcamss_video *video;
-+ struct video_device *vdev;
-+ int i = 0;
-+ int pm_power_count;
-+ int pm_stream_count;
-+ int ret = 0;
-+
-+ pm_runtime_force_resume(dev);
-+
-+ for (i = 0; i < VIN_LINE_MAX; i++) {
-+ video = &vin_dev->line[i].video_out;
-+ vdev = &vin_dev->line[i].video_out.vdev;
-+ pm_power_count = vin_dev->line[i].pm_power_count;
-+ pm_stream_count = vin_dev->line[i].pm_stream_count;
-+
-+ if (!pm_power_count)
-+ continue;
-+
-+ ret = v4l2_pipeline_pm_get(&vdev->entity);
-+ if (ret < 0)
-+ goto err;
-+
-+ if (pm_stream_count) {
-+ ret = video_device_pipeline_start(vdev, &video->stfcamss->pipe);
-+ if (ret < 0)
-+ goto err_pm_put;
-+
-+ while (pm_stream_count--) {
-+ entity = &vdev->entity;
-+ while (1) {
-+ pad = &entity->pads[0];
-+ if (!(pad->flags & MEDIA_PAD_FL_SINK))
-+ break;
-+
-+ pad = media_pad_remote_pad_first(pad);
-+ if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
-+ break;
-+
-+ entity = pad->entity;
-+ subdev = media_entity_to_v4l2_subdev(entity);
-+
-+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
-+ if (ret < 0 && ret != -ENOIOCTLCMD)
-+ goto err_pipeline_stop;
-+ }
-+ }
-+ }
-+ }
-+
-+ return 0;
-+
-+err_pipeline_stop:
-+ video_device_pipeline_stop(vdev);
-+err_pm_put:
-+ v4l2_pipeline_pm_put(&vdev->entity);
-+err:
-+ return ret;
-+}
-+#endif /* CONFIG_PM_SLEEP */
-+
-+#ifdef CONFIG_PM
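-+/*
-+ * Runtime PM: resume enables the ISP core and AXI clocks before releasing
-+ * the ISP_TOP resets; suspend reverses that order.
-+ */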
-+static int stfcamss_runtime_suspend(struct device *dev)
-+{
-+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
-+
-+ reset_control_assert(stfcamss->sys_rst[STFRST_ISP_TOP_AXI].rstc);
-+ reset_control_assert(stfcamss->sys_rst[STFRST_ISP_TOP_N].rstc);
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_ISP_AXI].clk);
-+ clk_disable_unprepare(stfcamss->sys_clk[STFCLK_ISPCORE_2X].clk);
-+
-+ return 0;
-+}
-+
-+static int stfcamss_runtime_resume(struct device *dev)
-+{
-+ struct stfcamss *stfcamss = dev_get_drvdata(dev);
-+
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_ISPCORE_2X].clk);
-+ clk_prepare_enable(stfcamss->sys_clk[STFCLK_ISP_AXI].clk);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_ISP_TOP_N].rstc);
-+ reset_control_deassert(stfcamss->sys_rst[STFRST_ISP_TOP_AXI].rstc);
-+
-+ return 0;
-+}
-+#endif /* CONFIG_PM */
-+
-+static const struct dev_pm_ops stfcamss_pm_ops = {
-+ SET_SYSTEM_SLEEP_PM_OPS(stfcamss_suspend, stfcamss_resume)
-+ SET_RUNTIME_PM_OPS(stfcamss_runtime_suspend, stfcamss_runtime_resume, NULL)
-+};
-+
-+static struct platform_driver stfcamss_driver = {
-+ .probe = stfcamss_probe,
-+ .remove = stfcamss_remove,
-+ .driver = {
-+ .name = DRV_NAME,
-+ .pm = &stfcamss_pm_ops,
-+ .of_match_table = of_match_ptr(stfcamss_of_match),
-+ },
-+};
-+
-+static int __init stfcamss_init(void)
-+{
-+ return platform_driver_register(&stfcamss_driver);
-+}
-+
-+static void __exit stfcamss_cleanup(void)
-+{
-+ platform_driver_unregister(&stfcamss_driver);
-+}
-+
-+module_init(stfcamss_init);
-+//fs_initcall(stfcamss_init);
-+module_exit(stfcamss_cleanup);
-+
-+MODULE_LICENSE("GPL");
---- /dev/null
-+++ b/drivers/media/platform/starfive/v4l2_driver/stfcamss.h
-@@ -0,0 +1,117 @@
-+/* SPDX-License-Identifier: GPL-2.0
-+ *
-+ * Copyright (C) 2021-2023 StarFive Technology Co., Ltd.
-+ *
-+ */
-+
-+#ifndef STFCAMSS_H
-+#define STFCAMSS_H
-+
-+#include <linux/io.h>
-+#include <linux/delay.h>
-+#include <linux/reset.h>
-+#include <linux/clk.h>
-+
-+enum sensor_type {
-+ SENSOR_VIN,
-+	/* needs to be replaced with a real sensor */
-+ SENSOR_ISP,
-+};
-+
-+enum subdev_type {
-+ VIN_DEV_TYPE,
-+ ISP_DEV_TYPE,
-+};
-+
-+#include "stf_common.h"
-+#include "stf_dvp.h"
-+#include "stf_csi.h"
-+#include "stf_csiphy.h"
-+#include "stf_isp.h"
-+#include "stf_vin.h"
-+
-+#define STF_PAD_SINK 0
-+#define STF_PAD_SRC 1
-+#define STF_PADS_NUM 2
-+
-+#define STF_CAMSS_SKIP_ITI
-+
-+enum port_num {
-+ DVP_SENSOR_PORT_NUMBER = 0,
-+ CSI2RX_SENSOR_PORT_NUMBER
-+};
-+
-+enum stf_clk_num {
-+ STFCLK_APB_FUNC = 0,
-+ STFCLK_PCLK,
-+ STFCLK_SYS_CLK,
-+ STFCLK_WRAPPER_CLK_C,
-+ STFCLK_DVP_INV,
-+ STFCLK_AXIWR,
-+ STFCLK_MIPI_RX0_PXL,
-+ STFCLK_PIXEL_CLK_IF0,
-+ STFCLK_PIXEL_CLK_IF1,
-+ STFCLK_PIXEL_CLK_IF2,
-+ STFCLK_PIXEL_CLK_IF3,
-+ STFCLK_M31DPHY_CFGCLK_IN,
-+ STFCLK_M31DPHY_REFCLK_IN,
-+ STFCLK_M31DPHY_TXCLKESC_LAN0,
-+ STFCLK_ISPCORE_2X,
-+ STFCLK_ISP_AXI,
-+ STFCLK_NUM
-+};
-+
-+enum stf_rst_num {
-+ STFRST_WRAPPER_P = 0,
-+ STFRST_WRAPPER_C,
-+ STFRST_PCLK,
-+ STFRST_SYS_CLK,
-+ STFRST_AXIRD,
-+ STFRST_AXIWR,
-+ STFRST_PIXEL_CLK_IF0,
-+ STFRST_PIXEL_CLK_IF1,
-+ STFRST_PIXEL_CLK_IF2,
-+ STFRST_PIXEL_CLK_IF3,
-+ STFRST_M31DPHY_HW,
-+ STFRST_M31DPHY_B09_ALWAYS_ON,
-+ STFRST_ISP_TOP_N,
-+ STFRST_ISP_TOP_AXI,
-+ STFRST_NUM
-+};
-+
-+struct stfcamss {
-+	struct stf_vin_dev *vin; // stfcamss physical resources (register bases, IRQs)
-+ struct v4l2_device v4l2_dev;
-+ struct media_device media_dev;
-+ struct media_pipeline pipe;
-+ struct device *dev;
-+ struct stf_vin2_dev *vin_dev; // subdev
-+ struct stf_dvp_dev *dvp_dev; // subdev
-+ struct stf_csi_dev *csi_dev; // subdev
-+ struct stf_csiphy_dev *csiphy_dev; // subdev
-+ struct stf_isp_dev *isp_dev; // subdev
-+ struct v4l2_async_notifier notifier;
-+ struct clk_bulk_data *sys_clk;
-+ int nclks;
-+ struct reset_control_bulk_data *sys_rst;
-+ int nrsts;
-+ struct regmap *stf_aon_syscon;
-+ uint32_t aon_gp_reg;
-+#ifdef CONFIG_DEBUG_FS
-+ struct dentry *debugfs_entry;
-+ struct dentry *vin_debugfs;
-+#endif
-+};
-+
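-+/*
-+ * Per-endpoint data filled while parsing the DT ports: which sensor port
-+ * (DVP or CSI2RX) the connection belongs to and its interface configuration.
-+ */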
-+struct stfcamss_async_subdev {
-+ struct v4l2_async_connection asd; // must be first
-+ enum port_num port;
-+ struct {
-+ struct dvp_cfg dvp;
-+ struct csi2phy_cfg csiphy;
-+ } interface;
-+};
-+
-+extern struct media_entity *stfcamss_find_sensor(struct media_entity *entity);
-+
-+#endif /* STFCAMSS_H */
---- /dev/null
-+++ b/include/uapi/linux/jh7110-isp.h
-@@ -0,0 +1,253 @@
-+/* SPDX-License-Identifier: ((GPL-2.0+ WITH Linux-syscall-note) OR BSD-3-Clause) */
-+/*
-+ * jh7110-isp.h
-+ *
-+ * JH7110 ISP driver - user space header file.
-+ *
-+ * Copyright © 2023 Starfive Technology Co., Ltd.
-+ *
-+ * Author: Su Zejian (zejian.su@starfivetech.com)
-+ *
-+ */
-+
-+#ifndef __JH7110_ISP_H_
-+#define __JH7110_ISP_H_
-+
-+#include <linux/v4l2-controls.h>
-+
-+#define V4L2_CID_USER_JH7110_ISP_WB_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0001)
-+#define V4L2_CID_USER_JH7110_ISP_CAR_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0002)
-+#define V4L2_CID_USER_JH7110_ISP_CCM_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0003)
-+#define V4L2_CID_USER_JH7110_ISP_CFA_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0004)
-+#define V4L2_CID_USER_JH7110_ISP_CTC_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0005)
-+#define V4L2_CID_USER_JH7110_ISP_DBC_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0006)
-+#define V4L2_CID_USER_JH7110_ISP_DNYUV_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0007)
-+#define V4L2_CID_USER_JH7110_ISP_GMARGB_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0008)
-+#define V4L2_CID_USER_JH7110_ISP_LCCF_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x0009)
-+#define V4L2_CID_USER_JH7110_ISP_OBC_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x000a)
-+#define V4L2_CID_USER_JH7110_ISP_OECF_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x000b)
-+#define V4L2_CID_USER_JH7110_ISP_R2Y_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x000c)
-+#define V4L2_CID_USER_JH7110_ISP_SAT_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x000d)
-+#define V4L2_CID_USER_JH7110_ISP_SHRP_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x000e)
-+#define V4L2_CID_USER_JH7110_ISP_YCRV_SETTING \
-+ (V4L2_CID_USER_JH7110_ISP_BASE + 0x000f)
-+
-+struct jh7110_isp_wb_gain {
-+ __u16 gain_r;
-+ __u16 gain_g;
-+ __u16 gain_b;
-+};
-+
-+struct jh7110_isp_wb_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_wb_gain gains;
-+};
-+
-+struct jh7110_isp_car_setting {
-+ __u32 enabled;
-+};
-+
-+struct jh7110_isp_ccm_smlow {
-+ __s32 ccm[3][3];
-+ __s32 offsets[3];
-+};
-+
-+struct jh7110_isp_ccm_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_ccm_smlow ccm_smlow;
-+};
-+
-+struct jh7110_isp_cfa_params {
-+ __s32 hv_width;
-+ __s32 cross_cov;
-+};
-+
-+struct jh7110_isp_cfa_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_cfa_params settings;
-+};
-+
-+struct jh7110_isp_ctc_params {
-+ __u8 saf_mode;
-+ __u8 daf_mode;
-+ __s32 max_gt;
-+ __s32 min_gt;
-+};
-+
-+struct jh7110_isp_ctc_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_ctc_params settings;
-+};
-+
-+struct jh7110_isp_dbc_params {
-+ __s32 bad_gt;
-+ __s32 bad_xt;
-+};
-+
-+struct jh7110_isp_dbc_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_dbc_params settings;
-+};
-+
-+struct jh7110_isp_dnyuv_params {
-+ __u8 y_sweight[10];
-+ __u16 y_curve[7];
-+ __u8 uv_sweight[10];
-+ __u16 uv_curve[7];
-+};
-+
-+struct jh7110_isp_dnyuv_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_dnyuv_params settings;
-+};
-+
-+struct jh7110_isp_gmargb_point {
-+ __u16 g_val;
-+ __u16 sg_val;
-+};
-+
-+struct jh7110_isp_gmargb_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_gmargb_point curve[15];
-+};
-+
-+struct jh7110_isp_lccf_circle {
-+ __s16 center_x;
-+ __s16 center_y;
-+ __u8 radius;
-+};
-+
-+struct jh7110_isp_lccf_curve_param {
-+ __s16 f1;
-+ __s16 f2;
-+};
-+
-+struct jh7110_isp_lccf_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_lccf_circle circle;
-+ struct jh7110_isp_lccf_curve_param r_param;
-+ struct jh7110_isp_lccf_curve_param gr_param;
-+ struct jh7110_isp_lccf_curve_param gb_param;
-+ struct jh7110_isp_lccf_curve_param b_param;
-+};
-+
-+struct jh7110_isp_blacklevel_win_size {
-+ __u32 width;
-+ __u32 height;
-+};
-+
-+struct jh7110_isp_blacklevel_gain {
-+ __u8 tl_gain;
-+ __u8 tr_gain;
-+ __u8 bl_gain;
-+ __u8 br_gain;
-+};
-+
-+struct jh7110_isp_blacklevel_offset {
-+ __u8 tl_offset;
-+ __u8 tr_offset;
-+ __u8 bl_offset;
-+ __u8 br_offset;
-+};
-+
-+struct jh7110_isp_blacklevel_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_blacklevel_win_size win_size;
-+ struct jh7110_isp_blacklevel_gain gain[4];
-+ struct jh7110_isp_blacklevel_offset offset[4];
-+};
-+
-+struct jh7110_isp_oecf_point {
-+ __u16 x;
-+ __u16 y;
-+ __s16 slope;
-+};
-+
-+struct jh7110_isp_oecf_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_oecf_point r_curve[16];
-+ struct jh7110_isp_oecf_point gr_curve[16];
-+ struct jh7110_isp_oecf_point gb_curve[16];
-+ struct jh7110_isp_oecf_point b_curve[16];
-+};
-+
-+struct jh7110_isp_r2y_matrix {
-+ __s16 m[9];
-+};
-+
-+struct jh7110_isp_r2y_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_r2y_matrix matrix;
-+};
-+
-+struct jh7110_isp_sat_curve {
-+ __s16 yi_min;
-+ __s16 yo_ir;
-+ __s16 yo_min;
-+ __s16 yo_max;
-+};
-+
-+struct jh7110_isp_sat_hue_info {
-+ __s16 sin;
-+ __s16 cos;
-+};
-+
-+struct jh7110_isp_sat_info {
-+ __s16 gain_cmab;
-+ __s16 gain_cmad;
-+ __s16 threshold_cmb;
-+ __s16 threshold_cmd;
-+ __s16 offset_u;
-+ __s16 offset_v;
-+ __s16 cmsf;
-+};
-+
-+struct jh7110_isp_sat_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_sat_curve curve;
-+ struct jh7110_isp_sat_hue_info hue_info;
-+ struct jh7110_isp_sat_info sat_info;
-+};
-+
-+struct jh7110_isp_sharp_weight {
-+ __u8 weight[15];
-+ __u32 recip_wei_sum;
-+};
-+
-+struct jh7110_isp_sharp_strength {
-+ __s16 diff[4];
-+ __s16 f[4];
-+};
-+
-+struct jh7110_isp_sharp_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_sharp_weight weight;
-+ struct jh7110_isp_sharp_strength strength;
-+ __s8 pdirf;
-+ __s8 ndirf;
-+};
-+
-+struct jh7110_isp_ycrv_curve {
-+ __s16 y[64];
-+};
-+
-+struct jh7110_isp_ycrv_setting {
-+ __u32 enabled;
-+ struct jh7110_isp_ycrv_curve curve;
-+};
-+
-+#endif
---- a/include/uapi/linux/v4l2-controls.h
-+++ b/include/uapi/linux/v4l2-controls.h
-@@ -203,6 +203,12 @@ enum v4l2_colorfx {
- */
- #define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
-
-+/*
-+ * The base for the jh7110-isp driver controls.
-+ * We reserve 16 controls for this driver.
-+ */
-+#define V4L2_CID_USER_JH7110_ISP_BASE (V4L2_CID_USER_BASE + 0x1170)
-+
- /* MPEG-class control IDs */
- /* The MPEG controls are applicable to all codec controls
- * and the 'MPEG' part of the define is historical */
---- /dev/null
-+++ b/include/video/stf-vin.h
-@@ -0,0 +1,443 @@
-+/* include/video/stf-vin.h
-+ *
-+ * Copyright 2020 starfive tech.
-+ * Eric Tang <eric.tang@starfivetech.com>
-+ *
-+ * Generic vin notifier interface
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ *
-+*/
-+#ifndef _VIDEO_VIN_H
-+#define _VIDEO_VIN_H
-+
-+#include <linux/cdev.h>
-+
-+#define DRV_NAME "jh7110-vin"
-+#define FB_FIRST_ADDR 0xf9000000
-+#define FB_SECOND_ADDR 0xf97e9000
-+
-+#define RESERVED_MEM_SIZE 0x1000000
-+
-+#define VIN_MIPI_CONTROLLER0_OFFSET 0x00000
-+#define VIN_CLKGEN_OFFSET 0x10000
-+#define VIN_RSTGEN_OFFSET 0x20000
-+#define VIN_MIPI_CONTROLLER1_OFFSET 0x30000
-+#define VIN_SYSCONTROLLER_OFFSET 0x40000
-+
-+#define VD_1080P 1080
-+#define VD_720P 720
-+#define VD_PAL 480
-+
-+#define VD_HEIGHT_1080P VD_1080P
-+#define VD_WIDTH_1080P 1920
-+
-+#define VD_HEIGHT_720P VD_720P
-+#define VD_WIDTH_720P 1080
-+
-+#define VD_HEIGHT_480 480
-+#define VD_WIDTH_640 640
-+
-+#define SEEED_WIDTH_800 800
-+#define SEEED_HIGH_480 480
-+
-+#define VIN_TOP_CLKGEN_BASE_ADDR 0x11800000
-+#define VIN_TOP_RSTGEN_BASE_ADDR 0x11840000
-+#define VIN_TOP_IOPAD_BASE_ADDR 0x11858000
-+
-+#define ISP_BASE_MIPI0_ADDR 0x19800000
-+#define ISP_BASE_CLKGEN_ADDR 0x19810000
-+#define ISP_BASE_RSTGEN_ADDR 0x19820000
-+#define ISP_BASE_MIPI1_ADDR 0x19830000
-+#define ISP_BASE_SYSCTRL_ADDR 0x19840000
-+#define ISP_BASE_ISP0_ADDR 0x19870000
-+#define ISP_BASE_ISP1_ADDR 0x198a0000
-+
-+
-+//vin clk registers
-+#define CLK_VIN_SRC_CTRL 0x188
-+#define CLK_ISP0_AXI_CTRL 0x190
-+#define CLK_ISP0NOC_AXI_CTRL 0x194
-+#define CLK_ISPSLV_AXI_CTRL 0x198
-+#define CLK_ISP1_AXI_CTRL 0x1A0
-+#define CLK_ISP1NOC_AXI_CTRL 0x1A4
-+#define CLK_VIN_AXI 0x1AC
-+#define CLK_VINNOC_AXI 0x1B0
-+
-+
-+#define CLK_DOM4_APB_FUNC 0x0
-+#define CLK_MUX_SEL 0xffffff
-+
-+#define CLK_MIPI_RX0_PXL 0x4
-+
-+#define CLK_DVP_INV 0x8
-+#define CLK_U0_VIN_PCLK 0x18
-+#define CLK_U0_VIN_PCLK_ICG (0x1<<31)
-+
-+#define CLK_U0_VIN_SYS_CLK 0x1c
-+#define CLK_U0_VIN_CLK_P_AXIWR 0x30
-+#define CLK_U0_VIN_MUX_SEL (BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28) | BIT(29))
-+
-+#define CLK_U0_VIN_PIXEL_CLK_IF0 0x20
-+#define CLK_U0_VIN_PIXEL_CLK_IF1 0x24
-+#define CLK_U0_VIN_PIXEL_CLK_IF2 0x28
-+#define CLK_U0_VIN_PIXEL_CLK_IF3 0x2c
-+
-+#define CLK_U0_VIN_CLK_P_AXIWR 0x30
-+
-+#define CLK_U0_ISPV2_TOP_WRAPPER_CLK_C 0x34u
-+#define CLK_U0_ISPV2_MUX_SEL (BIT(24) | BIT(25) | BIT(26) | BIT(27) | BIT(28) | BIT(29))
-+
-+#define CLK_U0_ISPV2_CLK_ICG (0x1<<31)
-+
-+#define SOFTWARE_RESET_ASSERT0_ASSERT_SET 0x38U
-+#define SOFTWARE_RESET_ASSERT0_ASSERT_SET_STATE 0x3CU
-+#define RST_U0_ISPV2_TOP_WRAPPER_RST_P BIT(0)
-+#define RST_U0_ISPV2_TOP_WRAPPER_RST_C BIT(1)
-+#define RSTN_U0_VIN_RST_N_PCLK BIT(4)
-+#define RSTN_U0_VIN_RST_N_SYS_CLK BIT(9)
-+#define RSTN_U0_VIN_RST_P_AXIRD BIT(10)
-+#define RSTN_U0_VIN_RST_P_AXIWR BIT(11)
-+
-+
-+#define CLK_POLARITY (0x1<<30)
-+
-+#define M31DPHY_APBCFGSAIF__SYSCFG_0 0x0
-+#define M31DPHY_APBCFGSAIF__SYSCFG_4 0x4
-+#define M31DPHY_APBCFGSAIF__SYSCFG_8 0x8
-+#define M31DPHY_APBCFGSAIF__SYSCFG_12 0xc
-+#define M31DPHY_APBCFGSAIF__SYSCFG_16 0x10
-+#define M31DPHY_APBCFGSAIF__SYSCFG_20 0x14
-+#define M31DPHY_APBCFGSAIF__SYSCFG_24 0x18
-+#define M31DPHY_APBCFGSAIF__SYSCFG_28 0x1c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_32 0x20
-+#define M31DPHY_APBCFGSAIF__SYSCFG_36 0x24
-+#define M31DPHY_APBCFGSAIF__SYSCFG_40 0x28
-+#define M31DPHY_APBCFGSAIF__SYSCFG_44 0x2c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_48 0x30
-+#define M31DPHY_APBCFGSAIF__SYSCFG_52 0x34
-+#define M31DPHY_APBCFGSAIF__SYSCFG_56 0x38
-+#define M31DPHY_APBCFGSAIF__SYSCFG_60 0x3c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_64 0x40
-+#define M31DPHY_APBCFGSAIF__SYSCFG_68 0x44
-+#define M31DPHY_APBCFGSAIF__SYSCFG_72 0x48
-+#define M31DPHY_APBCFGSAIF__SYSCFG_76 0x4c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_80 0x50
-+#define M31DPHY_APBCFGSAIF__SYSCFG_84 0x54
-+#define M31DPHY_APBCFGSAIF__SYSCFG_88 0x58
-+#define M31DPHY_APBCFGSAIF__SYSCFG_92 0x5c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_96 0x60
-+#define M31DPHY_APBCFGSAIF__SYSCFG_100 0x64
-+#define M31DPHY_APBCFGSAIF__SYSCFG_104 0x68
-+#define M31DPHY_APBCFGSAIF__SYSCFG_108 0x6c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_112 0x70
-+#define M31DPHY_APBCFGSAIF__SYSCFG_116 0x74
-+#define M31DPHY_APBCFGSAIF__SYSCFG_120 0x78
-+#define M31DPHY_APBCFGSAIF__SYSCFG_124 0x7c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_128 0x80
-+#define M31DPHY_APBCFGSAIF__SYSCFG_132 0x84
-+#define M31DPHY_APBCFGSAIF__SYSCFG_136 0x88
-+#define M31DPHY_APBCFGSAIF__SYSCFG_140 0x8c
-+#define M31DPHY_APBCFGSAIF__SYSCFG_144 0x90
-+#define M31DPHY_APBCFGSAIF__SYSCFG_184 0xb8
-+
-+//pmu registers
-+#define SW_DEST_POWER_ON 0x0C
-+#define SW_DEST_POWER_OFF 0x10
-+#define SW_ENCOURAGE 0x44
-+
-+
-+//isp clk registers
-+#define CLK_DPHY_CFGCLK_ISPCORE_2X_CTRL 0x00
-+#define CLK_DPHY_REFCLK_ISPCORE_2X_CTRL 0x04
-+#define CLK_DPHY_TXCLKESC_IN_CTRL 0x08
-+#define CLK_MIPI_RX0_PXL_CTRL 0x0c
-+#define CLK_MIPI_RX1_PXL_CTRL 0x10
-+#define CLK_MIPI_RX0_PXL_0_CTRL 0X14
-+#define CLK_MIPI_RX0_PXL_1_CTRL 0X18
-+#define CLK_MIPI_RX0_PXL_2_CTRL 0X1C
-+#define CLK_MIPI_RX0_PXL_3_CTRL 0X20
-+#define CLK_MIPI_RX0_SYS0_CTRL 0x24
-+#define CLK_MIPI_RX1_PXL_0_CTRL 0X28
-+#define CLK_MIPI_RX1_PXL_1_CTRL 0X2C
-+#define CLK_MIPI_RX1_PXL_2_CTRL 0X30
-+#define CLK_MIPI_RX1_PXL_3_CTRL 0X34
-+#define CLK_MIPI_RX1_SYS1_CTRL 0x38
-+#define CLK_ISP_CTRL 0x3c
-+#define CLK_ISP_2X_CTRL 0x40
-+#define CLK_ISP_MIPI_CTRL 0x44
-+#define CLK_C_ISP_CTRL 0x64
-+#define CLK_CSI2RX0_APB_CTRL 0x58
-+
-+
-+#define CLK_VIN_AXI_WR_CTRL 0x5C
-+
-+#define SOFTWARE_RESET_ASSERT0 0x0
-+#define SOFTWARE_RESET_ASSERT1 0x4
-+#define SOFTWARE_RESET_STATUS 0x4
-+
-+#define IOPAD_REG81 0x144
-+#define IOPAD_REG82 0x148
-+#define IOPAD_REG83 0x14C
-+#define IOPAD_REG84 0x150
-+#define IOPAD_REG85 0x154
-+#define IOPAD_REG86 0x158
-+#define IOPAD_REG87 0x15C
-+#define IOPAD_REG88 0x160
-+#define IOPAD_REG89 0x164
-+
-+//sys control REG DEFINE
-+#define SYSCONSAIF_SYSCFG_0 0X0
-+#define U0_VIN_SCFG_SRAM_CONFIG (BIT(0) | BIT(1))
-+
-+#define SYSCONSAIF_SYSCFG_4 0x4
-+#define U0_VIN_CNFG_AXIRD_END_ADDR 0xffffffff
-+#define SYSCONSAIF_SYSCFG_8 0x8
-+#define U0_VIN_CNFG_AXIRD_LINE_CNT_END (BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13))
-+#define U0_VIN_CNFG_AXIRD_LINE_CNT_START (BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
-+#define SYSCONSAIF_SYSCFG_12 0xc
-+#define U0_VIN_CNFG_AXIRD_PIX_CNT_END (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12))
-+#define U0_VIN_CNFG_AXIRD_PIX_CNT_START (BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
-+#define U0_VIN_CNFG_AXIRD_PIX_CT (BIT(26) | BIT(27))
-+#define SYSCONSAIF_SYSCFG_16 0x10
-+#define U0_VIN_CNFG_AXIRD_START_ADDR 0xFFFFFFFF
-+#define SYSCONSAIF_SYSCFG_20 0x14
-+#define U0_VIN_CNFG_AXIWR0_EN BIT(4)
-+#define U0_VIN_CNFG_AXIWR0_CHANNEL_SEL (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-+#define SYSCONSAIF_SYSCFG_24 0x18
-+#define U0_VIN_CNFG_AXIWR0_END_ADDR 0xFFFFFFFF
-+
-+#define SYSCONSAIF_SYSCFG_28 0x1c
-+#define U0_VIN_CNFG_AXIWR0_INTR_CLEAN BIT(0)
-+#define U0_VIN_CNFG_AXIWR0_MASK BIT(1)
-+#define U0_VIN_CNFG_AXIWR0_PIX_CNT_END (BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12))
-+#define U0_VIN_CNFG_AXIWR0_PIX_CT (BIT(13) | BIT(14))
-+#define UO_VIN_CNFG_AXIWR0_PIXEL_HEIGH_BIT_SEL (BIT(15) | BIT(16))
-+#define SYSCONSAIF_SYSCFG_32 0x20
-+
-+#define SYSCONSAIF_SYSCFG_36 0x24
-+#define UO_VIN_CNFG_COLOR_BAR_EN BIT(0)
-+#define U0_VIN_CNFG_DVP_HS_POS (0x1<<1)
-+#define U0_VIN_CNFG_DVP_SWAP_EN BIT(2)
-+#define U0_VIN_CNFG_DVP_VS_POS (0x1<<3)
-+#define U0_VIN_CNFG_GEN_EN_AXIRD BIT(4)
-+#define U0_VIN_CNFG_ISP_DVP_EN0 BIT(5)
-+#define U0_VIN_CNFG_MIPI_BYTE_EN_ISP0 (BIT(6) |BIT(7))
-+#define U0_VIN_CNFG_P_I_MIPI_CHANNEL_SEL0 (BIT(8) |BIT(9) | BIT(10) | BIT(11))
-+#define U0_VIN_CNFG_P_I_MIPI_HEADER_EN0 BIT(12)
-+
-+#define U0_VIN_CNFG_PIX_NUM (0x1<<13 | 0x1<<14 | 0x1<<15 | 0x1<<16)
-+#define U0_VIN_CNFG_AXIRD_AXI_CNT_END (BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13))
-+
-+#define U0_VIN_CNFG_AXI_DVP_EN BIT(2)
-+#define U0_VIN_CNFG_AXIRD_INTR_MASK BIT(1)
-+#define U0_VIN_CNFG_AXIWRD_INTR_MASK BIT(1)
-+#define U0_VIN_CNFG_AXIWR0_START_ADDR 0xffffffff
-+#define U0_VIN_CNFG_COLOR_BAR_EN 0X0
-+#define U0_VIN_CNFG_AXIWR0_PIX_CNT_CT (BIT(13) | BIT(14))
-+#define U0_VIN_CNFG_AXIWR0_PIX_CNT_CNT_END (BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12))
-+#define U0_VIN_CNFG_AXIWR0_PIXEL_HITH_BIT_SEL (BIT(15) | BIT(16))
-+
-+#define SYSCTRL_REG4 0x10
-+#define SYSCTRL_DPHY_CTRL 0x14
-+#define SYSCTRL_VIN_AXI_CTRL 0x18
-+#define SYSCTRL_VIN_WR_START_ADDR 0x28
-+#define SYSCTRL_VIN_RD_END_ADDR 0x2C
-+#define SYSCTRL_VIN_WR_PIX_TOTAL 0x30
-+#define SYSCTRL_VIN_RD_PIX_TOTAL 0x34
-+#define SYSCTRL_VIN_RW_CTRL 0x38
-+#define SYSCTRL_VIN_SRC_CHAN_SEL 0x24
-+#define SYSCTRL_VIN_SRC_DW_SEL 0x40
-+#define SYSCTRL_VIN_RD_VBLANK 0x44
-+#define SYSCTRL_VIN_RD_VEND 0x48
-+#define SYSCTRL_VIN_RD_HBLANK 0x4C
-+#define SYSCTRL_VIN_RD_HEND 0x50
-+#define SYSCTRL_VIN_INTP_CTRL 0x54
-+
-+#define ISP_NO_SCALE_ENABLE (0x1<<20)
-+#define ISP_MULTI_FRAME_ENABLE (0x1<<17)
-+#define ISP_SS0_ENABLE (0x1<<11)
-+#define ISP_SS1_ENABLE (0x1<<12)
-+#define ISP_RESET (0x1<<1)
-+#define ISP_ENBALE (0x1)
-+
-+
-+
-+ //ISP REG DEFINE
-+#define ISP_REG_DVP_POLARITY_CFG 0x00000014
-+#define ISP_REG_RAW_FORMAT_CFG 0x00000018
-+#define ISP_REG_CFA_MODE 0x00000A1C
-+#define ISP_REG_PIC_CAPTURE_START_CFG 0x0000001C
-+#define ISP_REG_PIC_CAPTURE_END_CFG 0x00000020
-+#define ISP_REG_PIPELINE_XY_SIZE 0x00000A0C
-+#define ISP_REG_Y_PLANE_START_ADDR 0x00000A80
-+#define ISP_REG_UV_PLANE_START_ADDR 0x00000A84
-+#define ISP_REG_STRIDE 0x00000A88
-+#define ISP_REG_PIXEL_COORDINATE_GEN 0x00000A8C
-+#define ISP_REG_PIXEL_AXI_CONTROL 0x00000A90
-+#define ISP_REG_SS_AXI_CONTROL 0x00000AC4
-+#define ISP_REG_RGB_TO_YUV_COVERSION0 0x00000E40
-+#define ISP_REG_RGB_TO_YUV_COVERSION1 0x00000E44
-+#define ISP_REG_RGB_TO_YUV_COVERSION2 0x00000E48
-+#define ISP_REG_RGB_TO_YUV_COVERSION3 0x00000E4C
-+#define ISP_REG_RGB_TO_YUV_COVERSION4 0x00000E50
-+#define ISP_REG_RGB_TO_YUV_COVERSION5 0x00000E54
-+#define ISP_REG_RGB_TO_YUV_COVERSION6 0x00000E58
-+#define ISP_REG_RGB_TO_YUV_COVERSION7 0x00000E5C
-+#define ISP_REG_RGB_TO_YUV_COVERSION8 0x00000E60
-+#define ISP_REG_CSI_MODULE_CFG 0x00000010
-+#define ISP_REG_ISP_CTRL_1 0x00000A08
-+#define ISP_REG_ISP_CTRL_0 0x00000A00
-+#define ISP_REG_DC_AXI_ID 0x00000044
-+#define ISP_REG_CSI_INPUT_EN_AND_STATUS 0x00000000
-+
-+//CSI registers
-+#define DEVICE_CONFIG 0x00
-+#define SOFT_RESET 0x04
-+#define STATIC_CFG 0x08
-+#define ERROR_BYPASS_CFG 0x10
-+#define MONITOR_IRQS 0x18
-+#define MONITOR_IRQS_MASK_CFG 0x1c
-+#define INFO_IRQS 0x20
-+#define INFO_IRQS_MASK_CFG 0x24
-+#define ERROR_IRQS 0x28
-+#define ERROR_IRQS_MASK_CFG 0x2c
-+#define DPHY_LANE_CONTROL 0x40
-+#define DPHY_STATUS 0x48
-+#define DPHY_ERR_STATUS_IRQ 0x4C
-+#define DPHY_ERR_IRQ_MASK_CFG 0x50
-+#define INTEGRATION_DEBUG 0x60
-+#define ERROR_DEBUG 0x74
-+
-+#define STREAM0_CTRL 0x100
-+#define STREAM0_STATUS 0x104
-+#define STREAM0_DATA_CFG 0x108
-+#define STREAM0_CFG 0x10c
-+#define STREAM0_MONITOR_CTRL 0x110
-+#define STREAM0_MONITOR_FRAME 0x114
-+#define STREAM0_MONITOR_LB 0x118
-+#define STREAM0_TIMER 0x11c
-+#define STREAM0_FCC_CFG 0x120
-+#define STREAM0_FCC_CTRL 0x124
-+#define STREAM0_FIFO_FILL_LVL 0x128
-+
-+//m31_dphy registers
-+#define M31DPHY_APBCFGSAIF__SYSCFG_188 0xbc
-+#define M31DPHY_APBCFGSAIF__SYSCFG_192 0xc0
-+#define M31DPHY_APBCFGSAIF__SYSCFG_196 0xc4
-+#define M31DPHY_APBCFGSAIF__SYSCFG_200 0xc8
-+
-+typedef enum
-+{
-+ DT_RAW6 = 0x28,
-+ DT_RAW7 = 0x29,
-+ DT_RAW8 = 0x2a,
-+ DT_RAW10 = 0x2b,
-+ DT_RAW12 = 0x2c,
-+ DT_RAW14 = 0x2d,
-+} mipicam_data_type_t;
-+
-+
-+enum VIN_SOURCE_FORMAT {
-+ SRC_COLORBAR_VIN_ISP = 0,
-+ SRC_DVP_SENSOR_VIN,
-+	SRC_DVP_SENSOR_VIN_ISP, /* needs to be replaced with a real sensor */
-+ SRC_CSI2RX_VIN_ISP,
-+ SRC_DVP_SENSOR_VIN_OV5640,
-+};
-+
-+struct reg_name {
-+ char name[10];
-+};
-+
-+typedef struct
-+{
-+ int dlane_nb;
-+ int dlane_map[4];
-+ int dlane_en[4];
-+ int dlane_pn_swap[4];
-+ int clane_nb;
-+ int clane_map[2];
-+ int clane_pn_swap[2];
-+} csi2rx_dphy_cfg_t;
-+
-+typedef struct
-+{
-+ int lane_nb;
-+ int dlane_map[4];
-+ int dt;
-+ int hsize;
-+ int vsize;
-+} csi2rx_cfg_t;
-+
-+
-+typedef struct
-+{
-+ int mipi_id, w, h, dt, bpp, fps,lane;
-+ u8 clane_swap;
-+ u8 clane_pn_swap;
-+ u8 dlane_swap[4];
-+ u8 dlane_pn_swap[4];
-+} csi_format;
-+
-+struct vin_params {
-+ void *paddr;
-+ unsigned long size;
-+};
-+
-+struct vin_buf {
-+ void *vaddr;
-+ dma_addr_t paddr;
-+ u32 size;
-+};
-+
-+struct vin_framesize {
-+ u32 width;
-+ u32 height;
-+};
-+
-+struct vin_format {
-+ enum VIN_SOURCE_FORMAT format;
-+ u8 fps;
-+};
-+
-+struct stf_vin_dev {
-+ /* Protects the access of variables shared within the interrupt */
-+ spinlock_t irqlock;
-+ int irq;
-+ struct device *dev;
-+ struct cdev vin_cdev;
-+ void __iomem *base;
-+ void __iomem *csi2rx_base;
-+ void __iomem *clkgen_base;
-+ void __iomem *rstgen_base;
-+ void __iomem *sysctrl_base;
-+ void __iomem *isp_base;
-+ void __iomem *vin_top_clkgen_base;
-+ void __iomem *vin_top_rstgen_base;
-+ void __iomem *vin_top_iopad_base;
-+ void __iomem *pmu_test;
-+ void __iomem *sys_crg;
-+ struct vin_framesize frame;
-+ struct vin_format format;
-+ bool isp;
-+ int isp_irq;
-+ int isp_csi_irq;
-+ int isp_scd_irq;
-+ int isp_irq_csiline;
-+ u32 major;
-+ struct vin_buf buf;
-+
-+ wait_queue_head_t wq;
-+ bool condition;
-+ int odd;
-+
-+ csi_format csi_fmt;
-+};
-+
-+extern int vin_notifier_register(struct notifier_block *nb);
-+extern void vin_notifier_unregister(struct notifier_block *nb);
-+extern int vin_notifier_call(unsigned long e, void *v);
-+#endif