From c52918744ee1e49cea86622a2633b9782446428f Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Fri, 16 May 2025 09:59:59 +0200
Subject: [PATCH 1/3] net: airoha: npu: Move memory allocation in
 airoha_npu_send_msg() caller

Move the ppe_mbox_data struct memory allocation from the airoha_npu_send_msg
routine to its callers. This is a preliminary patch to enable wlan NPU
offloading and flow counter stats support.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20250516-airoha-en7581-flowstats-v2-1-06d5fbf28984@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/airoha/airoha_npu.c | 126 +++++++++++++----------
 1 file changed, 72 insertions(+), 54 deletions(-)

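The shape of the change: airoha_npu_send_msg() now only DMA-maps the buffer it
is handed, while each caller allocates, fills and frees its own ppe_mbox_data.
A minimal, self-contained sketch of that pattern follows; struct demo_msg,
demo_send_msg() and demo_caller() are illustrative stand-ins, not symbols from
the driver.

/* Illustrative only: caller owns the message buffer, the send helper just
 * DMA-maps whatever it is given, and the caller frees it afterwards.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

struct demo_msg {		/* stand-in for struct ppe_mbox_data */
	u32 func_type;
	u32 func_id;
};

/* Send helper: maps the caller-provided buffer, no allocation here. */
static int demo_send_msg(struct device *dev, void *p, int size)
{
	dma_addr_t dma_addr;
	int ret;

	dma_addr = dma_map_single(dev, p, size, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, dma_addr);
	if (ret)
		return ret;

	/* ... kick the NPU mailbox and wait for completion ... */

	dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);

	return ret;
}

/* Caller: owns the allocation, so it picks the GFP flags and can reuse
 * the buffer for several requests before freeing it.
 */
static int demo_caller(struct device *dev)
{
	struct demo_msg *msg;
	int err;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->func_type = 0;	/* e.g. NPU_OP_SET */
	msg->func_id = 0;	/* e.g. PPE_FUNC_SET_WAIT_HWNAT_INIT */

	err = demo_send_msg(dev, msg, sizeof(*msg));
	kfree(msg);

	return err;
}

Keeping the allocation in the caller lets each call site pick its own GFP
flags (GFP_KERNEL in airoha_npu_ppe_init(), GFP_ATOMIC in
airoha_npu_foe_commit_entry()) and reuse one buffer across consecutive
requests, which is what the wlan offloading and flow counter stats follow-ups
build on.
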
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -124,17 +124,12 @@ static int airoha_npu_send_msg(struct ai
 	u16 core = 0; /* FIXME */
 	u32 val, offset = core << 4;
 	dma_addr_t dma_addr;
-	void *addr;
 	int ret;
 
-	addr = kmemdup(p, size, GFP_ATOMIC);
-	if (!addr)
-		return -ENOMEM;
-
-	dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
+	dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
 	ret = dma_mapping_error(npu->dev, dma_addr);
 	if (ret)
-		goto out;
+		return ret;
 
 	spin_lock_bh(&npu->cores[core].lock);
 
@@ -155,8 +150,6 @@ static int airoha_npu_send_msg(struct ai
 	spin_unlock_bh(&npu->cores[core].lock);
 
 	dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
-out:
-	kfree(addr);
 
 	return ret;
 }
@@ -261,76 +254,101 @@ static irqreturn_t airoha_npu_wdt_handle
 
 static int airoha_npu_ppe_init(struct airoha_npu *npu)
 {
-	struct ppe_mbox_data ppe_data = {
-		.func_type = NPU_OP_SET,
-		.func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
-		.init_info = {
-			.ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
-			.wan_mode = QDMA_WAN_ETHER,
-		},
-	};
+	struct ppe_mbox_data *ppe_data;
+	int err;
 
-	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-				   sizeof(struct ppe_mbox_data));
+	ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+	if (!ppe_data)
+		return -ENOMEM;
+
+	ppe_data->func_type = NPU_OP_SET;
+	ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
+	ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
+	ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
+
+	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+				  sizeof(*ppe_data));
+	kfree(ppe_data);
+
+	return err;
 }
 
 static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
 {
-	struct ppe_mbox_data ppe_data = {
-		.func_type = NPU_OP_SET,
-		.func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
-	};
+	struct ppe_mbox_data *ppe_data;
+	int err;
+
+	ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+	if (!ppe_data)
+		return -ENOMEM;
 
-	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-				   sizeof(struct ppe_mbox_data));
+	ppe_data->func_type = NPU_OP_SET;
+	ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
+
+	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+				  sizeof(*ppe_data));
+	kfree(ppe_data);
+
+	return err;
 }
 
 static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
					     dma_addr_t foe_addr,
					     int sram_num_entries)
 {
-	struct ppe_mbox_data ppe_data = {
-		.func_type = NPU_OP_SET,
-		.func_id = PPE_FUNC_SET_WAIT_API,
-		.set_info = {
-			.func_id = PPE_SRAM_RESET_VAL,
-			.data = foe_addr,
-			.size = sram_num_entries,
-		},
-	};
+	struct ppe_mbox_data *ppe_data;
+	int err;
+
+	ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
+	if (!ppe_data)
+		return -ENOMEM;
+
+	ppe_data->func_type = NPU_OP_SET;
+	ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+	ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
+	ppe_data->set_info.data = foe_addr;
+	ppe_data->set_info.size = sram_num_entries;
+
+	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+				  sizeof(*ppe_data));
+	kfree(ppe_data);
 
-	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-				   sizeof(struct ppe_mbox_data));
+	return err;
 }
 
 static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
					dma_addr_t foe_addr,
					u32 entry_size, u32 hash, bool ppe2)
 {
-	struct ppe_mbox_data ppe_data = {
-		.func_type = NPU_OP_SET,
-		.func_id = PPE_FUNC_SET_WAIT_API,
-		.set_info = {
-			.data = foe_addr,
-			.size = entry_size,
-		},
-	};
+	struct ppe_mbox_data *ppe_data;
 	int err;
 
-	ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
-					 : PPE_SRAM_SET_ENTRY;
+	ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
+	if (!ppe_data)
+		return -ENOMEM;
+
+	ppe_data->func_type = NPU_OP_SET;
+	ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
+	ppe_data->set_info.data = foe_addr;
+	ppe_data->set_info.size = entry_size;
+	ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+					  : PPE_SRAM_SET_ENTRY;
 
-	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-				  sizeof(struct ppe_mbox_data));
+	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
				  sizeof(*ppe_data));
 	if (err)
-		return err;
+		goto out;
 
-	ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
-	ppe_data.set_info.data = hash;
-	ppe_data.set_info.size = sizeof(u32);
+	ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
+	ppe_data->set_info.data = hash;
+	ppe_data->set_info.size = sizeof(u32);
+
+	err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
+				  sizeof(*ppe_data));
+out:
+	kfree(ppe_data);
 
-	return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
-				   sizeof(struct ppe_mbox_data));
+	return err;
 }
 
 struct airoha_npu *airoha_npu_get(struct device *dev)