]>
Commit | Line | Data |
---|---|---|
731c46ed AE |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. | |
4 | * Copyright (C) 2019-2020 Linaro Ltd. | |
5 | */ | |
6 | ||
7 | #include <linux/types.h> | |
8 | #include <linux/device.h> | |
9 | #include <linux/slab.h> | |
10 | #include <linux/bitfield.h> | |
11 | #include <linux/dma-direction.h> | |
12 | ||
13 | #include "gsi.h" | |
14 | #include "gsi_trans.h" | |
15 | #include "ipa.h" | |
16 | #include "ipa_endpoint.h" | |
17 | #include "ipa_table.h" | |
18 | #include "ipa_cmd.h" | |
19 | #include "ipa_mem.h" | |
20 | ||
21 | /** | |
22 | * DOC: IPA Immediate Commands | |
23 | * | |
24 | * The AP command TX endpoint is used to issue immediate commands to the IPA. | |
25 | * An immediate command is generally used to request the IPA do something | |
26 | * other than data transfer to another endpoint. | |
27 | * | |
28 | * Immediate commands are represented by GSI transactions just like other | |
29 | * transfer requests, represented by a single GSI TRE. Each immediate | |
30 | * command has a well-defined format, having a payload of a known length. | |
31 | * This allows the transfer element's length field to be used to hold an | |
32 | * immediate command's opcode. The payload for a command resides in DRAM | |
33 | * and is described by a single scatterlist entry in its transaction. | |
34 | * Commands do not require a transaction completion callback. To commit | |
35 | * an immediate command transaction, either gsi_trans_commit_wait() or | |
36 | * gsi_trans_commit_wait_timeout() is used. | |
37 | */ | |
38 | ||
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps = 0,		/* wait for HPS stage clear only */
	pipeline_clear_src_grp = 1,	/* not used in this file (see below) */
	pipeline_clear_full = 2,	/* wait for full pipeline clear */
};
45 | ||
/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

/* Payload for a filter/route table initialization immediate command.
 * Carries the DMA addresses of the hashed and non-hashed rule tables,
 * plus a flags field encoding each table's IPA-local offset and size
 * (filled in by ipa_cmd_table_init_add()).
 */
struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;		/* DMA address of hashed rules (if any) */
	__le64 flags;			/* offsets/sizes; see field masks below */
	__le64 nhash_rules_addr;	/* DMA address of non-hashed rules */
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)
59 | ||
/* IPA_CMD_HDR_INIT_LOCAL */

/* Payload for a header table initialization immediate command.  The
 * buffer at hdr_table_addr is written into IPA-local memory at the
 * offset encoded in flags (see ipa_cmd_hdr_init_local_add()).
 */
struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;	/* DMA address of header table content */
	__le32 flags;		/* table size and IPA-local offset */
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)
71 | ||
/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

/* Payload for a register write immediate command.  The register at
 * the given offset is updated with value, under value_mask
 * (see ipa_cmd_register_write_add() for how the fields are filled).
 */
struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved for IPA v3.5.1 */
	__le16 offset;		/* low 16 bits of the register offset */
	__le32 value;		/* value to write */
	__le32 value_mask;	/* mask applied to the written value */
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0 and above */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is present for IPA v3.5.1 only */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are present for IPA v3.5.1 only */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)
95 | ||
/* IPA_CMD_IP_PACKET_INIT */

/* Payload directing the IPA to deliver the next packet directly to
 * dest_endpoint, skipping normal IP packet processing.
 */
struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;	/* destination endpoint ID (low 5 bits) */
	u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)
105 | ||
/* IPA_CMD_DMA_TASK_32B_ADDR */

/* This opcode gets modified with a DMA operation count */

#define DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK		GENMASK(15, 8)

/* Payload for a DMA task immediate command using a 32-bit DMA address */
struct ipa_cmd_hw_dma_task_32b_addr {
	__le16 flags;		/* see DMA_TASK_32B_ADDR_FLAGS_* below */
	__le16 size;		/* transfer size, in bytes */
	__le32 addr;		/* 32-bit system-memory DMA address */
	__le16 packet_size;
	u8 reserved[6];
};

/* Field masks for ipa_cmd_hw_dma_task_32b_addr flags field */
#define DMA_TASK_32B_ADDR_FLAGS_SW_RSVD_FMASK		GENMASK(10, 0)
#define DMA_TASK_32B_ADDR_FLAGS_CMPLT_FMASK		GENMASK(11, 11)
#define DMA_TASK_32B_ADDR_FLAGS_EOF_FMASK		GENMASK(12, 12)
#define DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK		GENMASK(13, 13)
#define DMA_TASK_32B_ADDR_FLAGS_LOCK_FMASK		GENMASK(14, 14)
#define DMA_TASK_32B_ADDR_FLAGS_UNLOCK_FMASK		GENMASK(15, 15)
127 | ||
/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

/* Payload for a DMA transfer between system memory and IPA-local memory
 * (direction is selected by a bit in the flags field).
 */
struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;		/* transfer size, in bytes */
	__le16 local_addr;	/* offset within IPA-local memory */
	__le16 flags;		/* direction (plus clear options pre-v4.0) */
	__le64 system_addr;	/* system-memory DMA address */
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are present for IPA v3.5.1 only. */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)
151 | ||
/* IPA_CMD_IP_PACKET_TAG_STATUS */

/* Payload carrying an opaque 48-bit tag value (held in bits 63:16) */
struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)
159 | ||
/* Immediate command payload.  One union-sized entry is allocated per
 * command from the DMA pool set up in ipa_cmd_pool_init(), so any
 * command's payload fits regardless of which member is used.
 */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_task_32b_addr dma_task_32b_addr;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
170 | ||
/* Compile-time checks on command payload field capacities */
static void ipa_cmd_validate_build(void)
{
	/* The sizes of filter and route tables need to fit into fields
	 * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed
	 * tables might not be used, non-hashed and hashed tables have
	 * the same maximum size.  IPv4 and IPv6 filter tables have the
	 * same number of entries, as do IPv4 and IPv6 route tables.
	 */
	/* Macro expansion is lazy, so TABLE_COUNT_MAX may be defined after
	 * TABLE_SIZE; both are expanded only at the BUILD_BUG_ON() uses.
	 */
#define TABLE_SIZE	(TABLE_COUNT_MAX * IPA_TABLE_ENTRY_SIZE)
#define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
}
187 | ||
188 | #ifdef IPA_VALIDATE | |
189 | ||
/* Validate a memory region holding a table.  Returns true if the region's
 * offset fits in the table init command's address field and the whole
 * region lies within IPA-local memory; logs an error and returns false
 * otherwise.  The route/ipv6/hashed flags only affect the message text
 * and which address field's capacity is checked.
 */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
			 bool route, bool ipv6, bool hashed)
{
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;

	/* Hashed and non-hashed tables use different (same-width) fields */
	offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region offset too large "
			      "(0x%04x + 0x%04x > 0x%04x)\n",
			      ipv6 ? '6' : '4', hashed ? "hashed " : "",
			      route ? "route" : "filter",
			      ipa->mem_offset, mem->offset, offset_max);
		return false;
	}

	/* The whole table must fit within IPA-local memory */
	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region out of range "
			      "(0x%04x + 0x%04x > 0x%04x)\n",
			      ipv6 ? '6' : '4', hashed ? "hashed " : "",
			      route ? "route" : "filter",
			      mem->offset, mem->size, ipa->mem_size);
		return false;
	}

	return true;
}
221 | ||
222 | /* Validate the memory region that holds headers */ | |
223 | static bool ipa_cmd_header_valid(struct ipa *ipa) | |
224 | { | |
225 | const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER]; | |
226 | struct device *dev = &ipa->pdev->dev; | |
227 | u32 offset_max; | |
228 | u32 size_max; | |
229 | u32 size; | |
230 | ||
231 | offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK); | |
232 | if (mem->offset > offset_max || | |
233 | ipa->mem_offset > offset_max - mem->offset) { | |
234 | dev_err(dev, "header table region offset too large " | |
235 | "(0x%04x + 0x%04x > 0x%04x)\n", | |
236 | ipa->mem_offset + mem->offset, offset_max); | |
237 | return false; | |
238 | } | |
239 | ||
240 | size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK); | |
241 | size = ipa->mem[IPA_MEM_MODEM_HEADER].size; | |
242 | size += ipa->mem[IPA_MEM_AP_HEADER].size; | |
243 | if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) { | |
244 | dev_err(dev, "header table region out of range " | |
245 | "(0x%04x + 0x%04x > 0x%04x)\n", | |
246 | mem->offset, size, ipa->mem_size); | |
247 | return false; | |
248 | } | |
249 | ||
250 | return true; | |
251 | } | |
252 | ||
253 | /* Indicate whether an offset can be used with a register_write command */ | |
254 | static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa, | |
255 | const char *name, u32 offset) | |
256 | { | |
257 | struct ipa_cmd_register_write *payload; | |
258 | struct device *dev = &ipa->pdev->dev; | |
259 | u32 offset_max; | |
260 | u32 bit_count; | |
261 | ||
262 | /* The maximum offset in a register_write immediate command depends | |
263 | * on the version of IPA. IPA v3.5.1 supports a 16 bit offset, but | |
264 | * newer versions allow some additional high-order bits. | |
265 | */ | |
266 | bit_count = BITS_PER_BYTE * sizeof(payload->offset); | |
267 | if (ipa->version != IPA_VERSION_3_5_1) | |
268 | bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK); | |
269 | BUILD_BUG_ON(bit_count > 32); | |
270 | offset_max = ~0 >> (32 - bit_count); | |
271 | ||
272 | if (offset > offset_max || ipa->mem_offset > offset_max - offset) { | |
273 | dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n", | |
274 | ipa->mem_offset + offset, offset_max); | |
275 | return false; | |
276 | } | |
277 | ||
278 | return true; | |
279 | } | |
280 | ||
281 | /* Check whether offsets passed to register_write are valid */ | |
282 | static bool ipa_cmd_register_write_valid(struct ipa *ipa) | |
283 | { | |
284 | const char *name; | |
285 | u32 offset; | |
286 | ||
287 | offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version); | |
288 | name = "filter/route hash flush"; | |
289 | if (!ipa_cmd_register_write_offset_valid(ipa, name, offset)) | |
290 | return false; | |
291 | ||
292 | offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT); | |
293 | name = "maximal endpoint status"; | |
294 | if (!ipa_cmd_register_write_offset_valid(ipa, name, offset)) | |
295 | return false; | |
296 | ||
297 | return true; | |
298 | } | |
299 | ||
300 | bool ipa_cmd_data_valid(struct ipa *ipa) | |
301 | { | |
302 | if (!ipa_cmd_header_valid(ipa)) | |
303 | return false; | |
304 | ||
305 | if (!ipa_cmd_register_write_valid(ipa)) | |
306 | return false; | |
307 | ||
308 | return true; | |
309 | } | |
310 | ||
311 | #endif /* IPA_VALIDATE */ | |
312 | ||
/**
 * ipa_cmd_pool_init() - initialize the command payload and info pools
 * @channel:	AP command TX channel the pools serve
 * @tre_max:	maximum number of TREs outstanding at once
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;
	int ret;

	/* This is as good a place as any to validate build constants */
	ipa_cmd_validate_build();

	/* Even though command payloads are allocated one at a time,
	 * a single transaction can require up to tlv_count of them,
	 * so we treat them as if that many can be allocated at once.
	 */
	ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				      sizeof(union ipa_cmd_payload),
				      tre_max, channel->tlv_count);
	if (ret)
		return ret;

	/* Each TRE needs a command info structure */
	ret = gsi_trans_pool_init(&trans_info->info_pool,
				  sizeof(struct ipa_cmd_info),
				  tre_max, channel->tlv_count);
	if (ret)
		/* Unwind the DMA pool created above on failure */
		gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);

	return ret;
}
341 | ||
342 | void ipa_cmd_pool_exit(struct gsi_channel *channel) | |
343 | { | |
344 | struct gsi_trans_info *trans_info = &channel->trans_info; | |
345 | struct device *dev = channel->gsi->dev; | |
346 | ||
347 | gsi_trans_pool_exit(&trans_info->info_pool); | |
348 | gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool); | |
349 | } | |
350 | ||
351 | static union ipa_cmd_payload * | |
352 | ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr) | |
353 | { | |
354 | struct gsi_trans_info *trans_info; | |
355 | struct ipa_endpoint *endpoint; | |
356 | ||
357 | endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; | |
358 | trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info; | |
359 | ||
360 | return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr); | |
361 | } | |
362 | ||
/* Add a filter/route table init command to a transaction.  The offsets
 * are relative to the start of IPA-local memory; sizes are in bytes.
 * If hash_size is 0, hash_offset and hash_addr are ignored.
 */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;	/* make region-relative offset absolute */
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	/* Get a DMA-able payload buffer from the command pool */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
403 | ||
/* Initialize header space in IPA-local memory.  @offset is relative to
 * the start of IPA-local memory, @size is the table size in bytes, and
 * @addr is the DMA address of the buffer holding the initial content.
 */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	/* Make the region-relative offset absolute within IPA memory */
	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	/* Pack the size and destination offset into the flags field */
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
434 | ||
/* Add a register write command to a transaction.  Writes @value (under
 * @mask) to the register at @offset.  If @clear_full is set the command
 * waits for full pipeline clear, otherwise for HPS clear only; where
 * the clear options are encoded differs by IPA version.
 */
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	if (ipa->version != IPA_VERSION_3_5_1) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;	/* keep the low 16 bits */

		/* Extract the top 4 bits and encode it into the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */

	} else {
		/* v3.5.1: clear options go in their own payload field */
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  DMA_NONE, opcode);
}
487 | ||
488 | /* Skip IP packet processing on the next data transfer on a TX channel */ | |
489 | static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id) | |
490 | { | |
491 | struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); | |
492 | enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT; | |
493 | enum dma_data_direction direction = DMA_TO_DEVICE; | |
494 | struct ipa_cmd_ip_packet_init *payload; | |
495 | union ipa_cmd_payload *cmd_payload; | |
496 | dma_addr_t payload_addr; | |
497 | ||
498 | /* assert(endpoint_id < | |
499 | field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */ | |
500 | ||
501 | cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); | |
502 | payload = &cmd_payload->ip_packet_init; | |
503 | ||
504 | payload->dest_endpoint = u8_encode_bits(endpoint_id, | |
505 | IPA_PACKET_INIT_DEST_ENDPOINT_FMASK); | |
506 | ||
507 | gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, | |
508 | direction, opcode); | |
509 | } | |
510 | ||
/* Use a 32-bit DMA command to zero a block of memory.  @addr must be a
 * 32-bit-representable DMA address; @size is the transfer size in bytes;
 * @toward_ipa selects the DMA mapping direction.
 */
void ipa_cmd_dma_task_32b_addr_add(struct gsi_trans *trans, u16 size,
				   dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_TASK_32B_ADDR;
	struct ipa_cmd_hw_dma_task_32b_addr *payload;
	union ipa_cmd_payload *cmd_payload;
	enum dma_data_direction direction;
	dma_addr_t payload_addr;
	u16 flags;

	/* assert(addr <= U32_MAX); */
	addr &= GENMASK_ULL(31, 0);	/* only the low 32 bits are sent */

	/* The opcode encodes the number of DMA operations in the high byte */
	opcode |= u16_encode_bits(1, DMA_TASK_32B_ADDR_OPCODE_COUNT_FMASK);

	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	/* complete: 0 = don't interrupt; eof: 0 = don't assert eot */
	flags = DMA_TASK_32B_ADDR_FLAGS_FLSH_FMASK;
	/* lock: 0 = don't lock endpoint; unlock: 0 = don't unlock */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_task_32b_addr;

	payload->flags = cpu_to_le16(flags);
	payload->size = cpu_to_le16(size);
	payload->addr = cpu_to_le32((u32)addr);
	payload->packet_size = cpu_to_le16(size);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
546 | ||
/* Use a DMA command to read or write a block of IPA-resident memory.
 * @offset is relative to the start of IPA-local memory; @addr is the
 * system-memory DMA address; @toward_ipa selects write (true) or read.
 */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	enum dma_data_direction direction;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	/* assert(size > 0 && size <= U16_MAX); */
	/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */

	/* Make the region-relative offset absolute within IPA memory */
	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction: 0 = write to IPA, 1 read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear: 0 = wait for pipeline clear (don't skip)
	 *   clear_options: 0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode.  But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
590 | ||
591 | static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag) | |
592 | { | |
593 | struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); | |
594 | enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS; | |
595 | enum dma_data_direction direction = DMA_TO_DEVICE; | |
596 | struct ipa_cmd_ip_packet_tag_status *payload; | |
597 | union ipa_cmd_payload *cmd_payload; | |
598 | dma_addr_t payload_addr; | |
599 | ||
600 | /* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */ | |
601 | ||
602 | cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr); | |
603 | payload = &cmd_payload->ip_packet_tag_status; | |
604 | ||
605 | payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK); | |
606 | ||
607 | gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, | |
608 | direction, opcode); | |
609 | } | |
610 | ||
611 | /* Issue a small command TX data transfer */ | |
612 | static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size) | |
613 | { | |
614 | struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); | |
615 | enum dma_data_direction direction = DMA_TO_DEVICE; | |
616 | enum ipa_cmd_opcode opcode = IPA_CMD_NONE; | |
617 | union ipa_cmd_payload *payload; | |
618 | dma_addr_t payload_addr; | |
619 | ||
620 | /* assert(size <= sizeof(*payload)); */ | |
621 | ||
622 | /* Just transfer a zero-filled payload structure */ | |
623 | payload = ipa_cmd_payload_alloc(ipa, &payload_addr); | |
624 | ||
625 | gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr, | |
626 | direction, opcode); | |
627 | } | |
628 | ||
/* Add the commands that implement the "tag process": flush the pipeline
 * and route a tagged marker packet back to the AP LAN RX endpoint so the
 * AP can tell when everything ahead of it has been handled.
 */
void ipa_cmd_tag_process_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];

	/* Issue a no-op register write that waits for full pipeline clear */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
	/* Send the next packet directly to the AP LAN RX endpoint */
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	/* Tag the packet with an arbitrary 48-bit value */
	ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
	/* Small transfer to flush the tagged packet through */
	ipa_cmd_transfer_add(trans, 4);
}
641 | ||
/* Returns the number of commands required for the tag process */
u32 ipa_cmd_tag_process_count(void)
{
	/* Must match the number of commands added by
	 * ipa_cmd_tag_process_add(): register write, packet init,
	 * tag status, and one small transfer.
	 */
	return 4;
}
647 | ||
648 | static struct ipa_cmd_info * | |
649 | ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count) | |
650 | { | |
651 | struct gsi_channel *channel; | |
652 | ||
653 | channel = &endpoint->ipa->gsi.channel[endpoint->channel_id]; | |
654 | ||
655 | return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count); | |
656 | } | |
657 | ||
658 | /* Allocate a transaction for the command TX endpoint */ | |
659 | struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count) | |
660 | { | |
661 | struct ipa_endpoint *endpoint; | |
662 | struct gsi_trans *trans; | |
663 | ||
664 | endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; | |
665 | ||
666 | trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id, | |
667 | tre_count, DMA_NONE); | |
668 | if (trans) | |
669 | trans->info = ipa_cmd_info_alloc(endpoint, tre_count); | |
670 | ||
671 | return trans; | |
672 | } |