// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
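
/* A sketch of the macro above (illustrative, not normative): with an
 * order-0 buffer, IPA_RX_BUFFER_OVERHEAD reduces to NET_SKB_PAD plus
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), roughly 64 + 320 =
 * 384 bytes on a typical 64-bit build with 4 KiB pages.
 */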

#define IPA_ENDPOINT_STOP_RX_RETRIES		10
#define IPA_ENDPOINT_STOP_RX_SIZE		1	/* bytes */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		1000	/* microseconds */

#define ENDPOINT_STOP_DMA_TIMEOUT		15	/* milliseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the IP version */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,	/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
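
/* Every field above is naturally aligned, so the structure is 32 bytes
 * (1 + 1 + 2 + 2 + 1 + 1 + 4 + 4 + 8 + 4 + 4) with no padding.  That
 * satisfies the multiple-of-4 size check in ipa_endpoint_validate_build()
 * below.
 */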

/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static int
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 mask;
	u32 val;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	if (suspend_delay == !!(val & mask))
		return -EALREADY;	/* Already set to desired state */

	val ^= mask;
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	bool support_suspend;
	u32 endpoint_id;

	/* DELAY mode doesn't work right on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	/* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
	support_suspend = ipa->version == IPA_VERSION_3_5_1;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
		if (endpoint->toward_ipa || support_suspend)
			(void)ipa_endpoint_init_ctrl(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
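	/* E.g., if ipa->initialized were 0x3f (six endpoints), this would
	 * allocate room for six register-write commands plus the tag
	 * process commands.
	 */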
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);

		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
		/* metadata is the 4 byte rmnet_map header itself */
		val |= HDR_OFST_METADATA_VALID_FMASK;
		val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
		/* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
		if (!endpoint->toward_ipa) {
			u32 size_offset = offsetof(struct rmnet_map_header,
						   pkt_len);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(size_offset,
					       HDR_OFST_PKT_SIZE_FMASK);
		}
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
	val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
	/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_rmnet_mux_id_metadata_mask() - Build a metadata mask for the mux_id
 *
 * Generate a metadata mask value that will select only the mux_id
 * field in an rmnet_map header structure.  The mux_id is at offset
 * 1 byte from the beginning of the structure, but the metadata
 * value is treated as a 4-byte unit.  So this mask must be computed
 * with endianness in mind.  Note that ipa_endpoint_init_hdr_metadata_mask()
 * will convert this value to the proper byte order.
 *
 * Marked __always_inline because this is really computing a
 * constant value.
 */
static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
{
	size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
	u32 mux_id_mask = 0;
	u8 *bytes;

	bytes = (u8 *)&mux_id_mask;
	bytes[mux_id_offset] = 0xff;	/* mux_id is 1 byte */

	return cpu_to_be32(mux_id_mask);
}
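
/* E.g., on a little-endian CPU: writing bytes[1] = 0xff above yields
 * mux_id_mask == 0x0000ff00, and cpu_to_be32() returns that value with
 * its bytes in big-endian order.
 */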

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	if (!endpoint->toward_ipa && endpoint->data->qmap)
		val = ipa_rmnet_mux_id_metadata_mask();

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (endpoint->toward_ipa && endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* Other bitfields unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
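
/* Illustrative arithmetic (assuming a 1500 byte IPA_MTU and roughly
 * 384 bytes of buffer overhead): 8192 - (1500 + 384) = 6308, so the
 * aggregation limit programmed for an 8192 byte buffer would be
 * 6308 / 1024 = 6 KB.
 */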

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
			val |= u32_encode_bits(aggr_size,
					       AGGR_BYTE_LIMIT_FMASK);
			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
					       AGGR_TIME_LIMIT_FMASK);
			val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* A return value of 0 indicates an error */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 scale;
	u32 base;
	u32 val;

	if (!microseconds)
		return 0;	/* invalid delay */

	/* Timer is represented in units of clock ticks. */
	if (ipa->version < IPA_VERSION_4_2)
		return microseconds;	/* XXX Needs to be computed */

	/* IPA v4.2 represents the tick count as base * scale */
	scale = 1;			/* XXX Needs to be computed */
	if (scale > field_max(SCALE_FMASK))
		return 0;	/* scale too big */

	base = DIV_ROUND_CLOSEST(microseconds, scale);
	if (base > field_max(BASE_VALUE_FMASK))
		return 0;	/* microseconds too big */

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(base, BASE_VALUE_FMASK);

	return val;
}
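
/* E.g., as the code stands, a 500 microsecond period on IPA v4.2 is
 * encoded with the placeholder scale of 1 as base = 500, scale = 1.
 */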

static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					     u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* XXX We'll fix this when the register definition is clear */
	if (microseconds) {
		struct device *dev = &ipa->pdev->dev;

		dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
			endpoint_id);
		microseconds = 0;
	}

	if (microseconds) {
		val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
		if (!val)
			return -EINVAL;
	} else {
		val = 0;	/* timeout is immediate */
	}
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
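	/* E.g., seq_type 0x21 programs HPS sequencer type 0x1 and DPS
	 * sequencer type 0x2.
	 */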
	/* HPS_REP_SEQ_TYPE is 0 */
	/* DPS_REP_SEQ_TYPE is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Return:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
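	/* For example, an skb with three fragments needs four TREs: one
	 * for the linear data plus one per fragment.  Linearizing drops
	 * that requirement to a single TRE.
	 */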
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

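	/* Ring the channel doorbell only once per IPA_REPLENISH_BATCH
	 * (16) buffers, so the hardware is told about new receive
	 * buffers in batches rather than one at a time.
	 */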
	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to add to the replenish backlog
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise don't */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);
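
		/* Illustrative: the status element is 32 bytes, so a
		 * 1501 byte packet with 4 byte pad alignment consumes
		 * 32 + ALIGN(1501, 4) = 1536 bytes, plus the checksum
		 * trailer when checksum offload is enabled.
		 */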

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	bool endpoint_suspended = false;
	struct gsi *gsi = &ipa->gsi;
	dma_addr_t addr;
	bool db_enable;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	if (endpoint->ipa->version == IPA_VERSION_3_5_1)
		if (!ipa_endpoint_init_ctrl(endpoint, false))
			endpoint_suspended = true;

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = ipa_endpoint_stop(endpoint);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	db_enable = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, db_enable);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	ipa_endpoint_stop(endpoint);
out_suspend_again:
	if (endpoint_suspended)
		(void)ipa_endpoint_init_ctrl(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool db_enable;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
	 */
	db_enable = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, db_enable);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
{
	u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
	struct gsi_trans *trans;
	dma_addr_t addr;
	int ret;

	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction for RX endpoint STOP workaround\n");
		return -EBUSY;
	}

	/* Read into the highest part of the zero memory area */
	addr = ipa->zero_addr + ipa->zero_size - size;

	ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);

	ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
	if (ret)
		gsi_trans_free(trans);

	return ret;
}

/**
 * ipa_endpoint_stop() - Stops a GSI channel in IPA
 * @endpoint:	Endpoint whose channel should be stopped
 *
 * This function implements the sequence to stop a GSI channel
 * in IPA.  This function returns when the channel is in the STOP state.
 *
 * Return value: 0 on success, negative otherwise
 */
int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
{
	u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
	int ret;

	do {
		struct ipa *ipa = endpoint->ipa;
		struct gsi *gsi = &ipa->gsi;

		ret = gsi_channel_stop(gsi, endpoint->channel_id);
		if (ret != -EAGAIN || endpoint->toward_ipa)
			break;

		/* For IPA v3.5.1, send a DMA read task and check again */
		if (ipa->version == IPA_VERSION_3_5_1) {
			ret = ipa_endpoint_stop_rx_dma(ipa);
			if (ret)
				break;
		}

		msleep(1);
	} while (retries--);

	return retries ? ret : -EIO;
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	int ret;

	if (endpoint->toward_ipa) {
		bool delay_mode = endpoint->data->tx.delay;

		ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
		/* Endpoint is expected to not be in delay mode */
		if (!ret != delay_mode) {
			dev_warn(dev,
				 "TX endpoint %u was %sin delay mode\n",
				 endpoint->endpoint_id,
				 delay_mode ? "already " : "");
		}
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
		ipa_endpoint_init_deaggr(endpoint);
		ipa_endpoint_init_seq(endpoint);
	} else {
		if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
			if (!ipa_endpoint_init_ctrl(endpoint, false))
				dev_warn(dev,
					 "RX endpoint %u was suspended\n",
					 endpoint->endpoint_id);
		}
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	int ret;

	if (!(endpoint->ipa->enabled & mask))
		return;

	endpoint->ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = ipa_endpoint_stop(endpoint);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_disable(endpoint);

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !stop_channel) {
		/* Due to a hardware bug, a client suspended with an open
		 * aggregation frame will not generate a SUSPEND IPA
		 * interrupt.  We work around this by force-closing the
		 * aggregation frame, then simulating the arrival of such
		 * an interrupt.
		 */
		WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
		ipa_endpoint_suspend_aggr(endpoint);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !start_channel)
		WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));

	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;
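
	/* Illustrative: rx_base = 12 with 8 producer pipes yields
	 * rx_mask = GENMASK(19, 12) = 0x000ff000, and 12 consumer
	 * pipes yield tx_mask = GENMASK(11, 0) = 0x00000fff.
	 */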

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}