// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 *        --------             ---------
 *        |      |             |       |
 *        | AP   +<---.   .----+ Modem |
 *        |      +--. |   | .->+       |
 *        |      |  | |   | |  |       |
 *        --------  | |   | |  ---------
 *                  v |   v |
 *                --+-+---+-+--
 *                |    GSI    |
 *                |-----------|
 *                |           |
 *                |    IPA    |
 *                |           |
 *                -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing
 * one or more TREs to a channel, the writer (either the IPA or an EE)
 * writes a doorbell register to inform the receiving side how many elements
 * have been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT 5 /* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES 10

#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */

#define GSI_ISR_MAX_ITER 50 /* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
        __le64 xfer_ptr;
        __le16 len;
        u8 reserved1;
        u8 code;
        __le16 reserved2;
        u8 type;
        u8 chid;
};

/* Hardware values from the error log register error code field */
enum gsi_err_code {
        GSI_INVALID_TRE_ERR = 0x1,
        GSI_OUT_OF_BUFFERS_ERR = 0x2,
        GSI_OUT_OF_RESOURCES_ERR = 0x3,
        GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
        GSI_EVT_RING_EMPTY_ERR = 0x5,
        GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
        GSI_HWO_1_ERR = 0x8,
};

/* Hardware values from the error log register error type field */
enum gsi_err_type {
        GSI_ERR_TYPE_GLOB = 0x1,
        GSI_ERR_TYPE_CHAN = 0x2,
        GSI_ERR_TYPE_EVT = 0x3,
};

/* Hardware values used when programming an event ring */
enum gsi_evt_chtype {
        GSI_EVT_CHTYPE_MHI_EV = 0x0,
        GSI_EVT_CHTYPE_XHCI_EV = 0x1,
        GSI_EVT_CHTYPE_GPI_EV = 0x2,
        GSI_EVT_CHTYPE_XDCI_EV = 0x3,
};

/* Hardware values used when programming a channel */
enum gsi_channel_protocol {
        GSI_CHANNEL_PROTOCOL_MHI = 0x0,
        GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
        GSI_CHANNEL_PROTOCOL_GPI = 0x2,
        GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
};

/* Hardware values representing an event ring immediate command opcode */
enum gsi_evt_cmd_opcode {
        GSI_EVT_ALLOCATE = 0x0,
        GSI_EVT_RESET = 0x9,
        GSI_EVT_DE_ALLOC = 0xa,
};

/* Hardware values representing a generic immediate command opcode */
enum gsi_generic_cmd_opcode {
        GSI_GENERIC_HALT_CHANNEL = 0x1,
        GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
};

/* Hardware values representing a channel immediate command opcode */
enum gsi_ch_cmd_opcode {
        GSI_CH_ALLOCATE = 0x0,
        GSI_CH_START = 0x1,
        GSI_CH_STOP = 0x2,
        GSI_CH_RESET = 0x9,
        GSI_CH_DE_ALLOC = 0xa,
};

/**
 * struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *      Defines the maximum number of TREs allowed in a single transaction
 *      on a channel, expressed in bytes. This determines the amount of
 *      prefetch performed by the hardware. We configure this to equal
 *      the size of the TLV FIFO for the channel.
 * @outstanding_threshold:
 *      Defines the threshold (in bytes) determining when the sequencer
 *      should update the channel doorbell. We configure this to equal
 *      the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
        u64 reserved1;
        u16 reserved2;
        u16 max_outstanding_tre;
        u16 reserved3;
        u16 outstanding_threshold;
};

/**
 * union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
        struct gsi_channel_scratch_gpi gpi;
        struct {
                u32 word1;
                u32 word2;
                u32 word3;
                u32 word4;
        } data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
        /* This is used as a divisor */
        BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

        /* Code assumes the sizes of channel and event ring elements are
         * the same (and fixed). Make sure the size of an event ring
         * element is what's expected.
         */
        BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

        /* Hardware requires a 2^n ring size. We ensure the number of
         * elements in an event ring is a power of 2 elsewhere; this
         * ensures the elements themselves meet the requirement.
         */
        BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

        /* The channel element size must fit in this field */
        BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

        /* The event ring element size must fit in this field */
        BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
        return channel - &channel->gsi->channel[0];
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val;

        gsi->event_enable_bitmap |= BIT(evt_ring_id);
        val = gsi->event_enable_bitmap;
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
{
        iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val;

        gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
        val = gsi->event_enable_bitmap;
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
        u32 val;

        /* We don't use inter-EE channel or event interrupts */
        val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
        val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
        val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
        iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);

        val = GENMASK(gsi->channel_count - 1, 0);
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

        val = GENMASK(gsi->evt_ring_count - 1, 0);
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

        /* Each IEOB interrupt is enabled (later) as needed by channels */
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

        val = GSI_CNTXT_GLOB_IRQ_ALL;
        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

        /* Never enable GSI_BREAK_POINT */
        val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
        iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
        /* Note: index *must* be used modulo the ring count here */
        return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
        return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
        return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
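
/* A worked example for the two helpers above (illustrative numbers, not
 * taken from hardware): with a ring whose base DMA address is 0x1000 and
 * a 16-byte GSI_RING_ELEMENT_SIZE (the size of struct gsi_event),
 * gsi_ring_addr(ring, 3) yields 0x1030, and gsi_ring_index(ring, 0x1030)
 * recovers index 3.
 */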

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
        reinit_completion(completion);

        iowrite32(val, gsi->virt + reg);

        return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}
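
/* A note on gsi_command() above: GSI_CMD_TIMEOUT is expressed in seconds,
 * so it is multiplied by HZ to produce the jiffies count that
 * wait_for_completion_timeout() expects.
 */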

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

        return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
                            enum gsi_evt_cmd_opcode opcode)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        struct completion *completion = &evt_ring->completion;
        u32 val;

        val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
        val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

        if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
                return 0;       /* Success! */

        dev_err(gsi->dev, "GSI command %u to event ring %u timed out (state is %u)\n",
                opcode, evt_ring_id, evt_ring->state);

        return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        int ret;

        /* Get initial event ring state */
        evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

        if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
                return -EINVAL;

        ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
        if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
                dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
                        evt_ring->state);
                ret = -EIO;
        }

        return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        enum gsi_evt_ring_state state = evt_ring->state;
        int ret;

        if (state != GSI_EVT_RING_STATE_ALLOCATED &&
            state != GSI_EVT_RING_STATE_ERROR) {
                dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
                        evt_ring->state);
                return;
        }

        ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
        if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
                dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
                        evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        int ret;

        if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
                dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
                        evt_ring->state);
                return;
        }

        ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
        if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
                dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
                        evt_ring->state);
}

/* Return the hardware's notion of the current state of a channel */
static enum gsi_channel_state
gsi_channel_state(struct gsi *gsi, u32 channel_id)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

        return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
        struct completion *completion = &channel->completion;
        u32 channel_id = gsi_channel_id(channel);
        u32 val;

        val = u32_encode_bits(channel_id, CH_CHID_FMASK);
        val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);

        if (gsi_command(channel->gsi, GSI_CH_CMD_OFFSET, val, completion))
                return 0;       /* Success! */

        dev_err(channel->gsi->dev, "GSI command %u to channel %u timed out (state is %u)\n",
                opcode, channel_id, channel->state);

        return -ETIMEDOUT;
}

/* Allocate a GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        /* Get initial channel state */
        channel->state = gsi_channel_state(gsi, channel_id);

        if (channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
                return -EINVAL;

        ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
        if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
                dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
                        channel->state);
                ret = -EIO;
        }

        return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
        enum gsi_channel_state state = channel->state;
        int ret;

        if (state != GSI_CHANNEL_STATE_ALLOCATED &&
            state != GSI_CHANNEL_STATE_STOPPED)
                return -EINVAL;

        ret = gsi_channel_command(channel, GSI_CH_START);
        if (!ret && channel->state != GSI_CHANNEL_STATE_STARTED) {
                dev_err(channel->gsi->dev,
                        "bad channel state (%u) after start\n",
                        channel->state);
                ret = -EIO;
        }

        return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
        enum gsi_channel_state state = channel->state;
        int ret;

        if (state != GSI_CHANNEL_STATE_STARTED &&
            state != GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EINVAL;

        ret = gsi_channel_command(channel, GSI_CH_STOP);
        if (ret || channel->state == GSI_CHANNEL_STATE_STOPPED)
                return ret;

        /* We may have to try again if stop is in progress */
        if (channel->state == GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EAGAIN;

        dev_err(channel->gsi->dev, "bad channel state (%u) after stop\n",
                channel->state);

        return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
        int ret;

        msleep(1);      /* A short delay is required before a RESET command */

        if (channel->state != GSI_CHANNEL_STATE_STOPPED &&
            channel->state != GSI_CHANNEL_STATE_ERROR) {
                dev_err(channel->gsi->dev,
                        "bad channel state (%u) before reset\n",
                        channel->state);
                return;
        }

        ret = gsi_channel_command(channel, GSI_CH_RESET);
        if (!ret && channel->state != GSI_CHANNEL_STATE_ALLOCATED)
                dev_err(channel->gsi->dev,
                        "bad channel state (%u) after reset\n",
                        channel->state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        if (channel->state != GSI_CHANNEL_STATE_ALLOCATED) {
                dev_err(gsi->dev, "bad channel state (%u) before dealloc\n",
                        channel->state);
                return;
        }

        ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
        if (!ret && channel->state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
                dev_err(gsi->dev, "bad channel state (%u) after dealloc\n",
                        channel->state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
        struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
        u32 val;

        ring->index = index;    /* Next unused entry */

        /* Note: index *must* be used modulo the ring count here */
        val = gsi_ring_addr(ring, (index - 1) % ring->count);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
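
/* A note on the doorbell index math above: ring counts are powers of two
 * and index is unsigned, so when gsi_evt_ring_program() supplies index 0
 * the expression (index - 1) % ring->count wraps to the last element in
 * the ring, which is what the hardware expects.
 */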

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
        u32 val;

        val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
        val |= EV_INTYPE_FMASK;
        val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

        val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

        /* The context 2 and 3 registers store the low-order and
         * high-order 32 bits of the address of the event ring,
         * respectively.
         */
        val = evt_ring->ring.addr & GENMASK(31, 0);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

        val = evt_ring->ring.addr >> 32;
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

        /* Enable interrupt moderation by setting the moderation delay */
        val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
        val |= u32_encode_bits(1, MODC_FMASK);  /* comes from channel */
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

        /* No MSI write data, and MSI address (high and low) is 0 */
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

        /* We don't need to get event read pointer updates */
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

        /* Finally, tell the hardware we've completed event 0 (arbitrary) */
        gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct gsi_trans *trans;

        spin_lock_bh(&trans_info->spinlock);

        if (!list_empty(&trans_info->complete))
                trans = list_last_entry(&trans_info->complete,
                                        struct gsi_trans, links);
        else if (!list_empty(&trans_info->polled))
                trans = list_last_entry(&trans_info->polled,
                                        struct gsi_trans, links);
        else
                trans = NULL;

        /* Caller will wait for this, so take a reference */
        if (trans)
                refcount_inc(&trans->refcount);

        spin_unlock_bh(&trans_info->spinlock);

        return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
        struct gsi_trans *trans;

        /* Get the last transaction, and wait for it to complete */
        trans = gsi_channel_trans_last(channel);
        if (trans) {
                wait_for_completion(&trans->completion);
                gsi_trans_free(trans);
        }
}

/* Stop channel activity. Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
        gsi_channel_trans_quiesce(channel);

        napi_disable(&channel->napi);

        gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
        gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

        napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
        size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
        u32 channel_id = gsi_channel_id(channel);
        union gsi_channel_scratch scr = { };
        struct gsi_channel_scratch_gpi *gpi;
        struct gsi *gsi = channel->gsi;
        u32 wrr_weight = 0;
        u32 val;

        /* Arbitrarily pick TRE 0 as the first channel element to use */
        channel->tre_ring.index = 0;

        /* We program all channels to use GPI protocol */
        val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
        if (channel->toward_ipa)
                val |= CHTYPE_DIR_FMASK;
        val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
        val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

        val = u32_encode_bits(size, R_LENGTH_FMASK);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

        /* The context 2 and 3 registers store the low-order and
         * high-order 32 bits of the address of the channel ring,
         * respectively.
         */
        val = channel->tre_ring.addr & GENMASK(31, 0);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

        val = channel->tre_ring.addr >> 32;
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

        /* Command channel gets low weighted round-robin priority */
        if (channel->command)
                wrr_weight = field_max(WRR_WEIGHT_FMASK);
        val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

        /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

        /* Enable the doorbell engine if requested */
        if (doorbell)
                val |= USE_DB_ENG_FMASK;

        if (!channel->use_prefetch)
                val |= USE_ESCAPE_BUF_ONLY_FMASK;

        iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

        /* Now update the scratch registers for GPI protocol */
        gpi = &scr.gpi;
        gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
                                        GSI_RING_ELEMENT_SIZE;
        gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

        val = scr.data.word1;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

        val = scr.data.word2;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

        val = scr.data.word3;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

        /* We must preserve the low-order 16 bits of the last scratch
         * register. The next sequence assumes those bits remain unchanged
         * between the read and the write.
         */
        val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
        val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

        /* All done! */
}
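
/* A note on the scratch programming above: given the little-endian field
 * layout of struct gsi_channel_scratch_gpi, max_outstanding_tre occupies
 * the high-order half of scratch word 3 and outstanding_threshold the
 * high-order half of scratch word 4, which is why only the high-order
 * 16 bits of the last scratch register are written.
 */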

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
        /* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 evt_ring_id = channel->evt_ring_id;
        int ret;

        mutex_lock(&gsi->mutex);

        ret = gsi_channel_start_command(channel);

        mutex_unlock(&gsi->mutex);

        /* Clear the channel's event ring interrupt in case it's pending */
        gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));

        gsi_channel_thaw(channel);

        return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 retries;
        int ret;

        gsi_channel_freeze(channel);

        /* Channel could have entered STOPPED state since last call if the
         * STOP command timed out. We won't stop a channel if stopping it
         * was successful previously (so we still want the freeze above).
         */
        if (channel->state == GSI_CHANNEL_STATE_STOPPED)
                return 0;

        /* RX channels might require a little time to enter STOPPED state */
        retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

        mutex_lock(&gsi->mutex);

        do {
                ret = gsi_channel_stop_command(channel);
                if (ret != -EAGAIN)
                        break;
                msleep(1);
        } while (retries--);

        mutex_unlock(&gsi->mutex);

        /* Thaw the channel if we need to retry (or on error) */
        if (ret)
                gsi_channel_thaw(channel);

        return ret;
}

/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        mutex_lock(&gsi->mutex);

        /* Due to a hardware quirk we need to reset RX channels twice. */
        gsi_channel_reset_command(channel);
        if (!channel->toward_ipa)
                gsi_channel_reset_command(channel);

        gsi_channel_program(channel, db_enable);
        gsi_channel_trans_cancel_pending(channel);

        mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        if (stop)
                return gsi_channel_stop(gsi, channel_id);

        gsi_channel_freeze(channel);

        return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        if (start)
                return gsi_channel_start(gsi, channel_id);

        gsi_channel_thaw(channel);

        return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:    Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since the last call. This and the next
 * function supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
        u32 trans_count;
        u32 byte_count;

        byte_count = channel->byte_count - channel->queued_byte_count;
        trans_count = channel->trans_count - channel->queued_trans_count;
        channel->queued_byte_count = channel->byte_count;
        channel->queued_trans_count = channel->trans_count;

        ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
                                  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:    Channel that has completed transmitting packets
 * @trans:      Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
        u64 byte_count = trans->byte_count + trans->len;
        u64 trans_count = trans->trans_count + 1;

        byte_count -= channel->compl_byte_count;
        channel->compl_byte_count += byte_count;
        trans_count -= channel->compl_trans_count;
        channel->compl_trans_count += trans_count;

        ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
                                     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
        u32 channel_mask;

        channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
        iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

        while (channel_mask) {
                u32 channel_id = __ffs(channel_mask);
                struct gsi_channel *channel;

                channel_mask ^= BIT(channel_id);

                channel = &gsi->channel[channel_id];
                channel->state = gsi_channel_state(gsi, channel_id);

                complete(&channel->completion);
        }
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
        u32 event_mask;

        event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
        iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

        while (event_mask) {
                u32 evt_ring_id = __ffs(event_mask);
                struct gsi_evt_ring *evt_ring;

                event_mask ^= BIT(evt_ring_id);

                evt_ring = &gsi->evt_ring[evt_ring_id];
                evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

                complete(&evt_ring->completion);
        }
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
        if (code == GSI_OUT_OF_RESOURCES_ERR) {
                dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
                complete(&gsi->channel[channel_id].completion);
                return;
        }

        /* Report, but otherwise ignore all other error codes */
        dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
                channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
        if (code == GSI_OUT_OF_RESOURCES_ERR) {
                struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
                u32 channel_id = gsi_channel_id(evt_ring->channel);

                complete(&evt_ring->completion);
                dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
                        channel_id);
                return;
        }

        /* Report, but otherwise ignore all other error codes */
        dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
                evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
        enum gsi_err_type type;
        enum gsi_err_code code;
        u32 which;
        u32 val;
        u32 ee;

        /* Get the logged error, then reinitialize the log */
        val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
        iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
        iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

        ee = u32_get_bits(val, ERR_EE_FMASK);
        which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
        type = u32_get_bits(val, ERR_TYPE_FMASK);
        code = u32_get_bits(val, ERR_CODE_FMASK);

        if (type == GSI_ERR_TYPE_CHAN)
                gsi_isr_glob_chan_err(gsi, ee, which, code);
        else if (type == GSI_ERR_TYPE_EVT)
                gsi_isr_glob_evt_err(gsi, ee, which, code);
        else    /* type GSI_ERR_TYPE_GLOB should be fatal */
                dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
        u32 result;
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
        result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
        if (result != GENERIC_EE_SUCCESS_FVAL)
                dev_err(gsi->dev, "global INT1 generic result %u\n", result);

        complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

        if (val & ERROR_INT_FMASK)
                gsi_isr_glob_err(gsi);

        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

        val &= ~ERROR_INT_FMASK;

        if (val & EN_GP_INT1_FMASK) {
                val ^= EN_GP_INT1_FMASK;
                gsi_isr_gp_int1(gsi);
        }

        if (val)
                dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
        u32 event_mask;

        event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
        gsi_isr_ieob_clear(gsi, event_mask);

        while (event_mask) {
                u32 evt_ring_id = __ffs(event_mask);

                event_mask ^= BIT(evt_ring_id);

                gsi_irq_ieob_disable(gsi, evt_ring_id);
                napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
        }
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
        struct device *dev = gsi->dev;
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
        iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

        if (val)
                dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:        Interrupt number (ignored)
 * @dev_id:     GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
        struct gsi *gsi = dev_id;
        u32 intr_mask;
        u32 cnt = 0;

        while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
                /* intr_mask contains bitmask of pending GSI interrupts */
                do {
                        u32 gsi_intr = BIT(__ffs(intr_mask));

                        intr_mask ^= gsi_intr;

                        switch (gsi_intr) {
                        case CH_CTRL_FMASK:
                                gsi_isr_chan_ctrl(gsi);
                                break;
                        case EV_CTRL_FMASK:
                                gsi_isr_evt_ctrl(gsi);
                                break;
                        case GLOB_EE_FMASK:
                                gsi_isr_glob_ee(gsi);
                                break;
                        case IEOB_FMASK:
                                gsi_isr_ieob(gsi);
                                break;
                        case GENERAL_FMASK:
                                gsi_isr_general(gsi);
                                break;
                        default:
                                dev_err(gsi->dev,
                                        "%s: unrecognized type 0x%08x\n",
                                        __func__, gsi_intr);
                                break;
                        }
                } while (intr_mask);

                if (++cnt > GSI_ISR_MAX_ITER) {
                        dev_err(gsi->dev, "interrupt flood\n");
                        break;
                }
        }

        return IRQ_HANDLED;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
                                         struct gsi_event *event)
{
        u32 tre_offset;
        u32 tre_index;

        /* Event xfer_ptr records the TRE it's associated with */
        tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
        tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

        return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:   Event ring associated with channel that received packets
 * @index:      Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field
 * identifies the first entry in need of processing. The index provided is
 * the first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
        struct gsi_channel *channel = evt_ring->channel;
        struct gsi_ring *ring = &evt_ring->ring;
        struct gsi_trans_info *trans_info;
        struct gsi_event *event_done;
        struct gsi_event *event;
        struct gsi_trans *trans;
        u32 byte_count = 0;
        u32 old_index;
        u32 event_avail;

        trans_info = &channel->trans_info;

        /* We'll start with the oldest un-processed event. RX channels
         * replenish receive buffers in single-TRE transactions, so we
         * can just map that event to its transaction. Transactions
         * associated with completion events are consecutive.
         */
        old_index = ring->index;
        event = gsi_ring_virt(ring, old_index);
        trans = gsi_event_trans(channel, event);

        /* Compute the number of events to process before we wrap,
         * and determine when we'll be done processing events.
         */
        event_avail = ring->count - old_index % ring->count;
        event_done = gsi_ring_virt(ring, index);
        do {
                trans->len = __le16_to_cpu(event->len);
                byte_count += trans->len;

                /* Move on to the next event and transaction */
                if (--event_avail)
                        event++;
                else
                        event = gsi_ring_virt(ring, 0);
                trans = gsi_trans_pool_next(&trans_info->pool, trans);
        } while (event != event_done);

        /* We record RX bytes when they are received */
        channel->byte_count += byte_count;
        channel->trans_count++;
}

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
        size_t size = count * GSI_RING_ELEMENT_SIZE;
        struct device *dev = gsi->dev;
        dma_addr_t addr;

        /* Hardware requires a 2^n ring size, with alignment equal to size */
        ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (ring->virt && addr % size) {
                dma_free_coherent(dev, size, ring->virt, addr);
                dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
                        size);
                return -EINVAL; /* Not a good error value, but distinct */
        } else if (!ring->virt) {
                return -ENOMEM;
        }
        ring->addr = addr;
        ring->count = count;

        return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
        size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

        dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
        u32 evt_ring_id;

        if (gsi->event_bitmap == ~0U) {
                dev_err(gsi->dev, "event rings exhausted\n");
                return -ENOSPC;
        }

        evt_ring_id = ffz(gsi->event_bitmap);
        gsi->event_bitmap |= BIT(evt_ring_id);

        return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
        gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first unfilled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
        struct gsi_ring *tre_ring = &channel->tre_ring;
        u32 channel_id = gsi_channel_id(channel);
        struct gsi *gsi = channel->gsi;
        u32 val;

        /* Note: index *must* be used modulo the ring count here */
        val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
        iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
        u32 evt_ring_id = channel->evt_ring_id;
        struct gsi *gsi = channel->gsi;
        struct gsi_evt_ring *evt_ring;
        struct gsi_trans *trans;
        struct gsi_ring *ring;
        u32 offset;
        u32 index;

        evt_ring = &gsi->evt_ring[evt_ring_id];
        ring = &evt_ring->ring;

        /* See if there's anything new to process; if not, we're done. Note
         * that index always refers to an entry *within* the event ring.
         */
        offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
        index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
        if (index == ring->index % ring->count)
                return;

        /* Get the transaction for the latest completed event. Take a
         * reference to keep it from completing before we give the events
         * for this and previous transactions back to the hardware.
         */
        trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
        refcount_inc(&trans->refcount);

        /* For RX channels, update each completed transaction with the number
         * of bytes that were actually received. For TX channels, report
         * the number of transactions and bytes this completion represents
         * up the network stack.
         */
        if (channel->toward_ipa)
                gsi_channel_tx_update(channel, trans);
        else
                gsi_evt_ring_rx_update(evt_ring, index);

        gsi_trans_move_complete(trans);

        /* Tell the hardware we've handled these events */
        gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

        gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel:    Channel to be polled
 *
 * Return:      Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
        struct gsi_trans *trans;

        /* Get the first transaction from the completed list */
        trans = gsi_channel_trans_complete(channel);
        if (!trans) {
                /* List is empty; see if there's more to do */
                gsi_channel_update(channel);
                trans = gsi_channel_trans_complete(channel);
        }

        if (trans)
                gsi_trans_move_polled(trans);

        return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi:       NAPI structure for the channel
 * @budget:     Budget supplied by NAPI core
 *
 * Return:      Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
        struct gsi_channel *channel;
        int count = 0;

        channel = container_of(napi, struct gsi_channel, napi);
        while (count < budget) {
                struct gsi_trans *trans;

                trans = gsi_channel_poll_one(channel);
                if (!trans)
                        break;
                gsi_trans_complete(trans);
                count++;
        }

        if (count < budget) {
                napi_complete(&channel->napi);
                gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
        }

        return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
        u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

        event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

        return event_bitmap;
}
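
/* An example of the bitmap above (illustrative numbers): if the hardware
 * supported 16 event rings, bits 16 and above would start out set (no
 * hardware support), bits 10..16 (the reserved MHI event ids) would also
 * be set, and event ids 0..9 would remain available for allocation.
 */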

/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
        /* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
        /* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
                                 bool db_enable)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        u32 evt_ring_id = channel->evt_ring_id;
        int ret;

        if (!channel->gsi)
                return 0;       /* Ignore uninitialized channels */

        ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
        if (ret)
                return ret;

        gsi_evt_ring_program(gsi, evt_ring_id);

        ret = gsi_channel_alloc_command(gsi, channel_id);
        if (ret)
                goto err_evt_ring_de_alloc;

        gsi_channel_program(channel, db_enable);

        if (channel->toward_ipa)
                netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
                                  gsi_channel_poll, NAPI_POLL_WEIGHT);
        else
                netif_napi_add(&gsi->dummy_dev, &channel->napi,
                               gsi_channel_poll, NAPI_POLL_WEIGHT);

        return 0;

err_evt_ring_de_alloc:
        /* We've done nothing with the event ring yet so don't reset */
        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

        return ret;
}
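
/* TX channels use netif_tx_napi_add() above because their NAPI context
 * polls only a transmit queue; among other things this keeps the context
 * out of the hash table used for busy polling. RX channels use the
 * normal netif_napi_add().
 */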
1473
1474 /* Inverse of gsi_channel_setup_one() */
1475 static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1476 {
1477 struct gsi_channel *channel = &gsi->channel[channel_id];
1478 u32 evt_ring_id = channel->evt_ring_id;
1479
1480 if (!channel->gsi)
1481 return; /* Ignore uninitialized channels */
1482
1483 netif_napi_del(&channel->napi);
1484
1485 gsi_channel_deprogram(channel);
1486 gsi_channel_de_alloc_command(gsi, channel_id);
1487 gsi_evt_ring_reset_command(gsi, evt_ring_id);
1488 gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1489 }
1490
1491 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1492 enum gsi_generic_cmd_opcode opcode)
1493 {
1494 struct completion *completion = &gsi->completion;
1495 u32 val;
1496
1497 /* First zero the result code field */
1498 val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1499 val &= ~GENERIC_EE_RESULT_FMASK;
1500 iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1501
1502 /* Now issue the command */
1503 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1504 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1505 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1506
1507 if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
1508 return 0; /* Success! */
1509
1510 dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1511 opcode, channel_id);
1512
1513 return -ETIMEDOUT;
1514 }
1515
1516 static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1517 {
1518 return gsi_generic_command(gsi, channel_id,
1519 GSI_GENERIC_ALLOCATE_CHANNEL);
1520 }
1521
1522 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1523 {
1524 int ret;
1525
1526 ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
1527 if (ret)
1528 dev_err(gsi->dev, "error %d halting modem channel %u\n",
1529 ret, channel_id);
1530 }
1531
1532 /* Setup function for channels */
1533 static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
1534 {
1535 u32 channel_id = 0;
1536 u32 mask;
1537 int ret;
1538
1539 gsi_evt_ring_setup(gsi);
1540 gsi_irq_enable(gsi);
1541
1542 mutex_lock(&gsi->mutex);
1543
1544 do {
1545 ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
1546 if (ret)
1547 goto err_unwind;
1548 } while (++channel_id < gsi->channel_count);
1549
1550 /* Make sure no channels were defined that hardware does not support */
1551 while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1552 struct gsi_channel *channel = &gsi->channel[channel_id++];
1553
1554 if (!channel->gsi)
1555 continue; /* Ignore uninitialized channels */
1556
1557 dev_err(gsi->dev, "channel %u not supported by hardware\n",
1558 channel_id - 1);
1559 channel_id = gsi->channel_count;
1560 goto err_unwind;
1561 }
1562
1563 /* Allocate modem channels if necessary */
1564 mask = gsi->modem_channel_bitmap;
1565 while (mask) {
1566 u32 modem_channel_id = __ffs(mask);
1567
1568 ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1569 if (ret)
1570 goto err_unwind_modem;
1571
1572 /* Clear bit from mask only after success (for unwind) */
1573 mask ^= BIT(modem_channel_id);
1574 }
1575
1576 mutex_unlock(&gsi->mutex);
1577
1578 return 0;
1579
1580 err_unwind_modem:
1581 /* Compute which modem channels need to be deallocated */
1582 mask ^= gsi->modem_channel_bitmap;
1583 while (mask) {
1584 u32 channel_id = __fls(mask);
1585
1586 mask ^= BIT(channel_id);
1587
1588 gsi_modem_channel_halt(gsi, channel_id);
1589 }
1590
1591 err_unwind:
1592 while (channel_id--)
1593 gsi_channel_teardown_one(gsi, channel_id);
1594
1595 mutex_unlock(&gsi->mutex);
1596
1597 gsi_irq_disable(gsi);
1598 gsi_evt_ring_teardown(gsi);
1599
1600 return ret;
1601 }
1602
1603 /* Inverse of gsi_channel_setup() */
1604 static void gsi_channel_teardown(struct gsi *gsi)
1605 {
1606 u32 mask = gsi->modem_channel_bitmap;
1607 u32 channel_id;
1608
1609 mutex_lock(&gsi->mutex);
1610
1611 while (mask) {
1612 u32 channel_id = __fls(mask);
1613
1614 mask ^= BIT(channel_id);
1615
1616 gsi_modem_channel_halt(gsi, channel_id);
1617 }
1618
1619 channel_id = gsi->channel_count - 1;
1620 do
1621 gsi_channel_teardown_one(gsi, channel_id);
1622 while (channel_id--);
1623
1624 mutex_unlock(&gsi->mutex);
1625
1626 gsi_irq_disable(gsi);
1627 gsi_evt_ring_teardown(gsi);
1628 }
1629
1630 /* Setup function for GSI. GSI firmware must be loaded and initialized */
1631 int gsi_setup(struct gsi *gsi, bool db_enable)
1632 {
1633 u32 val;
1634
1635 /* Here is where we first touch the GSI hardware */
1636 val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1637 if (!(val & ENABLED_FMASK)) {
1638 dev_err(gsi->dev, "GSI has not been enabled\n");
1639 return -EIO;
1640 }
1641
1642 val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1643
1644 gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1645 if (!gsi->channel_count) {
1646 dev_err(gsi->dev, "GSI reports zero channels supported\n");
1647 return -EINVAL;
1648 }
1649 if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
1650 dev_warn(gsi->dev,
1651 "limiting to %u channels (hardware supports %u)\n",
1652 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
1653 gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1654 }
1655
1656 gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1657 if (!gsi->evt_ring_count) {
1658 dev_err(gsi->dev, "GSI reports zero event rings supported\n");
1659 return -EINVAL;
1660 }
1661 if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
1662 dev_warn(gsi->dev,
1663 "limiting to %u event rings (hardware supports %u)\n",
1664 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
1665 gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1666 }
1667
1668 /* Initialize the error log */
1669 iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1670
1671 /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1672 iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1673
1674 return gsi_channel_setup(gsi, db_enable);
1675 }
1676
1677 /* Inverse of gsi_setup() */
1678 void gsi_teardown(struct gsi *gsi)
1679 {
1680 gsi_channel_teardown(gsi);
1681 }
1682
1683 /* Initialize a channel's event ring */
1684 static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1685 {
1686 struct gsi *gsi = channel->gsi;
1687 struct gsi_evt_ring *evt_ring;
1688 int ret;
1689
1690 ret = gsi_evt_ring_id_alloc(gsi);
1691 if (ret < 0)
1692 return ret;
1693 channel->evt_ring_id = ret;
1694
1695 evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1696 evt_ring->channel = channel;
1697
1698 ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1699 if (!ret)
1700 return 0; /* Success! */
1701
1702 dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1703 ret, gsi_channel_id(channel));
1704
1705 gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1706
1707 return ret;
1708 }
1709
1710 /* Inverse of gsi_channel_evt_ring_init() */
1711 static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
1712 {
1713 u32 evt_ring_id = channel->evt_ring_id;
1714 struct gsi *gsi = channel->gsi;
1715 struct gsi_evt_ring *evt_ring;
1716
1717 evt_ring = &gsi->evt_ring[evt_ring_id];
1718 gsi_ring_free(gsi, &evt_ring->ring);
1719 gsi_evt_ring_id_free(gsi, evt_ring_id);
1720 }
1721
1722 /* Init function for event rings */
1723 static void gsi_evt_ring_init(struct gsi *gsi)
1724 {
1725 u32 evt_ring_id = 0;
1726
1727 gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
1728 gsi->event_enable_bitmap = 0;
1729 do
1730 init_completion(&gsi->evt_ring[evt_ring_id].completion);
1731 while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
1732 }
1733
1734 /* Inverse of gsi_evt_ring_init() */
1735 static void gsi_evt_ring_exit(struct gsi *gsi)
1736 {
1737 /* Nothing to do */
1738 }
1739
1740 static bool gsi_channel_data_valid(struct gsi *gsi,
1741 const struct ipa_gsi_endpoint_data *data)
1742 {
1743 #ifdef IPA_VALIDATION
1744 u32 channel_id = data->channel_id;
1745 struct device *dev = gsi->dev;
1746
1747 /* Make sure channel ids are in the range driver supports */
1748 if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
1749 dev_err(dev, "bad channel id %u (must be less than %u)\n",
1750 channel_id, GSI_CHANNEL_COUNT_MAX);
1751 return false;
1752 }
1753
1754 if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
1755 dev_err(dev, "bad EE id %u (must be AP or modem)\n", data->ee_id);
1756 return false;
1757 }
1758
1759 if (!data->channel.tlv_count ||
1760 data->channel.tlv_count > GSI_TLV_MAX) {
1761 dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
1762 channel_id, data->channel.tlv_count, GSI_TLV_MAX);
1763 return false;
1764 }
1765
1766 /* We have to allow at least one maximally-sized transaction to
1767 * be outstanding (which would use tlv_count TREs). Given how
1768 * gsi_channel_tre_max() is computed, tre_count has to be almost
1769 * twice the TLV FIFO size to satisfy this requirement.
1770 */
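/* Illustrative numbers (not taken from any real configuration data):
* with tlv_count = 8, the check below requires tre_count >= 2 * 8 - 1,
* or 15; and because tre_count must also be a power of 2 (checked
* next), the smallest workable value is 16.
*/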
1771 if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
1772 dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
1773 channel_id, data->channel.tlv_count,
1774 data->channel.tre_count);
1775 return false;
1776 }
1777
1778 if (!is_power_of_2(data->channel.tre_count)) {
1779 dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
1780 channel_id, data->channel.tre_count);
1781 return false;
1782 }
1783
1784 if (!is_power_of_2(data->channel.event_count)) {
1785 dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
1786 channel_id, data->channel.event_count);
1787 return false;
1788 }
1789 #endif /* IPA_VALIDATION */
1790
1791 return true;
1792 }
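/* For reference, a hypothetical endpoint configuration that passes
* every check above (values chosen purely for illustration):
* channel_id = 1 (less than GSI_CHANNEL_COUNT_MAX), ee_id = GSI_EE_AP,
* tlv_count = 8 (in 1..GSI_TLV_MAX), tre_count = 256 (a power of 2,
* and >= 2 * 8 - 1), event_count = 256 (a power of 2).
*/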
1793
1794 /* Init function for a single channel */
1795 static int gsi_channel_init_one(struct gsi *gsi,
1796 const struct ipa_gsi_endpoint_data *data,
1797 bool command, bool prefetch)
1798 {
1799 struct gsi_channel *channel;
1800 u32 tre_count;
1801 int ret;
1802
1803 if (!gsi_channel_data_valid(gsi, data))
1804 return -EINVAL;
1805
1806 /* Worst case we need an event for every outstanding TRE */
1807 if (data->channel.tre_count > data->channel.event_count) {
1808 tre_count = data->channel.event_count;
1809 dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
1810 data->channel_id, tre_count);
1811 } else {
1812 tre_count = data->channel.tre_count;
1813 }
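/* For example (illustrative numbers): tre_count = 512 with
* event_count = 256 would be clamped to 256 usable TREs here, since
* in the worst case every outstanding TRE needs its own event ring
* slot.
*/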
1814
1815 channel = &gsi->channel[data->channel_id];
1816 memset(channel, 0, sizeof(*channel));
1817
1818 channel->gsi = gsi;
1819 channel->toward_ipa = data->toward_ipa;
1820 channel->command = command;
1821 channel->use_prefetch = command && prefetch;
1822 channel->tlv_count = data->channel.tlv_count;
1823 channel->tre_count = tre_count;
1824 channel->event_count = data->channel.event_count;
1825 init_completion(&channel->completion);
1826
1827 ret = gsi_channel_evt_ring_init(channel);
1828 if (ret)
1829 goto err_clear_gsi;
1830
1831 ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
1832 if (ret) {
1833 dev_err(gsi->dev, "error %d allocating channel %u ring\n",
1834 ret, data->channel_id);
1835 goto err_channel_evt_ring_exit;
1836 }
1837
1838 ret = gsi_channel_trans_init(gsi, data->channel_id);
1839 if (ret)
1840 goto err_ring_free;
1841
1842 if (command) {
1843 u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
1844
1845 ret = ipa_cmd_pool_init(channel, tre_max);
1846 }
1847 if (!ret)
1848 return 0; /* Success! */
1849
1850 gsi_channel_trans_exit(channel);
1851 err_ring_free:
1852 gsi_ring_free(gsi, &channel->tre_ring);
1853 err_channel_evt_ring_exit:
1854 gsi_channel_evt_ring_exit(channel);
1855 err_clear_gsi:
1856 channel->gsi = NULL; /* Mark it not (fully) initialized */
1857
1858 return ret;
1859 }
1860
1861 /* Inverse of gsi_channel_init_one() */
1862 static void gsi_channel_exit_one(struct gsi_channel *channel)
1863 {
1864 if (!channel->gsi)
1865 return; /* Ignore uninitialized channels */
1866
1867 if (channel->command)
1868 ipa_cmd_pool_exit(channel);
1869 gsi_channel_trans_exit(channel);
1870 gsi_ring_free(channel->gsi, &channel->tre_ring);
1871 gsi_channel_evt_ring_exit(channel);
1872 }
1873
1874 /* Init function for channels */
1875 static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
1876 const struct ipa_gsi_endpoint_data *data,
1877 bool modem_alloc)
1878 {
1879 int ret = 0;
1880 u32 i;
1881
1882 gsi_evt_ring_init(gsi);
1883
1884 /* The endpoint data array is indexed by endpoint name */
1885 for (i = 0; i < count; i++) {
1886 bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
1887
1888 if (ipa_gsi_endpoint_data_empty(&data[i]))
1889 continue; /* Skip over empty slots */
1890
1891 /* Mark modem channels to be allocated (hardware workaround) */
1892 if (data[i].ee_id == GSI_EE_MODEM) {
1893 if (modem_alloc)
1894 gsi->modem_channel_bitmap |=
1895 BIT(data[i].channel_id);
1896 continue;
1897 }
1898
1899 ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
1900 if (ret)
1901 goto err_unwind;
1902 }
1903
1904 return ret;
1905
1906 err_unwind:
1907 while (i--) {
1908 if (ipa_gsi_endpoint_data_empty(&data[i]))
1909 continue;
1910 if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
1911 gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
1912 continue;
1913 }
1914 gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
1915 }
1916 gsi_evt_ring_exit(gsi);
1917
1918 return ret;
1919 }
1920
1921 /* Inverse of gsi_channel_init() */
1922 static void gsi_channel_exit(struct gsi *gsi)
1923 {
1924 u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
1925
1926 do
1927 gsi_channel_exit_one(&gsi->channel[channel_id]);
1928 while (channel_id--);
1929 gsi->modem_channel_bitmap = 0;
1930
1931 gsi_evt_ring_exit(gsi);
1932 }
1933
1934 /* Init function for GSI. GSI hardware does not need to be "ready" */
1935 int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
1936 u32 count, const struct ipa_gsi_endpoint_data *data,
1937 bool modem_alloc)
1938 {
1939 struct resource *res;
1940 resource_size_t size;
1941 unsigned int irq;
1942 int ret;
1943
1944 gsi_validate_build();
1945
1946 gsi->dev = &pdev->dev;
1947
1948 /* The GSI layer performs NAPI on all endpoints. NAPI requires a
1949 * network device structure, but the GSI layer does not have one,
1950 * so we must create a dummy network device for this purpose.
1951 */
1952 init_dummy_netdev(&gsi->dummy_dev);
1953
1954 /* Get the GSI IRQ and arrange for it to wake the system */
1955 ret = platform_get_irq_byname(pdev, "gsi");
1956 if (ret <= 0) {
1957 dev_err(gsi->dev,
1958 "DT error %d getting \"gsi\" IRQ property\n", ret);
1959 return ret ? : -EINVAL;
1960 }
1961 irq = ret;
1962
1963 ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
1964 if (ret) {
1965 dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
1966 return ret;
1967 }
1968 gsi->irq = irq;
1969
1970 ret = enable_irq_wake(gsi->irq);
1971 if (ret)
1972 dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
1973 gsi->irq_wake_enabled = !ret;
1974
1975 /* Get GSI memory range and map it */
1976 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
1977 if (!res) {
1978 dev_err(gsi->dev,
1979 "DT error getting \"gsi\" memory property\n");
1980 ret = -ENODEV;
1981 goto err_disable_irq_wake;
1982 }
1983
1984 size = resource_size(res);
1985 if (res->start > U32_MAX || size > U32_MAX - res->start) {
1986 dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
1987 ret = -EINVAL;
1988 goto err_disable_irq_wake;
1989 }
1990
1991 gsi->virt = ioremap(res->start, size);
1992 if (!gsi->virt) {
1993 dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
1994 ret = -ENOMEM;
1995 goto err_disable_irq_wake;
1996 }
1997
1998 ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
1999 if (ret)
2000 goto err_iounmap;
2001
2002 mutex_init(&gsi->mutex);
2003 init_completion(&gsi->completion);
2004
2005 return 0;
2006
2007 err_iounmap:
2008 iounmap(gsi->virt);
2009 err_disable_irq_wake:
2010 if (gsi->irq_wake_enabled)
2011 (void)disable_irq_wake(gsi->irq);
2012 free_irq(gsi->irq, gsi);
2013
2014 return ret;
2015 }
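/* A minimal sketch of the expected caller sequence (assumed for
* illustration; the "ipa" structure and surrounding probe code are
* not part of this file). gsi_init() runs early in probe, and
* gsi_setup() only once GSI firmware has been loaded and started:
*
* ret = gsi_init(&ipa->gsi, pdev, prefetch, count, data, modem_alloc);
* if (ret)
* return ret;
* ...load and start GSI firmware...
* ret = gsi_setup(&ipa->gsi, db_enable);
*
* The inverse pair runs in reverse order: gsi_teardown(), then
* gsi_exit().
*/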
2016
2017 /* Inverse of gsi_init() */
2018 void gsi_exit(struct gsi *gsi)
2019 {
2020 mutex_destroy(&gsi->mutex);
2021 gsi_channel_exit(gsi);
2022 if (gsi->irq_wake_enabled)
2023 (void)disable_irq_wake(gsi->irq);
2024 free_irq(gsi->irq, gsi);
2025 iounmap(gsi->virt);
2026 }
2027
2028 /* The maximum number of outstanding TREs on a channel. This limits
2029 * a channel's maximum number of transactions outstanding (worst case
2030 * is one TRE per transaction).
2031 *
2032 * The absolute limit is the number of TREs in the channel's TRE ring,
2033 * and in theory we should be able to use all of them. But in practice,
2034 * doing that led to the hardware reporting exhaustion of event ring
2035 * slots for writing completion information. So the hardware limit
2036 * would be (tre_count - 1).
2037 *
2038 * We reduce it a bit further though. Transaction resource pools are
2039 * sized to be a little larger than this maximum, to allow resource
2040 * allocations to always be contiguous. The number of entries in a
2041 * TRE ring buffer is a power of 2, and the extra resources in a pool
2042 * tend to nearly double the memory allocated for it. Reducing the
2043 * maximum number of outstanding TREs allows the number of entries in
2044 * a pool to avoid crossing that power-of-2 boundary, and this can
2045 * substantially reduce pool memory requirements. The number we
2046 * reduce it by matches the number added in gsi_trans_pool_init().
2047 */
2048 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2049 {
2050 struct gsi_channel *channel = &gsi->channel[channel_id];
2051
2052 /* Hardware limit is channel->tre_count - 1 */
2053 return channel->tre_count - (channel->tlv_count - 1);
2054 }
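/* Worked example (illustrative numbers): with tre_count = 256 and
* tlv_count = 16, this returns 256 - (16 - 1) = 241. Assuming the
* transaction pool adds back roughly tlv_count - 1 extra entries (the
* comment above says the reduction matches the number added in
* gsi_trans_pool_init()), the pool holds about 241 + 15 = 256 entries
* and stays at the power-of-2 boundary, whereas the hardware limit of
* 255 outstanding TREs would push it past 256 and toward a
* nearly-doubled allocation.
*/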
2055
2056 /* Returns the maximum number of TREs in a single transaction for a channel */
2057 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2058 {
2059 struct gsi_channel *channel = &gsi->channel[channel_id];
2060
2061 return channel->tlv_count;
2062 }