// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.)  Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed.  When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command).  If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds.  This way,
 * exhaustion of the available TREs in a channel ring is detected as early
 * as possible.  All resources required to complete a transaction are
 * allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures.  This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed.  The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction.  Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core.  The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it.  Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
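
/* A minimal usage sketch of the flow described above (illustrative only;
 * this function is not called anywhere in the driver): allocate a
 * single-TRE transaction on a TX channel, describe a socket buffer with
 * it, and hand it to hardware.  It assumes a linear skb, so a single
 * scatterlist entry suffices; error handling is abbreviated.
 */
static int __maybe_unused gsi_trans_example_send(struct gsi *gsi,
                                                 u32 channel_id,
                                                 struct sk_buff *skb)
{
        struct gsi_trans *trans;
        int ret;

        /* Reserve one TRE; fails only if the channel ring is exhausted */
        trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
        if (!trans)
                return -EBUSY;

        /* Fill the transaction's scatterlist and map it for DMA */
        ret = gsi_trans_skb_add(trans, skb);
        if (ret) {
                gsi_trans_free(trans);
                return ret;
        }

        /* Ownership passes to the transaction core; ring the doorbell */
        gsi_trans_commit(trans, true);

        return 0;
}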

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
        GSI_RE_XFER     = 0x2,
        GSI_RE_IMMD_CMD = 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
        __le64 addr;            /* DMA address */
        __le16 len_opcode;      /* length in bytes or enum IPA_CMD_* */
        __le16 reserved;
        __le32 flags;           /* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK   GENMASK(0, 0)
#define TRE_FLAGS_IEOB_FMASK    GENMASK(8, 8)
#define TRE_FLAGS_IEOT_FMASK    GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK     GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK    GENMASK(23, 16)
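
/* For example, the flags word for the last TRE of a plain transfer
 * (GSI_RE_XFER, type 0x2) with IEOT set encodes as 0x00020200:
 * 0x2 << 16 for the type field, plus bit 9 for IEOT.
 */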

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
                        u32 max_alloc)
{
        void *virt;

#ifdef IPA_VALIDATE
        if (!size || size % 8)
                return -EINVAL;
        if (count < max_alloc)
                return -EINVAL;
        if (!max_alloc)
                return -EINVAL;
#endif /* IPA_VALIDATE */

        /* By allocating a few extra entries in our pool (one less
         * than the maximum number that will be requested in a
         * single allocation), we can always satisfy requests without
         * ever worrying about straddling the end of the pool array.
         * If there aren't enough entries starting at the free index,
         * we just allocate free entries from the beginning of the pool.
         */
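        /* A worked example with hypothetical numbers: for count = 8 and
         * max_alloc = 3 we allocate room for 10 entries.  If the free
         * index is 9 when 3 entries are requested, only 1 entry remains
         * before the end of the array, so the allocation restarts at
         * offset 0 rather than straddling the end.
         */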
        virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        pool->base = virt;
        /* If the allocator gave us any extra memory, use it */
        pool->count = ksize(pool->base) / size;
        pool->free = 0;
        pool->max_alloc = max_alloc;
        pool->size = size;
        pool->addr = 0;         /* Only used for DMA pools */

        return 0;
}

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
        kfree(pool->base);
        memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool.  This way we can preallocate and use the tre_count
 * to guarantee allocations will succeed.  Even though we specify max_alloc
 * (and it can be more than one), we only allow allocation of a single
 * element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
                            size_t size, u32 count, u32 max_alloc)
{
        size_t total_size;
        dma_addr_t addr;
        void *virt;

#ifdef IPA_VALIDATE
        if (!size || size % 8)
                return -EINVAL;
        if (count < max_alloc)
                return -EINVAL;
        if (!max_alloc)
                return -EINVAL;
#endif /* IPA_VALIDATE */

        /* Don't let allocations cross a power-of-two boundary */
        size = __roundup_pow_of_two(size);
        total_size = (count + max_alloc - 1) * size;

        /* The allocator will give us a power-of-2 number of pages
         * regardless of what we ask for, so explicitly request a full
         * power-of-2 number of pages.  That way we won't waste any
         * memory that would be available beyond the required space.
         */
        total_size = PAGE_SIZE << get_order(total_size);

        virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
        if (!virt)
                return -ENOMEM;

        pool->base = virt;
        pool->count = total_size / size;
        pool->free = 0;
        pool->size = size;
        pool->max_alloc = max_alloc;
        pool->addr = addr;

        return 0;
}

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
        /* The total allocated size is exactly the (power-of-2) element
         * size times the element count computed at init time.
         */
        size_t total_size = pool->count * pool->size;

        dma_free_coherent(dev, total_size, pool->base, pool->addr);
        memset(pool, 0, sizeof(*pool));
}

/* Allocate "count" zeroed entries; return the byte offset of the first */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
        u32 offset;

        /* assert(count > 0); */
        /* assert(count <= pool->max_alloc); */

        /* Allocate from beginning if wrap would occur */
        if (count > pool->count - pool->free)
                pool->free = 0;

        offset = pool->free * pool->size;
        pool->free += count;
        memset(pool->base + offset, 0, count * pool->size);

        return offset;
}

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
        return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
        u32 offset = gsi_trans_pool_alloc_common(pool, 1);

        *addr = pool->addr + offset;

        return pool->base + offset;
}
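
/* Illustrative sketch only (not used by the driver): initialize a DMA
 * pool of 64 32-byte elements, carve one element out of it, and obtain
 * both its virtual and DMA addresses.  The device pointer is assumed to
 * be a valid DMA-capable device.
 */
static void __maybe_unused gsi_trans_pool_dma_example(struct device *dev)
{
        struct gsi_trans_pool pool;
        dma_addr_t addr;
        void *virt;

        /* Element size must be a multiple of 8; DMA pools round it up
         * to a power of two internally.
         */
        if (gsi_trans_pool_init_dma(dev, &pool, 32, 64, 1))
                return;

        virt = gsi_trans_pool_alloc_dma(&pool, &addr);
        /* ... CPU accesses go through virt, hardware uses addr ... */
        (void)virt;

        gsi_trans_pool_exit_dma(dev, &pool);
}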

/* Return the pool element that immediately follows the one given.
 * This only works if elements are allocated one at a time.
 */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
        void *end = pool->base + pool->count * pool->size;

        /* assert(element >= pool->base); */
        /* assert(element < end); */
        /* assert(pool->max_alloc == 1); */
        element += pool->size;

        return element < end ? element : pool->base;
}

/* Map a given ring entry index to the transaction associated with it */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
                                  struct gsi_trans *trans)
{
        /* Note: index *must* be used modulo the ring count here */
        channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
        /* Note: index *must* be used modulo the ring count here */
        return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
        return list_first_entry_or_null(&channel->trans_info.complete,
                                        struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
        struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
        struct gsi_trans_info *trans_info = &channel->trans_info;

        spin_lock_bh(&trans_info->spinlock);

        list_move_tail(&trans->links, &trans_info->pending);

        spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
        struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct list_head list;

        spin_lock_bh(&trans_info->spinlock);

        /* Move this transaction and all predecessors to completed list */
        list_cut_position(&list, &trans_info->pending, &trans->links);
        list_splice_tail(&list, &trans_info->complete);

        spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
        struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
        struct gsi_trans_info *trans_info = &channel->trans_info;

        spin_lock_bh(&trans_info->spinlock);

        list_move_tail(&trans->links, &trans_info->polled);

        spin_unlock_bh(&trans_info->spinlock);
}

/* Reserve some number of TREs on a channel.  Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
        int avail = atomic_read(&trans_info->tre_avail);
        int new;

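        /* Lock-free reservation: if another CPU changes tre_avail between
         * the read and the cmpxchg, atomic_try_cmpxchg() updates "avail"
         * with the current value and we recompute until it succeeds.
         */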
        do {
                new = avail - (int)tre_count;
                if (unlikely(new < 0))
                        return false;
        } while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

        return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
        atomic_add(tre_count, &trans_info->tre_avail);
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
                                          u32 tre_count,
                                          enum dma_data_direction direction)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct gsi_trans_info *trans_info;
        struct gsi_trans *trans;

        /* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */

        trans_info = &channel->trans_info;

        /* We reserve the TREs now, but consume them at commit time.
         * If there aren't enough available, we're done.
         */
        if (!gsi_trans_tre_reserve(trans_info, tre_count))
                return NULL;

        /* Allocate and initialize non-zero fields in the transaction */
        trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
        trans->gsi = gsi;
        trans->channel_id = channel_id;
        trans->tre_count = tre_count;
        init_completion(&trans->completion);

        /* Allocate the scatterlist (any info[] array used for immediate
         * commands is supplied separately by the command code).
         */
        trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
        sg_init_marker(trans->sgl, tre_count);

        trans->direction = direction;

        spin_lock_bh(&trans_info->spinlock);

        list_add_tail(&trans->links, &trans_info->alloc);

        spin_unlock_bh(&trans_info->spinlock);

        refcount_set(&trans->refcount, 1);

        return trans;
}

/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
        struct gsi_trans_info *trans_info;

        if (!refcount_dec_and_test(&trans->refcount))
                return;

        trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

        spin_lock_bh(&trans_info->spinlock);

        list_del(&trans->links);

        spin_unlock_bh(&trans_info->spinlock);

        ipa_gsi_trans_release(trans);

        /* Releasing the reserved TREs implicitly frees the sgl[] and
         * (if present) info[] arrays, plus the transaction itself.
         */
        gsi_trans_tre_release(trans_info, trans->tre_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
                       dma_addr_t addr, enum dma_data_direction direction,
                       enum ipa_cmd_opcode opcode)
{
        struct ipa_cmd_info *info;
        u32 which = trans->used++;
        struct scatterlist *sg;

        /* assert(which < trans->tre_count); */

        /* Set the page information for the buffer.  We also need to fill in
         * the DMA address for the buffer (something dma_map_sg() normally
         * does).
         */
        sg = &trans->sgl[which];

        sg_set_buf(sg, buf, size);
        sg_dma_address(sg) = addr;

        info = &trans->info[which];
        info->opcode = opcode;
        info->direction = direction;
}

/* Add a page transfer to a transaction.  It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
                       u32 offset)
{
        struct scatterlist *sg = &trans->sgl[0];
        int ret;

        /* assert(trans->tre_count == 1); */
        /* assert(!trans->used); */

        sg_set_page(sg, page, size, offset);
        ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
        if (!ret)
                return -ENOMEM;

        trans->used++;  /* Transaction now owns the (DMA mapped) page */

        return 0;
}

/* Add an SKB transfer to a transaction.  No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
        struct scatterlist *sg = &trans->sgl[0];
        u32 used;
        int ret;

        /* assert(trans->tre_count == 1); */
        /* assert(!trans->used); */

        /* skb->len will not be 0 (checked early) */
        ret = skb_to_sgvec(skb, sg, 0, skb->len);
        if (ret < 0)
                return ret;
        used = ret;

        ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
        if (!ret)
                return -ENOMEM;

        trans->used += used;    /* Transaction now owns the (DMA mapped) skb */

        return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
        return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
                                      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
        enum gsi_tre_type tre_type;
        u32 tre_flags;

        tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
        tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

        /* Last TRE contains interrupt flags */
        if (last_tre) {
                /* All transactions end in a transfer completion interrupt */
                tre_flags |= TRE_FLAGS_IEOT_FMASK;
                /* Don't interrupt when outbound commands are acknowledged */
                if (bei)
                        tre_flags |= TRE_FLAGS_BEI_FMASK;
        } else {        /* All others indicate there's more to come */
                tre_flags |= TRE_FLAGS_CHAIN_FMASK;
        }

        return cpu_to_le32(tre_flags);
}

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
                               u32 len, bool last_tre, bool bei,
                               enum ipa_cmd_opcode opcode)
{
        struct gsi_tre tre;

        tre.addr = cpu_to_le64(addr);
        tre.len_opcode = gsi_tre_len_opcode(opcode, len);
        tre.reserved = 0;
        tre.flags = gsi_tre_flags(last_tre, bei, opcode);

        /* ARM64 can write 16 bytes as a unit with a single instruction.
         * Doing the assignment this way is an attempt to make that happen.
         */
        *dest_tre = tre;
}

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans:	Transaction to commit
 * @ring_db:	Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes.  Moves the transaction to the
 * pending list.  Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
        struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
        struct gsi_ring *ring = &channel->tre_ring;
        enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
        bool bei = channel->toward_ipa;
        struct ipa_cmd_info *info;
        struct gsi_tre *dest_tre;
        struct scatterlist *sg;
        u32 byte_count = 0;
        u32 avail;
        u32 i;

        /* assert(trans->used > 0); */

        /* Consume the entries.  If we cross the end of the ring while
         * filling them we'll switch to the beginning to finish.
         * If there is no info array we're doing a simple data
         * transfer request, whose opcode is IPA_CMD_NONE.
         */
        info = trans->info ? &trans->info[0] : NULL;
        avail = ring->count - ring->index % ring->count;
        dest_tre = gsi_ring_virt(ring, ring->index);
        for_each_sg(trans->sgl, sg, trans->used, i) {
                bool last_tre = i == trans->used - 1;
                dma_addr_t addr = sg_dma_address(sg);
                u32 len = sg_dma_len(sg);

                byte_count += len;
                if (!avail--)
                        dest_tre = gsi_ring_virt(ring, 0);
                if (info)
                        opcode = info++->opcode;

                gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
                dest_tre++;
        }
        ring->index += trans->used;

        if (channel->toward_ipa) {
                /* We record TX bytes when they are sent */
                trans->len = byte_count;
                trans->trans_count = channel->trans_count;
                trans->byte_count = channel->byte_count;
                channel->trans_count++;
                channel->byte_count += byte_count;
        }

        /* Associate the last TRE with the transaction */
        gsi_channel_trans_map(channel, ring->index - 1, trans);

        gsi_trans_move_pending(trans);

        /* Ring doorbell if requested, or if all TREs are allocated */
        if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
                /* Report what we're handing off to hardware for TX channels */
                if (channel->toward_ipa)
                        gsi_channel_tx_queued(channel);
                gsi_channel_doorbell(channel);
        }
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
        if (trans->used)
                __gsi_trans_commit(trans, ring_db);
        else
                gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
        if (!trans->used)
                goto out_trans_free;

        refcount_inc(&trans->refcount);

        __gsi_trans_commit(trans, true);

        wait_for_completion(&trans->completion);

out_trans_free:
        gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete, with timeout */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
                                  unsigned long timeout)
{
        unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
        unsigned long remaining = 1;    /* In case of empty transaction */

        if (!trans->used)
                goto out_trans_free;

        refcount_inc(&trans->refcount);

        __gsi_trans_commit(trans, true);

        remaining = wait_for_completion_timeout(&trans->completion,
                                                timeout_jiffies);
out_trans_free:
        gsi_trans_free(trans);

        return remaining ? 0 : -ETIMEDOUT;
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
        /* If the entire SGL was mapped when added, unmap it now */
        if (trans->direction != DMA_NONE)
                dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
                             trans->direction);

        ipa_gsi_trans_complete(trans);

        complete(&trans->completion);

        gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct gsi_trans *trans;
        bool cancelled;

        /* channel->gsi->mutex is held by caller */
        spin_lock_bh(&trans_info->spinlock);

        cancelled = !list_empty(&trans_info->pending);
        list_for_each_entry(trans, &trans_info->pending, links)
                trans->cancelled = true;

        list_splice_tail_init(&trans_info->pending, &trans_info->complete);

        spin_unlock_bh(&trans_info->spinlock);

        /* Schedule NAPI polling to complete the cancelled transactions */
        if (cancelled)
                napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct gsi_ring *ring = &channel->tre_ring;
        struct gsi_trans_info *trans_info;
        struct gsi_tre *dest_tre;

        trans_info = &channel->trans_info;

        /* First reserve the TRE, if possible */
        if (!gsi_trans_tre_reserve(trans_info, 1))
                return -EBUSY;

        /* Now fill the reserved TRE and tell the hardware */

        dest_tre = gsi_ring_virt(ring, ring->index);
        gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

        ring->index++;
        gsi_channel_doorbell(channel);

        return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct gsi_trans_info *trans_info;
        u32 tre_max;
        int ret;

        /* Ensure the size of a channel element is what's expected */
        BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

        /* The map array is used to determine what transaction is associated
         * with a TRE that the hardware reports has completed.  We need one
         * map entry per TRE.
         */
        trans_info = &channel->trans_info;
        trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
                                  GFP_KERNEL);
        if (!trans_info->map)
                return -ENOMEM;

        /* We can't use more TREs than there are available in the ring.
         * This limits the number of transactions that can be outstanding.
         * Worst case is one TRE per transaction (but we actually limit
         * it to something a little less than that).  We allocate resources
         * for transactions (including transaction structures) based on
         * this maximum number.
         */
        tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

        /* Transactions are allocated one at a time. */
        ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
                                  tre_max, 1);
        if (ret)
                goto err_kfree;

        /* A transaction uses a scatterlist array to represent the data
         * transfers implemented by the transaction.  Each scatterlist
         * element is used to fill a single TRE when the transaction is
         * committed.  So we need as many scatterlist elements as the
         * maximum number of TREs that can be outstanding.
         *
         * All TREs in a transaction must fit within the channel's TLV FIFO.
         * A transaction on a channel can allocate as many TREs as that but
         * no more.
         */
        ret = gsi_trans_pool_init(&trans_info->sg_pool,
                                  sizeof(struct scatterlist),
                                  tre_max, channel->tlv_count);
        if (ret)
                goto err_trans_pool_exit;

        /* Finally, the tre_avail field is what ultimately limits the number
         * of outstanding transactions and their resources.  A transaction
         * allocation succeeds only if the TREs available are sufficient for
         * what the transaction might need.  Transaction resource pools are
         * sized based on the maximum number of outstanding TREs, so there
         * will always be resources available if there are TREs available.
         */
        atomic_set(&trans_info->tre_avail, tre_max);

        spin_lock_init(&trans_info->spinlock);
        INIT_LIST_HEAD(&trans_info->alloc);
        INIT_LIST_HEAD(&trans_info->pending);
        INIT_LIST_HEAD(&trans_info->complete);
        INIT_LIST_HEAD(&trans_info->polled);

        return 0;

err_trans_pool_exit:
        gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
        kfree(trans_info->map);

        dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
                ret, channel_id);

        return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;

        gsi_trans_pool_exit(&trans_info->sg_pool);
        gsi_trans_pool_exit(&trans_info->pool);
        kfree(trans_info->map);
}