// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_cmd.h"

/**
 * DOC: GSI Transactions
 *
 * A GSI transaction abstracts the behavior of a GSI channel by representing
 * everything about a related group of IPA commands in a single structure.
 * (A "command" in this sense is either a data transfer or an IPA immediate
 * command.) Most details of interaction with the GSI hardware are managed
 * by the GSI transaction core, allowing users to simply describe commands
 * to be performed. When a transaction has completed, a callback function
 * (dependent on the type of endpoint associated with the channel) allows
 * cleanup of resources associated with the transaction.
 *
 * To perform a command (or set of them), a user of the GSI transaction
 * interface allocates a transaction, indicating the number of TREs required
 * (one per command). If sufficient TREs are available, they are reserved
 * for use in the transaction and the allocation succeeds. This way
 * exhaustion of the available TREs in a channel ring is detected
 * as early as possible. All resources required to complete a transaction
 * are allocated at transaction allocation time.
 *
 * Commands performed as part of a transaction are represented in an array
 * of Linux scatterlist structures. This array is allocated with the
 * transaction, and its entries are initialized using standard scatterlist
 * functions (such as sg_set_buf() or skb_to_sgvec()).
 *
 * Once a transaction's scatterlist structures have been initialized, the
 * transaction is committed. The caller is responsible for mapping buffers
 * for DMA if necessary, and this should be done *before* allocating
 * the transaction. Between a successful allocation and commit of a
 * transaction no errors should occur.
 *
 * Committing transfers ownership of the entire transaction to the GSI
 * transaction core. The GSI transaction code formats the content of
 * the scatterlist array into the channel ring buffer and informs the
 * hardware that new TREs are available to process.
 *
 * The last TRE in each transaction is marked to interrupt the AP when the
 * GSI hardware has completed it. Because transfers described by TREs are
 * performed strictly in order, signaling the completion of just the last
 * TRE in the transaction is sufficient to indicate the full transaction
 * is complete.
 *
 * When a transaction is complete, ipa_gsi_trans_complete() is called by the
 * GSI code into the IPA layer, allowing it to perform any final cleanup
 * required before the transaction is freed.
 */
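
/* Example (an illustrative sketch, not code called by this driver): a
 * TX user might queue a linear socket buffer needing a single TRE
 * roughly as follows, with error handling abbreviated:
 *
 *	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
 *	if (!trans)
 *		return -EBUSY;			(TRE exhaustion)
 *	ret = gsi_trans_skb_add(trans, skb);	(maps the skb for DMA)
 *	if (ret) {
 *		gsi_trans_free(trans);
 *		return ret;
 *	}
 *	gsi_trans_commit(trans, true);		(ring the doorbell)
 *
 * Completion is later reported through ipa_gsi_trans_complete().
 */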

/* Hardware values representing a transfer element type */
enum gsi_tre_type {
	GSI_RE_XFER	= 0x2,
	GSI_RE_IMMD_CMD	= 0x3,
};

/* An entry in a channel ring */
struct gsi_tre {
	__le64 addr;		/* DMA address */
	__le16 len_opcode;	/* length in bytes or enum IPA_CMD_* */
	__le16 reserved;
	__le32 flags;		/* TRE_FLAGS_* */
};

/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK	GENMASK(0, 0)
#define TRE_FLAGS_IEOB_FMASK	GENMASK(8, 8)
#define TRE_FLAGS_IEOT_FMASK	GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK	GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK	GENMASK(23, 16)

int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
			u32 max_alloc)
{
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
	 * single allocation), we can always satisfy requests without
	 * ever worrying about straddling the end of the pool array.
	 * If there aren't enough entries starting at the free index,
	 * we just allocate free entries from the beginning of the pool.
	 */
	virt = kcalloc(count + max_alloc - 1, size, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	/* If the allocator gave us any extra memory, use it */
	pool->count = ksize(pool->base) / size;
	pool->free = 0;
	pool->max_alloc = max_alloc;
	pool->size = size;
	pool->addr = 0;		/* Only used for DMA pools */

	return 0;
}
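
/* Worked example of the padding above: count = 8 entries with
 * max_alloc = 3 yields an array of (at least) 8 + 3 - 1 = 10 slots.
 * With the free index at 6, a 3-slot request fits (slots 6-8) without
 * straddling the end; a later request finding fewer than 3 slots left
 * simply restarts at slot 0 (see gsi_trans_pool_alloc_common()).
 */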

void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
{
	kfree(pool->base);
	memset(pool, 0, sizeof(*pool));
}

/* Home-grown DMA pool. This way we can preallocate entries and use the
 * TRE count to guarantee allocations will succeed. Even though we specify
 * max_alloc (and it can be more than one), we only allow allocation of a
 * single element from a DMA pool.
 */
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
			    size_t size, u32 count, u32 max_alloc)
{
	size_t total_size;
	dma_addr_t addr;
	void *virt;

#ifdef IPA_VALIDATE
	if (!size || size % 8)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
	total_size = (count + max_alloc - 1) * size;

	/* The allocator will give us a power-of-2 number of pages whether
	 * we ask for one or not, so round the total up ourselves. That
	 * way we won't waste any memory that would be available beyond
	 * the required space; it all becomes usable pool entries.
	 */
	total_size = PAGE_SIZE << get_order(total_size);

	virt = dma_alloc_coherent(dev, total_size, &addr, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	pool->base = virt;
	pool->count = total_size / size;
	pool->free = 0;
	pool->size = size;
	pool->max_alloc = max_alloc;
	pool->addr = addr;

	return 0;
}
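
/* Worked numeric example (assuming PAGE_SIZE is 4096): a request for
 * count = 100 entries of size 60 with max_alloc = 1 first rounds the
 * entry size up to 64, giving a raw total of 100 * 64 = 6400 bytes.
 * get_order(6400) is 1, so 2 pages (8192 bytes) are allocated, and the
 * pool ends up holding 8192 / 64 = 128 usable entries.
 */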

void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool)
{
	size_t total_size = pool->count * pool->size;

	dma_free_coherent(dev, total_size, pool->base, pool->addr);
	memset(pool, 0, sizeof(*pool));
}

/* Return the byte offset of the next free entry in the pool */
static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	/* assert(count > 0); */
	/* assert(count <= pool->max_alloc); */

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
		pool->free = 0;

	offset = pool->free * pool->size;
	pool->free += count;
	memset(pool->base + offset, 0, count * pool->size);

	return offset;
}

/* Allocate a contiguous block of zeroed entries from a pool */
void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count)
{
	return pool->base + gsi_trans_pool_alloc_common(pool, count);
}

/* Allocate a single zeroed entry from a DMA pool */
void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
{
	u32 offset = gsi_trans_pool_alloc_common(pool, 1);

	*addr = pool->addr + offset;

	return pool->base + offset;
}

/* Return the pool element that immediately follows the one given.
 * This only works if elements are allocated one at a time.
 */
void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	/* assert(element >= pool->base); */
	/* assert(element < end); */
	/* assert(pool->max_alloc == 1); */
	element += pool->size;

	return element < end ? element : pool->base;
}

/* Map a given ring entry index to the transaction associated with it */
static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
				  struct gsi_trans *trans)
{
	/* Note: index *must* be used modulo the ring count here */
	channel->trans_info.map[index % channel->tre_ring.count] = trans;
}

/* Return the transaction mapped to a given ring entry */
struct gsi_trans *
gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return channel->trans_info.map[index % channel->tre_ring.count];
}

/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
	return list_first_entry_or_null(&channel->trans_info.complete,
					struct gsi_trans, links);
}

/* Move a transaction from the allocated list to the pending list */
static void gsi_trans_move_pending(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->pending);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction and all of its predecessors from the pending list
 * to the completed list.
 */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct list_head list;

	spin_lock_bh(&trans_info->spinlock);

	/* Move this transaction and all predecessors to completed list */
	list_cut_position(&list, &trans_info->pending, &trans->links);
	list_splice_tail(&list, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Move a transaction from the completed list to the polled list */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_trans_info *trans_info = &channel->trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_move_tail(&trans->links, &trans_info->polled);

	spin_unlock_bh(&trans_info->spinlock);
}

/* Reserve some number of TREs on a channel. Returns true if successful */
static bool
gsi_trans_tre_reserve(struct gsi_trans_info *trans_info, u32 tre_count)
{
	int avail = atomic_read(&trans_info->tre_avail);
	int new;

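	/* Lock-free reservation: subtract tre_count only if the result
	 * stays non-negative, retrying if another thread updated
	 * tre_avail between the read and the compare-and-exchange.
	 */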
	do {
		new = avail - (int)tre_count;
		if (unlikely(new < 0))
			return false;
	} while (!atomic_try_cmpxchg(&trans_info->tre_avail, &avail, new));

	return true;
}

/* Release previously-reserved TRE entries to a channel */
static void
gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
{
	atomic_add(tre_count, &trans_info->tre_avail);
}

/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	/* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */

	trans_info = &channel->trans_info;

	/* We reserve the TREs now, but consume them at commit time.
	 * If there aren't enough available, we're done.
	 */
	if (!gsi_trans_tre_reserve(trans_info, tre_count))
		return NULL;

	/* Allocate and initialize non-zero fields in the transaction */
	trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
	trans->gsi = gsi;
	trans->channel_id = channel_id;
	trans->tre_count = tre_count;
	init_completion(&trans->completion);

	/* Allocate the scatterlist and (if requested) info entries. */
	trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
	sg_init_marker(trans->sgl, tre_count);

	trans->direction = direction;

	spin_lock_bh(&trans_info->spinlock);

	list_add_tail(&trans->links, &trans_info->alloc);

	spin_unlock_bh(&trans_info->spinlock);

	refcount_set(&trans->refcount, 1);

	return trans;
}

/* Free a previously-allocated transaction (used only in case of error) */
void gsi_trans_free(struct gsi_trans *trans)
{
	struct gsi_trans_info *trans_info;

	if (!refcount_dec_and_test(&trans->refcount))
		return;

	trans_info = &trans->gsi->channel[trans->channel_id].trans_info;

	spin_lock_bh(&trans_info->spinlock);

	list_del(&trans->links);

	spin_unlock_bh(&trans_info->spinlock);

	ipa_gsi_trans_release(trans);

	/* Releasing the reserved TREs implicitly frees the sgl[] and
	 * (if present) info[] arrays, plus the transaction itself.
	 */
	gsi_trans_tre_release(trans_info, trans->tre_count);
}

/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
		       dma_addr_t addr, enum dma_data_direction direction,
		       enum ipa_cmd_opcode opcode)
{
	struct ipa_cmd_info *info;
	u32 which = trans->used++;
	struct scatterlist *sg;

	/* assert(which < trans->tre_count); */

	/* Set the page information for the buffer. We also need to fill in
	 * the DMA address and length for the buffer (something dma_map_sg()
	 * normally does).
	 */
	sg = &trans->sgl[which];

	sg_set_buf(sg, buf, size);
	sg_dma_address(sg) = addr;
	sg_dma_len(sg) = sg->length;

	info = &trans->info[which];
	info->opcode = opcode;
	info->direction = direction;
}
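
/* Example (an illustrative sketch only): the IPA command code might
 * build and issue an immediate command transaction along these lines,
 * where "payload" and "payload_addr" name a hypothetical DMA-coherent
 * buffer holding the command payload:
 *
 *	gsi_trans_cmd_add(trans, payload, size, payload_addr,
 *			  DMA_TO_DEVICE, opcode);
 *	gsi_trans_commit_wait(trans);
 *
 * gsi_trans_commit_wait() blocks until the hardware completes the
 * transaction.
 */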

/* Add a page transfer to a transaction. It will fill the only TRE. */
int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
		       u32 offset)
{
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used++;	/* Transaction now owns the (DMA mapped) page */

	return 0;
}

/* Add an SKB transfer to a transaction. No other TREs will be used. */
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
	struct scatterlist *sg = &trans->sgl[0];
	u32 used;
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (ret < 0)
		return ret;
	used = ret;

	ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
	if (!ret)
		return -ENOMEM;

	trans->used += used;	/* Transaction now owns the (DMA mapped) skb */

	return 0;
}

/* Compute the length/opcode value to use for a TRE */
static __le16 gsi_tre_len_opcode(enum ipa_cmd_opcode opcode, u32 len)
{
	return opcode == IPA_CMD_NONE ? cpu_to_le16((u16)len)
				      : cpu_to_le16((u16)opcode);
}

/* Compute the flags value to use for a given TRE */
static __le32 gsi_tre_flags(bool last_tre, bool bei, enum ipa_cmd_opcode opcode)
{
	enum gsi_tre_type tre_type;
	u32 tre_flags;

	tre_type = opcode == IPA_CMD_NONE ? GSI_RE_XFER : GSI_RE_IMMD_CMD;
	tre_flags = u32_encode_bits(tre_type, TRE_FLAGS_TYPE_FMASK);

	/* Last TRE contains interrupt flags */
	if (last_tre) {
		/* All transactions end in a transfer completion interrupt */
		tre_flags |= TRE_FLAGS_IEOT_FMASK;
		/* Don't interrupt when outbound commands are acknowledged */
		if (bei)
			tre_flags |= TRE_FLAGS_BEI_FMASK;
	} else {	/* All others indicate there's more to come */
		tre_flags |= TRE_FLAGS_CHAIN_FMASK;
	}

	return cpu_to_le32(tre_flags);
}
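
/* Worked example: for the last TRE of an immediate command transaction
 * with bei set, the flags word (in CPU byte order) is
 *
 *	type 0x3 in bits 16-23 | IEOT (bit 9) | BEI (bit 10) = 0x00030600
 *
 * while a non-final data transfer TRE is type 0x2 with only the chain
 * bit set: 0x00020001.
 */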

static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
			       u32 len, bool last_tre, bool bei,
			       enum ipa_cmd_opcode opcode)
{
	struct gsi_tre tre;

	tre.addr = cpu_to_le64(addr);
	tre.len_opcode = gsi_tre_len_opcode(opcode, len);
	tre.reserved = 0;
	tre.flags = gsi_tre_flags(last_tre, bei, opcode);

	/* ARM64 can write 16 bytes as a unit with a single instruction.
	 * Doing the assignment this way is an attempt to make that happen.
	 */
	*dest_tre = tre;
}

/**
 * __gsi_trans_commit() - Common GSI transaction commit code
 * @trans: Transaction to commit
 * @ring_db: Whether to tell the hardware about these queued transfers
 *
 * Formats channel ring TRE entries based on the content of the scatterlist.
 * Maps a transaction pointer to the last ring entry used for the transaction,
 * so it can be recovered when it completes. Moves the transaction to the
 * pending list. Finally, updates the channel ring pointer and optionally
 * rings the doorbell.
 */
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	bool bei = channel->toward_ipa;
	struct ipa_cmd_info *info;
	struct gsi_tre *dest_tre;
	struct scatterlist *sg;
	u32 byte_count = 0;
	u32 avail;
	u32 i;

	/* assert(trans->used > 0); */

	/* Consume the entries. If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
	 * If there is no info array we're doing a simple data
	 * transfer request, whose opcode is IPA_CMD_NONE.
	 */
	info = trans->info ? &trans->info[0] : NULL;
	avail = ring->count - ring->index % ring->count;
	dest_tre = gsi_ring_virt(ring, ring->index);
	for_each_sg(trans->sgl, sg, trans->used, i) {
		bool last_tre = i == trans->used - 1;
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		byte_count += len;
		if (!avail--)
			dest_tre = gsi_ring_virt(ring, 0);
		if (info)
			opcode = info++->opcode;

		gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
		dest_tre++;
	}
	ring->index += trans->used;

	if (channel->toward_ipa) {
		/* We record TX bytes when they are sent */
		trans->len = byte_count;
		trans->trans_count = channel->trans_count;
		trans->byte_count = channel->byte_count;
		channel->trans_count++;
		channel->byte_count += byte_count;
	}

	/* Associate the last TRE with the transaction */
	gsi_channel_trans_map(channel, ring->index - 1, trans);

	gsi_trans_move_pending(trans);

	/* Ring doorbell if requested, or if all TREs are allocated */
	if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
		/* Report what we're handing off to hardware for TX channels */
		if (channel->toward_ipa)
			gsi_channel_tx_queued(channel);
		gsi_channel_doorbell(channel);
	}
}

/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
	if (trans->used)
		__gsi_trans_commit(trans, ring_db);
	else
		gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	wait_for_completion(&trans->completion);

out_trans_free:
	gsi_trans_free(trans);
}

/* Commit a GSI transaction and wait for it to complete, with timeout */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
				  unsigned long timeout)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
	unsigned long remaining = 1;	/* In case of empty transaction */

	if (!trans->used)
		goto out_trans_free;

	refcount_inc(&trans->refcount);

	__gsi_trans_commit(trans, true);

	remaining = wait_for_completion_timeout(&trans->completion,
						timeout_jiffies);
out_trans_free:
	gsi_trans_free(trans);

	return remaining ? 0 : -ETIMEDOUT;
}

/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
	/* If the entire SGL was mapped when added, unmap it now */
	if (trans->direction != DMA_NONE)
		dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
			     trans->direction);

	ipa_gsi_trans_complete(trans);

	complete(&trans->completion);

	gsi_trans_free(trans);
}

/* Cancel a channel's pending transactions */
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;
	bool cancelled;

	/* channel->gsi->mutex is held by caller */
	spin_lock_bh(&trans_info->spinlock);

	cancelled = !list_empty(&trans_info->pending);
	list_for_each_entry(trans, &trans_info->pending, links)
		trans->cancelled = true;

	list_splice_tail_init(&trans_info->pending, &trans_info->complete);

	spin_unlock_bh(&trans_info->spinlock);

	/* Schedule NAPI polling to complete the cancelled transactions */
	if (cancelled)
		napi_schedule(&channel->napi);
}

/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_ring *ring = &channel->tre_ring;
	struct gsi_trans_info *trans_info;
	struct gsi_tre *dest_tre;

	trans_info = &channel->trans_info;

	/* First reserve the TRE, if possible */
	if (!gsi_trans_tre_reserve(trans_info, 1))
		return -EBUSY;

	/* Now fill the reserved TRE and tell the hardware */

	dest_tre = gsi_ring_virt(ring, ring->index);
	gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);

	ring->index++;
	gsi_channel_doorbell(channel);

	return 0;
}

/* Mark a gsi_trans_read_byte() request done */
void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	gsi_trans_tre_release(&channel->trans_info, 1);
}

/* Initialize a channel's GSI transaction info */
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	struct gsi_trans_info *trans_info;
	u32 tre_max;
	int ret;

	/* Ensure the size of a channel element is what's expected */
	BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);

	/* The map array is used to determine what transaction is associated
	 * with a TRE that the hardware reports has completed. We need one
	 * map entry per TRE.
	 */
	trans_info = &channel->trans_info;
	trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
				  GFP_KERNEL);
	if (!trans_info->map)
		return -ENOMEM;

	/* We can't use more TREs than there are available in the ring.
	 * This limits the number of transactions that can be outstanding.
	 * Worst case is one TRE per transaction (but we actually limit
	 * it to something a little less than that). We allocate resources
	 * for transactions (including transaction structures) based on
	 * this maximum number.
	 */
	tre_max = gsi_channel_tre_max(channel->gsi, channel_id);

	/* Transactions are allocated one at a time. */
	ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
				  tre_max, 1);
	if (ret)
		goto err_kfree;

	/* A transaction uses a scatterlist array to represent the data
	 * transfers implemented by the transaction. Each scatterlist
	 * element is used to fill a single TRE when the transaction is
	 * committed. So we need as many scatterlist elements as the
	 * maximum number of TREs that can be outstanding.
	 *
	 * All TREs in a transaction must fit within the channel's TLV FIFO,
	 * so a single transaction can use at most tlv_count TREs; that is
	 * therefore the most this pool hands out in one allocation.
	 */
	ret = gsi_trans_pool_init(&trans_info->sg_pool,
				  sizeof(struct scatterlist),
				  tre_max, channel->tlv_count);
	if (ret)
		goto err_trans_pool_exit;

	/* Finally, the tre_avail field is what ultimately limits the number
	 * of outstanding transactions and their resources. A transaction
	 * allocation succeeds only if the TREs available are sufficient for
	 * what the transaction might need. Transaction resource pools are
	 * sized based on the maximum number of outstanding TREs, so there
	 * will always be resources available if there are TREs available.
	 */
	atomic_set(&trans_info->tre_avail, tre_max);

	spin_lock_init(&trans_info->spinlock);
	INIT_LIST_HEAD(&trans_info->alloc);
	INIT_LIST_HEAD(&trans_info->pending);
	INIT_LIST_HEAD(&trans_info->complete);
	INIT_LIST_HEAD(&trans_info->polled);

	return 0;

err_trans_pool_exit:
	gsi_trans_pool_exit(&trans_info->pool);
err_kfree:
	kfree(trans_info->map);

	dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
		ret, channel_id);

	return ret;
}

/* Inverse of gsi_channel_trans_init() */
void gsi_channel_trans_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;

	gsi_trans_pool_exit(&trans_info->sg_pool);
	gsi_trans_pool_exit(&trans_info->pool);
	kfree(trans_info->map);
}