/*
 * Copyright (C) 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef _FSL_QBMAN_PORTAL_H
#define _FSL_QBMAN_PORTAL_H

#include <fsl-mc/fsl_qbman_base.h>

/* Create a functional object representing the given QBMan portal
 * descriptor. */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *);

/************/
/* Dequeues */
/************/

/* See the QBMan driver API documentation for details on the dequeue
 * mechanisms. NB: the use of an 'ldpaa_' prefix for this type is because it is
 * primarily used by the "DPIO" layer that sits above (and hides) the QBMan
 * driver. The structure is defined in the DPIO interface, but to avoid circular
 * dependencies we just pre/re-declare it here opaquely. */
struct ldpaa_dq;


/* ------------------- */
/* Pull-mode dequeuing */
/* ------------------- */

struct qbman_pull_desc {
	uint32_t dont_manipulate_directly[6];
};

/* Clear the contents of a descriptor to default/starting state. */
void qbman_pull_desc_clear(struct qbman_pull_desc *);
/* If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the physical/DMA address
 * which the caller provides in 'storage_phys'), and 'stash' controls whether or
 * not those writes to main memory express a cache-warming attribute. */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *,
				 struct ldpaa_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
/* numframes must be between 1 and 16, inclusive */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes);
/* token is the value that shows up in the dequeue results and can be used to
 * detect when the results have been published; it is not really used when
 * dequeue results go to DQRR. The easiest technique is to zero the result
 * "storage" before issuing a pull dequeue, and use any non-zero 'token' value. */
void qbman_pull_desc_set_token(struct qbman_pull_desc *, uint8_t token);
/* Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid);

/* Issue the pull dequeue command */
int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *);

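/*
 * Illustrative sketch only, not part of the portal API: issue a single pull
 * dequeue from a frame queue into caller-provided storage, using the
 * descriptor setters declared above. The portal 'swp', the 'storage' buffer,
 * its DMA address 'storage_phys' and the 'fqid' are assumed to have been set
 * up by the caller, and the token value 0xab is arbitrary.
 */
static inline int example_pull_from_fq(struct qbman_swp *swp,
				       struct ldpaa_dq *storage,
				       dma_addr_t storage_phys,
				       uint32_t fqid)
{
	struct qbman_pull_desc pulldesc;

	qbman_pull_desc_clear(&pulldesc);
	/* Results go to 'storage' rather than DQRR; request cache stashing */
	qbman_pull_desc_set_storage(&pulldesc, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	/* Caller should zero 'storage' first; any non-zero token will do */
	qbman_pull_desc_set_token(&pulldesc, 0xab);
	qbman_pull_desc_set_fq(&pulldesc, fqid);

	/* Non-zero return indicates the command could not be issued */
	return qbman_swp_pull(swp, &pulldesc);
}
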
/* -------------------------------- */
/* Polling DQRR for dequeue results */
/* -------------------------------- */

/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order. */
const struct ldpaa_dq *qbman_swp_dqrr_next(struct qbman_swp *);
/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */
void qbman_swp_dqrr_consume(struct qbman_swp *, const struct ldpaa_dq *);

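/*
 * Illustrative sketch only: drain whatever DQRR entries are currently
 * available and hand each one to a caller-supplied handler. 'swp' and the
 * 'handle' callback are assumptions of this sketch.
 */
static inline void example_poll_dqrr(struct qbman_swp *swp,
				     void (*handle)(const struct ldpaa_dq *))
{
	const struct ldpaa_dq *dq;

	/* qbman_swp_dqrr_next() returns NULL once no entries are left */
	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		handle(dq);
		/* Hand the entry back to hardware once we are done with it */
		qbman_swp_dqrr_consume(swp, dq);
	}
}
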
/* ------------------------------------------------- */
/* Polling user-provided storage for dequeue results */
/* ------------------------------------------------- */

/* Only used for user-provided storage of dequeue results, not DQRR. Prior to
 * being used, the storage must be set with "oldtoken", so that the driver can
 * notice when hardware has filled it in with results carrying a "newtoken".
 * NB, for efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format (whether or not that is the same as the little-endian format that
 * hardware DMA'd to the user's storage). As such, once the user has called
 * qbman_dq_entry_has_newtoken() and been returned a valid dequeue result, they
 * should not call it again on the same memory location (except of course if
 * another dequeue command has been executed to produce a new result to that
 * location).
 */
void qbman_dq_entry_set_oldtoken(struct ldpaa_dq *,
				 unsigned int num_entries,
				 uint8_t oldtoken);
int qbman_dq_entry_has_newtoken(struct qbman_swp *,
				const struct ldpaa_dq *,
				uint8_t newtoken);

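/*
 * Illustrative sketch only: seed one dequeue-result entry with an "old"
 * token, then busy-wait until the driver sees a result carrying the matching
 * "new" token. The token value 0xab, the elided pull-dequeue step and the
 * unbounded polling loop are assumptions of this sketch; real callers would
 * typically bound the wait.
 */
static inline void example_wait_for_result(struct qbman_swp *swp,
					   struct ldpaa_dq *storage)
{
	/* Seed the entry before issuing the pull dequeue that targets it */
	qbman_dq_entry_set_oldtoken(storage, 1, 0);

	/* ... issue a pull dequeue to 'storage' with token 0xab ... */

	/* Poll until hardware has published a result with the new token */
	while (!qbman_dq_entry_has_newtoken(swp, storage, 0xab))
		;
}
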
/* -------------------------------------------------------- */
/* Parsing dequeue entries (DQRR and user-provided storage) */
/* -------------------------------------------------------- */

/* DQRR entries may contain non-dequeue results, i.e. notifications */
int qbman_dq_entry_is_DQ(const struct ldpaa_dq *);

/************/
/* Enqueues */
/************/

struct qbman_eq_desc {
	uint32_t dont_manipulate_directly[8];
};


/* Clear the contents of a descriptor to default/starting state. */
void qbman_eq_desc_clear(struct qbman_eq_desc *);
/* Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue without order-restoration
 * - enqueue with order-restoration
 * - fill a hole in the order-restoration sequence, without any enqueue
 * - advance NESN (Next Expected Sequence Number), without any enqueue
 * 'respond_success' indicates whether an enqueue response should be DMA'd
 * after success (otherwise a response is DMA'd only after failure).
 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 * be enqueued.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *, int respond_success);
void qbman_eq_desc_set_response(struct qbman_eq_desc *,
				dma_addr_t storage_phys,
				int stash);
/* token is the value that shows up in an enqueue response and can be used to
 * detect when the results have been published. The easiest technique is to
 * zero the result "storage" before issuing an enqueue, and use any non-zero
 * 'token' value. */
void qbman_eq_desc_set_token(struct qbman_eq_desc *, uint8_t token);
/* Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 * Note that none of these will have any effect if the "action" type has been
 * set to "orp_hole" or "orp_nesn".
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid,
			  uint32_t qd_bin, uint32_t qd_prio);

/* Issue an enqueue command. ('fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".) */
int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *,
		      const struct qbman_fd *fd);

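/*
 * Illustrative sketch only: enqueue a single frame descriptor to a frame
 * queue without order restoration. 'swp', 'fd' and 'fqid' are assumed to be
 * provided by the caller; no enqueue response is requested on success.
 */
static inline int example_enqueue_to_fq(struct qbman_swp *swp,
					const struct qbman_fd *fd,
					uint32_t fqid)
{
	struct qbman_eq_desc eqdesc;

	qbman_eq_desc_clear(&eqdesc);
	/* Plain enqueue: no ORP, only DMA a response if the enqueue fails */
	qbman_eq_desc_set_no_orp(&eqdesc, 0);
	qbman_eq_desc_set_fq(&eqdesc, fqid);

	/* Non-zero return indicates the enqueue could not be issued */
	return qbman_swp_enqueue(swp, &eqdesc, fd);
}
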
/*******************/
/* Buffer releases */
/*******************/

struct qbman_release_desc {
	uint32_t dont_manipulate_directly[1];
};

/* Clear the contents of a descriptor to default/starting state. */
void qbman_release_desc_clear(struct qbman_release_desc *);
/* Set the ID of the buffer pool to release to */
void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid);
/* Issue a release command. 'num_buffers' must be less than 8. */
int qbman_swp_release(struct qbman_swp *, const struct qbman_release_desc *,
		      const uint64_t *buffers, unsigned int num_buffers);

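/*
 * Illustrative sketch only: release a small batch of buffers back to a
 * buffer pool. 'swp', 'bpid' and the array of buffer addresses are assumed
 * to come from the caller; per the comment above, 'num' must be less than 8.
 */
static inline int example_release_buffers(struct qbman_swp *swp,
					  uint32_t bpid,
					  const uint64_t *buffers,
					  unsigned int num)
{
	struct qbman_release_desc releasedesc;

	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, bpid);

	/* Non-zero return indicates the release could not be issued */
	return qbman_swp_release(swp, &releasedesc, buffers, num);
}
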
/*******************/
/* Buffer acquires */
/*******************/

int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers,
		      unsigned int num_buffers);
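
/*
 * Illustrative sketch only: attempt to acquire a single buffer from the
 * given buffer pool. 'swp', 'bpid' and 'buffer' are assumed to be provided
 * by the caller; the return value of qbman_swp_acquire() is passed back
 * unchanged for the caller to interpret.
 */
static inline int example_acquire_one_buffer(struct qbman_swp *swp,
					     uint32_t bpid,
					     uint64_t *buffer)
{
	return qbman_swp_acquire(swp, bpid, buffer, 1);
}
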
#endif /* !_FSL_QBMAN_PORTAL_H */