/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>

#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000

#define QMAN_REV_MASK 0xffff0000

struct dpaa2_dq;
struct qbman_swp;

/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
	u32 qman_version;
	u32 qman_clk;
};

#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20

/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
	u8 verb;
	u8 numf;
	u8 tok;
	u8 reserved;
	__le32 dq_src;
	__le64 rsp_addr;
	u64 rsp_addr_virt;
	u8 padding[40];
};

enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};

/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK 0x7f
#define QBMAN_RESULT_DQ 0x60
#define QBMAN_RESULT_FQRN 0x21
#define QBMAN_RESULT_FQRNI 0x22
#define QBMAN_RESULT_FQPN 0x24
#define QBMAN_RESULT_FQDAN 0x25
#define QBMAN_RESULT_CDAN 0x26
#define QBMAN_RESULT_CSCN_MEM 0x27
#define QBMAN_RESULT_CGCU 0x28
#define QBMAN_RESULT_BPSCN 0x29
#define QBMAN_RESULT_CSCN_WQ 0x2a

/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE 0x48
#define QBMAN_FQ_FORCE 0x49
#define QBMAN_FQ_XON 0x4d
#define QBMAN_FQ_XOFF 0x4e

/* structure of enqueue descriptor */
struct qbman_eq_desc {
	u8 verb;
	u8 dca;
	__le16 seqnum;
	__le16 orpid;
	__le16 reserved1;
	__le32 tgtid;
	__le32 tag;
	__le16 qdbin;
	u8 qpri;
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
};

struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];
};

/* buffer release descriptor */
struct qbman_release_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	__le32 reserved2;
	__le64 buf[7];
};

/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0

#define CODE_CDAN_WE_EN 0x1
#define CODE_CDAN_WE_CTX 0x4

/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void *addr_cena;
	void __iomem *addr_cinh;

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Push dequeues */
	u32 sdq;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		u32 next_idx;
		u32 valid_bit;
		u8 dqrr_size;
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	struct {
		u32 pi;
		u32 pi_vb;
		u32 pi_ring_size;
		u32 pi_ci_mask;
		u32 ci;
		int available;
		u32 pend;
		u32 no_pfdr;
	} eqcr;

	spinlock_t access_spinlock;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);

/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);

void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);

void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);

void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);

void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	return qbman_swp_enqueue_ptr(s, d, fd);
}
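
/*
 * Example: a minimal single-frame enqueue that retries while the EQCR is
 * busy. An illustrative sketch, not part of the API; the portal "swp",
 * the frame descriptor "fd" and the frame queue id "fqid" are assumed to
 * have been set up by the caller.
 *
 *	struct qbman_eq_desc ed;
 *	int ret;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	do {
 *		ret = qbman_swp_enqueue(swp, &ed, fd);
 *	} while (ret == -EBUSY);
 */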

/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: pointer to a table of frame descriptors to be enqueued
 * @flags: pointer to a table of QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
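
/*
 * Example: enqueuing a burst of frames with one descriptor. A sketch
 * only; "fds" is assumed to be an array of "num" frame descriptors, and
 * the NULL flags argument skips per-frame DCA. Since the call may accept
 * fewer frames than requested, the caller advances through the table.
 *
 *	int done = 0, ret;
 *
 *	while (done < num) {
 *		ret = qbman_swp_enqueue_multiple(swp, &ed, &fds[done],
 *						 NULL, num - done);
 *		if (ret < 0)
 *			break;
 *		done += ret;
 *	}
 */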

/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: pointer to a table of frame descriptors to be enqueued
 * @num_frames: number of frame descriptors to be enqueued
 *
 * Return the number of frame descriptors enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}

/**
 * qbman_result_is_SCN() - check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}

/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}

/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}
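
/*
 * Example: classifying an entry returned by the portal. A sketch of how
 * the qbman_result_is_*() predicates above are typically combined; the
 * handle_*() functions are placeholders for caller-supplied logic.
 *
 *	if (qbman_result_is_DQ(dq)) {
 *		handle_frame(dq);
 *	} else if (qbman_result_is_CDAN(dq)) {
 *		handle_channel_data_availability(dq);
 *	} else if (qbman_result_is_CSCN(dq)) {
 *		handle_congestion_state_change(qbman_result_SCN_state(dq),
 *					       qbman_result_SCN_rid(dq));
 *	}
 */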

/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that an FQ can end up in the parked
 * state; this schedules it.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}

/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}

/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}

/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}

/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */

/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s: the software portal object
 * @channelid: the channel index
 * @ctx: the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}

/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}

/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}

/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx: the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
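
/*
 * Example: the CDAN rearm cycle described above. A sketch; "ch" is the
 * channel index and the pull-dequeue step is elided. Because a channel
 * raises only one CDAN at a time, it is re-enabled once the reaction
 * (the pull dequeue) has been issued.
 *
 *	qbman_swp_CDAN_set_context_enable(swp, ch, ctx);
 *	...on receiving a CDAN: pull-dequeue the channel, then...
 *	qbman_swp_CDAN_enable(swp, ch);
 */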

/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	WARN_ON(!loopvar);

	return cmd;
}

/* Query APIs */
struct qbman_fq_query_np_rslt {
	u8 verb;
	u8 rslt;
	u8 st1;
	u8 st2;
	u8 reserved[2];
	__le16 od1_sfdr;
	__le16 od2_sfdr;
	__le16 od3_sfdr;
	__le16 ra1_sfdr;
	__le16 ra2_sfdr;
	__le32 pfdr_hptr;
	__le32 pfdr_tptr;
	__le32 frm_cnt;
	__le32 byte_cnt;
	__le16 ics_surp;
	u8 is;
	u8 reserved2[29];
};

int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
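
/*
 * Example: reading the occupancy of a frame queue. A sketch, assuming
 * qbman_fq_query_state() returns 0 on success.
 *
 *	struct qbman_fq_query_np_rslt state;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &state)) {
 *		u32 frames = qbman_fq_state_frame_count(&state);
 *		u32 bytes = qbman_fq_state_byte_count(&state);
 *	}
 */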

struct qbman_bp_query_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[4];
	u8 bdi;
	u8 state;
	__le32 fill;
	__le32 hdotr;
	__le16 swdet;
	__le16 swdxt;
	__le16 hwdet;
	__le16 hwdxt;
	__le16 swset;
	__le16 swsxt;
	__le16 vbpid;
	__le16 icid;
	__le64 bpscn_addr;
	__le64 bpscn_ctx;
	__le16 hw_targ;
	u8 dbe;
	u8 reserved2;
	u8 sdcnt;
	u8 hdcnt;
	u8 sscnt;
	u8 reserved3[9];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
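
/*
 * Example: checking how many free buffers a pool holds. A sketch,
 * assuming qbman_bp_query() returns 0 on success.
 *
 *	struct qbman_bp_query_rslt rslt;
 *
 *	if (!qbman_bp_query(swp, bpid, &rslt))
 *		num_free = qbman_bp_info_num_free_bufs(&rslt);
 */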

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
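
/*
 * Example: returning two buffers to a pool, retrying while the release
 * command ring is busy. A sketch; "buf_addrs" is assumed to hold the
 * (DMA) addresses of the buffers being released.
 *
 *	struct qbman_release_desc rd;
 *	int ret;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	do {
 *		ret = qbman_swp_release(swp, &rd, buf_addrs, 2);
 *	} while (ret == -EBUSY);
 */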

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}
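
/*
 * Example: a volatile (pull) dequeue of up to 8 frames from one FQ into
 * caller-provided storage. A sketch; "storage" and "storage_phys" are a
 * DMA-mapped result area, and completion of the command is assumed to be
 * detected with the dequeue-entry helpers from soc/fsl/dpaa2-global.h.
 *
 *	struct qbman_pull_desc pd;
 *	int ret;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_numframes(&pd, 8);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	do {
 *		ret = qbman_swp_pull(swp, &pd);
 *	} while (ret == -EBUSY);
 */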

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
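
/*
 * Example: draining the DQRR. A sketch; each entry is returned exactly
 * once by qbman_swp_dqrr_next() and must eventually be handed back with
 * qbman_swp_dqrr_consume(). handle_frame() is a placeholder.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		if (qbman_result_is_DQ(dq))
 *			handle_frame(dq);
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */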

#endif /* __FSL_QBMAN_PORTAL_H */