/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H
#include <soc/fsl/dpaa2-fd.h>
#define QMAN_REV_4000	0x04000000
#define QMAN_REV_4100	0x04010000
#define QMAN_REV_4101	0x04010001
#define QMAN_REV_5000	0x05000000

#define QMAN_REV_MASK	0xffff0000
/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar;		/* Cache-enabled portal base address */
	void __iomem *cinh_bar;	/* Cache-inhibited portal base address */
	/* ... */
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20
/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
	/* ... */
};
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};
/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK	0x7f
#define QBMAN_RESULT_DQ		0x60
#define QBMAN_RESULT_FQRN	0x21
#define QBMAN_RESULT_FQRNI	0x22
#define QBMAN_RESULT_FQPN	0x24
#define QBMAN_RESULT_FQDAN	0x25
#define QBMAN_RESULT_CDAN	0x26
#define QBMAN_RESULT_CSCN_MEM	0x27
#define QBMAN_RESULT_CGCU	0x28
#define QBMAN_RESULT_BPSCN	0x29
#define QBMAN_RESULT_CSCN_WQ	0x2a
/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE	0x48
#define QBMAN_FQ_FORCE		0x49
#define QBMAN_FQ_XON		0x4d
#define QBMAN_FQ_XOFF		0x4e
/* structure of enqueue descriptor */
struct qbman_eq_desc {
	/* ... */
};
struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	/* ... */
};
/* buffer release descriptor */
struct qbman_release_desc {
	/* ... */
};
/* Management command result codes */
#define QBMAN_MC_RSLT_OK	0xf0
#define CODE_CDAN_WE_EN		0x1
#define CODE_CDAN_WE_CTX	0x4
/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void __iomem *addr_cinh;
	/* ... */

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		/* ... */
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	/* ... */
	spinlock_t access_spinlock;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);

void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
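/*
 * Example (illustrative sketch, not part of this API): acquire a small batch
 * of buffer addresses from a buffer pool. "swp" and "bpid" are assumed to
 * have been obtained elsewhere; the return value is the number of buffers
 * actually acquired (possibly fewer than requested) or a negative error.
 *
 *	u64 bufs[7];
 *	int n = qbman_swp_acquire(swp, bpid, bufs, ARRAY_SIZE(bufs));
 *
 *	if (n <= 0)
 *		... pool empty or command failed ...
 */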
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);
void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	return qbman_swp_enqueue_ptr(s, d, fd);
}
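/*
 * Example (illustrative sketch, not part of this API): enqueue one frame to
 * an FQ, retrying while the EQCR is busy. "swp", "fqid" and "fd" are assumed
 * to have been set up elsewhere.
 *
 *	struct qbman_eq_desc ed;
 *	int ret;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	do {
 *		ret = qbman_swp_enqueue(swp, &ed, fd);
 *	} while (ret == -EBUSY);
 */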
/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}
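/*
 * Example (illustrative sketch): enqueue a caller-provided array "fds" of
 * "n" frame descriptors with a single descriptor. A short return means the
 * EQCR filled up, so the remainder is resubmitted; DCA flags are not used
 * (NULL).
 *
 *	struct qbman_eq_desc ed;
 *	int done = 0, ret;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	while (done < n) {
 *		ret = qbman_swp_enqueue_multiple(swp, &ed, &fds[done],
 *						 NULL, n - done);
 *		if (ret < 0)
 *			break;
 *		done += ret;
 *	}
 */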
/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}
/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}
/**
 * qbman_result_is_SCN() - Check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}
/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}
/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}
/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}
/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}
/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}
/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}
/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}
/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}
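/*
 * Example (illustrative sketch): classify a dequeue entry "dq" before
 * touching its payload.
 *
 *	if (qbman_result_is_DQ(dq)) {
 *		... a frame dequeue response: extract and process the FD ...
 *	} else if (qbman_result_is_FQDAN(dq) || qbman_result_is_CDAN(dq)) {
 *		... data availability notification: schedule a pull dequeue ...
 *	} else if (qbman_result_is_SCN(dq)) {
 *		... some other state-change notification ...
 *	}
 */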
/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}
#define SCN_RID_MASK 0x00FFFFFF
/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}
/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}
/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that an FQ can end up in the parked
 * state; this command moves it back to the scheduled state.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}
/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}
/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}
/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If an FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
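/*
 * Example (illustrative sketch): temporarily stop dequeues from an FQ while
 * its consumer cannot keep up, then resume.
 *
 *	qbman_swp_fq_xoff(swp, fqid);
 *	... consumer drains its backlog ...
 *	qbman_swp_fq_xon(swp, fqid);
 */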
/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */
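/*
 * Example (illustrative sketch): typical CDAN-driven flow. The channel is
 * armed once with a context; when a CDAN carrying that context arrives, the
 * channel is pull-dequeued and then re-armed for the next notification.
 *
 *	qbman_swp_CDAN_set_context_enable(swp, chid, ctx);
 *	...
 *	on receiving a CDAN:
 *		... issue pull dequeue(s) against the channel ...
 *		qbman_swp_CDAN_enable(swp, chid);
 */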
/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s: the software portal object
 * @channelid: the channel index
 * @ctx: the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}
/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}
/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}
/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx: the context set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	return cmd;
}
struct qbman_fq_query_np_rslt {
	/* ... */
};
int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
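/*
 * Example (illustrative sketch, assuming a zero return from
 * qbman_fq_query_state() indicates success): read the current fill level
 * of an FQ.
 *
 *	struct qbman_fq_query_np_rslt state;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &state)) {
 *		u32 frames = qbman_fq_state_frame_count(&state);
 *		u32 bytes = qbman_fq_state_byte_count(&state);
 *		...
 *	}
 */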
struct qbman_bp_query_rslt {
	/* ... */
};
int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
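/*
 * Example (illustrative sketch, assuming a zero return from qbman_bp_query()
 * indicates success): check how many free buffers remain in a buffer pool.
 *
 *	struct qbman_bp_query_rslt bp;
 *
 *	if (!qbman_bp_query(swp, bpid, &bp))
 *		num_free = qbman_bp_info_num_free_bufs(&bp);
 */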
/**
 * qbman_swp_release() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}
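/*
 * Example (illustrative sketch): return two buffers to pool "bpid", retrying
 * while the release command ring is busy.
 *
 *	struct qbman_release_desc rd;
 *	u64 bufs[2] = { buf0, buf1 };
 *	int ret;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	do {
 *		ret = qbman_swp_release(swp, &rd, bufs, 2);
 *	} while (ret == -EBUSY);
 */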
/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the pull dequeue descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}
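/*
 * Example (illustrative sketch): issue a volatile (pull) dequeue of up to 16
 * frames from an FQ into a DMA-able storage area. "storage"/"storage_phys"
 * are assumed to come from a coherent DMA allocation made elsewhere; the
 * busy-wait is shown only for brevity.
 *
 *	struct qbman_pull_desc pd;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (!qbman_swp_pull(swp, &pd)) {
 *		while (!qbman_result_has_new_result(swp, storage))
 *			;
 *		... parse the dequeue entries written to "storage" ...
 *	}
 */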
/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}
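/*
 * Example (illustrative sketch): drain the DQRR in push-dequeue mode. Each
 * entry is consumed exactly once after it has been processed.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		if (qbman_result_is_DQ(dq))
 *			... process the dequeued frame ...
 *		else
 *			... handle the notification ...
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */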
#endif /* __FSL_QBMAN_PORTAL_H */