/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H

#include <soc/fsl/dpaa2-fd.h>
/* qbman software portal descriptor structure */
struct qbman_swp_desc {
	void *cena_bar; /* Cache-enabled portal base address */
	void __iomem *cinh_bar; /* Cache-inhibited portal base address */
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20
/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
	u8 verb;
	u8 numf;
	u8 tok;
	u8 reserved;
	__le32 dq_src;
	__le64 rsp_addr;
	u64 rsp_addr_virt;
	u8 padding[40];
};
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect ICS */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no ICS */
	qbman_pull_type_active_noics
};
/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK 0x7f
#define QBMAN_RESULT_DQ 0x60
#define QBMAN_RESULT_FQRN 0x21
#define QBMAN_RESULT_FQRNI 0x22
#define QBMAN_RESULT_FQPN 0x24
#define QBMAN_RESULT_FQDAN 0x25
#define QBMAN_RESULT_CDAN 0x26
#define QBMAN_RESULT_CSCN_MEM 0x27
#define QBMAN_RESULT_CGCU 0x28
#define QBMAN_RESULT_BPSCN 0x29
#define QBMAN_RESULT_CSCN_WQ 0x2a
/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE 0x48
#define QBMAN_FQ_FORCE 0x49
#define QBMAN_FQ_XON 0x4d
#define QBMAN_FQ_XOFF 0x4e
/* structure of enqueue descriptor */
struct qbman_eq_desc {
	u8 verb;
	u8 dca;
	__le16 seqnum;
	__le16 orpid;
	__le16 reserved1;
	__le32 tgtid;
	__le32 tag;
	__le16 qdbin;
	u8 qpri;
	u8 reserved[3];
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
	u8 fd[32];
};
/* buffer release descriptor */
struct qbman_release_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	__le32 reserved2;
	__le64 buf[7];
};
/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0

#define CODE_CDAN_WE_EN 0x1
#define CODE_CDAN_WE_CTX 0x4
/* portal data structure */
struct qbman_swp {
	const struct qbman_swp_desc *desc;
	void __iomem *addr_cinh;

	/* Management commands */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mc;

	/* Management response */
	struct {
		u32 valid_bit; /* 0x00 or 0x80 */
	} mr;

	/* Volatile dequeues */
	struct {
		atomic_t available; /* indicates if a command can be sent */
		u32 valid_bit; /* 0x00 or 0x80 */
		struct dpaa2_dq *storage; /* NULL if DQRR */
	} vdq;

	/* DQRR */
	struct {
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;
};
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
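
/*
 * Illustrative sketch, not part of the portal API: acknowledging a DQRI
 * (dequeue ring) interrupt. The portal pointer is assumed to come from
 * qbman_swp_init().
 */
static inline void example_ack_dqri(struct qbman_swp *p)
{
	u32 status = qbman_swp_interrupt_read_status(p);

	if (status & QBMAN_SWP_INTERRUPT_DQRI)
		qbman_swp_interrupt_clear_status(p, QBMAN_SWP_INTERRUPT_DQRI);
}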
void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
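
/*
 * Illustrative sketch, not part of the portal API: enabling push dequeues
 * on channel index 0 only if they are not already enabled.
 */
static inline void example_enable_push_dequeue(struct qbman_swp *p)
{
	int enabled;

	qbman_swp_push_get(p, 0, &enabled);
	if (!enabled)
		qbman_swp_push_set(p, 0, 1);
}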
void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);

int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
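
/*
 * Illustrative sketch, not part of the portal API: issuing a volatile
 * (pull) dequeue of up to 16 frames from one FQ. 'storage' and
 * 'storage_phys' are assumed to be a caller-managed DMA-mapped result
 * area; completion is detected later via qbman_result_has_new_result().
 */
static inline int example_pull_fq(struct qbman_swp *s, u32 fqid,
				  struct dpaa2_dq *storage,
				  dma_addr_t storage_phys)
{
	struct qbman_pull_desc pd;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&pd, 16);
	qbman_pull_desc_set_fq(&pd, fqid);

	return qbman_swp_pull(s, &pd);
}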
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
		      const struct dpaa2_fd *fd);
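
/*
 * Illustrative sketch, not part of the portal API: a minimal ORP-less
 * enqueue of one frame descriptor to a frame queue.
 */
static inline int example_enqueue_fq(struct qbman_swp *p, u32 fqid,
				     const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 1);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(p, &ed, fd);
}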
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
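
/*
 * Illustrative sketch, not part of the portal API: releasing a batch of
 * buffers to a pool and acquiring one back. 'bufs' is assumed to hold
 * DMA addresses of buffers sized for the pool.
 */
static inline int example_release_and_acquire(struct qbman_swp *s, u16 bpid,
					      const u64 *bufs,
					      unsigned int num)
{
	struct qbman_release_desc rd;
	u64 buf;
	int ret;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	ret = qbman_swp_release(s, &rd, bufs, num);
	if (ret)
		return ret;

	/* a negative return is an error; otherwise the count acquired */
	ret = qbman_swp_acquire(s, bpid, &buf, 1);
	return ret < 0 ? ret : 0;
}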
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx);
void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
 *
 * DQRR entries may contain non-dequeue results, i.e. notifications
 */
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}
/**
 * qbman_result_is_SCN() - check whether the dequeue result is a notification
 * @dq: the dequeue result to be checked
 */
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
	return !qbman_result_is_DQ(dq);
}
/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}

/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}

/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}

/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}

/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}

static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}

/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}

static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
	return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}
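
/*
 * Illustrative sketch, not part of the portal API: draining the DQRR and
 * classifying entries with the predicates above. Frame and notification
 * handling are left as hypothetical callbacks.
 */
static inline void example_drain_dqrr(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
		if (qbman_result_is_DQ(dq)) {
			/* handle_frame(dq); -- hypothetical */
		} else if (qbman_result_is_CDAN(dq)) {
			/* handle_channel_data_available(dq); -- hypothetical */
		}
		qbman_swp_dqrr_consume(s, dq);
	}
}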
/**
 * qbman_result_SCN_state() - Get the state field in State-change notification
 */
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
	return scn->scn.state;
}

#define SCN_RID_MASK 0x00FFFFFF

/**
 * qbman_result_SCN_rid() - Get the resource id in State-change notification
 */
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
	return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}

/**
 * qbman_result_SCN_ctx() - Get the context data in State-change notification
 */
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
	return le64_to_cpu(scn->scn.ctx);
}
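
/*
 * Illustrative sketch, not part of the portal API: unpacking a
 * state-change notification into its state, resource id and context.
 */
static inline void example_decode_scn(const struct dpaa2_dq *scn,
				      u8 *state, u32 *rid, u64 *ctx)
{
	*state = qbman_result_SCN_state(scn);
	*rid = qbman_result_SCN_rid(scn);
	*ctx = qbman_result_SCN_ctx(scn);
}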
/**
 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be scheduled
 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state; this call moves it back to the scheduled state.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}
/**
 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
 * @s: the software portal object
 * @fqid: the index of frame queue to be forced
 *
 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
 * and thus be available for selection by any channel-dequeuing behaviour (push
 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
 * empty at the time this happens, the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}
/**
 * qbman_swp_fq_xon() - sets FQ flow-control to XON
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}
/**
 * qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
 * @s: the software portal object
 * @fqid: the index of frame queue
 *
 * This setting doesn't affect enqueues to the FQ, just dequeues.
 * XOFF FQs will remain in the tentatively-scheduled state, even when
 * non-empty, meaning they won't be selected for scheduled dequeuing.
 * If a FQ is changed to XOFF after it had already become truly-scheduled
 * to a channel, and a pull dequeue of that channel occurs that selects
 * that FQ for dequeuing, then the resulting dq_entry will have no FD.
 * (qbman_result_DQ_fd() will return NULL.)
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
	return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
/* If the user has been allocated a channel object that is going to generate
 * CDANs to another channel, then the qbman_swp_CDAN* functions will be
 * necessary.
 *
 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. The idea is
 * that pull dequeuing will occur in reaction to the CDAN, followed by a
 * reenable step. Each function generates a distinct command to hardware, so a
 * combination function is provided if the user wishes to modify the "context"
 * (which shows up in each CDAN message) each time they reenable, as a single
 * command to hardware.
 */
/**
 * qbman_swp_CDAN_set_context() - Set CDAN context
 * @s: the software portal object
 * @channelid: the channel index
 * @ctx: the context to be set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
					     u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_CTX,
				  0, ctx);
}
/**
 * qbman_swp_CDAN_enable() - Enable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  1, 0);
}
/**
 * qbman_swp_CDAN_disable() - disable CDAN for the channel
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN,
				  0, 0);
}
/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s: the software portal object
 * @channelid: the index of the channel to generate CDAN
 * @ctx: the context set in CDAN
 *
 * Return 0 for success, or negative error code for failure.
 */
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
						    u16 channelid,
						    u64 ctx)
{
	return qbman_swp_CDAN_set(s, channelid,
				  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
				  1, ctx);
}
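
/*
 * Illustrative sketch, not part of the portal API: the CDAN reaction
 * sequence described above -- pull-dequeue from the channel in response
 * to a CDAN, then reenable the notification. Storage management is the
 * caller's responsibility, as in example_pull_fq() above.
 */
static inline int example_handle_cdan(struct qbman_swp *s, u16 channelid,
				      struct dpaa2_dq *storage,
				      dma_addr_t storage_phys)
{
	struct qbman_pull_desc pd;
	int ret;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&pd, 8);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	ret = qbman_swp_pull(s, &pd);
	if (ret)
		return ret;

	/* rearm so the channel can generate its next CDAN */
	return qbman_swp_CDAN_enable(s, channelid);
}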
/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
					  u8 cmd_verb)
{
	int loopvar = 2000;

	qbman_swp_mc_submit(swp, cmd, cmd_verb);

	do {
		cmd = qbman_swp_mc_result(swp);
	} while (!cmd && loopvar--);

	WARN_ON(!loopvar);

	return cmd;
}
struct qbman_fq_query_np_rslt {
	/* hardware-defined 64-byte query response (field layout not shown) */
	u8 raw[64];
};
int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
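
/*
 * Illustrative sketch, not part of the portal API: reading an FQ's
 * occupancy in frames and bytes via the state query.
 */
static inline int example_fq_occupancy(struct qbman_swp *s, u32 fqid,
				       u32 *frames, u32 *bytes)
{
	struct qbman_fq_query_np_rslt state;
	int ret;

	ret = qbman_fq_query_state(s, fqid, &state);
	if (ret)
		return ret;

	*frames = qbman_fq_state_frame_count(&state);
	*bytes = qbman_fq_state_byte_count(&state);
	return 0;
}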
struct qbman_bp_query_rslt {
	/* hardware-defined 64-byte query response (field layout not shown) */
	u8 raw[64];
};
int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r);

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
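
/*
 * Illustrative sketch, not part of the portal API: checking how many
 * free buffers remain in a buffer pool.
 */
static inline int example_bp_free_count(struct qbman_swp *s, u16 bpid,
					u32 *free_bufs)
{
	struct qbman_bp_query_rslt r;
	int ret;

	ret = qbman_bp_query(s, bpid, &r);
	if (ret)
		return ret;

	*free_bufs = qbman_bp_info_num_free_bufs(&r);
	return 0;
}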
#endif /* __FSL_QBMAN_PORTAL_H */