// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */
#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"
/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQCR_CI      0x840
#define QBMAN_CINH_SWP_EQAR         0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI         0xa00
#define QBMAN_CINH_SWP_DCAP         0xac0
#define QBMAN_CINH_SWP_SDQCR        0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR          0xcc0
#define QBMAN_CINH_SWP_ISR          0xe00
#define QBMAN_CINH_SWP_IER          0xe40
#define QBMAN_CINH_SWP_ISDR         0xe80
#define QBMAN_CINH_SWP_IIR          0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK  0x0f
#define QBMAN_ENQUEUE_FLAG_DCA  (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START   32
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
/* Internal function declarations */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
				    const struct qbman_eq_desc *d,
				    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
				 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
				   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
				      const struct qbman_release_desc *d,
				      const u64 *buffers,
				      unsigned int num_buffers);
/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
	= qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
	= qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
				       const struct qbman_eq_desc *d,
				       const struct dpaa2_fd *fd,
				       int num_frames)
	= qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
	= qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
	= qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers)
	= qbman_swp_release_direct;
/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}
#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0
static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
				    u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}
#define QMAN_RT_MODE	   0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	else
		return (2 * ringsize) - (first - last);
}
/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;
	u32 mask_size;
	u32 eqcr_pi;

	if (!p)
		return NULL;

	spin_lock_init(&p->access_spinlock);

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			0, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			2, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
	} else {
		memset(p->addr_cena, 0, 64 * 1024);
		reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
			1, /* Writes Non-cacheable */
			1, /* EQCR_CI stashing threshold */
			3, /* RPM: RCR in array mode */
			2, /* DCM: Discrete consumption ack */
			0, /* EPM: EQCR in ring mode */
			1, /* mem stashing drop enable */
			1, /* mem stashing priority enable */
			1, /* mem stashing enable */
			1, /* dequeue stashing priority enable */
			0, /* dequeue stashing enable */
			0); /* EQCR_CI stashing priority enable */
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
	}

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}
	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error.  The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	p->eqcr.pi_ring_size = 8;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		p->eqcr.pi_ring_size = 32;
		qbman_swp_enqueue_ptr =
			qbman_swp_enqueue_mem_back;
		qbman_swp_enqueue_multiple_ptr =
			qbman_swp_enqueue_multiple_mem_back;
		qbman_swp_enqueue_multiple_desc_ptr =
			qbman_swp_enqueue_multiple_desc_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
		qbman_swp_release_ptr = qbman_swp_release_mem_back;
	}

	for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
		p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
	eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
	p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
	p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
	p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
			& p->eqcr.pi_ci_mask;
	p->eqcr.available = p->eqcr.pi_ring_size;

	return p;
}
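
/*
 * Usage sketch (illustrative only, not part of the upstream driver): bringing
 * up a portal from a descriptor. In practice the dpaa2-dpio driver fills in
 * the descriptor from the DPIO device's MC attributes; "desc" here is assumed
 * to already carry valid cena_bar/cinh_bar mappings and qman_version.
 */
static __maybe_unused struct qbman_swp *
example_swp_bringup(const struct qbman_swp_desc *desc)
{
	struct qbman_swp *swp = qbman_swp_init(desc);

	if (!swp)
		return NULL;	/* portal not enabled or out of memory */

	/* Keep portal IRQs inhibited until an ISR is wired up */
	qbman_swp_interrupt_set_inhibit(swp, 1);
	return swp;
}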
/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}
/**
 * qbman_swp_interrupt_read_status() - Read the portal interrupt status
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - Clear bits in the interrupt status
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit the IRQs
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
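
/*
 * Usage sketch (illustrative only): a typical IRQ handler sequence built on
 * the accessors above - read the status, do the portal processing, then
 * clear exactly the bits that were handled. The processing step is elided.
 */
static __maybe_unused void example_swp_irq_sequence(struct qbman_swp *swp)
{
	u32 status = qbman_swp_interrupt_read_status(swp);

	if (!status)
		return;		/* interrupt was not for this portal */

	/* ...dequeue/processing work would go here... */

	qbman_swp_interrupt_clear_status(swp, status);
}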
/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}
/*
 * Merges in the caller-supplied command verb (which should not include
 * the valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}
/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
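
/*
 * Usage sketch (illustrative only): the start/submit/poll cycle that the
 * acquire/query helpers later in this file go through. The real callers use
 * the qbman_swp_mc_complete() wrapper from qbman-portal.h, which busy-waits
 * on qbman_swp_mc_result() in essentially this way.
 */
static __maybe_unused void *example_mc_roundtrip(struct qbman_swp *swp,
						 u8 cmd_verb)
{
	int retries = 2000;
	void *cmd, *rslt;

	cmd = qbman_swp_mc_start(swp);
	if (!cmd)
		return NULL;

	/* ...fill in the command body here; leave the verb byte alone... */

	qbman_swp_mc_submit(swp, cmd, cmd_verb);
	do {
		rslt = qbman_swp_mc_result(swp);
	} while (!rslt && --retries);

	return rslt;	/* NULL if the command never completed */
}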
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the given enqueue descriptor
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}
/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d: the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}
/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d: the enqueue descriptor
 * @qdid: the id of the queuing destination to be enqueued
 * @qd_bin: the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}
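
/*
 * Usage sketch (illustrative only): enqueue one frame descriptor to a frame
 * queue. "fqid" and "fd" are assumed to come from the caller; the
 * qbman_swp_enqueue() wrapper in qbman-portal.h dispatches to the direct or
 * memory-backed implementation selected at qbman_swp_init() time.
 */
static __maybe_unused int example_enqueue_one(struct qbman_swp *swp, u32 fqid,
					      const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);	/* rejections go back to a FQ */
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(swp, &ed, fd);	/* 0 or -EBUSY */
}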
#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}
/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	u32 flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}
/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 *                                       using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
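
/*
 * Usage sketch (illustrative only): batch enqueue with one descriptor for a
 * whole table of frame descriptors. A return value smaller than "count"
 * means the EQCR filled up and the caller must retry the remainder; the
 * qbman_swp_enqueue_multiple() wrapper in qbman-portal.h is the public entry.
 */
static __maybe_unused int example_enqueue_burst(struct qbman_swp *swp,
						const struct qbman_eq_desc *ed,
						const struct dpaa2_fd *fds,
						int count)
{
	int done = 0;

	while (done < count) {
		int ret = qbman_swp_enqueue_multiple(swp, ed, fds + done,
						     NULL, count - done);
		if (ret < 0)
			return ret;	/* hard error */
		done += ret;		/* ret == 0 means ring full; retry */
	}
	return done;
}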
/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 *                                         using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock(&s->access_spinlock);
	local_irq_save(irq_flags);

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			local_irq_restore(irq_flags);
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	local_irq_restore(irq_flags);
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 *                                            using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 *                                              using multiple enqueue descriptors
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptors
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = *p & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
					eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
				(QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}
/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s: the software portal object
 * @channel_idx: the channel index to query
 * @enabled: returned boolean to show whether the push dequeue is enabled
 *           for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = !!(src & (1 << channel_idx));
}
/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s: the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable: enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Recompute the complete src map. If no channels are enabled,
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
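
/*
 * Usage sketch (illustrative only): enable push dequeue for one channel.
 * After this, frames from the channel show up in the portal's DQRR and can
 * be fetched with qbman_swp_dqrr_next()/qbman_swp_dqrr_consume().
 */
static __maybe_unused void example_enable_push_dequeue(struct qbman_swp *swp,
						       u8 channel_idx)
{
	int enabled;

	qbman_swp_push_set(swp, channel_idx, 1);
	qbman_swp_push_get(swp, channel_idx, &enabled);
	WARN_ON(!enabled);
}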
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};
/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}
/**
 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 * @d: the pull dequeue descriptor to be set
 * @storage: the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash: to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}
/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d: the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}
/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}
/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}
/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d: the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}
/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}
#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * checks, because we've already determined that a new entry is
	 * available and we've invalidated the cacheline before reading it, so
	 * the valid-bit behaviour is repaired and should tell us what we
	 * already knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * checks, because we've already determined that a new entry is
	 * available and we've invalidated the cacheline before reading it, so
	 * the valid-bit behaviour is repaired and should tell us what we
	 * already knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
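
/*
 * Usage sketch (illustrative only): drain whatever is currently sitting in
 * the DQRR. Entries are returned once by qbman_swp_dqrr_next() (a wrapper in
 * qbman-portal.h) and must eventually be consumed so the hardware can reuse
 * the ring slot; dpaa2_dq_fd() is the accessor from soc/fsl/dpaa2-global.h.
 */
static __maybe_unused int example_drain_dqrr(struct qbman_swp *swp)
{
	const struct dpaa2_dq *dq;
	int cnt = 0;

	while ((dq = qbman_swp_dqrr_next(swp))) {
		const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);

		/* ...process "fd" here... */
		(void)fd;
		qbman_swp_dqrr_consume(swp, dq);
		cnt++;
	}
	return cnt;
}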
/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s: the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}
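
/*
 * Usage sketch (illustrative only): a complete volatile (pull) dequeue into
 * caller-provided storage. "storage"/"storage_phys" are assumed to be a
 * DMA-mapped, 64-byte aligned result array big enough for "numframes"
 * entries; qbman_swp_pull() is the wrapper in qbman-portal.h. A real caller
 * would bound the polling loop or sleep between iterations.
 */
static __maybe_unused int example_pull_dequeue(struct qbman_swp *swp, u32 fqid,
					       struct dpaa2_dq *storage,
					       dma_addr_t storage_phys,
					       u8 numframes)
{
	struct qbman_pull_desc pd;
	int ret;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_numframes(&pd, numframes);
	qbman_pull_desc_set_fq(&pd, fqid);

	ret = qbman_swp_pull(swp, &pd);
	if (ret)
		return ret;	/* -EBUSY: previous pull still in flight */

	while (!qbman_result_has_new_result(swp, storage))
		cpu_relax();	/* wait for the first result to land */

	return 0;
}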
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}
/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d: the release descriptor to be set
 * @bpid: the bpid value to be set
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}
/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 *                                 interrupt source should be asserted after the
 *                                 release command is completed.
 * @d: the release descriptor to be set
 * @enable: enable (1) or disable (0) value
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)
/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}
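
/*
 * Usage sketch (illustrative only): seed a buffer pool with a small batch of
 * buffer addresses. "bpid" and the DMA addresses in "bufs" are assumed to be
 * valid for the pool; qbman_swp_release() is the wrapper in qbman-portal.h
 * and takes at most 7 buffers per command.
 */
static __maybe_unused int example_release_buffers(struct qbman_swp *swp,
						  u16 bpid, const u64 *bufs,
						  unsigned int num)
{
	struct qbman_release_desc rd;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(swp, &rd, bufs, num);	/* 0 or -EBUSY */
}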
/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}
struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};
/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s: the software portal object
 * @bpid: the buffer pool index
 * @buffers: a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, or a negative error code if the
 * acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
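
/*
 * Usage sketch (illustrative only): pull a batch of buffers back out of a
 * pool. Note the success return is the number of buffers actually acquired
 * (which can be fewer than requested when the pool is running dry), not 0.
 */
static __maybe_unused int example_acquire_buffers(struct qbman_swp *swp,
						  u16 bpid, u64 *bufs,
						  unsigned int num)
{
	int ret = qbman_swp_acquire(swp, bpid, bufs, num);

	if (ret < 0)
		return ret;		/* command failed */

	/* ...use bufs[0..ret-1]... */
	return ret;
}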
struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}
struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}
#define QBMAN_RESPONSE_VERB_MASK	0x7f
#define QBMAN_FQ_QUERY_NP		0x45
#define QBMAN_BP_QUERY			0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};
int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}
u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}
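
/*
 * Usage sketch (illustrative only): read the current frame/byte backlog of a
 * frame queue via the non-programmable fields query. The result struct lives
 * on the stack; qbman_fq_query_state() copies the response into it.
 */
static __maybe_unused void example_fq_backlog(struct qbman_swp *swp, u32 fqid)
{
	struct qbman_fq_query_np_rslt state;

	if (qbman_fq_query_state(swp, fqid, &state))
		return;		/* query failed */

	pr_debug("FQ 0x%x: %u frames, %u bytes\n", fqid,
		 qbman_fq_state_frame_count(&state),
		 qbman_fq_state_byte_count(&state));
}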
struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};
int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}
u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}