// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"
#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_5000   0x05000000

#define QMAN_REV_MASK   0xffff0000

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQAR         0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI         0xa00
#define QBMAN_CINH_SWP_DCAP         0xac0
#define QBMAN_CINH_SWP_SDQCR        0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR          0xcc0
#define QBMAN_CINH_SWP_ISR          0xe00
#define QBMAN_CINH_SWP_IER          0xe40
#define QBMAN_CINH_SWP_ISDR         0xe80
#define QBMAN_CINH_SWP_IIR          0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb
enum qbman_sdqcr_dct {
	qbman_sdqcr_dct_null = 0,
	qbman_sdqcr_dct_prio_ics,
	qbman_sdqcr_dct_active_ics,
	qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
	qbman_sdqcr_fc_one = 0,
	qbman_sdqcr_fc_up_to_3 = 1
};
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
	return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
					u32 value)
{
	writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
	return p->addr_cena + offset;
}
#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0
static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm,
				    u8 dcm, u8 epm, int sd, int sp, int se,
				    int dp, int de, int ep)
{
	return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
		est << SWP_CFG_EST_SHIFT |
		wn << SWP_CFG_WN_SHIFT |
		rpm << SWP_CFG_RPM_SHIFT |
		dcm << SWP_CFG_DCM_SHIFT |
		epm << SWP_CFG_EPM_SHIFT |
		sd << SWP_CFG_SD_SHIFT |
		sp << SWP_CFG_SP_SHIFT |
		se << SWP_CFG_SE_SHIFT |
		dp << SWP_CFG_DP_SHIFT |
		de << SWP_CFG_DE_SHIFT |
		ep << SWP_CFG_EP_SHIFT);
}

#define QMAN_RT_MODE	   0x00000100
/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
	struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
	u32 reg;

	if (!p)
		return NULL;

	p->desc = d;
	p->mc.valid_bit = QB_VALID_BIT;
	p->sdq = 0;
	p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
	p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
	p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		p->mr.valid_bit = QB_VALID_BIT;

	atomic_set(&p->vdq.available, 1);
	p->vdq.valid_bit = QB_VALID_BIT;
	p->dqrr.next_idx = 0;
	p->dqrr.valid_bit = QB_VALID_BIT;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
		p->dqrr.dqrr_size = 4;
		p->dqrr.reset_bug = 1;
	} else {
		p->dqrr.dqrr_size = 8;
		p->dqrr.reset_bug = 0;
	}

	p->addr_cena = d->cena_bar;
	p->addr_cinh = d->cinh_bar;

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		memset(p->addr_cena, 0, 64 * 1024);

	reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
				1, /* Writes Non-cacheable */
				0, /* EQCR_CI stashing threshold */
				3, /* RPM: Valid bit mode, RCR in array mode */
				2, /* DCM: Discrete consumption ack mode */
				3, /* EPM: Valid bit mode, EQCR in array mode */
				1, /* mem stashing drop enable == TRUE */
				1, /* mem stashing priority == TRUE */
				1, /* mem stashing enable == TRUE */
				1, /* dequeue stashing priority == TRUE */
				0, /* dequeue stashing enable == FALSE */
				0); /* EQCR_CI stashing priority == FALSE */
	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
		reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
		       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
		       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */

	qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
	reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
	if (!reg) {
		pr_err("qbman: the portal is not enabled!\n");
		kfree(p);
		return NULL;
	}

	if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
		qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
	}

	/*
	 * SDQCR needs to be initialized to 0 when no channels are
	 * being dequeued from or else the QMan HW will indicate an
	 * error. The values that were calculated above will be
	 * applied when dequeues from a specific channel are enabled.
	 */
	qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

	return p;
}
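/*
 * Example (illustrative sketch only, not part of the driver): a typical
 * caller fills a qbman_swp_desc from its device resources and then creates
 * the portal object. The descriptor field values and the layer that usually
 * performs this (the dpaa2-io service) are assumptions here, not defined in
 * this file.
 *
 *	struct qbman_swp_desc pd;
 *	struct qbman_swp *swp;
 *
 *	pd.cena_bar = cena_va;     // cache-enabled portal mapping
 *	pd.cinh_bar = cinh_va;     // cache-inhibited portal mapping
 *	pd.qman_version = version; // read from the DPIO attributes
 *	swp = qbman_swp_init(&pd);
 *	if (!swp)
 *		return -ENODEV;
 */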
/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}
/**
 * qbman_swp_interrupt_read_status() - read interrupt status register
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - clear interrupt status bits
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}
/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: nonzero to inhibit all interrupt sources, zero to un-inhibit them
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
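/*
 * Example (illustrative sketch only): a typical interrupt handler pairs
 * these accessors as read-status / clear-status, with the inhibit register
 * used to mask the portal around deferred (NAPI-style) processing.
 *
 *	u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *	if (!status)
 *		return IRQ_NONE;
 *	qbman_swp_interrupt_clear_status(swp, status);
 *	qbman_swp_interrupt_set_inhibit(swp, 1);  // mask while polling
 *	// ... schedule dequeue processing, then later re-enable:
 *	qbman_swp_interrupt_set_inhibit(swp, 0);
 */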
/*
 * Different management commands all use this common base layer of code to
 * issue commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management
 * command (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
	else
		return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}

/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	return ret;
}
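/*
 * Example (illustrative sketch only): the start/submit/result trio is
 * normally driven through a polling wrapper such as qbman_swp_mc_complete()
 * (assumed here to be declared in qbman-portal.h); the open-coded loop below
 * shows the same pattern.
 *
 *	void *cmd = qbman_swp_mc_start(swp);
 *	void *rslt;
 *
 *	if (!cmd)
 *		return -EBUSY;
 *	// ... fill in the command fields, leaving the verb byte alone ...
 *	qbman_swp_mc_submit(swp, cmd, QBMAN_MC_ACQUIRE);
 *	do {
 *		rslt = qbman_swp_mc_result(swp);
 *	} while (!rslt);  // a real caller would bound this loop
 */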
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT 0
enum qb_enqueue_commands {
	enqueue_empty = 0,
	enqueue_response_always = 1,
	enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
	if (respond_success)
		d->verb |= enqueue_response_always;
	else
		d->verb |= enqueue_rejects_to_fq;
}
/*
 * Exactly one of the following descriptor "targets" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - enqueue to a frame queue
 * - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}
#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
						   u8 idx)
{
	if (idx < 16)
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
				     QMAN_RT_MODE);
	else
		qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
				     (idx - 16) * 4,
				     QMAN_RT_MODE);
}

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		      const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc *p;
	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);

	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p->dca, &d->dca, 31);
	memcpy(&p->fd, fd, sizeof(*fd));

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		/* Set the verb byte, have to substitute in the valid-bit */
		dma_wmb();
		p->verb = d->verb | EQAR_VB(eqar);
	} else {
		p->verb = d->verb | EQAR_VB(eqar);
		dma_wmb();
		qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
	}

	return 0;
}
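/*
 * Example (illustrative sketch only): enqueue one frame descriptor to a
 * frame queue, retrying while the EQCR is busy. 'swp', 'fqid' and 'fd' are
 * assumed to come from the caller's context.
 *
 *	struct qbman_eq_desc ed;
 *	int ret;
 *
 *	qbman_eq_desc_clear(&ed);
 *	qbman_eq_desc_set_no_orp(&ed, 0);  // rejections go back to a FQ
 *	qbman_eq_desc_set_fq(&ed, fqid);
 *	do {
 *		ret = qbman_swp_enqueue(swp, &ed, fd);
 *	} while (ret == -EBUSY);
 */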
/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	WARN_ON(channel_idx > 15);
	*enabled = src & (1 << channel_idx);
}
/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << channel_idx;
	else
		s->sdq &= ~(1 << channel_idx);

	/* Read back the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
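/*
 * Example (illustrative sketch only): enable push dequeue on channel 0 and
 * read back the cached setting with qbman_swp_push_get().
 *
 *	int enabled = 0;
 *
 *	qbman_swp_push_set(swp, 0, 1);
 *	qbman_swp_push_get(swp, 0, &enabled);
 *	WARN_ON(!enabled);
 */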
/* Volatile (pull) dequeue */

#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
	qb_pull_dt_channel,
	qb_pull_dt_workqueue,
	qb_pull_dt_framequeue
};
/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	d->numf = numframes - 1;
}
/*
 * Exactly one of the following descriptor "actions" should be set. (Calling
 * any one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		/* Set the verb byte, have to substitute in the valid-bit */
		p->verb = d->verb | s->vdq.valid_bit;
		s->vdq.valid_bit ^= QB_VALID_BIT;
	} else {
		p->verb = d->verb | s->vdq.valid_bit;
		s->vdq.valid_bit ^= QB_VALID_BIT;
		dma_wmb();
		qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
	}

	return 0;
}
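/*
 * Example (illustrative sketch only): issue a volatile dequeue of up to 16
 * frames into caller-provided storage, then poll the storage for the first
 * result. 'storage'/'storage_phys' are assumed to be a DMA-mapped dpaa2_dq
 * array owned by the caller, and 'fqid' comes from the caller's context.
 *
 *	struct qbman_pull_desc pd;
 *	const struct dpaa2_dq *dq = storage;
 *
 *	qbman_pull_desc_clear(&pd);
 *	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
 *	qbman_pull_desc_set_numframes(&pd, 16);
 *	qbman_pull_desc_set_fq(&pd, fqid);
 *	if (qbman_swp_pull(swp, &pd))
 *		return -EBUSY;
 *	while (!qbman_result_has_new_result(swp, dq))
 *		;  // a real caller would bound this loop
 */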
#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * checks, because we've already determined that a new entry is
	 * available and we've invalidated the cacheline before reading it, so
	 * the valid-bit behaviour is repaired and should tell us what we
	 * already knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
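/*
 * Example (illustrative sketch only): drain whatever is currently sitting
 * in the DQRR ring, consuming each entry once it has been processed.
 *
 *	const struct dpaa2_dq *dq;
 *
 *	while ((dq = qbman_swp_dqrr_next(swp))) {
 *		// ... process the result, e.g. via dpaa2_dq_fd(dq) ...
 *		qbman_swp_dqrr_consume(swp, dq);
 *	}
 */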
/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s:  the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in pull dequeue command
 * @s:  the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 * dequeue result.
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in
 * host-endian format. As such, once the user has called
 * qbman_result_has_new_result() and been returned a valid dequeue result,
 * they should not call it again on the same memory location (except of course
 * if another dequeue command has been executed to produce a new result to
 * that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
	if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
		return 0;

	/*
	 * Set token to be 0 so we will detect change back to 1
	 * next time the looping is traversed. Const is cast away here
	 * as we want users to treat the dequeue responses as read only.
	 */
	((struct dpaa2_dq *)dq)->dq.tok = 0;

	/*
	 * Determine whether VDQCR is available based on whether the
	 * current result is sitting in the first storage location of
	 * the busy command.
	 */
	if (s->vdq.storage == dq) {
		s->vdq.storage = NULL;
		atomic_inc(&s->vdq.available);
	}

	return 1;
}
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor
 * @bpid: the ID of the buffer pool
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d:      the release descriptor
 * @enable: whether to assert the RCDI interrupt source
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
	if (enable)
		d->verb |= 1 << 6;
	else
		d->verb &= ~(1 << 6);
}
#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be between 1 and 7,
 *               inclusive
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		/*
		 * Set the verb byte, have to substitute in the valid-bit
		 * and the number of buffers.
		 */
		dma_wmb();
		p->verb = d->verb | RAR_VB(rar) | num_buffers;
	} else {
		p->verb = d->verb | RAR_VB(rar) | num_buffers;
		dma_wmb();
		qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
				     RAR_IDX(rar) * 4, QMAN_RT_MODE);
	}

	return 0;
}
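/*
 * Example (illustrative sketch only): seed a buffer pool with a batch of
 * freshly mapped buffers. 'bpid' and the DMA addresses in 'bufs' are assumed
 * to come from the caller.
 *
 *	struct qbman_release_desc rd;
 *	u64 bufs[7];
 *	int ret;
 *
 *	// ... fill bufs[] with DMA addresses, e.g. from dma_map_single() ...
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	do {
 *		ret = qbman_swp_release(swp, &rd, bufs, 7);
 *	} while (ret == -EBUSY);
 */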
struct qbman_acquire_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 num;
	u8 reserved2[59];
};

struct qbman_acquire_rslt {
	u8 verb;
	u8 rslt;
	__le16 reserved;
	u8 num;
	u8 reserved2[3];
	__le64 buf[7];
};
/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, or a negative error code if
 * the acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->bpid = cpu_to_le16(bpid);
	p->num = num_buffers;

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
	if (unlikely(!r)) {
		pr_err("qbman: acquire from BPID %d failed, no response\n",
		       bpid);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	WARN_ON(r->num > num_buffers);

	/* Copy the acquired buffers to the caller's array */
	for (i = 0; i < r->num; i++)
		buffers[i] = le64_to_cpu(r->buf[i]);

	return (int)r->num;
}
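/*
 * Example (illustrative sketch only): drain up to 7 buffers at a time from
 * a buffer pool, e.g. when tearing the pool down.
 *
 *	u64 bufs[7];
 *	int n;
 *
 *	do {
 *		n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *		// ... unmap and free the n returned buffer addresses ...
 *	} while (n > 0);
 */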
struct qbman_alt_fq_state_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
	u8 verb;
	u8 rslt;
	u8 reserved[62];
};
#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
			   u8 alt_fq_verb)
{
	struct qbman_alt_fq_state_desc *p;
	struct qbman_alt_fq_state_rslt *r;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, alt_fq_verb);
	if (unlikely(!r)) {
		pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
		       alt_fq_verb);
		return -EIO;
	}

	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
		       fqid, r->verb, r->rslt);
		return -EIO;
	}

	return 0;
}
struct qbman_cdan_ctrl_desc {
	u8 verb;
	u8 reserved;
	__le16 ch;
	u8 we;
	u8 ctrl;
	__le16 reserved2;
	__le64 cdan_ctx;
	u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
	u8 verb;
	u8 rslt;
	__le16 ch;
	u8 reserved[60];
};
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
		       u8 we_mask, u8 cdan_en,
		       u64 ctx)
{
	struct qbman_cdan_ctrl_desc *p = NULL;
	struct qbman_cdan_ctrl_rslt *r = NULL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* Encode the caller-provided attributes */
	p->ch = cpu_to_le16(channelid);
	p->we = we_mask;
	if (cdan_en)
		p->ctrl = 1;
	else
		p->ctrl = 0;
	p->cdan_ctx = cpu_to_le64(ctx);

	/* Complete the management command */
	r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
	if (unlikely(!r)) {
		pr_err("qbman: wqchan config failed, no response\n");
		return -EIO;
	}

	WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

	/* Determine success or failure */
	if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
		pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
		       channelid, r->rslt);
		return -EIO;
	}

	return 0;
}
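/*
 * Example (illustrative sketch only): enable the channel dequeue available
 * notification (CDAN) for a channel, writing both the enable bit and a
 * caller-chosen 64-bit context. The CDAN_WE_EN/CDAN_WE_CTX write-enable
 * mask names are assumed to be provided by qbman-portal.h; they are not
 * defined in this file.
 *
 *	u64 ctx = (u64)(uintptr_t)my_channel_ptr;  // returned with the CDAN
 *
 *	if (qbman_swp_CDAN_set(swp, channelid,
 *			       CDAN_WE_EN | CDAN_WE_CTX,  // assumed masks
 *			       1, ctx))
 *		return -EIO;
 */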
#define QBMAN_RESPONSE_VERB_MASK	0x7f
#define QBMAN_FQ_QUERY_NP		0x45
#define QBMAN_BP_QUERY			0x32

struct qbman_fq_query_desc {
	u8 verb;
	u8 reserved[3];
	__le32 fqid;
	u8 reserved2[56];
};
int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
			 struct qbman_fq_query_np_rslt *r)
{
	struct qbman_fq_query_desc *p;
	void *resp;

	p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	/* FQID is a 24 bit value */
	p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
	resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
	if (!resp) {
		pr_err("qbman: Query FQID %d NP fields failed, no response\n",
		       fqid);
		return -EIO;
	}
	*r = *(struct qbman_fq_query_np_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
		       fqid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->frm_cnt) & 0x00FFFFFF;
}

u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
{
	return le32_to_cpu(r->byte_cnt);
}
struct qbman_bp_query_desc {
	u8 verb;
	u8 reserved;
	__le16 bpid;
	u8 reserved2[60];
};

int qbman_bp_query(struct qbman_swp *s, u16 bpid,
		   struct qbman_bp_query_rslt *r)
{
	struct qbman_bp_query_desc *p;
	void *resp;

	p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
	if (!p)
		return -EBUSY;

	p->bpid = cpu_to_le16(bpid);
	resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
	if (!resp) {
		pr_err("qbman: Query BPID %d fields failed, no response\n",
		       bpid);
		return -EIO;
	}
	*r = *(struct qbman_bp_query_rslt *)resp;
	/* Decode the outcome */
	WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);

	/* Determine success or failure */
	if (r->rslt != QBMAN_MC_RSLT_OK) {
		pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
		       bpid, r->rslt);
		return -EIO;
	}

	return 0;
}

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
	return le32_to_cpu(a->fill);
}
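/*
 * Example (illustrative sketch only): read the current fill level of a
 * buffer pool via the BP query management command.
 *
 *	struct qbman_bp_query_rslt rslt;
 *	u32 free_bufs;
 *
 *	if (qbman_bp_query(swp, bpid, &rslt))
 *		return -EIO;
 *	free_bufs = qbman_bp_info_num_free_bufs(&rslt);
 */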
);