/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

#define IRQNAME		"BMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */

/* Portal register assists */

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x3000
#define BM_REG_RCR_CI_CINH	0x3100
#define BM_REG_RCR_ITR		0x3200
#define BM_REG_CFG		0x3300
#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
#define BM_REG_ISR		0x3e00
#define BM_REG_IER		0x3e40
#define BM_REG_ISDR		0x3e80
#define BM_REG_IIR		0x3ec0

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100
#else
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x0000
#define BM_REG_RCR_CI_CINH	0x0004
#define BM_REG_RCR_ITR		0x0008
#define BM_REG_CFG		0x0100
#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
#define BM_REG_ISR		0x0e00
#define BM_REG_IER		0x0e04
#define BM_REG_ISDR		0x0e08
#define BM_REG_IIR		0x0e0c

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100
#endif

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *   Enum values use 3-letter codes. The first letter matches the portal mode,
 *   the remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};

enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};

/* --- Portal structures --- */

#define BM_RCR_SIZE	8

/* Release Command */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */
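
/*
 * Illustrative example (not part of the driver): a single-BPID release of
 * three buffers passes BM_RCR_VERB_CMD_BPID_SINGLE | (3 & BM_RCR_VERB_BUFCOUNT_MASK)
 * to bm_rcr_pvb_commit(), i.e. 0x20 | 0x03 == 0x23; the commit helper then
 * ORs in the ring's current valid-bit, so the verb byte actually written to
 * the entry is either 0x23 or 0xa3 depending on the ring's lap parity.
 */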

struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT			10000 /* us */
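
/*
 * Illustrative sketch (not part of the driver): decoding an acquire response
 * verb with the masks above, in a hypothetical helper:
 *
 *	static int example_decode_acquire(const union bm_mc_result *rr)
 *	{
 *		if ((rr->verb & BM_MCR_VERB_CMD_MASK) != BM_MCR_VERB_CMD_ACQUIRE)
 *			return -EINVAL;
 *		return rr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; // 0..8 buffers returned
 *	}
 *
 * bman_acquire() below extracts the buffer count in exactly this way.
 */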

struct bm_mc {
	struct bm_mc_command *cr;
	union bm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

struct bm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	return ioread32be(p->addr.ci + offset);
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	iowrite32be(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
}

struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);

static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}

/*
 * This object type refers to a pool; it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, e.g. if different users of
 * the pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;
};

static u32 poll_portal_slow(struct bman_portal *p, u32 is);

static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct bman_portal *p = ptr;
	struct bm_portal *portal = &p->p;
	u32 clear = p->irq_sources;
	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	clear |= poll_portal_slow(p, is);
	bm_out(portal, BM_REG_ISR, clear);
	return IRQ_HANDLED;
}

/* --- RCR API --- */

#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~RCR_CARRY;

	return (struct bm_rcr_entry *)addr;
}
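
/*
 * Worked example (illustrative only): with BM_RCR_SIZE == 8 and 64-byte
 * entries, RCR_SHIFT is 6 and RCR_CARRY is 8 << 6 == 0x200. Assuming the ring
 * base B is 0x200-aligned, incrementing the cursor past the last entry gives
 * B + 0x200, i.e. only the "carry bit" differs; masking it off with
 * addr &= ~RCR_CARRY wraps the pointer back to B without any divide or modulo.
 */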

#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
	/* increment to the next RCR pointer and handle overflow and 'vbit' */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = rcr_carryclear(partial);
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}
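
/*
 * Note on the valid-bit (a simplified sketch, not normative): rcr_inc() flips
 * rcr->vbit every time the cursor wraps, so entries produced on one lap of
 * the ring carry BM_RCR_VERB_VBIT in their verb and entries on the next lap
 * do not. That alternating polarity is how hardware tells a freshly committed
 * entry from a stale one, which is why writing the verb in
 * bm_rcr_pvb_commit() doubles as the commit signal.
 */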

static int bm_rcr_get_avail(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return rcr->available;
}

static int bm_rcr_get_fill(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return BM_RCR_SIZE - 1 - rcr->available;
}
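
/*
 * Accounting note (a sketch, not normative): "avail" and "fill" always sum to
 * BM_RCR_SIZE - 1 == 7, i.e. one slot is deliberately kept in reserve. This
 * is the usual ring-buffer convention that avoids the full-vs-empty ambiguity
 * when the producer and consumer indices are equal.
 */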

static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	struct bm_rcr *rcr = &portal->rcr;

	rcr->ithresh = ithresh;
	bm_out(portal, BM_REG_RCR_ITR, ithresh);
}

static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}

static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
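
/*
 * Worked example (illustrative only): dpaa_cyc_diff() returns how far the
 * consumer index advanced, modulo the ring size. If the cached old_ci was 6
 * and the hardware CI read back is 1, the cyclic difference with
 * BM_RCR_SIZE == 8 is (1 - 6) & 7 == 3, so three more RCR slots become
 * available to the producer.
 */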

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}

static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}

static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
		| (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}
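
/*
 * Example of the vbit initialisation above (a sketch, not normative): the
 * producer index readback effectively counts modulo 2 * BM_RCR_SIZE, so its
 * low three bits are the ring index and the BM_RCR_SIZE bit (0x8) records the
 * lap parity. A PI readback of 0x0b would therefore mean "index 3, odd lap",
 * and rcr->vbit starts out as BM_RCR_VERB_VBIT so that newly written verbs
 * match the polarity the hardware expects next.
 */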

static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");
	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}

/* --- Management command API --- */
static int bm_mc_init(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
		    0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return 0;
}

static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}

static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}
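
/*
 * Sketch of the response ping-pong (illustrative only): the portal exposes
 * two response registers, RR0 and RR1, and completions alternate between
 * them. bm_mc_init() picks the initial rridx from the command register's
 * valid-bit, and each completed command flips both mc->rridx and mc->vbit:
 * e.g. one command's verb is written with the valid-bit set and its result
 * read from mc->rr + rridx, the next command's verb is written with it clear
 * and its result read from the other register. A non-zero verb byte in the
 * expected register therefore identifies the response to the most recently
 * issued command.
 */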

static inline int bm_mc_result_timeout(struct bm_portal *portal,
				       union bm_mc_result **mcr)
{
	int timeout = BM_MCR_TIMEOUT;

	do {
		*mcr = bm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}

/* Disable all BSCN interrupts for the portal */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
	bm_out(portal, BM_REG_SCN(0), 0);
	bm_out(portal, BM_REG_SCN(1), 0);
}

static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled, we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}

	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}

struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
	struct bman_portal *portal;
	int err;

	portal = &per_cpu(bman_affine_portal, c->cpu);
	err = bman_create_portal(portal, c);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);

	return portal;
}

static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	u32 ret = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPAA_ASSERT(!is);
	return ret;
}

int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	p->irq_sources |= bits & BM_PIRQ_VISIBLE;
	bm_out(&p->p, BM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
	return 0;
}

int bm_shutdown_pool(u32 bpid)
{
	int err = 0;
	struct bm_mc_command *bm_cmd;
	union bm_mc_result *bm_res;

	struct bman_portal *p = get_affine_portal();
	while (1) {
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(&p->p);
		bm_cmd->bpid = bpid;
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
			pr_crit("BMan Acquire Command timed out\n");
			err = -ETIMEDOUT;
			goto done;
		}
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			/* Pool is empty */
			goto done;
		}
	}
done:
	put_affine_portal();
	return err;
}

struct gen_pool *bm_bpalloc;

static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}

static int bm_release_bpid(u32 bpid)
{
	int ret;

	ret = bm_shutdown_pool(bpid);
	if (ret) {
		pr_debug("BPID %d leaked\n", bpid);
		return ret;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
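
/*
 * Note on DPAA_GENALLOC_OFF (a sketch of the convention, not normative):
 * gen_pool_alloc() returns 0 on failure, so a genpool seeded with raw BPIDs
 * could not tell "allocated BPID 0" apart from "allocation failed". The pool
 * is therefore seeded with BPID values offset by DPAA_GENALLOC_OFF, and the
 * offset is masked off on allocation and ORed back in on free, as
 * bm_alloc_bpid_range() and bm_release_bpid() do above.
 */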

struct bman_pool *bman_new_pool(void)
{
	struct bman_pool *pool = NULL;
	u32 bpid;

	if (bm_alloc_bpid_range(&bpid, 1))
		return NULL;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto err;

	pool->bpid = bpid;

	return pool;
err:
	bm_release_bpid(bpid);
	return NULL;
}
EXPORT_SYMBOL(bman_new_pool);

void bman_free_pool(struct bman_pool *pool)
{
	bm_release_bpid(pool->bpid);

	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);

int bman_get_bpid(const struct bman_pool *pool)
{
	return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);

static void update_rcr_ci(struct bman_portal *p, int avail)
{
	if (avail)
		bm_rcr_cce_prefetch(&p->p);
	else
		bm_rcr_cce_update(&p->p);
}

int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * we can copy all but the first entry, as this can trigger badness
	 * with the valid-bit
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_release);

int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(bman_acquire);
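
/*
 * Usage sketch for the exported pool API (illustrative only; error handling
 * trimmed, and buf_dma_addr is a placeholder for a real DMA address):
 *
 *	struct bman_pool *pool = bman_new_pool();
 *	struct bm_buffer buf;
 *
 *	bm_buffer_set64(&buf, buf_dma_addr);
 *	if (bman_release(pool, &buf, 1))	// seed the pool with one buffer
 *		...
 *	if (bman_acquire(pool, &buf, 1) == 1)	// take a buffer back out
 *		...
 *	bman_free_pool(pool);
 */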

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
	return portal->config;
}