// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. Response messages are
 * handled outside of interrupt context, in a tasklet scheduled from the hard
 * IRQ handler.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */
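
/*
 * Worked example of the bookkeeping above (illustrative values, not from the
 * datasheet): if a request's source scatterlist needs 3 tx descriptors
 * starting at tx_msg_start = 10, then txin_numd[10] = 3 and entries 11-12
 * carry no count. When the response arrives, pdc_receive_one() reclaims
 * descriptors 10-12 in one step by advancing txin by txin_numd[txin].
 */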
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)
/* descriptor bumping macros */
#define XXD(x, max_mask)              ((x) & (max_mask))
#define TXD(x, max_mask)              XXD((x), (max_mask))
#define RXD(x, max_mask)              XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)          TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)          TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)          RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)          RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))
/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
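/*
 * In the lazy rx interrupt register, the timeout occupies the low 24 bits
 * and the frame count sits above them, so the value above requests an
 * interrupt once 1 frame has arrived or after a 10000-tick timeout. With a
 * frame count of 1, an interrupt is generated for every response.
 */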
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET   0x100

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before each frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32
/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL  0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE  0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL  0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE  0x1
#define CRYPTO_D64_RS0_CD_MASK  ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
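/*
 * The rx status0 register reports the byte offset of the last descriptor
 * written back by the hardware. Since the ring size is a power of two,
 * masking with CRYPTO_D64_RS0_CD_MASK and dividing by RING_ENTRY_SIZE
 * converts that offset into a ring index; pdc_receive() uses this to
 * refresh last_rx_curr.
 */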
/* descriptor flags */
#define D64_CTRL1_EOT  BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC  BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF  BIT(30)	/* end of frame */
#define D64_CTRL1_SOF  BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW  0x00800000
#define RX_STATUS_LEN       0x0000FFFF

#define PDC_TXREGS_OFFSET  0x200
#define PDC_RXREGS_OFFSET  0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX  16384
enum pdc_hw {
	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};
struct pdc_dma_map {
	void *ctx;	/* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};
/* dma registers per channel (xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};
/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */
/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32 PAD[2];
};
/* PDC registers */
struct pdc_regs {
	u32 devcontrol;			/* 0x000 */
	u32 devstatus;			/* 0x004 */
	u32 PAD;
	u32 biststatus;			/* 0x00c */
	u32 PAD[4];
	u32 intstatus;			/* 0x020 */
	u32 intmask;			/* 0x024 */
	u32 gptimer;			/* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;		/* 0x030 (Only in PDC, not FA2) */
	u32 intrcvlazy_1;		/* 0x034 (Only in PDC, not FA2) */
	u32 intrcvlazy_2;		/* 0x038 (Only in PDC, not FA2) */
	u32 intrcvlazy_3;		/* 0x03c (Only in PDC, not FA2) */

	u32 PAD[48];
	u32 fa_intrecvlazy;		/* 0x100 (Only in FA2, not PDC) */
	u32 flowctlthresh;		/* 0x104 */
	u32 wrrthresh;			/* 0x108 */
	u32 gmac_idle_cnt_thresh;	/* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;		/* 0x120 */
	u32 ifioaccessbyte;		/* 0x124 */
	u32 ifioaccessdata;		/* 0x128 */

	u32 PAD[21];
	u32 phyaccess;			/* 0x180 */
	u32 PAD;
	u32 phycontrol;			/* 0x188 */
	u32 txqctl;			/* 0x18c */
	u32 rxqctl;			/* 0x190 */
	u32 gpioselect;			/* 0x194 */
	u32 gpio_output_en;		/* 0x198 */
	u32 PAD;			/* 0x19c */
	u32 txq_rxq_mem_ctl;		/* 0x1a0 */
	u32 memory_ecc_status;		/* 0x1a4 */
	u32 serdes_ctl;			/* 0x1a8 */
	u32 serdes_status0;		/* 0x1ac */
	u32 serdes_status1;		/* 0x1b0 */
	u32 PAD[11];			/* 0x1b4-1dc */
	u32 clk_ctl_st;			/* 0x1e0 */
	u32 hw_war;			/* 0x1e4 (Only in PDC, not FA2) */
	u32 pwrctl;			/* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];	/* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};
/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t dmabase;	/* DMA address of start of ring */
	void *vbase;		/* base kernel virtual address of ring */
	u32 size;		/* ring allocation size in bytes */
};
/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32 rxin_numd;
	u8 *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};
/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;		/* start of PDC registers */

	struct dma64_regs *txregs_64;	/* dma tx engine registers */
	struct dma64_regs *rxregs_64;	/* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd *txd_64;		/* tx descriptor ring */
	struct dma64dd *rxd_64;		/* rx descriptor ring */

	/* descriptor ring sizes */
	u32 ntxd;	/* # tx descriptors */
	u32 nrxd;	/* # rx descriptors */
	u32 nrxpost;	/* # rx buffers to keep posted */
	u32 ntxpost;	/* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32 txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32 tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32 txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32 txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32 rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32 rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32 last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32 rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed.
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;	/* debug FS stats file for this PDC */

	/* counters */
	u32 pdc_requests;	/* number of request messages submitted */
	u32 pdc_replies;	/* number of reply messages received */
	u32 last_tx_not_done;	/* too few tx descriptors to indicate done */
	u32 tx_ring_full;	/* unable to accept msg because tx ring full */
	u32 rx_ring_full;	/* unable to accept msg because rx ring full */
	u32 txnobuf;		/* unable to create tx descriptor */
	u32 rxnobuf;		/* unable to create rx descriptor */
	u32 rx_oflow;		/* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:    PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors was processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;	/* virtual addr of start of resp message DMA header */
	u32 frags_rdy;	/* number of fragments ready to read */
	u32 rx_idx;	/* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);
	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:         PDC state
 *
 * Called from the rx tasklet.
 */
static void pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32(&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);
}
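
/*
 * Receive-path flow, as implemented above and below: the hard IRQ handler
 * masks and clears the PDC interrupt and schedules rx_tasklet; the tasklet
 * calls pdc_receive(), which snapshots the hardware descriptor index once
 * and then drains complete responses via pdc_receive_one() until it
 * returns -EAGAIN; finally the tasklet re-enables the interrupt.
 */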

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:      PDC state for the SPU that will process this request
 * @sg:        Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
	/*
	 * The ptr registers take a byte offset into the descriptor ring;
	 * each descriptor is 16 bytes, so index << 4 converts a ring index
	 * into the offset the hardware expects.
	 */
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *           mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:  PDC state for the SPU the buffers are for
 * @sg:    Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX bytes.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:      Interrupt number that has fired
 * @data:     device struct for DMA engine that generated the interrupt
 *
 * We have to mask and clear the device interrupt status flags here, then
 * schedule the rx tasklet, which handles the response messages outside of
 * interrupt context.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Wakeup the deferred handler */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @data: PDC state structure
 */
static void pdc_tasklet_cb(unsigned long data)
{
	struct pdc_state *pdcs = (struct pdc_state *)data;

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:    PDC instance state
 * @ringset: index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	/* rx_status_len lands in the RcvOffset field (bits 7:1), hence << 1 */
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	goto done;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:    Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
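
/*
 * Worked example: a scatterlist with entries of 40000 and 4000 bytes needs
 * (40000 / 16384) + 1 = 3 descriptors for the first entry and 1 for the
 * second, so pdc_desc_count() returns 4. The count is conservative: an
 * entry that is an exact multiple of PDC_DMA_BUF_MAX is still charged one
 * extra descriptor.
 */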

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:   PDC state
 * @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:  The mailbox channel on which the data is sent. The channel
 *         corresponds to a DMA ringset.
 * @data:  The mailbox message to be sent. The message must be a
 *         brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *         -ENOTSUPP if the mailbox message is a type this driver does not
 *                   support
 *         < 0 if an error
 */
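/*
 * Hypothetical client-side sketch (not part of this driver) of how a
 * message reaches pdc_send_data() through the mailbox framework: the client
 * fills a struct brcm_message of type BRCM_MESSAGE_SPU with DMA-able
 * src/dst scatterlists and an opaque ctx, then calls mbox_send_message() on
 * a channel requested from this controller. The ctx is handed back via
 * mbox_chan_received_data() when the response arrives.
 *
 *	struct brcm_message mssg = { .type = BRCM_MESSAGE_SPU };
 *	mssg.spu.src = src_sg;	// request scatterlist, DMA-able memory
 *	mssg.spu.dst = dst_sg;	// response scatterlist
 *	mssg.ctx = my_request;	// returned with the reply
 *	mbox_send_message(chan, &mssg);
 */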
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure: %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
	     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
	     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	/*
	 * ntxpost/nrxpost double as the index wrap mask (ring size is a
	 * power of 2) and as the max number of postable descriptors, keeping
	 * one slot unused so a full ring is distinguishable from an empty one.
	 */
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
 * header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * register the interrupt handler, which defers response handling to the rx
 * tasklet, outside of interrupt context.
 * @pdcs:   PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return:  PDC_SUCCESS
 *          <0 if the irq request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. PDC uses the transmit
 * complete interrupt to determine when a mailbox message has successfully been
 * transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
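	/*
	 * With txdone_poll set, the mailbox framework decides that a message
	 * has been sent by polling pdc_last_tx_done() every txpoll_period
	 * milliseconds, so new submissions are throttled by ring space
	 * rather than by a tx-done interrupt.
	 */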
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = devm_mbox_controller_register(dev, mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *match;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
	if (match != NULL) {
		hw_type = match->data;
		pdcs->hw_type = *hw_type;
	}

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");