/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */
/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver defers
 * most interrupt handling to a tasklet so that response messages are handled
 * outside of interrupt context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)
/* descriptor bumping macros */
#define XXD(x, max_mask)	((x) & (max_mask))
#define TXD(x, max_mask)	XXD((x), (max_mask))
#define RXD(x, max_mask)	XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)	TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)	TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)	RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)	RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)	TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)	RXD((t) - (h), (max_mask))
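/*
 * Worked example (illustrative only, not from the original source): with
 * PDC_RING_ENTRIES = 512, the post masks are ntxpost = nrxpost = 511 and
 * indexes wrap modulo 512:
 *
 *	NEXTTXD(511, 511)        == 0
 *	PREVTXD(0, 511)          == 511
 *	NTXDACTIVE(510, 2, 511)  == 4	// descriptors 510, 511, 0, 1 in use
 *
 * Because one ring entry is always left unused, head == tail unambiguously
 * means the ring is empty.
 */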
/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8
/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0
/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET   0x100
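/*
 * Worked example (illustrative only): PDC_LAZY_INT packs the lazy timeout
 * into bits 23:0 and the frame count into bits 31:24 of the rcvlazy
 * register:
 *
 *	PDC_LAZY_INT == 10000 | (1 << 24) == 0x01002710
 *
 * i.e. raise an interrupt after 1 received frame or after the 10000-unit
 * lazy timeout expires, whichever comes first.
 */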
/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before each rx frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

/* For SPU-M, a 32-byte DMA status header precedes each rx frame */
#define PDC_SPUM_RESP_HDR_LEN  32
/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE		0x1
/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE		0x1
#define CRYPTO_D64_RS0_CD_MASK	((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)

/* descriptor flags */
#define D64_CTRL1_EOT	BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC	BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF	BIT(30)	/* end of frame */
#define D64_CTRL1_SOF	BIT(31)	/* start of frame */
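/*
 * Illustrative example (not from the original source): a request frame that
 * occupies three descriptors carries SOF on the first descriptor and
 * EOF | IOC on the last, with EOT additionally set on whichever descriptor
 * happens to be the final ring entry:
 *
 *	desc[i]   : ctrl1 = D64_CTRL1_SOF
 *	desc[i+1] : ctrl1 = 0
 *	desc[i+2] : ctrl1 = D64_CTRL1_EOF | D64_CTRL1_IOC
 */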
/* Fields in the rx status word at the start of each frame's metadata */
#define RX_STATUS_OVERFLOW	0x00800000
#define RX_STATUS_LEN		0x0000FFFF

#define PDC_TXREGS_OFFSET	0x200
#define PDC_RXREGS_OFFSET	0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX		16384
enum pdc_hw {
	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};
struct pdc_dma_map {
	void *ctx;	/* opaque context associated with frame */
};
/* dma descriptor */
struct dma64dd {
	u32 ctrl1;	/* misc control bits */
	u32 ctrl2;	/* buffer count and address extension */
	u32 addrlow;	/* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;	/* memory address of the data buffer, bits 63:32 */
};
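/*
 * Example (illustrative only): a 64-bit DMA address 0x0000001234567890 is
 * split across the descriptor address words as
 *
 *	addrlow  = cpu_to_le32(0x34567890);	// lower_32_bits()
 *	addrhigh = cpu_to_le32(0x00000012);	// upper_32_bits()
 */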
/* dma registers per channel (xmt or rcv) */
struct dma64_regs {
	u32 control;	/* enable, et al */
	u32 ptr;	/* last descriptor posted to chip */
	u32 addrlow;	/* descriptor ring base address low 32-bits */
	u32 addrhigh;	/* descriptor ring base address bits 63:32 */
	u32 status0;	/* last rx descriptor written by hw */
	u32 status1;	/* driver does not use */
};
/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)	pad ## line
#define _XSTR(line)	_PADLINE(line)
#define PAD		_XSTR(__LINE__)
#endif  /* PAD */
/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;	/* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;	/* dma rx */
	u32 PAD[2];
};
/* PDC registers */
struct pdc_regs {
	u32 devcontrol;			/* 0x000 */
	u32 devstatus;			/* 0x004 */
	u32 PAD;
	u32 biststatus;			/* 0x00c */
	u32 PAD[4];
	u32 intstatus;			/* 0x020 */
	u32 intmask;			/* 0x024 */
	u32 gptimer;			/* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;		/* 0x030 (Only in PDC, not FA2) */
	u32 intrcvlazy_1;		/* 0x034 (Only in PDC, not FA2) */
	u32 intrcvlazy_2;		/* 0x038 (Only in PDC, not FA2) */
	u32 intrcvlazy_3;		/* 0x03c (Only in PDC, not FA2) */

	u32 PAD[48];
	u32 fa_intrecvlazy;		/* 0x100 (Only in FA2, not PDC) */
	u32 flowctlthresh;		/* 0x104 */
	u32 wrrthresh;			/* 0x108 */
	u32 gmac_idle_cnt_thresh;	/* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;		/* 0x120 */
	u32 ifioaccessbyte;		/* 0x124 */
	u32 ifioaccessdata;		/* 0x128 */

	u32 PAD[21];
	u32 phyaccess;			/* 0x180 */
	u32 PAD;
	u32 phycontrol;			/* 0x188 */
	u32 txqctl;			/* 0x18c */
	u32 rxqctl;			/* 0x190 */
	u32 gpioselect;			/* 0x194 */
	u32 gpio_output_en;		/* 0x198 */
	u32 PAD;			/* 0x19c */
	u32 txq_rxq_mem_ctl;		/* 0x1a0 */
	u32 memory_ecc_status;		/* 0x1a4 */
	u32 serdes_ctl;			/* 0x1a8 */
	u32 serdes_status0;		/* 0x1ac */
	u32 serdes_status1;		/* 0x1b0 */
	u32 PAD[11];			/* 0x1b4-1dc */
	u32 clk_ctl_st;			/* 0x1e0 */
	u32 hw_war;			/* 0x1e4 (Only in PDC, not FA2) */
	u32 pwrctl;			/* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS	4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];	/* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};
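/*
 * Layout check (illustrative only): each struct dma64 is 0x40 bytes, with
 * dmarcv sitting 0x20 bytes past dmaxmt, so for ringset 0 the engine
 * registers land at
 *
 *	&regs->dmaregs[0].dmaxmt  ->  0x200  (== PDC_TXREGS_OFFSET)
 *	&regs->dmaregs[0].dmarcv  ->  0x220  (== PDC_RXREGS_OFFSET)
 *
 * matching how pdc_hw_init() computes txregs_64 and rxregs_64 below.
 */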
/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t  dmabase;	/* DMA address of start of ring */
	void	   *vbase;	/* base kernel virtual address of ring */
	u32	    size;	/* ring allocation size in bytes */
};
/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           descriptor index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32  rxin_numd;
	u8  *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};
/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;		/* start of PDC registers */

	struct dma64_regs *txregs_64;	/* dma tx engine registers */
	struct dma64_regs *rxregs_64;	/* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd *txd_64;		/* tx descriptor ring */
	struct dma64dd *rxd_64;		/* rx descriptor ring */

	/* descriptor ring sizes */
	u32 ntxd;	/* # tx descriptors */
	u32 nrxd;	/* # rx descriptors */
	u32 nrxpost;	/* # rx buffers to keep posted */
	u32 ntxpost;	/* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32 txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32 tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32 txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32 txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32 rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32 rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32 last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32 rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;	/* debug FS stats file for this PDC */

	/* counters */
	u32 pdc_requests;	/* number of request messages submitted */
	u32 pdc_replies;	/* number of reply messages received */
	u32 last_tx_not_done;	/* too few tx descriptors to indicate done */
	u32 tx_ring_full;	/* unable to accept msg because tx ring full */
	u32 rx_ring_full;	/* unable to accept msg because rx ring full */
	u32 txnobuf;		/* unable to create tx descriptor */
	u32 rxnobuf;		/* unable to create rx descriptor */
	u32 rx_oflow;		/* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};
/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;
static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}
static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};
/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}
/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}
/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}
/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:      PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors was processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;		/* virtual addr of start of resp message DMA header */
	u32 frags_rdy;		/* number of fragments ready to read */
	u32 rx_idx;		/* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);
	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}
/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called within the hard IRQ.
 * Return: 0
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32(&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}
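/*
 * Worked example (illustrative only): CRYPTO_D64_RS0_CD_MASK is
 * (512 * 16) - 1 = 0x1fff, the byte span of the ring. If the hw reports
 * status0 = 0x1230, then
 *
 *	last_rx_curr = (0x1230 & 0x1fff) / 16 = 291
 *
 * so descriptors up to index 290 have been written by the hw.
 */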
/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:      PDC state for the SPU that will process this request
 * @sg:        Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}
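/*
 * Worked example (illustrative only): a request scatterlist with two
 * entries of 4096 and 2048 bytes (both under PDC_DMA_BUF_MAX) produces two
 * tx descriptors:
 *
 *	desc 0: 4096 bytes, flags = SOF
 *	desc 1: 2048 bytes, flags = EOF | IOC
 *
 * and txin_numd[tx_msg_start] is incremented by 2. An entry larger than
 * PDC_DMA_BUF_MAX would first be split into 16384-byte chunks.
 */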
/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}
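/*
 * Example (illustrative only): each descriptor is 16 bytes, so the hw ptr
 * registers take byte offsets rather than indexes. Writing (txout << 4)
 * with txout == 5 posts byte offset 0x50, telling the DMA engine that
 * descriptors 0 through 4 are ready to process.
 */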
/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:   PDC state for SPU handling request
 * @dst_sg: scatterlist providing rx buffers for response to be returned to
 *          mailbox client
 * @ctx:    Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}
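/*
 * Illustrative rx ring contents for one message (a sketch, assuming SPU-M
 * with a BCM header, so pdc_resp_hdr_len = 32 + 8 = 40 bytes):
 *
 *	desc n   : 40-byte metadata buffer, flags = SOF  (pdc_rx_list_init)
 *	desc n+1 : first client rx buffer                (pdc_rx_list_sg_add)
 *	desc n+2 : ... remaining client rx buffers ...
 *
 * rxin_numd[n] ends up as 1 plus the number of descriptors added by
 * pdc_rx_list_sg_add().
 */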
/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:      PDC state for the SPU that will process this request
 * @sg:        Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}
/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:      Interrupt number that has fired
 * @data:     device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here, then schedule the
 * rx tasklet to do the deferred processing outside of interrupt context.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Wakeup the deferred processing */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}
/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @data: PDC state structure
 */
static void pdc_tasklet_cb(unsigned long data)
{
	struct pdc_state *pdcs = (struct pdc_state *)data;

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}
/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:    PDC instance state
 * @ringset: index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}
static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}
/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:    Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
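/*
 * Worked example (illustrative only): for a 3-entry scatterlist with
 * lengths 16384, 20000 and 512 bytes, pdc_desc_count() returns
 * (16384/16384 + 1) + (20000/16384 + 1) + (512/16384 + 1) = 2 + 2 + 1 = 5,
 * a conservative overestimate (the first entry really needs only one
 * descriptor).
 */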
/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:   PDC state
 * @tx_cnt: The number of descriptors required in the tx ring
 * @rx_cnt: The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}
/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}
/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *              corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *              brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *			support
 *         < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}
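/*
 * Illustrative client-side sketch (not part of this driver; "client",
 * "src_sg", "dst_sg" and "my_req" are hypothetical names). A mailbox
 * client submits a SPU request through this controller roughly as follows:
 *
 *	struct brcm_message mssg;
 *	struct mbox_chan *chan = mbox_request_channel(&client, 0);
 *
 *	memset(&mssg, 0, sizeof(mssg));
 *	mssg.type = BRCM_MESSAGE_SPU;
 *	mssg.spu.src = src_sg;	// request frame; PDC does the DMA mapping
 *	mssg.spu.dst = dst_sg;	// response buffers; PDC does the DMA mapping
 *	mssg.ctx = my_req;	// returned via mbox_chan_received_data()
 *	err = mbox_send_message(chan, &mssg);
 *
 * The response is delivered asynchronously through the client's rx_callback
 * with mssg.ctx restored, once pdc_receive_one() finds the completed frame.
 */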
static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}
/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure:                   %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs    %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}
/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}
/**
 * pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
 * header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return:  PDC_SUCCESS
 *          -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}
/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * register the IRQ handler. Heavy lifting is deferred to a tasklet, outside of
 * interrupt context.
 * @pdcs:   PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return:  PDC_SUCCESS
 *          <0 if the irq request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}
static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};
/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. The PDC driver polls
 * for tx done (txdone_poll) via pdc_last_tx_done() to determine when another
 * mailbox message can be submitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = mbox_controller_register(mbc);
	if (err) {
		dev_err(dev,
			"Failed to register PDC mailbox controller. Error %d.",
			err);
		return err;
	}
	return 0;
}
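/*
 * Note (illustrative only): with txdone_poll = true and txpoll_period = 1,
 * the mailbox framework calls pdc_last_tx_done() roughly every millisecond
 * after mbox_send_message() until it returns true, at which point the
 * framework may submit the next queued message on the channel.
 */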
/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *match;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
	if (match != NULL) {
		hw_type = match->data;
		pdcs->hw_type = *hw_type;
	}

	return 0;
}
/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}
static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	mbox_controller_unregister(&pdcs->mbc);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}
static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");