/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */
/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver uses
 * threaded IRQs so that response messages are handled outside of interrupt
 * context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */
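
/*
 * Client usage sketch (illustrative only, not part of this driver): a
 * mailbox client obtains a channel and submits a brcm_message whose
 * src/dst scatterlists hold the SPU request and response buffers. The
 * channel index, client setup, and error handling below are assumptions.
 *
 *	struct mbox_chan *chan = mbox_request_channel(&client, 0);
 *	struct brcm_message msg = { 0 };
 *
 *	msg.type = BRCM_MESSAGE_SPU;
 *	msg.spu.src = src_sg;    // DMA mapped by pdc_send_data()
 *	msg.spu.dst = dst_sg;
 *	msg.ctx = my_request;    // returned in the reply via mssg->ctx
 *	if (mbox_send_message(chan, &msg) < 0)
 *		// handle error
 */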
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  128
#define PDC_RING_SIZE     (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask)              ((x) & (max_mask))
#define TXD(x, max_mask)              XXD((x), (max_mask))
#define RXD(x, max_mask)              XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)          TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)          TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)          RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)          RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))
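
/*
 * The ring size is a power of 2, so masking with (ring size - 1) performs
 * the modular arithmetic for ring indexes. For example, with 128 entries
 * and max_mask = 127, NEXTTXD(127, 127) wraps to 0, and NTXDACTIVE relies
 * on unsigned subtraction so that (t - h) & 127 counts in-flight
 * descriptors correctly even after the tail index wraps past the head.
 */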

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_XMTINT_0         (24 + PDC_RINGSET)
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_XMTINTEN_0       BIT(PDC_XMTINT_0)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_XMTINTEN_0 | PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
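/*
 * In the intrcvlazy register, the low 24 bits hold the timeout and the top
 * 8 bits hold the frame count, so PDC_LAZY_INT requests an interrupt after
 * 1 received frame or after the timeout elapses, whichever comes first.
 * The unit of the timeout field is a hardware detail not spelled out here.
 */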
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32

/*
 * Sets the following bits for write to transmit control reg:
 * 0     - XmtEn - enable activity on the tx channel
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0801
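
/*
 * As a cross-check of the value above:
 * BIT(0) | BIT(11) | (3 << 18) = 0x1 | 0x800 | 0xC0000 = 0x000C0801.
 */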

/*
 * Sets the following bits for write to receive control reg:
 * 0     - RcvEn - enable activity on the rx channel
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E01
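
/*
 * Cross-check: BIT(0) | BIT(9) | BIT(10) | BIT(11) | (3 << 18) =
 * 0x000C0E01. The RcvOffset field (bits 7:1) is zero here; pdc_hw_init()
 * ORs in (rx_status_len << 1) when writing the register.
 */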

#define CRYPTO_D64_RS0_CD_MASK   ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
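/*
 * The rx status0 register reports the current descriptor pointer as a byte
 * offset into the ring. pdc_receive() masks it with CRYPTO_D64_RS0_CD_MASK
 * and divides by RING_ENTRY_SIZE to recover the ring index of the last
 * descriptor written by the hardware.
 */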

/* descriptor flags */
#define D64_CTRL1_EOT   BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC   BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF   BIT(30)	/* end of frame */
#define D64_CTRL1_SOF   BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW       0x00800000
#define RX_STATUS_LEN            0x0000FFFF

#define PDC_TXREGS_OFFSET  0x200
#define PDC_RXREGS_OFFSET  0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

struct pdc_dma_map {
	void *ctx;          /* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;      /* misc control bits */
	u32 ctrl2;      /* buffer count and address extension */
	u32 addrlow;    /* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;   /* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32  control;   /* enable, et al */
	u32  ptr;       /* last descriptor posted to chip */
	u32  addrlow;   /* descriptor ring base address low 32-bits */
	u32  addrhigh;  /* descriptor ring base address bits 63:32 */
	u32  status0;   /* last rx descriptor written by hw */
	u32  status1;   /* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */
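
/*
 * PAD expands to a field name that is unique per source line; for example,
 * a "u32 PAD;" on line 198 becomes "u32 pad198;". The two-step expansion
 * through _XSTR is needed so that __LINE__ is substituted before the
 * ## concatenation takes place.
 */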

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;  /* dma tx */
	u32          PAD[2];
	struct dma64_regs dmarcv;  /* dma rx */
	u32          PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32  devcontrol;             /* 0x000 */
	u32  devstatus;              /* 0x004 */
	u32  PAD;
	u32  biststatus;             /* 0x00c */
	u32  PAD[4];
	u32  intstatus;              /* 0x020 */
	u32  intmask;                /* 0x024 */
	u32  gptimer;                /* 0x028 */

	u32  PAD;
	u32  intrcvlazy_0;           /* 0x030 */
	u32  intrcvlazy_1;           /* 0x034 */
	u32  intrcvlazy_2;           /* 0x038 */
	u32  intrcvlazy_3;           /* 0x03c */

	u32  PAD[48];
	u32  removed_intrecvlazy;    /* 0x100 */
	u32  flowctlthresh;          /* 0x104 */
	u32  wrrthresh;              /* 0x108 */
	u32  gmac_idle_cnt_thresh;   /* 0x10c */

	u32  PAD[4];
	u32  ifioaccessaddr;         /* 0x120 */
	u32  ifioaccessbyte;         /* 0x124 */
	u32  ifioaccessdata;         /* 0x128 */

	u32  PAD[21];
	u32  phyaccess;              /* 0x180 */
	u32  PAD;
	u32  phycontrol;             /* 0x188 */
	u32  txqctl;                 /* 0x18c */
	u32  rxqctl;                 /* 0x190 */
	u32  gpioselect;             /* 0x194 */
	u32  gpio_output_en;         /* 0x198 */
	u32  PAD;                    /* 0x19c */
	u32  txq_rxq_mem_ctl;        /* 0x1a0 */
	u32  memory_ecc_status;      /* 0x1a4 */
	u32  serdes_ctl;             /* 0x1a8 */
	u32  serdes_status0;         /* 0x1ac */
	u32  serdes_status1;         /* 0x1b0 */
	u32  PAD[11];                /* 0x1b4-1dc */
	u32  clk_ctl_st;             /* 0x1e0 */
	u32  hw_war;                 /* 0x1e4 */
	u32  pwrctl;                 /* 0x1e8 */
	u32  PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];  /* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t  dmabase;        /* DMA address of start of ring */
	void	   *vbase;          /* base kernel virtual address of ring */
	u32	    size;           /* ring allocation size in bytes */
};

/* PDC state structure */
struct pdc_state {
	/* synchronize access to this PDC state structure */
	spinlock_t pdc_lock;

	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/*
	 * Last interrupt status read from PDC device. Saved in interrupt
	 * handler so the handler can clear the interrupt in the device,
	 * and the interrupt thread called later can know which interrupt
	 * bits are active.
	 */
	unsigned long intstatus;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;    /* start of PDC registers */

	struct dma64_regs *txregs_64; /* dma tx engine registers */
	struct dma64_regs *rxregs_64; /* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd   *txd_64;  /* tx descriptor ring */
	struct dma64dd   *rxd_64;  /* rx descriptor ring */

	/* descriptor ring sizes */
	u32      ntxd;       /* # tx descriptors */
	u32      nrxd;       /* # rx descriptors */
	u32      nrxpost;    /* # rx buffers to keep posted */
	u32      ntxpost;    /* max number of tx buffers that can be posted */
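
	/*
	 * Since the ring size is a power of 2, ntxpost and nrxpost
	 * (ring size - 1) also serve as the index masks passed to the
	 * NEXTTXD()/NEXTRXD() and NTXDACTIVE()/NRXDACTIVE() macros.
	 */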

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32  txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32  tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32  txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32  txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32  rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32  rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32  last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32  rxout;

	/*
	 * opaque context associated with frame that starts at each
	 * rx ring index.
	 */
	void *rxp_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];
	struct scatterlist *dst_sg[PDC_RING_ENTRIES];

	/*
	 * Number of rx descriptors associated with the message that starts
	 * at this descriptor index. Not set for every index. For example,
	 * if descriptor index i points to a scatterlist with 4 entries, then
	 * the next three descriptor indexes don't have a value set.
	 */
	u32  rxin_numd[PDC_RING_ENTRIES];

	void *resp_hdr[PDC_RING_ENTRIES];
	dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;  /* debug FS stats file for this PDC */

	/* counters */
	u32  pdc_requests;    /* number of request messages submitted */
	u32  pdc_replies;     /* number of reply messages received */
	u32  txnobuf;         /* count of tx ring full */
	u32  rxnobuf;         /* count of rx ring full */
	u32  rx_oflow;        /* count of rx overflows */
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests............%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...........%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx err ring full........%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx err ring full........%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow........%u\n",
			       pdcs->rx_oflow);

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	if (debugfs_dir && simple_empty(debugfs_dir)) {
		debugfs_remove_recursive(debugfs_dir);
		debugfs_dir = NULL;
	}
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	iowrite32(lower_32_bits(dma_addr),
		  (void *)&pdcs->rxd_64[pdcs->rxout].addrlow);
	iowrite32(upper_32_bits(dma_addr),
		  (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh);
	iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1);
	iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2);
	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	iowrite32(lower_32_bits(dma_addr),
		  (void *)&pdcs->txd_64[pdcs->txout].addrlow);
	iowrite32(upper_32_bits(dma_addr),
		  (void *)&pdcs->txd_64[pdcs->txout].addrhigh);
	iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1);
	iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive() - Receive a response message from a given SPU.
 * @pdcs:  PDC state for the SPU to receive from
 * @mssg:  mailbox message to be returned to client. This function sets the
 *	   context pointer on the message to help the client associate the
 *	   response with a request.
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return: PDC_SUCCESS if one or more receive descriptors was processed
 *         -EAGAIN indicates that no response message is available
 *         -EIO an error occurred
 */
static int
pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
{
	struct device *dev = &pdcs->pdev->dev;
	u32 len, rx_status;
	u32 num_frags;
	int i;
	u8 *resp_hdr;    /* virtual addr of start of resp message DMA header */
	u32 frags_rdy;   /* number of fragments ready to read */
	u32 rx_idx;      /* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;

	spin_lock(&pdcs->pdc_lock);

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
		/* See if the hw has written more fragments than we know */
		pdcs->last_rx_curr =
		    (ioread32((void *)&pdcs->rxregs_64->status0) &
		     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
		frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
				       pdcs->nrxpost);
		if ((frags_rdy == 0) ||
		    (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
			/* No response ready */
			spin_unlock(&pdcs->pdc_lock);
			return -EAGAIN;
		}
		/* can't read descriptors/data until write index is read */
		rmb();
	}

	num_frags = pdcs->txin_numd[pdcs->txin];
	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	for (i = 0; i < num_frags; i++)
		pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost);

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	num_frags = pdcs->rxin_numd[rx_idx];
	/* Return opaque context with result */
	mssg->ctx = pdcs->rxp_ctx[rx_idx];
	pdcs->rxp_ctx[rx_idx] = NULL;
	resp_hdr = pdcs->resp_hdr[rx_idx];
	resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
	dma_unmap_sg(dev, pdcs->dst_sg[rx_idx],
		     sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE);

	for (i = 0; i < num_frags; i++)
		pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost);

	spin_unlock(&pdcs->pdc_lock);

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	pdcs->pdc_replies++;
	/* if we read one or more rx descriptors, claim success */
	if (num_frags > 0)
		return PDC_SUCCESS;
	else
		return -EIO;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
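	/*
	 * The ptr registers take byte offsets into the ring; each dma64dd
	 * descriptor is 16 bytes, hence the shift by 4 below.
	 */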
	iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *	     mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (!vaddr)
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rxin_numd[pdcs->rx_msg_start] = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	pdcs->rxp_ctx[pdcs->rxout] = ctx;
	pdcs->dst_sg[pdcs->rxout] = dst_sg;
	pdcs->resp_hdr[pdcs->rxout] = vaddr;
	pdcs->resp_hdr_daddr[pdcs->rxout] = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:  PDC state for the SPU the buffers are for
 * @sg:    Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:      Interrupt number that has fired
 * @cookie:   PDC state for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. So cache the
 * status for later use in the thread function. Other than that, just return
 * WAKE_THREAD to invoke the thread function.
 *
 * Return: IRQ_WAKE_THREAD if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *cookie)
{
	struct pdc_state *pdcs = cookie;
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (intstatus & PDC_XMTINTEN_0)
		set_bit(PDC_XMTINT_0, &pdcs->intstatus);
	if (intstatus & PDC_RCVINTEN_0)
		set_bit(PDC_RCVINT_0, &pdcs->intstatus);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Wakeup IRQ thread */
	if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
		return IRQ_WAKE_THREAD;

	return IRQ_NONE;
}

/**
 * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
 * completed or data is available to receive.
 * @irq:     Interrupt number
 * @cookie:  PDC state for PDC that generated the interrupt
 *
 * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
 * as many SPU response messages as are available and send each to the mailbox
 * client.
 *
 * Return: IRQ_HANDLED if we recognized and handled the interrupt
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_thread(int irq, void *cookie)
{
	struct pdc_state *pdcs = cookie;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	bool tx_int;
	bool rx_int;
	int rx_status;
	struct brcm_message mssg;

	tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus);
	rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);

	if (pdcs && (tx_int || rx_int)) {
		dev_dbg(&pdcs->pdev->dev,
			"%s() got irq %d with tx_int %s, rx_int %s",
			__func__, irq,
			tx_int ? "set" : "clear", rx_int ? "set" : "clear");

		mbc = &pdcs->mbc;
		chan = &mbc->chans[0];

		if (tx_int) {
			dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__);
			/* only one frame in flight at a time */
			mbox_chan_txdone(chan, PDC_SUCCESS);
		}
		if (rx_int) {
			while (1) {
				/* Could be many frames ready */
				memset(&mssg, 0, sizeof(mssg));
				mssg.type = BRCM_MESSAGE_SPU;
				rx_status = pdc_receive(pdcs, &mssg);
				if (rx_status >= 0) {
					dev_dbg(&pdcs->pdev->dev,
						"%s(): invoking client rx cb",
						__func__);
					mbox_chan_received_data(chan, &mssg);
				} else {
					dev_dbg(&pdcs->pdev->dev,
						"%s(): no SPU response available",
						__func__);
					break;
				}
			}
		}
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:     PDC instance state
 * @ringset:  index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (!tx.vbase) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (!rx.vbase) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	/* lock after ring allocation to avoid scheduling while atomic */
	spin_lock(&pdcs->pdc_lock);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  (void *)&dma_reg->dmarcv.addrhigh);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  (void *)&pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT,
				  (void *)&pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  (void *)&pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  (void *)&pdcs->rxd_64[i].ctrl1);
		}
	}
	spin_unlock(&pdcs->pdc_lock);
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *              corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *              brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *			support
 *	   < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;

	if (mssg->type != BRCM_MESSAGE_SPU)
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (src_nent) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (nent == 0)
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (dst_nent) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (nent == 0) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	spin_lock(&pdcs->pdc_lock);

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	spin_unlock(&pdcs->pdc_lock);

	if (err)
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure:                   %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs    %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (void *)(((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (void *)(((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	pdcs->regs->intmask = 0;

	dma_reg = &pdcs->regs->dmaregs[ringset];
	iowrite32(0, (void *)&dma_reg->dmaxmt.ptr);
	iowrite32(0, (void *)&dma_reg->dmarcv.ptr);

	iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  (void *)&dma_reg->dmarcv.control);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * specify a threaded IRQ handler for deferred handling of interrupts outside
 * of interrupt context.
 * @pdcs:   PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return: PDC_SUCCESS
 *         <0 if threaded irq request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	pdcs->intstatus = 0;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
	iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);
	err = devm_request_threaded_irq(dev, pdcs->pdc_irq,
					pdc_irq_handler,
					pdc_irq_thread, 0, dev_name(dev), pdcs);
	if (err) {
		dev_err(dev, "threaded tx IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. PDC uses the transmit
 * complete interrupt to determine when a mailbox message has successfully been
 * transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = true;
	mbc->txdone_poll = false;
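	/*
	 * txdone_irq tells the mailbox core that this controller reports
	 * transmit completion itself; pdc_irq_thread() does so by calling
	 * mbox_chan_txdone() when the tx-done interrupt fires, so the core
	 * never needs to poll.
	 */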
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = mbox_controller_register(mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	return 0;
}

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	spin_lock_init(&pdcs->pdc_lock);
	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	mbox_controller_unregister(&pdcs->mbc);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox"},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");