/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same Broadcom SoC specific ring manager, so the ring
 * manager driver is implemented as a mailbox controller driver and the
 * offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager will implement a larger
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but all offload operations supported by SBA can be broken down into
 * multiple small-size requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a single mailbox channel provided by the Broadcom SoC specific
 * ring manager driver. To have more SBA DMA channels, we can create more
 * SBA device nodes in the Broadcom SoC specific DTS based on the number
 * of hardware rings supported by the Broadcom SoC ring manager.
 */
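
/*
 * Illustration only (not part of the original sources): one SBA DMA
 * channel is described by a DT node whose "compatible" string and
 * "mboxes" property are the two things sba_probe() actually parses.
 * The mailbox phandle name and cell values below are assumptions that
 * depend on the Broadcom ring manager mailbox binding:
 *
 *	raid0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 0 0x1 0xffff>;
 *	};
 */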

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>

#include "dmaengine.h"

/* ====== Driver macros and defines ===== */

#define SBA_TYPE_SHIFT 48
#define SBA_TYPE_MASK GENMASK(1, 0)
#define SBA_TYPE_A 0x0
#define SBA_TYPE_B 0x2
#define SBA_TYPE_C 0x3
#define SBA_USER_DEF_SHIFT 32
#define SBA_USER_DEF_MASK GENMASK(15, 0)
#define SBA_R_MDATA_SHIFT 24
#define SBA_R_MDATA_MASK GENMASK(7, 0)
#define SBA_C_MDATA_MS_SHIFT 18
#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
#define SBA_INT_SHIFT 17
#define SBA_INT_MASK BIT(0)
#define SBA_RESP_SHIFT 16
#define SBA_RESP_MASK BIT(0)
#define SBA_C_MDATA_SHIFT 8
#define SBA_C_MDATA_MASK GENMASK(7, 0)
#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
#define SBA_C_MDATA_DNUM_SHIFT 5
#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
#define SBA_CMD_SHIFT 0
#define SBA_CMD_MASK GENMASK(3, 0)
#define SBA_CMD_ZERO_BUFFER 0x4
#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
#define SBA_CMD_LOAD_BUFFER 0x9
#define SBA_CMD_XOR 0xa
#define SBA_CMD_GALOIS_XOR 0xb
#define SBA_CMD_WRITE_BUFFER 0xc
#define SBA_CMD_GALOIS 0xe
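
/*
 * Reading aid (derived purely from the shift/mask macros above, with
 * field widths inferred from the masks; not taken from hardware
 * documentation): a 64-bit SBA command word is laid out roughly as
 *
 *	[49:48] TYPE        [47:32] USER_DEF    [31:24] R_MDATA
 *	[19:18] C_MDATA_MS  [17]    INT         [16]    RESP
 *	[15:8]  C_MDATA     [3:0]   CMD
 */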

#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8

/* Driver helper macros */
#define to_sba_request(tx) \
	container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan) \
	container_of(dchan, struct sba_device, dma_chan)

/* ===== Driver data structures ===== */

enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};
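
/*
 * Descriptive comment added for readability: the request life cycle, as
 * implemented by the helpers below, is roughly
 *
 *	FREE --sba_alloc_request()--> ALLOCED --sba_tx_submit()--> PENDING
 *	PENDING --_sba_active_request()--> ACTIVE --completion--> FREE
 *	ACTIVE --terminate--> ABORTED --(freed on receive path)--> FREE
 */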

struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	u32 flags;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	atomic_t next_pending_count;
	/* BRCM message data */
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
	/* SBA commands */
	struct brcm_sba_command cmds[0];
};

enum sba_version {
	SBA_VER_1 = 0,
	SBA_VER_2
};

struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_cmd_per_req;
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and Mailbox channels */
	struct mbox_client client;
	struct mbox_chan *mchan;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
};

/* ====== Command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}

static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}

/* ====== General helper routines ===== */

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/*
		 * We have no more free requests, so peek the mailbox
		 * channel hoping a few active requests have completed,
		 * which would create room for new requests.
		 */
		mbox_client_peek_data(sba->mchan);
		return NULL;
	}

	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)
		sba->reqs_fence = true;
	return true;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted requests will eventually be
	 * freed by sba_receive_message().
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int ret = 0;

	/* Send message for the request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchan, &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Check error returned by mailbox controller */
	ret = req->msg.error;
	if (ret < 0) {
		dev_err(sba->dev, "message error %d", ret);
		return ret;
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(sba->mchan, ret);

	return ret;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count;
	struct sba_request *req;

	/* Process a few pending requests */
	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the first pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}

static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct dma_async_tx_descriptor *tx;
	struct sba_request *nreq, *first = req->first;

	/* Process only after all chained requests are received */
	if (!atomic_dec_return(&first->next_pending_count)) {
		tx = &first->tx;

		WARN_ON(tx->cookie < 0);
		if (tx->cookie > 0) {
			spin_lock_irqsave(&sba->reqs_lock, flags);
			dma_cookie_complete(tx);
			spin_unlock_irqrestore(&sba->reqs_lock, flags);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			dma_descriptor_unmap(tx);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		dma_run_dependencies(tx);

		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* Free all requests chained to first request */
		list_for_each_entry(nreq, &first->next, next)
			_sba_free_request(sba, nreq);
		INIT_LIST_HEAD(&first->next);

		/* Free the first request */
		_sba_free_request(sba, first);

		/* Process pending requests */
		_sba_process_pending_requests(sba);

		spin_unlock_irqrestore(&sba->reqs_lock, flags);
	}
}

static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free_count = 0, alloced_count = 0;
	u32 pending_count = 0, active_count = 0, aborted_count = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_for_each_entry(req, &sba->reqs_free_list, node)
		if (async_tx_test_ack(&req->tx))
			free_count++;

	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced_count++;

	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending_count++;

	list_for_each_entry(req, &sba->reqs_active_list, node)
		active_count++;

	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted_count++;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests = %d\n", sba->max_req);
	seq_printf(file, "free requests    = %d\n", free_count);
	seq_printf(file, "alloced requests = %d\n", alloced_count);
	seq_printf(file, "pending requests = %d\n", pending_count);
	seq_printf(file, "active requests  = %d\n", active_count);
	seq_printf(file, "aborted requests = %d\n", aborted_count);
}

/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can and re-use the pre-allocated channel
	 * resources next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
	unsigned long flags;
	struct sba_device *sba = to_sba_device(dchan);

	/* Process pending requests */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	mbox_client_peek_data(sba->mchan);

	return dma_cookie_status(dchan, cookie, txstate);
}

static void sba_fillup_interrupt_msg(struct sba_request *req,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load dummy data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Type-A command to write buf0 to dummy location */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_request *req = NULL;
	struct sba_device *sba = to_sba_device(dchan);

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/*
	 * Force fence so that no requests are submitted
	 * until the DMA callback for this request is invoked.
	 */
	req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request message */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}

static void sba_fillup_memcpy_msg(struct sba_request *req,
				  struct brcm_sba_command *cmds,
				  struct brcm_message *msg,
				  dma_addr_t msg_offset, size_t msg_len,
				  dma_addr_t dst, dma_addr_t src)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request message */
	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
			      off, len, dst, src);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}

static void sba_fillup_xor_msg(struct sba_request *req,
			       struct brcm_sba_command *cmds,
			       struct brcm_message *msg,
			       dma_addr_t msg_offset, size_t msg_len,
			       dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	u32 i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src[0] + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-B commands to xor data with buf0 and put it back in buf0 */
	for (i = 1; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
		     u32 src_cnt, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request message */
	sba_fillup_xor_msg(req, req->cmds, &req->msg,
			   off, len, dst, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
		 u32 src_cnt, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_xor_srcs))
		return NULL;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
					   req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}

static void sba_fillup_pq_msg(struct sba_request *req,
			      bool pq_continue,
			      struct brcm_sba_command *cmds,
			      struct brcm_message *msg,
			      dma_addr_t msg_offset, size_t msg_len,
			      dma_addr_t *dst_p, dma_addr_t *dst_q,
			      const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	u32 i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		if (dst_p) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(0);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_p + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}

		/* Type-B command to load old Q into buf1 */
		if (dst_q) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(1);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_q + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}
	} else {
		/* Type-A command to zero all buffers */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;
	}

	/* Type-B commands to generate P onto buf0 and Q onto buf1 */
	for (i = 0; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	if (dst_p) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf1 */
	if (dst_q) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request messages */
	sba_fillup_pq_msg(req, dmaf_continue(flags),
			  req->cmds, &req->msg,
			  off, len, dst_p, dst_q, scf, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static void sba_fillup_pq_single_msg(struct sba_request *req,
				     bool pq_continue,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg,
				     dma_addr_t msg_offset, size_t msg_len,
				     dma_addr_t *dst_p, dma_addr_t *dst_q,
				     dma_addr_t src, u8 scf)
{
	u64 cmd;
	u32 c_mdata;
	u8 pos, dpos = raid6_gflog[scf];
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (!dst_p)
		goto skip_p;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;

		/*
		 * Type-B commands to xor data with buf0 and put it
		 * back in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	} else {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_p + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_p:
	if (!dst_q)
		goto skip_q;

	/* Type-A command to zero all buffers */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	cmdsp++;

	if (dpos == 255)
		goto skip_q_computation;
	pos = (dpos < req->sba->max_pq_coefs) ?
		dpos : (req->sba->max_pq_coefs - 1);

	/*
	 * Type-B command to generate initial Q from data
	 * and store output into buf0
	 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	dpos -= pos;

	/* Multiple Type-A commands to generate final Q */
	while (dpos) {
		pos = (dpos < req->sba->max_pq_coefs) ?
			dpos : (req->sba->max_pq_coefs - 1);

		/*
		 * Type-A command to generate Q with buf0 and
		 * buf1 and store result in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;

		dpos -= pos;
	}

skip_q_computation:
	if (pq_continue) {
		/*
		 * Type-B command to XOR previous output with
		 * buf0 and write it into buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up request messages */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/* Figure out P and Q destination addresses */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}

/* ====== Mailbox callbacks ===== */

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Report error if message has error */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);
}

/* ====== Debugfs callbacks ====== */

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct sba_device *sba = dev_get_drvdata(file->private);

	/* Write stats in file */
	sba_write_stats_in_seqfile(sba, file);

	return 0;
}
1471 static int sba_prealloc_channel_resources(struct sba_device
*sba
)
1474 struct sba_request
*req
= NULL
;
1476 sba
->resp_base
= dma_alloc_coherent(sba
->mbox_dev
,
1477 sba
->max_resp_pool_size
,
1478 &sba
->resp_dma_base
, GFP_KERNEL
);
1479 if (!sba
->resp_base
)
1482 sba
->cmds_base
= dma_alloc_coherent(sba
->mbox_dev
,
1483 sba
->max_cmds_pool_size
,
1484 &sba
->cmds_dma_base
, GFP_KERNEL
);
1485 if (!sba
->cmds_base
) {
1487 goto fail_free_resp_pool
;
1490 spin_lock_init(&sba
->reqs_lock
);
1491 sba
->reqs_fence
= false;
1492 INIT_LIST_HEAD(&sba
->reqs_alloc_list
);
1493 INIT_LIST_HEAD(&sba
->reqs_pending_list
);
1494 INIT_LIST_HEAD(&sba
->reqs_active_list
);
1495 INIT_LIST_HEAD(&sba
->reqs_aborted_list
);
1496 INIT_LIST_HEAD(&sba
->reqs_free_list
);
1498 for (i
= 0; i
< sba
->max_req
; i
++) {
1499 req
= devm_kzalloc(sba
->dev
,
1500 struct_size(req
, cmds
, sba
->max_cmd_per_req
),
1504 goto fail_free_cmds_pool
;
1506 INIT_LIST_HEAD(&req
->node
);
1508 req
->flags
= SBA_REQUEST_STATE_FREE
;
1509 INIT_LIST_HEAD(&req
->next
);
1510 atomic_set(&req
->next_pending_count
, 0);
1511 for (j
= 0; j
< sba
->max_cmd_per_req
; j
++) {
1512 req
->cmds
[j
].cmd
= 0;
1513 req
->cmds
[j
].cmd_dma
= sba
->cmds_base
+
1514 (i
* sba
->max_cmd_per_req
+ j
) * sizeof(u64
);
1515 req
->cmds
[j
].cmd_dma_addr
= sba
->cmds_dma_base
+
1516 (i
* sba
->max_cmd_per_req
+ j
) * sizeof(u64
);
1517 req
->cmds
[j
].flags
= 0;
1519 memset(&req
->msg
, 0, sizeof(req
->msg
));
1520 dma_async_tx_descriptor_init(&req
->tx
, &sba
->dma_chan
);
1521 async_tx_ack(&req
->tx
);
1522 req
->tx
.tx_submit
= sba_tx_submit
;
1523 req
->tx
.phys
= sba
->resp_dma_base
+ i
* sba
->hw_resp_size
;
1524 list_add_tail(&req
->node
, &sba
->reqs_free_list
);
1529 fail_free_cmds_pool
:
1530 dma_free_coherent(sba
->mbox_dev
,
1531 sba
->max_cmds_pool_size
,
1532 sba
->cmds_base
, sba
->cmds_dma_base
);
1533 fail_free_resp_pool
:
1534 dma_free_coherent(sba
->mbox_dev
,
1535 sba
->max_resp_pool_size
,
1536 sba
->resp_base
, sba
->resp_dma_base
);

static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set the mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by the mailbox controller.
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}

static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We can support max_pq_srcs == max_pq_coefs because
		 * we are limited by the number of SBA commands that we
		 * can fit in one message for the underlying ring manager
		 * hardware.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan)) {
		ret = PTR_ERR(sba->mchan);
		goto fail_free_mchan;
	}

	/* Find out underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* Prealloc channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
				    sba_debugfs_stats_show);

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{ },
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");