/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same Broadcom SoC specific ring manager; because of this,
 * the ring manager driver is implemented as a mailbox controller driver
 * and the offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager implements a larger
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but all offload operations supported by SBA can be broken down into
 * multiple small-sized requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a set of mailbox channels provided by the Broadcom SoC specific
 * ring manager driver. To exploit parallelism (as described above), all
 * DMA requests arriving on the SBA RAID DMA channel are broken down into
 * smaller requests and submitted to multiple mailbox channels in a
 * round-robin fashion. To get more SBA DMA channels, we can create more
 * SBA device nodes in the Broadcom SoC specific DTS based on the number
 * of hardware rings supported by the Broadcom SoC ring manager.
 */
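
/*
 * Request flow implemented below (summary of the design described above):
 * a dmaengine prep callback allocates one or more chained sba_request
 * structures and encodes SBA commands into them; tx_submit() marks them
 * pending; issue_pending() pushes pending requests to the mailbox
 * channels in round-robin order; and the mailbox rx callback completes
 * the dmaengine descriptor once every chained sub-request has been
 * received back.
 */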
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>

#include "dmaengine.h"
/* ====== Driver macros and defines ===== */

#define SBA_TYPE_SHIFT				48
#define SBA_TYPE_MASK				GENMASK(1, 0)
#define SBA_TYPE_A				0x0
#define SBA_TYPE_B				0x2
#define SBA_TYPE_C				0x3
#define SBA_USER_DEF_SHIFT			32
#define SBA_USER_DEF_MASK			GENMASK(15, 0)
#define SBA_R_MDATA_SHIFT			24
#define SBA_R_MDATA_MASK			GENMASK(7, 0)
#define SBA_C_MDATA_MS_SHIFT			18
#define SBA_C_MDATA_MS_MASK			GENMASK(1, 0)
#define SBA_INT_SHIFT				17
#define SBA_INT_MASK				BIT(0)
#define SBA_RESP_SHIFT				16
#define SBA_RESP_MASK				BIT(0)
#define SBA_C_MDATA_SHIFT			8
#define SBA_C_MDATA_MASK			GENMASK(7, 0)
#define SBA_C_MDATA_BNUMx_SHIFT(__bnum)		(2 * (__bnum))
#define SBA_C_MDATA_BNUMx_MASK			GENMASK(1, 0)
#define SBA_C_MDATA_DNUM_SHIFT			5
#define SBA_C_MDATA_DNUM_MASK			GENMASK(4, 0)
#define SBA_C_MDATA_LS(__v)			((__v) & 0xff)
#define SBA_C_MDATA_MS(__v)			(((__v) >> 8) & 0x3)
#define SBA_CMD_SHIFT				0
#define SBA_CMD_MASK				GENMASK(3, 0)
#define SBA_CMD_ZERO_BUFFER			0x4
#define SBA_CMD_ZERO_ALL_BUFFERS		0x8
#define SBA_CMD_LOAD_BUFFER			0x9
#define SBA_CMD_XOR				0xa
#define SBA_CMD_GALOIS_XOR			0xb
#define SBA_CMD_WRITE_BUFFER			0xc
#define SBA_CMD_GALOIS				0xe

#define SBA_MAX_REQ_PER_MBOX_CHANNEL		8192
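
/*
 * Layout of a 64-bit SBA command word (as implied by the shift/mask
 * defines above): command type at bit 48, user-defined length field at
 * bit 32, R_MDATA at bit 24, C_MDATA MS bits at bit 18, interrupt and
 * response flags at bits 17 and 16, C_MDATA LS bits at bit 8, and the
 * command opcode at bit 0. Fields are packed with sba_cmd_enc() below.
 */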
/* Driver helper macros */
#define to_sba_request(tx)		\
	container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan)		\
	container_of(dchan, struct sba_device, dma_chan)
/* ===== Driver data structures ===== */

enum sba_request_flags {
	SBA_REQUEST_STATE_FREE		= 0x001,
	SBA_REQUEST_STATE_ALLOCED	= 0x002,
	SBA_REQUEST_STATE_PENDING	= 0x004,
	SBA_REQUEST_STATE_ACTIVE	= 0x008,
	SBA_REQUEST_STATE_ABORTED	= 0x010,
	SBA_REQUEST_STATE_MASK		= 0x0ff,
	SBA_REQUEST_FENCE		= 0x100,
};

enum sba_version {
	SBA_VER_1 = 0,
	SBA_VER_2,
};

struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	u32 flags;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	atomic_t next_pending_count;
	/* BRCM message data */
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
	/* SBA commands */
	struct brcm_sba_command cmds[0];
};

struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_cmd_per_req;
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and Mailbox channels */
	struct mbox_client client;
	int mchans_count;
	atomic_t mchans_current;
	struct mbox_chan **mchans;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	/* DebugFS directory entries */
	struct dentry *root;
	struct dentry *stats;
};
/* ====== Command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}
static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
/* ====== General helper routines ===== */

static void sba_peek_mchans(struct sba_device *sba)
{
	int mchan_idx;

	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
		mbox_client_peek_data(sba->mchans[mchan_idx]);
}
static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	bool found = false;
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	list_for_each_entry(req, &sba->reqs_free_list, node) {
		if (async_tx_test_ack(&req->tx)) {
			list_move_tail(&req->node, &sba->reqs_alloc_list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	if (!found) {
		/*
		 * We have no more free requests so, we peek
		 * mailbox channels hoping few active requests
		 * would have completed which will create more
		 * room for new requests.
		 */
		sba_peek_mchans(sba);
		return NULL;
	}

	req->flags = SBA_REQUEST_STATE_ALLOCED;
	req->first = req;
	INIT_LIST_HEAD(&req->next);
	atomic_set(&req->next_pending_count, 1);

	dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	async_tx_ack(&req->tx);

	return req;
}
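
/*
 * Request lifecycle implemented by the helpers below:
 * FREE -> ALLOCED -> PENDING -> ACTIVE -> FREE, with ACTIVE requests
 * moved to ABORTED (and freed later from the mailbox completion path)
 * when the channel is cleaned up. All transitions happen under
 * sba->reqs_lock.
 */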
/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->flags & SBA_REQUEST_FENCE)
		sba->reqs_fence = true;
	return true;
}
/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->flags &= ~SBA_REQUEST_STATE_MASK;
	req->flags |= SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}
static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	atomic_inc(&first->next_pending_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted request will be eventually
	 * freed by sba_receive_message()
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int mchans_idx, ret = 0;

	/* Select mailbox channel in round-robin fashion */
	mchans_idx = atomic_inc_return(&sba->mchans_current);
	mchans_idx = mchans_idx % sba->mchans_count;

	/* Send message for the request */
	ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}

	/* Check error returned by mailbox controller */
	ret = req->msg.error;
	if (ret < 0) {
		dev_err(sba->dev, "message error %d", ret);
		return ret;
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(sba->mchans[mchans_idx], ret);

	return ret;
}
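
/*
 * Note: the mailbox client is registered with knows_txdone = true (see
 * sba_probe()), so the mbox_client_txdone() call above is what releases
 * the mailbox channel for the next message; completion of the SBA work
 * itself is reported asynchronously via sba_receive_message().
 */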
/* Note: Must be called with sba->reqs_lock held */
static void _sba_process_pending_requests(struct sba_device *sba)
{
	int ret;
	u32 count;
	struct sba_request *req;

	/*
	 * Process few pending requests
	 *
	 * For now, we process (<number_of_mailbox_channels> * 8)
	 * number of requests at a time.
	 */
	count = sba->mchans_count * 8;
	while (!list_empty(&sba->reqs_pending_list) && count) {
		/* Get the first pending request */
		req = list_first_entry(&sba->reqs_pending_list,
				       struct sba_request, node);

		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		ret = sba_send_mbox_request(sba, req);
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}

		count--;
	}
}
static void sba_process_received_request(struct sba_device *sba,
					 struct sba_request *req)
{
	unsigned long flags;
	struct dma_async_tx_descriptor *tx;
	struct sba_request *nreq, *first = req->first;

	/* Process only after all chained requests are received */
	if (!atomic_dec_return(&first->next_pending_count)) {
		tx = &first->tx;

		WARN_ON(tx->cookie < 0);
		if (tx->cookie > 0) {
			dma_cookie_complete(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			dma_descriptor_unmap(tx);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		dma_run_dependencies(tx);

		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* Free all requests chained to first request */
		list_for_each_entry(nreq, &first->next, next)
			_sba_free_request(sba, nreq);
		INIT_LIST_HEAD(&first->next);

		/* Free the first request */
		_sba_free_request(sba, first);

		/* Process pending requests */
		_sba_process_pending_requests(sba);

		spin_unlock_irqrestore(&sba->reqs_lock, flags);
	}
}
static void sba_write_stats_in_seqfile(struct sba_device *sba,
				       struct seq_file *file)
{
	unsigned long flags;
	struct sba_request *req;
	u32 free_count = 0, alloced_count = 0;
	u32 pending_count = 0, active_count = 0, aborted_count = 0;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_for_each_entry(req, &sba->reqs_free_list, node)
		if (async_tx_test_ack(&req->tx))
			free_count++;

	list_for_each_entry(req, &sba->reqs_alloc_list, node)
		alloced_count++;

	list_for_each_entry(req, &sba->reqs_pending_list, node)
		pending_count++;

	list_for_each_entry(req, &sba->reqs_active_list, node)
		active_count++;

	list_for_each_entry(req, &sba->reqs_aborted_list, node)
		aborted_count++;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	seq_printf(file, "maximum requests = %d\n", sba->max_req);
	seq_printf(file, "free requests = %d\n", free_count);
	seq_printf(file, "alloced requests = %d\n", alloced_count);
	seq_printf(file, "pending requests = %d\n", pending_count);
	seq_printf(file, "active requests = %d\n", active_count);
	seq_printf(file, "aborted requests = %d\n", aborted_count);
}
/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-alloced so we just free-up
	 * whatever we can so that we can re-use pre-alloced
	 * channel resources next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
	unsigned long flags;
	struct sba_device *sba = to_sba_device(dchan);

	/* Process pending requests */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	_sba_process_pending_requests(sba);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	sba_peek_mchans(sba);

	return dma_cookie_status(dchan, cookie, txstate);
}
static void sba_fillup_interrupt_msg(struct sba_request *req,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load dummy data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Type-A command to write buf0 to dummy location */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_request *req = NULL;
	struct sba_device *sba = to_sba_device(dchan);

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/*
	 * Force fence so that no requests are submitted
	 * until DMA callback for this request is invoked.
	 */
	req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request message */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}
static void sba_fillup_memcpy_msg(struct sba_request *req,
				  struct brcm_sba_command *cmds,
				  struct brcm_message *msg,
				  dma_addr_t msg_offset, size_t msg_len,
				  dma_addr_t dst, dma_addr_t src)
{
	u64 cmd;
	u32 c_mdata;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request message */
	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
			      off, len, dst, src);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
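
/*
 * Worked example of the splitting above: with hw_buf_size of 4096, a
 * 10000-byte memcpy becomes three chained sub-requests of 4096, 4096
 * and 1808 bytes; only the first request's descriptor is returned to
 * the client, and its callback fires once all chained sub-requests
 * have completed (see sba_process_received_request()).
 */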
static void sba_fillup_xor_msg(struct sba_request *req,
			       struct brcm_sba_command *cmds,
			       struct brcm_message *msg,
			       dma_addr_t msg_offset, size_t msg_len,
			       dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src[0] + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-B commands to xor data with buf0 and put it back in buf0 */
	for (i = 1; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
		     u32 src_cnt, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request message */
	sba_fillup_xor_msg(req, req->cmds, &req->msg,
			   off, len, dst, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
		 u32 src_cnt, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_xor_srcs))
		return NULL;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
					   req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
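
/*
 * Note: max_xor_srcs is derived as max_cmd_per_req - 1 in sba_probe(),
 * since an XOR message needs one SBA command per source plus one final
 * write-buffer command.
 */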
static void sba_fillup_pq_msg(struct sba_request *req,
			      bool pq_continue,
			      struct brcm_sba_command *cmds,
			      struct brcm_message *msg,
			      dma_addr_t msg_offset, size_t msg_len,
			      dma_addr_t *dst_p, dma_addr_t *dst_q,
			      const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		if (dst_p) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(0);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_p + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}

		/* Type-B command to load old Q into buf1 */
		if (dst_q) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(1);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_q + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}
	} else {
		/* Type-A command to zero all buffers */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;
	}

	/* Type-B commands for generate P onto buf0 and Q onto buf1 */
	for (i = 0; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	if (dst_p) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf1 */
	if (dst_q) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request messages */
	sba_fillup_pq_msg(req, dmaf_continue(flags),
			  req->cmds, &req->msg,
			  off, len, dst_p, dst_q, scf, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static void sba_fillup_pq_single_msg(struct sba_request *req,
				     bool pq_continue,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg,
				     dma_addr_t msg_offset, size_t msg_len,
				     dma_addr_t *dst_p, dma_addr_t *dst_q,
				     dma_addr_t src, u8 scf)
{
	u64 cmd;
	u32 c_mdata;
	u8 pos, dpos = raid6_gflog[scf];
	dma_addr_t resp_dma = req->tx.phys;
	struct brcm_sba_command *cmdsp = cmds;

	if (!dst_p)
		goto skip_p;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;

		/*
		 * Type-B commands to xor data with buf0 and put it
		 * back in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	} else {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_p + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_p:
	if (!dst_q)
		goto skip_q;

	/* Type-A command to zero all buffers */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	cmdsp++;

	/* A zero coefficient contributes nothing to Q */
	if (!scf)
		goto skip_q_computation;
	pos = (dpos < req->sba->max_pq_coefs) ?
		dpos : (req->sba->max_pq_coefs - 1);

	/*
	 * Type-B command to generate initial Q from data
	 * and store output into buf0
	 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	dpos -= pos;

	/* Multiple Type-A command to generate final Q */
	while (dpos) {
		pos = (dpos < req->sba->max_pq_coefs) ?
			dpos : (req->sba->max_pq_coefs - 1);

		/*
		 * Type-A command to generate Q with buf0 and
		 * buf1 store result in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;

		dpos -= pos;
	}

skip_q_computation:
	if (pq_continue) {
		/*
		 * Type-B command to XOR previous output with
		 * buf0 and write it into buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fillup brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fillup request messages */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/* Figure-out P and Q destination addresses */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}
/* ====== Mailbox callbacks ===== */

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Error count if message has error */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);
}
/* ====== Debugfs callbacks ====== */

static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
{
	struct platform_device *pdev = to_platform_device(file->private);
	struct sba_device *sba = platform_get_drvdata(pdev);

	/* Write stats in file */
	sba_write_stats_in_seqfile(sba, file);

	return 0;
}
/* ====== Platform driver routines ===== */

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, ret = 0;
	struct sba_request *req = NULL;

	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   sizeof(*req) +
				   sba->max_cmd_per_req * sizeof(req->cmds[0]),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		async_tx_ack(&req->tx);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}
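
/*
 * Resource layout set up above: one coherent pool holds per-request
 * hardware response areas (hw_resp_size bytes each, addressed via
 * req->tx.phys) and a second pool holds max_cmd_per_req 64-bit command
 * words per request; both are allocated against the mailbox device
 * since that is the device performing the actual DMA.
 */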
static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}
static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by mailbox controller
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}
static int sba_probe(struct platform_device *pdev)
{
	int i, ret = 0, mchans_count;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of channels equals number of mailbox channels */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;
	mchans_count = ret;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived Configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We can support max_pq_srcs == max_pq_coefs because
		 * we are limited by number of SBA commands that we can
		 * fit in one message for underlying ring manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;

	/* Allocate mailbox channel array */
	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
				   sizeof(*sba->mchans), GFP_KERNEL);
	if (!sba->mchans)
		return -ENOMEM;

	/* Request mailbox channels */
	sba->mchans_count = 0;
	for (i = 0; i < mchans_count; i++) {
		sba->mchans[i] = mbox_request_channel(&sba->client, i);
		if (IS_ERR(sba->mchans[i])) {
			ret = PTR_ERR(sba->mchans[i]);
			goto fail_free_mchans;
		}
		sba->mchans_count++;
	}
	atomic_set(&sba->mchans_current, 0);

	/* Find-out underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchans;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchans;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* All mailbox channels should be of same ring manager device */
	for (i = 1; i < mchans_count; i++) {
		ret = of_parse_phandle_with_args(pdev->dev.of_node,
						 "mboxes", "#mbox-cells",
						 i, &args);
		if (ret)
			goto fail_free_mchans;
		mbox_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (sba->mbox_dev != &mbox_pdev->dev) {
			ret = -EINVAL;
			goto fail_free_mchans;
		}
	}

	/* Prealloc channel resource */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchans;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
	if (IS_ERR_OR_NULL(sba->root)) {
		dev_err(sba->dev, "failed to create debugfs root entry\n");
		sba->root = NULL;
		goto skip_debugfs;
	}

	/* Create debugfs stats entry */
	sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
						 sba_debugfs_stats_show);
	if (IS_ERR_OR_NULL(sba->stats))
		dev_err(sba->dev, "failed to create debugfs stats file\n");
skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 sba->mchans_count);

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchans:
	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);
	return ret;
}
static int sba_remove(struct platform_device *pdev)
{
	int i;
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);

	return 0;
}
static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{ },
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");