/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include "rsxx_priv.h"
struct rsxx_dma {
	struct list_head	 list;
	u8			 cmd;
	unsigned int		 laddr;     /* Logical address */
	struct {
		u32		 off;
		u32		 cnt;
	} sub_page;
	dma_addr_t		 dma_addr;
	struct page		 *page;
	unsigned int		 pg_off;    /* Page Offset */
	rsxx_dma_cb		 cb;
	void			 *cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT	msecs_to_jiffies(10000)
enum rsxx_dma_status {
	DMA_SW_ERR    = 0x1,
	DMA_HW_FAULT  = 0x2,
	DMA_CANCELLED = 0x4,
};

struct hw_cmd {
	u8	command;
	u8	tag;
	u8	_rsvd;
	u8	sub_page; /* Bit[0:2]: 512byte offset */
			  /* Bit[4:6]: 512byte count */
	__le32	device_addr;
	__le64	host_addr;
} __packed;
enum rsxx_hw_cmd {
	HW_CMD_BLK_DISCARD	= 0x70,
	HW_CMD_BLK_WRITE	= 0x80,
	HW_CMD_BLK_READ		= 0xC0,
	HW_CMD_BLK_RECON_READ	= 0xE0,
};

enum rsxx_hw_status {
	HW_STATUS_CRC		= 0x01,
	HW_STATUS_HARD_ERR	= 0x02,
	HW_STATUS_SOFT_ERR	= 0x04,
	HW_STATUS_FAULT		= 0x08,
};
static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
	int			next_tag;
	struct rsxx_dma		*dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
		(sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
	spinlock_t		lock;
	int			head;
	struct dma_tracker	list[0];
};
/*----------------- Misc Utility Functions -------------------*/
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
	unsigned long long tgt_addr8;

	tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
		      card->_stripe.upper_mask) |
		    ((addr8) & card->_stripe.lower_mask);
	do_div(tgt_addr8, RSXX_HW_BLK_SIZE);

	return tgt_addr8;
}
static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
	unsigned int tgt;

	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

	return tgt;
}
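
/*
 * Illustrative example (stripe values assumed, not read from hardware):
 * with _stripe.target_shift = 12, _stripe.target_mask = 0x3,
 * _stripe.upper_shift = 2, _stripe.upper_mask = ~0xfff and
 * _stripe.lower_mask = 0xfff, a byte address addr8 = 0x5000 maps to
 * target (0x5000 >> 12) & 0x3 = 1, while rsxx_addr8_to_laddr() folds the
 * upper and lower address bits back together and divides by
 * RSXX_HW_BLK_SIZE to form that target's logical block address.
 */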
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
	/* Reset all DMA Command/Status Queues */
	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}
static unsigned int get_dma_size(struct rsxx_dma *dma)
{
	if (dma->sub_page.cnt)
		return dma->sub_page.cnt << 9;

	return RSXX_HW_BLK_SIZE;
}
/*----------------- DMA Tracker -------------------*/
static void set_tracker_dma(struct dma_tracker_list *trackers,
			    int tag,
			    struct rsxx_dma *dma)
{
	trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
					int tag)
{
	return trackers->list[tag].dma;
}
static int pop_tracker(struct dma_tracker_list *trackers)
{
	int tag;

	spin_lock(&trackers->lock);
	tag = trackers->head;
	if (tag != -1) {
		trackers->head = trackers->list[tag].next_tag;
		trackers->list[tag].next_tag = -1;
	}
	spin_unlock(&trackers->lock);

	return tag;
}
static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
	spin_lock(&trackers->lock);
	trackers->list[tag].next_tag = trackers->head;
	trackers->head = tag;
	trackers->list[tag].dma = NULL;
	spin_unlock(&trackers->lock);
}
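
/*
 * The trackers form a simple LIFO free list of hardware tags: head holds
 * the next free tag, each free entry's next_tag points at the one after
 * it, and a popped entry's next_tag is set to -1 while its command is in
 * flight. pop_tracker() returning -1 means every tag is outstanding.
 */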
/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 */
#define INTR_COAL_LATENCY_MASK       (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT        16
#define INTR_COAL_COUNT_BITS         9
#define INTR_COAL_COUNT_MASK         (((1 << INTR_COAL_COUNT_BITS) - 1) << \
					INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS   64
static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
	u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

	if (mode == RSXX_INTR_COAL_DISABLED)
		return 0;

	return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
			(latency_units & INTR_COAL_LATENCY_MASK);
}
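
/*
 * Worked example of the encoding above (illustrative numbers only):
 * count = 3 and latency = 6400ns give latency_units = 6400 / 64 = 100
 * (0x64), so the value written to INTR_COAL is (3 << 16) | 0x64 =
 * 0x00030064: the count in bits [24:16] and the timer in bits [15:0].
 */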
static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
	u32 intr_coal;
	u32 q_depth = 0;
	int i;

	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
	    unlikely(card->eeh_state))
		return;

	for (i = 0; i < card->n_targets; i++)
		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      q_depth / 2,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);
}
/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
	if (dma->cmd != HW_CMD_BLK_DISCARD) {
		if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
			dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
				       get_dma_size(dma),
				       dma->cmd == HW_CMD_BLK_WRITE ?
				       DMA_TO_DEVICE : DMA_FROM_DEVICE);
		}
	}

	kmem_cache_free(rsxx_dma_pool, dma);
}
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
			      struct rsxx_dma *dma,
			      unsigned int status)
{
	if (status & DMA_SW_ERR)
		ctrl->stats.dma_sw_err++;
	if (status & DMA_HW_FAULT)
		ctrl->stats.dma_hw_fault++;
	if (status & DMA_CANCELLED)
		ctrl->stats.dma_cancelled++;

	if (dma->cb)
		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

	rsxx_free_dma(ctrl, dma);
}
int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
			   struct list_head *q, unsigned int done)
{
	struct rsxx_dma *dma;
	struct rsxx_dma *tmp;
	int cnt = 0;

	list_for_each_entry_safe(dma, tmp, q, list) {
		list_del(&dma->list);
		if (done & COMPLETE_DMA)
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
		else
			rsxx_free_dma(ctrl, dma);
		cnt++;
	}

	return cnt;
}
static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
			     struct rsxx_dma *dma)
{
	/*
	 * Requeued DMAs go to the front of the queue so they are issued
	 * first.
	 */
	spin_lock_bh(&ctrl->queue_lock);
	ctrl->stats.sw_q_depth++;
	list_add(&dma->list, &ctrl->queue);
	spin_unlock_bh(&ctrl->queue_lock);
}
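
/*
 * Error handling note for the function below: a CRC or hard error on a
 * plain block read is retried as a reconstruction read
 * (HW_CMD_BLK_RECON_READ) and requeued at the front of the software queue
 * when scrub_hard is set; the unrecoverable cases are completed back to
 * the caller with DMA_HW_FAULT (or DMA_SW_ERR for an unknown command).
 */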
static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
				  struct rsxx_dma *dma,
				  u8 hw_st)
{
	unsigned int status = 0;
	int requeue_cmd = 0;

	dev_dbg(CARD_TO_DEV(ctrl->card),
		"Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
		dma->cmd, dma->laddr, hw_st);

	if (hw_st & HW_STATUS_CRC)
		ctrl->stats.crc_errors++;
	if (hw_st & HW_STATUS_HARD_ERR)
		ctrl->stats.hard_errors++;
	if (hw_st & HW_STATUS_SOFT_ERR)
		ctrl->stats.soft_errors++;

	switch (dma->cmd) {
	case HW_CMD_BLK_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			if (ctrl->card->scrub_hard) {
				dma->cmd = HW_CMD_BLK_RECON_READ;
				requeue_cmd = 1;
				ctrl->stats.reads_retried++;
			} else {
				status |= DMA_HW_FAULT;
				ctrl->stats.reads_failed++;
			}
		} else if (hw_st & HW_STATUS_FAULT) {
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}
		break;
	case HW_CMD_BLK_RECON_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			/* Data could not be reconstructed. */
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}
		break;
	case HW_CMD_BLK_WRITE:
		status |= DMA_HW_FAULT;
		ctrl->stats.writes_failed++;
		break;
	case HW_CMD_BLK_DISCARD:
		status |= DMA_HW_FAULT;
		ctrl->stats.discards_failed++;
		break;
	default:
		dev_err(CARD_TO_DEV(ctrl->card),
			"Unknown command in DMA!(cmd: x%02x "
			"laddr x%08x st: x%02x)\n",
			dma->cmd, dma->laddr, hw_st);
		status |= DMA_SW_ERR;
		break;
	}

	if (requeue_cmd)
		rsxx_requeue_dma(ctrl, dma);
	else
		rsxx_complete_dma(ctrl, dma, status);
}
static void dma_engine_stalled(struct timer_list *t)
{
	struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
	int cnt;

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
	    unlikely(ctrl->card->eeh_state))
		return;

	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
		/*
		 * The dma engine was stalled because the SW_CMD_IDX write
		 * was lost. Issue it again to recover.
		 */
		dev_warn(CARD_TO_DEV(ctrl->card),
			"SW_CMD_IDX write was lost, re-writing...\n");
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);
	} else {
		dev_warn(CARD_TO_DEV(ctrl->card),
			"DMA channel %d has stalled, faulting interface.\n",
			ctrl->id);

		ctrl->card->dma_fault = 1;

		/* Clean up the DMA queue */
		spin_lock(&ctrl->queue_lock);
		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock(&ctrl->queue_lock);

		cnt += rsxx_dma_cancel(ctrl);

		if (cnt)
			dev_info(CARD_TO_DEV(ctrl->card),
				"Freed %d queued DMAs on channel %d\n",
				cnt, ctrl->id);
	}
}
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int tag;
	int cmds_pending = 0;
	struct hw_cmd *hw_cmd_buf;
	int dir;

	hw_cmd_buf = ctrl->cmd.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	while (1) {
		spin_lock_bh(&ctrl->queue_lock);
		if (list_empty(&ctrl->queue)) {
			spin_unlock_bh(&ctrl->queue_lock);
			break;
		}
		spin_unlock_bh(&ctrl->queue_lock);

		tag = pop_tracker(ctrl->trackers);
		if (tag == -1)
			break;

		spin_lock_bh(&ctrl->queue_lock);
		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
		list_del(&dma->list);
		ctrl->stats.sw_q_depth--;
		spin_unlock_bh(&ctrl->queue_lock);

		/*
		 * This will catch any DMAs that slipped in right before the
		 * fault, but were queued after all the other DMAs were
		 * cancelled.
		 */
		if (unlikely(ctrl->card->dma_fault)) {
			push_tracker(ctrl->trackers, tag);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			continue;
		}

		if (dma->cmd != HW_CMD_BLK_DISCARD) {
			if (dma->cmd == HW_CMD_BLK_WRITE)
				dir = DMA_TO_DEVICE;
			else
				dir = DMA_FROM_DEVICE;

			/*
			 * The function dma_map_page is placed here because we
			 * can only, by design, issue up to 255 commands to the
			 * hardware at one time per DMA channel. So the maximum
			 * amount of mapped memory would be 255 * 4 channels *
			 * 4096 Bytes which is less than 2GB, the limit of a x8
			 * Non-HWWD PCIe slot. This way the dma_map_page
			 * function should never fail because of a lack of
			 * mappable memory.
			 */
			dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
					dma->pg_off, dma->sub_page.cnt << 9, dir);
			if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
				push_tracker(ctrl->trackers, tag);
				rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
				continue;
			}
		}

		set_tracker_dma(ctrl->trackers, tag, dma);
		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
		hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
		hw_cmd_buf[ctrl->cmd.idx].sub_page =
					((dma->sub_page.cnt & 0x7) << 4) |
					 (dma->sub_page.off & 0x7);

		hw_cmd_buf[ctrl->cmd.idx].device_addr =
					cpu_to_le32(dma->laddr);

		hw_cmd_buf[ctrl->cmd.idx].host_addr =
					cpu_to_le64(dma->dma_addr);

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Issue DMA%d(laddr %d tag %d) to idx %d\n",
			ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

		ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
		cmds_pending++;

		if (dma->cmd == HW_CMD_BLK_WRITE)
			ctrl->stats.writes_issued++;
		else if (dma->cmd == HW_CMD_BLK_DISCARD)
			ctrl->stats.discards_issued++;
		else
			ctrl->stats.reads_issued++;
	}

	/* Let HW know we've queued commands. */
	if (cmds_pending) {
		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (unlikely(ctrl->card->eeh_state)) {
			del_timer_sync(&ctrl->activity_timer);
			return;
		}

		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
	}
}
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	unsigned long flags;
	u16 count;
	u8 status;
	u8 tag;
	struct hw_status *hw_st_buf;

	hw_st_buf = ctrl->status.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->dma_fault) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

	while (count == ctrl->e_cnt) {
		/*
		 * The read memory-barrier is necessary to keep aggressive
		 * processors/optimizers (such as the PPC Apple G5) from
		 * reordering the following status-buffer tag & status read
		 * *before* the count read on subsequent iterations of the
		 * loop.
		 */
		rmb();

		status = hw_st_buf[ctrl->status.idx].status;
		tag    = hw_st_buf[ctrl->status.idx].tag;

		dma = get_tracker_dma(ctrl->trackers, tag);
		if (dma == NULL) {
			spin_lock_irqsave(&ctrl->card->irq_lock, flags);
			rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
			spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

			dev_err(CARD_TO_DEV(ctrl->card),
				"No tracker for tag %d "
				"(idx %d id %d)\n",
				tag, ctrl->status.idx, ctrl->id);
			return;
		}

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Completing DMA%d"
			"(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
			ctrl->id, dma->laddr, tag, status, count,
			ctrl->status.idx);

		atomic_dec(&ctrl->stats.hw_q_depth);

		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (status)
			rsxx_handle_dma_error(ctrl, dma, status);
		else
			rsxx_complete_dma(ctrl, dma, 0);

		push_tracker(ctrl->trackers, tag);

		ctrl->status.idx = (ctrl->status.idx + 1) &
				   RSXX_CS_IDX_MASK;
		ctrl->e_cnt++;

		count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
	}

	dma_intr_coal_auto_tune(ctrl->card);

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
		del_timer_sync(&ctrl->activity_timer);

	spin_lock_irqsave(&ctrl->card->irq_lock, flags);
	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

	spin_lock_bh(&ctrl->queue_lock);
	if (ctrl->stats.sw_q_depth)
		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
	spin_unlock_bh(&ctrl->queue_lock);
}
static void rsxx_schedule_issue(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_issue_dmas(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_dma_done(ctrl);
	mutex_unlock(&ctrl->work_lock);
}
static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
				       struct list_head *q,
				       unsigned int laddr,
				       rsxx_dma_cb cb,
				       void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return BLK_STS_RESOURCE;

	dma->cmd          = HW_CMD_BLK_DISCARD;
	dma->laddr        = laddr;
	dma->dma_addr     = 0;
	dma->sub_page.off = 0;
	dma->sub_page.cnt = 0;
	dma->page         = NULL;
	dma->pg_off       = 0;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

	list_add_tail(&dma->list, q);

	return 0;
}
static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
				   struct list_head *q,
				   int dir,
				   unsigned int dma_off,
				   unsigned int dma_len,
				   unsigned int laddr,
				   struct page *page,
				   unsigned int pg_off,
				   rsxx_dma_cb cb,
				   void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return BLK_STS_RESOURCE;

	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr        = laddr;
	dma->sub_page.off = (dma_off >> 9);
	dma->sub_page.cnt = (dma_len >> 9);
	dma->page         = page;
	dma->pg_off       = pg_off;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card),
		"Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
		dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
		dma->sub_page.cnt, dma->page, dma->pg_off);

	/* Queue the DMA entry */
	list_add_tail(&dma->list, q);

	return 0;
}
blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
				struct bio *bio,
				atomic_t *n_dmas,
				rsxx_dma_cb cb,
				void *cb_data)
{
	struct list_head dma_list[RSXX_MAX_TARGETS];
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long long addr8;
	unsigned int laddr;
	unsigned int bv_len;
	unsigned int bv_off;
	unsigned int dma_off;
	unsigned int dma_len;
	int dma_cnt[RSXX_MAX_TARGETS];
	int tgt;
	blk_status_t st;
	int i;

	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
	atomic_set(n_dmas, 0);

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&dma_list[i]);
		dma_cnt[i] = 0;
	}

	if (bio_op(bio) == REQ_OP_DISCARD) {
		bv_len = bio->bi_iter.bi_size;

		while (bv_len > 0) {
			tgt   = rsxx_get_dma_tgt(card, addr8);
			laddr = rsxx_addr8_to_laddr(addr8, card);

			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
						cb, cb_data);
			if (st)
				goto bvec_err;

			dma_cnt[tgt]++;
			atomic_inc(n_dmas);
			addr8  += RSXX_HW_BLK_SIZE;
			bv_len -= RSXX_HW_BLK_SIZE;
		}
	} else {
		bio_for_each_segment(bvec, bio, iter) {
			bv_len = bvec.bv_len;
			bv_off = bvec.bv_offset;

			while (bv_len > 0) {
				tgt   = rsxx_get_dma_tgt(card, addr8);
				laddr = rsxx_addr8_to_laddr(addr8, card);
				dma_off = addr8 & RSXX_HW_BLK_MASK;
				dma_len = min(bv_len,
					      RSXX_HW_BLK_SIZE - dma_off);

				st = rsxx_queue_dma(card, &dma_list[tgt],
						    bio_data_dir(bio),
						    dma_off, dma_len,
						    laddr, bvec.bv_page,
						    bv_off, cb, cb_data);
				if (st)
					goto bvec_err;

				dma_cnt[tgt]++;
				atomic_inc(n_dmas);
				addr8  += dma_len;
				bv_off += dma_len;
				bv_len -= dma_len;
			}
		}
	}

	for (i = 0; i < card->n_targets; i++) {
		if (!list_empty(&dma_list[i])) {
			spin_lock_bh(&card->ctrl[i].queue_lock);
			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
			spin_unlock_bh(&card->ctrl[i].queue_lock);

			queue_work(card->ctrl[i].issue_wq,
				   &card->ctrl[i].issue_dma_work);
		}
	}

	return 0;

bvec_err:
	for (i = 0; i < card->n_targets; i++)
		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
				       FREE_DMA);

	return st;
}
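
/*
 * Summary of the queuing path above: the bio is walked in hardware-block
 * sized pieces, each piece becomes a struct rsxx_dma on the per-target
 * dma_list, and the lists are then spliced onto each channel's software
 * queue under queue_lock before the issue work is kicked. n_dmas is
 * incremented once per queued piece for the caller's accounting.
 */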
/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
	ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
				&ctrl->status.dma_addr, GFP_KERNEL);
	ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
				&ctrl->cmd.dma_addr, GFP_KERNEL);
	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
		return -ENOMEM;

	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_HI);

	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
			 ctrl->status.idx);
		return -EINVAL;
	}

	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
			 ctrl->cmd.idx);
		return -EINVAL;
	}

	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

	return 0;
}
static int rsxx_dma_ctrl_init(struct pci_dev *dev,
			      struct rsxx_dma_ctrl *ctrl)
{
	int i;
	int st;

	memset(&ctrl->stats, 0, sizeof(ctrl->stats));

	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
	if (!ctrl->trackers)
		return -ENOMEM;

	ctrl->trackers->head = 0;
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		ctrl->trackers->list[i].next_tag = i + 1;
		ctrl->trackers->list[i].dma = NULL;
	}
	ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
	spin_lock_init(&ctrl->trackers->lock);

	spin_lock_init(&ctrl->queue_lock);
	mutex_init(&ctrl->work_lock);
	INIT_LIST_HEAD(&ctrl->queue);

	timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);

	ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME "_issue", 0);
	if (!ctrl->issue_wq)
		return -ENOMEM;

	ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME "_done", 0);
	if (!ctrl->done_wq)
		return -ENOMEM;

	INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
	INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

	st = rsxx_hw_buffers_init(dev, ctrl);
	if (st)
		return st;

	return 0;
}
static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
				 unsigned int stripe_size8)
{
	if (!is_power_of_2(stripe_size8)) {
		dev_err(CARD_TO_DEV(card),
			"stripe_size is NOT a power of 2!\n");
		return -EINVAL;
	}

	card->_stripe.lower_mask = stripe_size8 - 1;

	card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
	card->_stripe.upper_shift = ffs(card->n_targets) - 1;

	card->_stripe.target_mask = card->n_targets - 1;
	card->_stripe.target_shift = ffs(stripe_size8) - 1;

	dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
		card->_stripe.lower_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
		card->_stripe.upper_shift);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
		card->_stripe.upper_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
		card->_stripe.target_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
		card->_stripe.target_shift);

	return 0;
}
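
/*
 * Worked example with illustrative values (not read from a card): for
 * stripe_size8 = 4096 and n_targets = 4 this yields lower_mask = 0xfff,
 * upper_mask = ~0xfff, upper_shift = ffs(4) - 1 = 2, target_mask = 0x3 and
 * target_shift = ffs(4096) - 1 = 12, i.e. bits [13:12] of the byte address
 * select the DMA target and the remaining bits form the per-target address.
 */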
int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
	u32 intr_coal;

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      card->config.data.intr_coal.count,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);

	return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}
int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
	unsigned long flags;
	int st;
	int i;

	dev_info(CARD_TO_DEV(card),
		"Initializing %d DMA targets\n",
		card->n_targets);

	/* Regmap is divided up into 4K chunks. One for each DMA channel */
	for (i = 0; i < card->n_targets; i++)
		card->ctrl[i].regmap = card->regmap + (i * 4096);

	/* Reset the DMA queues */
	rsxx_dma_queue_reset(card);

	/************* Setup DMA Control *************/
	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
		if (st)
			goto failed_dma_setup;

		card->ctrl[i].card = card;
		card->ctrl[i].id = i;
	}

	card->scrub_hard = 1;

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Enable the interrupts after all setup has completed. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	return 0;

failed_dma_setup:
	for (i = 0; i < card->n_targets; i++) {
		struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		vfree(ctrl->trackers);

		if (ctrl->status.buf)
			dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
					  ctrl->status.buf,
					  ctrl->status.dma_addr);
		if (ctrl->cmd.buf)
			dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
					  ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}

	return st;
}
int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int i;
	int cnt = 0;

	/* Clean up issued DMAs */
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		dma = get_tracker_dma(ctrl->trackers, i);
		if (dma) {
			atomic_dec(&ctrl->stats.hw_q_depth);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			push_tracker(ctrl->trackers, i);
			cnt++;
		}
	}

	return cnt;
}
void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
	struct rsxx_dma_ctrl *ctrl;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (timer_pending(&ctrl->activity_timer))
			del_timer_sync(&ctrl->activity_timer);

		/* Clean up the DMA queue */
		spin_lock_bh(&ctrl->queue_lock);
		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock_bh(&ctrl->queue_lock);

		rsxx_dma_cancel(ctrl);

		vfree(ctrl->trackers);

		dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
				  ctrl->status.buf, ctrl->status.dma_addr);
		dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
				  ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}
}
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
	int i;
	int j;
	int cnt;
	struct rsxx_dma *dma;
	struct list_head *issued_dmas;

	issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
			      GFP_KERNEL);
	if (!issued_dmas)
		return -ENOMEM;

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&issued_dmas[i]);
		cnt = 0;
		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
			dma = get_tracker_dma(card->ctrl[i].trackers, j);
			if (dma == NULL)
				continue;

			if (dma->cmd == HW_CMD_BLK_WRITE)
				card->ctrl[i].stats.writes_issued--;
			else if (dma->cmd == HW_CMD_BLK_DISCARD)
				card->ctrl[i].stats.discards_issued--;
			else
				card->ctrl[i].stats.reads_issued--;

			if (dma->cmd != HW_CMD_BLK_DISCARD) {
				dma_unmap_page(&card->dev->dev, dma->dma_addr,
					       get_dma_size(dma),
					       dma->cmd == HW_CMD_BLK_WRITE ?
					       DMA_TO_DEVICE : DMA_FROM_DEVICE);
			}

			list_add_tail(&dma->list, &issued_dmas[i]);
			push_tracker(card->ctrl[i].trackers, j);
			cnt++;
		}

		spin_lock_bh(&card->ctrl[i].queue_lock);
		list_splice(&issued_dmas[i], &card->ctrl[i].queue);

		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
		card->ctrl[i].stats.sw_q_depth += cnt;
		card->ctrl[i].e_cnt = 0;
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

	kfree(issued_dmas);

	return 0;
}
int rsxx_dma_init(void)
{
	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
	if (!rsxx_dma_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_dma_cleanup(void)
{
	kmem_cache_destroy(rsxx_dma_pool);
}